// SPDX-License-Identifier: GPL-2.0
/*
 * Common code for the NVMe target.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/pci-p2pdma.h>
#include <linux/scatterlist.h>

#include <generated/utsrelease.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#include "nvmet.h"

struct kmem_cache *nvmet_bvec_cache;
struct workqueue_struct *buffered_io_wq;
struct workqueue_struct *zbd_wq;
static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
static DEFINE_IDA(cntlid_ida);

struct workqueue_struct *nvmet_wq;
EXPORT_SYMBOL_GPL(nvmet_wq);

/*
 * This read/write semaphore is used to synchronize access to configuration
 * information on a target system that will result in discovery log page
 * information change for at least one host.
 * The full list of resources to be protected by this semaphore is:
 *
 * - subsystems list
 * - per-subsystem allowed hosts list
 * - allow_any_host subsystem attribute
 * - nvmet_genctr
 * - the nvmet_transports array
 *
 * When updating any of those lists/structures, the write lock should be
 * obtained, while when reading (populating the discovery log page or checking
 * a host-subsystem link) the read lock is obtained to allow concurrent reads.
 */
DECLARE_RWSEM(nvmet_config_sem);

u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1];
u64 nvmet_ana_chgcnt;
DECLARE_RWSEM(nvmet_ana_sem);

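/*
 * Translate a kernel errno reported by a backend into an NVMe status code
 * and record a best-effort error location for the error log page. For
 * example, -ENOSPC becomes Capacity Exceeded with DNR set, pointing at the
 * "length" field of the I/O command.
 */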
inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno)
{
	switch (errno) {
	case 0:
		return NVME_SC_SUCCESS;
	case -ENOSPC:
		req->error_loc = offsetof(struct nvme_rw_command, length);
		return NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
	case -EREMOTEIO:
		req->error_loc = offsetof(struct nvme_rw_command, slba);
		return NVME_SC_LBA_RANGE | NVME_SC_DNR;
	case -EOPNOTSUPP:
		req->error_loc = offsetof(struct nvme_common_command, opcode);
		switch (req->cmd->common.opcode) {
		case nvme_cmd_dsm:
		case nvme_cmd_write_zeroes:
			return NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR;
		default:
			return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
		}
		break;
	case -ENODATA:
		req->error_loc = offsetof(struct nvme_rw_command, nsid);
		return NVME_SC_ACCESS_DENIED;
	case -EIO:
		fallthrough;
	default:
		req->error_loc = offsetof(struct nvme_common_command, opcode);
		return NVME_SC_INTERNAL | NVME_SC_DNR;
	}
}

u16 nvmet_report_invalid_opcode(struct nvmet_req *req)
{
	pr_debug("unhandled cmd %d on qid %d\n", req->cmd->common.opcode,
		 req->sq->qid);

	req->error_loc = offsetof(struct nvme_common_command, opcode);
	return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
}

static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
		const char *subsysnqn);

u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
		size_t len)
{
	if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
	}
	return 0;
}

u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf, size_t len)
{
	if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
	}
	return 0;
}

u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len)
{
	if (sg_zero_buffer(req->sg, req->sg_cnt, len, off) != len) {
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
	}
	return 0;
}

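/*
 * Return the highest NSID currently in the subsystem. xa_for_each() walks
 * the namespace xarray in ascending index order, so the last entry visited
 * carries the largest NSID; an empty subsystem yields 0.
 */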
static u32 nvmet_max_nsid(struct nvmet_subsys *subsys)
{
	struct nvmet_ns *cur;
	unsigned long idx;
	u32 nsid = 0;

	xa_for_each(&subsys->namespaces, idx, cur)
		nsid = cur->nsid;

	return nsid;
}

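/*
 * Pack an AEN into completion queue entry dword 0: event type in bits 7:0,
 * event information in bits 15:8 and the associated log page in bits 23:16.
 */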
static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
{
	return aen->event_type | (aen->event_info << 8) | (aen->log_page << 16);
}

static void nvmet_async_events_failall(struct nvmet_ctrl *ctrl)
{
	struct nvmet_req *req;

	mutex_lock(&ctrl->lock);
	while (ctrl->nr_async_event_cmds) {
		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR);
		mutex_lock(&ctrl->lock);
	}
	mutex_unlock(&ctrl->lock);
}

static void nvmet_async_events_process(struct nvmet_ctrl *ctrl)
{
	struct nvmet_async_event *aen;
	struct nvmet_req *req;

	mutex_lock(&ctrl->lock);
	while (ctrl->nr_async_event_cmds && !list_empty(&ctrl->async_events)) {
		aen = list_first_entry(&ctrl->async_events,
				       struct nvmet_async_event, entry);
		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
		nvmet_set_result(req, nvmet_async_event_result(aen));

		list_del(&aen->entry);
		kfree(aen);

		mutex_unlock(&ctrl->lock);
		trace_nvmet_async_event(ctrl, req->cqe->result.u32);
		nvmet_req_complete(req, 0);
		mutex_lock(&ctrl->lock);
	}
	mutex_unlock(&ctrl->lock);
}

static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
{
	struct nvmet_async_event *aen, *tmp;

	mutex_lock(&ctrl->lock);
	list_for_each_entry_safe(aen, tmp, &ctrl->async_events, entry) {
		list_del(&aen->entry);
		kfree(aen);
	}
	mutex_unlock(&ctrl->lock);
}

static void nvmet_async_event_work(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl =
		container_of(work, struct nvmet_ctrl, async_event_work);

	nvmet_async_events_process(ctrl);
}

void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
		u8 event_info, u8 log_page)
{
	struct nvmet_async_event *aen;

	aen = kmalloc(sizeof(*aen), GFP_KERNEL);
	if (!aen)
		return;

	aen->event_type = event_type;
	aen->event_info = event_info;
	aen->log_page = log_page;

	mutex_lock(&ctrl->lock);
	list_add_tail(&aen->entry, &ctrl->async_events);
	mutex_unlock(&ctrl->lock);

	queue_work(nvmet_wq, &ctrl->async_event_work);
}

static void nvmet_add_to_changed_ns_log(struct nvmet_ctrl *ctrl, __le32 nsid)
{
	u32 i;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_changed_ns > NVME_MAX_CHANGED_NAMESPACES)
		goto out_unlock;

	for (i = 0; i < ctrl->nr_changed_ns; i++) {
		if (ctrl->changed_ns_list[i] == nsid)
			goto out_unlock;
	}

	if (ctrl->nr_changed_ns == NVME_MAX_CHANGED_NAMESPACES) {
		ctrl->changed_ns_list[0] = cpu_to_le32(0xffffffff);
		ctrl->nr_changed_ns = U32_MAX;
		goto out_unlock;
	}

	ctrl->changed_ns_list[ctrl->nr_changed_ns++] = nsid;
out_unlock:
	mutex_unlock(&ctrl->lock);
}

void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid)
{
	struct nvmet_ctrl *ctrl;

	lockdep_assert_held(&subsys->lock);

	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
		nvmet_add_to_changed_ns_log(ctrl, cpu_to_le32(nsid));
		if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_NS_ATTR))
			continue;
		nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
				NVME_AER_NOTICE_NS_CHANGED,
				NVME_LOG_CHANGED_NS);
	}
}

void nvmet_send_ana_event(struct nvmet_subsys *subsys,
		struct nvmet_port *port)
{
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
		if (port && ctrl->port != port)
			continue;
		if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_ANA_CHANGE))
			continue;
		nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
				NVME_AER_NOTICE_ANA, NVME_LOG_ANA);
	}
	mutex_unlock(&subsys->lock);
}

void nvmet_port_send_ana_event(struct nvmet_port *port)
{
	struct nvmet_subsys_link *p;

	down_read(&nvmet_config_sem);
	list_for_each_entry(p, &port->subsystems, entry)
		nvmet_send_ana_event(p->subsys, port);
	up_read(&nvmet_config_sem);
}

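/*
 * Transport drivers register their ops at module load time. As an
 * illustrative sketch only (the full set of callbacks lives in
 * struct nvmet_fabrics_ops; the my_* handlers are hypothetical):
 *
 *	static const struct nvmet_fabrics_ops my_ops = {
 *		.owner		= THIS_MODULE,
 *		.type		= NVMF_TRTYPE_TCP,
 *		.add_port	= my_add_port,
 *		.remove_port	= my_remove_port,
 *		.queue_response	= my_queue_response,
 *		.delete_ctrl	= my_delete_ctrl,
 *	};
 *
 *	return nvmet_register_transport(&my_ops);
 *
 * Only one transport may own a given trtype at a time; a second
 * registration for the same type fails with -EINVAL.
 */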
int nvmet_register_transport(const struct nvmet_fabrics_ops *ops)
{
	int ret = 0;

	down_write(&nvmet_config_sem);
	if (nvmet_transports[ops->type])
		ret = -EINVAL;
	else
		nvmet_transports[ops->type] = ops;
	up_write(&nvmet_config_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(nvmet_register_transport);

void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops)
{
	down_write(&nvmet_config_sem);
	nvmet_transports[ops->type] = NULL;
	up_write(&nvmet_config_sem);
}
EXPORT_SYMBOL_GPL(nvmet_unregister_transport);

void nvmet_port_del_ctrls(struct nvmet_port *port, struct nvmet_subsys *subsys)
{
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
		if (ctrl->port == port)
			ctrl->ops->delete_ctrl(ctrl);
	}
	mutex_unlock(&subsys->lock);
}

int nvmet_enable_port(struct nvmet_port *port)
{
	const struct nvmet_fabrics_ops *ops;
	int ret;

	lockdep_assert_held(&nvmet_config_sem);

	ops = nvmet_transports[port->disc_addr.trtype];
	if (!ops) {
		up_write(&nvmet_config_sem);
		request_module("nvmet-transport-%d", port->disc_addr.trtype);
		down_write(&nvmet_config_sem);
		ops = nvmet_transports[port->disc_addr.trtype];
		if (!ops) {
			pr_err("transport type %d not supported\n",
				port->disc_addr.trtype);
			return -EINVAL;
		}
	}

	if (!try_module_get(ops->owner))
		return -EINVAL;

	/*
	 * If the user requested PI support and the transport isn't pi capable,
	 * don't enable the port.
	 */
	if (port->pi_enable && !(ops->flags & NVMF_METADATA_SUPPORTED)) {
		pr_err("T10-PI is not supported by transport type %d\n",
			port->disc_addr.trtype);
		ret = -EINVAL;
		goto out_put;
	}

	ret = ops->add_port(port);
	if (ret)
		goto out_put;

	/* If the transport didn't set inline_data_size, then disable it. */
	if (port->inline_data_size < 0)
		port->inline_data_size = 0;

	port->enabled = true;
	port->tr_ops = ops;
	return 0;

out_put:
	module_put(ops->owner);
	return ret;
}

void nvmet_disable_port(struct nvmet_port *port)
{
	const struct nvmet_fabrics_ops *ops;

	lockdep_assert_held(&nvmet_config_sem);

	port->enabled = false;
	port->tr_ops = NULL;

	ops = nvmet_transports[port->disc_addr.trtype];
	ops->remove_port(port);
	module_put(ops->owner);
}

static void nvmet_keep_alive_timer(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvmet_ctrl, ka_work);
	bool reset_tbkas = ctrl->reset_tbkas;

	ctrl->reset_tbkas = false;
	if (reset_tbkas) {
		pr_debug("ctrl %d reschedule traffic based keep-alive timer\n",
			ctrl->cntlid);
		queue_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
		return;
	}

	pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n",
		ctrl->cntlid, ctrl->kato);

	nvmet_ctrl_fatal_error(ctrl);
}

void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	pr_debug("ctrl %d start keep-alive timer for %d secs\n",
		ctrl->cntlid, ctrl->kato);

	queue_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
}

void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	pr_debug("ctrl %d stop keep-alive\n", ctrl->cntlid);

	cancel_delayed_work_sync(&ctrl->ka_work);
}

u16 nvmet_req_find_ns(struct nvmet_req *req)
{
	u32 nsid = le32_to_cpu(req->cmd->common.nsid);

	req->ns = xa_load(&nvmet_req_subsys(req)->namespaces, nsid);
	if (unlikely(!req->ns)) {
		req->error_loc = offsetof(struct nvme_common_command, nsid);
		return NVME_SC_INVALID_NS | NVME_SC_DNR;
	}

	percpu_ref_get(&req->ns->ref);
	return NVME_SC_SUCCESS;
}

static void nvmet_destroy_namespace(struct percpu_ref *ref)
{
	struct nvmet_ns *ns = container_of(ref, struct nvmet_ns, ref);

	complete(&ns->disable_done);
}

void nvmet_put_namespace(struct nvmet_ns *ns)
{
	percpu_ref_put(&ns->ref);
}

static void nvmet_ns_dev_disable(struct nvmet_ns *ns)
{
	nvmet_bdev_ns_disable(ns);
	nvmet_file_ns_disable(ns);
}

static int nvmet_p2pmem_ns_enable(struct nvmet_ns *ns)
{
	int ret;
	struct pci_dev *p2p_dev;

	if (!ns->use_p2pmem)
		return 0;

	if (!ns->bdev) {
		pr_err("peer-to-peer DMA is not supported by non-block device namespaces\n");
		return -EINVAL;
	}

	if (!blk_queue_pci_p2pdma(ns->bdev->bd_disk->queue)) {
		pr_err("peer-to-peer DMA is not supported by the driver of %s\n",
		       ns->device_path);
		return -EINVAL;
	}

	if (ns->p2p_dev) {
		ret = pci_p2pdma_distance(ns->p2p_dev, nvmet_ns_dev(ns), true);
		if (ret < 0)
			return -EINVAL;
	} else {
		/*
		 * Right now we just check that there is p2pmem available so
		 * we can report an error to the user right away if there
		 * is not. We'll find the actual device to use once we
		 * setup the controller when the port's device is available.
		 */

		p2p_dev = pci_p2pmem_find(nvmet_ns_dev(ns));
		if (!p2p_dev) {
			pr_err("no peer-to-peer memory is available for %s\n",
			       ns->device_path);
			return -EINVAL;
		}

		pci_dev_put(p2p_dev);
	}

	return 0;
}

/*
 * Note: ctrl->subsys->lock should be held when calling this function
 */
static void nvmet_p2pmem_ns_add_p2p(struct nvmet_ctrl *ctrl,
		struct nvmet_ns *ns)
{
	struct device *clients[2];
	struct pci_dev *p2p_dev;
	int ret;

	if (!ctrl->p2p_client || !ns->use_p2pmem)
		return;

	if (ns->p2p_dev) {
		ret = pci_p2pdma_distance(ns->p2p_dev, ctrl->p2p_client, true);
		if (ret < 0)
			return;

		p2p_dev = pci_dev_get(ns->p2p_dev);
	} else {
		clients[0] = ctrl->p2p_client;
		clients[1] = nvmet_ns_dev(ns);

		p2p_dev = pci_p2pmem_find_many(clients, ARRAY_SIZE(clients));
		if (!p2p_dev) {
			pr_err("no peer-to-peer memory is available that's supported by %s and %s\n",
			       dev_name(ctrl->p2p_client), ns->device_path);
			return;
		}
	}

	ret = radix_tree_insert(&ctrl->p2p_ns_map, ns->nsid, p2p_dev);
	if (ret < 0)
		pci_dev_put(p2p_dev);

	pr_info("using p2pmem on %s for nsid %d\n", pci_name(p2p_dev),
		ns->nsid);
}

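/*
 * Re-read the backing device/file size. Returns true when the size changed
 * so that the caller can notify hosts (e.g. via a changed-namespace AEN).
 */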
bool nvmet_ns_revalidate(struct nvmet_ns *ns)
{
	loff_t oldsize = ns->size;

	if (ns->bdev)
		nvmet_bdev_ns_revalidate(ns);
	else
		nvmet_file_ns_revalidate(ns);

	return oldsize != ns->size;
}

int nvmet_ns_enable(struct nvmet_ns *ns)
{
	struct nvmet_subsys *subsys = ns->subsys;
	struct nvmet_ctrl *ctrl;
	int ret;

	mutex_lock(&subsys->lock);
	ret = 0;

	if (nvmet_is_passthru_subsys(subsys)) {
		pr_info("cannot enable both passthru and regular namespaces for a single subsystem");
		goto out_unlock;
	}

	if (ns->enabled)
		goto out_unlock;

	ret = -EMFILE;
	if (subsys->nr_namespaces == NVMET_MAX_NAMESPACES)
		goto out_unlock;

	ret = nvmet_bdev_ns_enable(ns);
	if (ret == -ENOTBLK)
		ret = nvmet_file_ns_enable(ns);
	if (ret)
		goto out_unlock;

	ret = nvmet_p2pmem_ns_enable(ns);
	if (ret)
		goto out_dev_disable;

	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		nvmet_p2pmem_ns_add_p2p(ctrl, ns);

	ret = percpu_ref_init(&ns->ref, nvmet_destroy_namespace,
				0, GFP_KERNEL);
	if (ret)
		goto out_dev_put;

	if (ns->nsid > subsys->max_nsid)
		subsys->max_nsid = ns->nsid;

	ret = xa_insert(&subsys->namespaces, ns->nsid, ns, GFP_KERNEL);
	if (ret)
		goto out_restore_subsys_maxnsid;

	subsys->nr_namespaces++;

	nvmet_ns_changed(subsys, ns->nsid);
	ns->enabled = true;
	ret = 0;
out_unlock:
	mutex_unlock(&subsys->lock);
	return ret;

out_restore_subsys_maxnsid:
	subsys->max_nsid = nvmet_max_nsid(subsys);
	percpu_ref_exit(&ns->ref);
out_dev_put:
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
out_dev_disable:
	nvmet_ns_dev_disable(ns);
	goto out_unlock;
}

void nvmet_ns_disable(struct nvmet_ns *ns)
{
	struct nvmet_subsys *subsys = ns->subsys;
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	if (!ns->enabled)
		goto out_unlock;

	ns->enabled = false;
	xa_erase(&ns->subsys->namespaces, ns->nsid);
	if (ns->nsid == subsys->max_nsid)
		subsys->max_nsid = nvmet_max_nsid(subsys);

	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));

	mutex_unlock(&subsys->lock);

	/*
	 * Now that we removed the namespaces from the lookup list, we
	 * can kill the percpu ref and wait for any remaining references
	 * to be dropped, as well as an RCU grace period for anyone only
	 * using the namespace under rcu_read_lock().  Note that we can't
	 * use call_rcu here as we need to ensure the namespaces have
	 * been fully destroyed before unloading the module.
	 */
	percpu_ref_kill(&ns->ref);
	synchronize_rcu();
	wait_for_completion(&ns->disable_done);
	percpu_ref_exit(&ns->ref);

	mutex_lock(&subsys->lock);

	subsys->nr_namespaces--;
	nvmet_ns_changed(subsys, ns->nsid);
	nvmet_ns_dev_disable(ns);
out_unlock:
	mutex_unlock(&subsys->lock);
}

void nvmet_ns_free(struct nvmet_ns *ns)
{
	nvmet_ns_disable(ns);

	down_write(&nvmet_ana_sem);
	nvmet_ana_group_enabled[ns->anagrpid]--;
	up_write(&nvmet_ana_sem);

	kfree(ns->device_path);
	kfree(ns);
}

struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
{
	struct nvmet_ns *ns;

	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
	if (!ns)
		return NULL;

	init_completion(&ns->disable_done);

	ns->nsid = nsid;
	ns->subsys = subsys;

	down_write(&nvmet_ana_sem);
	ns->anagrpid = NVMET_DEFAULT_ANA_GRPID;
	nvmet_ana_group_enabled[ns->anagrpid]++;
	up_write(&nvmet_ana_sem);

	uuid_gen(&ns->uuid);
	ns->buffered_io = false;
	ns->csi = NVME_CSI_NVM;

	return ns;
}

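/*
 * Advance the submission queue head reported to the host. The update is
 * lock-free: try_cmpxchg() loops until the increment (modulo the queue
 * size) is applied without racing against other completions on this SQ.
 */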
static void nvmet_update_sq_head(struct nvmet_req *req)
{
	if (req->sq->size) {
		u32 old_sqhd, new_sqhd;

		old_sqhd = READ_ONCE(req->sq->sqhd);
		do {
			new_sqhd = (old_sqhd + 1) % req->sq->size;
		} while (!try_cmpxchg(&req->sq->sqhd, &old_sqhd, new_sqhd));
	}
	req->cqe->sq_head = cpu_to_le16(req->sq->sqhd & 0x0000FFFF);
}

static void nvmet_set_error(struct nvmet_req *req, u16 status)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_error_slot *new_error_slot;
	unsigned long flags;

	req->cqe->status = cpu_to_le16(status << 1);

	if (!ctrl || req->error_loc == NVMET_NO_ERROR_LOC)
		return;

	spin_lock_irqsave(&ctrl->error_lock, flags);
	ctrl->err_counter++;
	new_error_slot =
		&ctrl->slots[ctrl->err_counter % NVMET_ERROR_LOG_SLOTS];

	new_error_slot->error_count = cpu_to_le64(ctrl->err_counter);
	new_error_slot->sqid = cpu_to_le16(req->sq->qid);
	new_error_slot->cmdid = cpu_to_le16(req->cmd->common.command_id);
	new_error_slot->status_field = cpu_to_le16(status << 1);
	new_error_slot->param_error_location = cpu_to_le16(req->error_loc);
	new_error_slot->lba = cpu_to_le64(req->error_slba);
	new_error_slot->nsid = req->cmd->common.nsid;
	spin_unlock_irqrestore(&ctrl->error_lock, flags);

	/* set the more bit for this request */
	req->cqe->status |= cpu_to_le16(1 << 14);
}

static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
{
	struct nvmet_ns *ns = req->ns;

	if (!req->sq->sqhd_disabled)
		nvmet_update_sq_head(req);
	req->cqe->sq_id = cpu_to_le16(req->sq->qid);
	req->cqe->command_id = req->cmd->common.command_id;

	if (unlikely(status))
		nvmet_set_error(req, status);

	trace_nvmet_req_complete(req);

	req->ops->queue_response(req);
	if (ns)
		nvmet_put_namespace(ns);
}

void nvmet_req_complete(struct nvmet_req *req, u16 status)
{
	struct nvmet_sq *sq = req->sq;

	__nvmet_req_complete(req, status);
	percpu_ref_put(&sq->ref);
}
EXPORT_SYMBOL_GPL(nvmet_req_complete);

void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq,
		u16 qid, u16 size)
{
	cq->qid = qid;
	cq->size = size;
}

void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
		u16 qid, u16 size)
{
	sq->sqhd = 0;
	sq->qid = qid;
	sq->size = size;

	ctrl->sqs[qid] = sq;
}

static void nvmet_confirm_sq(struct percpu_ref *ref)
{
	struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);

	complete(&sq->confirm_done);
}

void nvmet_sq_destroy(struct nvmet_sq *sq)
{
	struct nvmet_ctrl *ctrl = sq->ctrl;

	/*
	 * If this is the admin queue, complete all AERs so that our
	 * queue doesn't have outstanding requests on it.
	 */
	if (ctrl && ctrl->sqs && ctrl->sqs[0] == sq)
		nvmet_async_events_failall(ctrl);
	percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq);
	wait_for_completion(&sq->confirm_done);
	wait_for_completion(&sq->free_done);
	percpu_ref_exit(&sq->ref);
	nvmet_auth_sq_free(sq);

	if (ctrl) {
		/*
		 * The teardown flow may take some time, and the host may not
		 * send us keep-alive during this period, hence reset the
		 * traffic based keep-alive timer so we don't trigger a
		 * controller teardown as a result of a keep-alive expiration.
		 */
		ctrl->reset_tbkas = true;
		sq->ctrl->sqs[sq->qid] = NULL;
		nvmet_ctrl_put(ctrl);
		sq->ctrl = NULL; /* allows reusing the queue later */
	}
}
EXPORT_SYMBOL_GPL(nvmet_sq_destroy);

static void nvmet_sq_free(struct percpu_ref *ref)
{
	struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);

	complete(&sq->free_done);
}

int nvmet_sq_init(struct nvmet_sq *sq)
{
	int ret;

	ret = percpu_ref_init(&sq->ref, nvmet_sq_free, 0, GFP_KERNEL);
	if (ret) {
		pr_err("percpu_ref init failed!\n");
		return ret;
	}
	init_completion(&sq->free_done);
	init_completion(&sq->confirm_done);
	nvmet_auth_sq_init(sq);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmet_sq_init);

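/*
 * Map the port's ANA state for the namespace's ANA group to a path-related
 * status code; optimized and non-optimized states return success.
 */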
static inline u16 nvmet_check_ana_state(struct nvmet_port *port,
		struct nvmet_ns *ns)
{
	enum nvme_ana_state state = port->ana_state[ns->anagrpid];

	if (unlikely(state == NVME_ANA_INACCESSIBLE))
		return NVME_SC_ANA_INACCESSIBLE;
	if (unlikely(state == NVME_ANA_PERSISTENT_LOSS))
		return NVME_SC_ANA_PERSISTENT_LOSS;
	if (unlikely(state == NVME_ANA_CHANGE))
		return NVME_SC_ANA_TRANSITION;
	return 0;
}

static inline u16 nvmet_io_cmd_check_access(struct nvmet_req *req)
{
	if (unlikely(req->ns->readonly)) {
		switch (req->cmd->common.opcode) {
		case nvme_cmd_read:
		case nvme_cmd_flush:
			break;
		default:
			return NVME_SC_NS_WRITE_PROTECTED;
		}
	}

	return 0;
}

static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;
	u16 ret;

	if (nvme_is_fabrics(cmd))
		return nvmet_parse_fabrics_io_cmd(req);

	if (unlikely(!nvmet_check_auth_status(req)))
		return NVME_SC_AUTH_REQUIRED | NVME_SC_DNR;

	ret = nvmet_check_ctrl_status(req);
	if (unlikely(ret))
		return ret;

	if (nvmet_is_passthru_req(req))
		return nvmet_parse_passthru_io_cmd(req);

	ret = nvmet_req_find_ns(req);
	if (unlikely(ret))
		return ret;

	ret = nvmet_check_ana_state(req->port, req->ns);
	if (unlikely(ret)) {
		req->error_loc = offsetof(struct nvme_common_command, nsid);
		return ret;
	}
	ret = nvmet_io_cmd_check_access(req);
	if (unlikely(ret)) {
		req->error_loc = offsetof(struct nvme_common_command, nsid);
		return ret;
	}

	switch (req->ns->csi) {
	case NVME_CSI_NVM:
		if (req->ns->file)
			return nvmet_file_parse_io_cmd(req);
		return nvmet_bdev_parse_io_cmd(req);
	case NVME_CSI_ZNS:
		if (IS_ENABLED(CONFIG_BLK_DEV_ZONED))
			return nvmet_bdev_zns_parse_io_cmd(req);
		return NVME_SC_INVALID_IO_CMD_SET;
	default:
		return NVME_SC_INVALID_IO_CMD_SET;
	}
}

bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
		struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops)
{
	u8 flags = req->cmd->common.flags;
	u16 status;

	req->cq = cq;
	req->sq = sq;
	req->ops = ops;
	req->sg = NULL;
	req->metadata_sg = NULL;
	req->sg_cnt = 0;
	req->metadata_sg_cnt = 0;
	req->transfer_len = 0;
	req->metadata_len = 0;
	req->cqe->status = 0;
	req->cqe->sq_head = 0;
	req->ns = NULL;
	req->error_loc = NVMET_NO_ERROR_LOC;
	req->error_slba = 0;

	/* no support for fused commands yet */
	if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
		req->error_loc = offsetof(struct nvme_common_command, flags);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

	/*
	 * For fabrics, PSDT field shall describe metadata pointer (MPTR) that
	 * contains an address of a single contiguous physical buffer that is
	 * byte aligned.
	 */
	if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF)) {
		req->error_loc = offsetof(struct nvme_common_command, flags);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

	if (unlikely(!req->sq->ctrl))
		/* will return an error for any non-connect command: */
		status = nvmet_parse_connect_cmd(req);
	else if (likely(req->sq->qid != 0))
		status = nvmet_parse_io_cmd(req);
	else
		status = nvmet_parse_admin_cmd(req);

	if (status)
		goto fail;

	trace_nvmet_req_init(req, req->cmd);

	if (unlikely(!percpu_ref_tryget_live(&sq->ref))) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

	if (sq->ctrl)
		sq->ctrl->reset_tbkas = true;

	return true;

fail:
	__nvmet_req_complete(req, status);
	return false;
}
EXPORT_SYMBOL_GPL(nvmet_req_init);

void nvmet_req_uninit(struct nvmet_req *req)
{
	percpu_ref_put(&req->sq->ref);
	if (req->ns)
		nvmet_put_namespace(req->ns);
}
EXPORT_SYMBOL_GPL(nvmet_req_uninit);

bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len)
{
	if (unlikely(len != req->transfer_len)) {
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
		return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(nvmet_check_transfer_len);

bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len)
{
	if (unlikely(data_len > req->transfer_len)) {
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
		return false;
	}

	return true;
}

static unsigned int nvmet_data_transfer_len(struct nvmet_req *req)
{
	return req->transfer_len - req->metadata_len;
}

static int nvmet_req_alloc_p2pmem_sgls(struct pci_dev *p2p_dev,
		struct nvmet_req *req)
{
	req->sg = pci_p2pmem_alloc_sgl(p2p_dev, &req->sg_cnt,
			nvmet_data_transfer_len(req));
	if (!req->sg)
		goto out_err;

	if (req->metadata_len) {
		req->metadata_sg = pci_p2pmem_alloc_sgl(p2p_dev,
				&req->metadata_sg_cnt, req->metadata_len);
		if (!req->metadata_sg)
			goto out_free_sg;
	}

	req->p2p_dev = p2p_dev;

	return 0;
out_free_sg:
	pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
out_err:
	return -ENOMEM;
}

static struct pci_dev *nvmet_req_find_p2p_dev(struct nvmet_req *req)
{
	if (!IS_ENABLED(CONFIG_PCI_P2PDMA) ||
	    !req->sq->ctrl || !req->sq->qid || !req->ns)
		return NULL;
	return radix_tree_lookup(&req->sq->ctrl->p2p_ns_map, req->ns->nsid);
}

int nvmet_req_alloc_sgls(struct nvmet_req *req)
{
	struct pci_dev *p2p_dev = nvmet_req_find_p2p_dev(req);

	if (p2p_dev && !nvmet_req_alloc_p2pmem_sgls(p2p_dev, req))
		return 0;

	req->sg = sgl_alloc(nvmet_data_transfer_len(req), GFP_KERNEL,
			    &req->sg_cnt);
	if (unlikely(!req->sg))
		goto out;

	if (req->metadata_len) {
		req->metadata_sg = sgl_alloc(req->metadata_len, GFP_KERNEL,
					     &req->metadata_sg_cnt);
		if (unlikely(!req->metadata_sg))
			goto out_free;
	}

	return 0;
out_free:
	sgl_free(req->sg);
out:
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(nvmet_req_alloc_sgls);

void nvmet_req_free_sgls(struct nvmet_req *req)
{
	if (req->p2p_dev) {
		pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
		if (req->metadata_sg)
			pci_p2pmem_free_sgl(req->p2p_dev, req->metadata_sg);
		req->p2p_dev = NULL;
	} else {
		sgl_free(req->sg);
		if (req->metadata_sg)
			sgl_free(req->metadata_sg);
	}

	req->sg = NULL;
	req->metadata_sg = NULL;
	req->sg_cnt = 0;
	req->metadata_sg_cnt = 0;
}
EXPORT_SYMBOL_GPL(nvmet_req_free_sgls);

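/*
 * Helpers to pick individual fields out of the Controller Configuration
 * (CC) register using the shift constants from <linux/nvme.h>. For
 * instance, with CC = 0x00460001 (a typical value written by a host),
 * nvmet_cc_en() returns 1 and nvmet_cc_iosqes()/nvmet_cc_iocqes() return
 * 6 and 4, i.e. 64-byte SQEs and 16-byte CQEs.
 */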
static inline bool nvmet_cc_en(u32 cc)
{
	return (cc >> NVME_CC_EN_SHIFT) & 0x1;
}

static inline u8 nvmet_cc_css(u32 cc)
{
	return (cc >> NVME_CC_CSS_SHIFT) & 0x7;
}

static inline u8 nvmet_cc_mps(u32 cc)
{
	return (cc >> NVME_CC_MPS_SHIFT) & 0xf;
}

static inline u8 nvmet_cc_ams(u32 cc)
{
	return (cc >> NVME_CC_AMS_SHIFT) & 0x7;
}

static inline u8 nvmet_cc_shn(u32 cc)
{
	return (cc >> NVME_CC_SHN_SHIFT) & 0x3;
}

static inline u8 nvmet_cc_iosqes(u32 cc)
{
	return (cc >> NVME_CC_IOSQES_SHIFT) & 0xf;
}

static inline u8 nvmet_cc_iocqes(u32 cc)
{
	return (cc >> NVME_CC_IOCQES_SHIFT) & 0xf;
}

static inline bool nvmet_css_supported(u8 cc_css)
{
	switch (cc_css << NVME_CC_CSS_SHIFT) {
	case NVME_CC_CSS_NVM:
	case NVME_CC_CSS_CSI:
		return true;
	default:
		return false;
	}
}

static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
{
	lockdep_assert_held(&ctrl->lock);

	/*
	 * Only I/O controllers should verify iosqes,iocqes.
	 * Strictly speaking, the spec says a discovery controller
	 * should verify iosqes,iocqes are zeroed, however that
	 * would break backwards compatibility, so don't enforce it.
	 */
	if (!nvmet_is_disc_subsys(ctrl->subsys) &&
	    (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES ||
	     nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES)) {
		ctrl->csts = NVME_CSTS_CFS;
		return;
	}

	if (nvmet_cc_mps(ctrl->cc) != 0 ||
	    nvmet_cc_ams(ctrl->cc) != 0 ||
	    !nvmet_css_supported(nvmet_cc_css(ctrl->cc))) {
		ctrl->csts = NVME_CSTS_CFS;
		return;
	}

	ctrl->csts = NVME_CSTS_RDY;

	/*
	 * Controllers that are not yet enabled should not really enforce the
	 * keep alive timeout, but we still want to track a timeout and cleanup
	 * in case a host died before it enabled the controller.  Hence, simply
	 * reset the keep alive timer when the controller is enabled.
	 */
	if (ctrl->kato)
		mod_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
}

static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
{
	lockdep_assert_held(&ctrl->lock);

	/* XXX: tear down queues? */
	ctrl->csts &= ~NVME_CSTS_RDY;
	ctrl->cc = 0;
}

void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new)
{
	u32 old;

	mutex_lock(&ctrl->lock);
	old = ctrl->cc;
	ctrl->cc = new;

	if (nvmet_cc_en(new) && !nvmet_cc_en(old))
		nvmet_start_ctrl(ctrl);
	if (!nvmet_cc_en(new) && nvmet_cc_en(old))
		nvmet_clear_ctrl(ctrl);
	if (nvmet_cc_shn(new) && !nvmet_cc_shn(old)) {
		nvmet_clear_ctrl(ctrl);
		ctrl->csts |= NVME_CSTS_SHST_CMPLT;
	}
	if (!nvmet_cc_shn(new) && nvmet_cc_shn(old))
		ctrl->csts &= ~NVME_CSTS_SHST_CMPLT;
	mutex_unlock(&ctrl->lock);
}

static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
{
	/* command sets supported: NVMe command set: */
	ctrl->cap = (1ULL << 37);
	/* Controller supports one or more I/O Command Sets */
	ctrl->cap |= (1ULL << 43);
	/* CC.EN timeout in 500msec units: */
	ctrl->cap |= (15ULL << 24);
	/* maximum queue entries supported: */
	if (ctrl->ops->get_max_queue_size)
		ctrl->cap |= ctrl->ops->get_max_queue_size(ctrl) - 1;
	else
		ctrl->cap |= NVMET_QUEUE_SIZE - 1;

	if (nvmet_is_passthru_subsys(ctrl->subsys))
		nvmet_passthrough_override_cap(ctrl);
}

struct nvmet_ctrl *nvmet_ctrl_find_get(const char *subsysnqn,
				       const char *hostnqn, u16 cntlid,
				       struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = NULL;
	struct nvmet_subsys *subsys;

	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
	if (!subsys) {
		pr_warn("connect request for invalid subsystem %s!\n",
			subsysnqn);
		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
		goto out;
	}

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
		if (ctrl->cntlid == cntlid) {
			if (strncmp(hostnqn, ctrl->hostnqn, NVMF_NQN_SIZE)) {
				pr_warn("hostnqn mismatch.\n");
				continue;
			}
			if (!kref_get_unless_zero(&ctrl->ref))
				continue;

			/* ctrl found */
			goto found;
		}
	}

	ctrl = NULL; /* ctrl not found */
	pr_warn("could not find controller %d for subsys %s / host %s\n",
		cntlid, subsysnqn, hostnqn);
	req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);

found:
	mutex_unlock(&subsys->lock);
	nvmet_subsys_put(subsys);
out:
	return ctrl;
}

u16 nvmet_check_ctrl_status(struct nvmet_req *req)
{
	if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) {
		pr_err("got cmd %d while CC.EN == 0 on qid = %d\n",
		       req->cmd->common.opcode, req->sq->qid);
		return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
	}

	if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
		pr_err("got cmd %d while CSTS.RDY == 0 on qid = %d\n",
		       req->cmd->common.opcode, req->sq->qid);
		return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
	}

	if (unlikely(!nvmet_check_auth_status(req))) {
		pr_warn("qid %d not authenticated\n", req->sq->qid);
		return NVME_SC_AUTH_REQUIRED | NVME_SC_DNR;
	}
	return 0;
}

bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn)
{
	struct nvmet_host_link *p;

	lockdep_assert_held(&nvmet_config_sem);

	if (subsys->allow_any_host)
		return true;

	if (nvmet_is_disc_subsys(subsys)) /* allow all access to disc subsys */
		return true;

	list_for_each_entry(p, &subsys->hosts, entry) {
		if (!strcmp(nvmet_host_name(p->host), hostnqn))
			return true;
	}

	return false;
}

/*
 * Note: ctrl->subsys->lock should be held when calling this function
 */
static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl,
		struct nvmet_req *req)
{
	struct nvmet_ns *ns;
	unsigned long idx;

	if (!req->p2p_client)
		return;

	ctrl->p2p_client = get_device(req->p2p_client);

	xa_for_each(&ctrl->subsys->namespaces, idx, ns)
		nvmet_p2pmem_ns_add_p2p(ctrl, ns);
}

/*
 * Note: ctrl->subsys->lock should be held when calling this function
 */
static void nvmet_release_p2p_ns_map(struct nvmet_ctrl *ctrl)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	radix_tree_for_each_slot(slot, &ctrl->p2p_ns_map, &iter, 0)
		pci_dev_put(radix_tree_deref_slot(slot));

	put_device(ctrl->p2p_client);
}

static void nvmet_fatal_error_handler(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl =
		container_of(work, struct nvmet_ctrl, fatal_err_work);

	pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid);
	ctrl->ops->delete_ctrl(ctrl);
}

u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp)
{
	struct nvmet_subsys *subsys;
	struct nvmet_ctrl *ctrl;
	int ret;
	u16 status;

	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
	if (!subsys) {
		pr_warn("connect request for invalid subsystem %s!\n",
			subsysnqn);
		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		goto out;
	}

	down_read(&nvmet_config_sem);
	if (!nvmet_host_allowed(subsys, hostnqn)) {
		pr_info("connect by host %s for subsystem %s not allowed\n",
			hostnqn, subsysnqn);
		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
		up_read(&nvmet_config_sem);
		status = NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR;
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		goto out_put_subsystem;
	}
	up_read(&nvmet_config_sem);

	status = NVME_SC_INTERNAL;
	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		goto out_put_subsystem;
	mutex_init(&ctrl->lock);

	ctrl->port = req->port;
	ctrl->ops = req->ops;

#ifdef CONFIG_NVME_TARGET_PASSTHRU
	/* Loop targets clear IDs by default */
	if (ctrl->port->disc_addr.trtype == NVMF_TRTYPE_LOOP)
		subsys->clear_ids = 1;
#endif

	INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
	INIT_LIST_HEAD(&ctrl->async_events);
	INIT_RADIX_TREE(&ctrl->p2p_ns_map, GFP_KERNEL);
	INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
	INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer);

	memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
	memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);

	kref_init(&ctrl->ref);
	ctrl->subsys = subsys;
	nvmet_init_cap(ctrl);
	WRITE_ONCE(ctrl->aen_enabled, NVMET_AEN_CFG_OPTIONAL);

	ctrl->changed_ns_list = kmalloc_array(NVME_MAX_CHANGED_NAMESPACES,
			sizeof(__le32), GFP_KERNEL);
	if (!ctrl->changed_ns_list)
		goto out_free_ctrl;

	ctrl->sqs = kcalloc(subsys->max_qid + 1,
			sizeof(struct nvmet_sq *),
			GFP_KERNEL);
	if (!ctrl->sqs)
		goto out_free_changed_ns_list;

	if (subsys->cntlid_min > subsys->cntlid_max)
		goto out_free_sqs;

	ret = ida_alloc_range(&cntlid_ida,
			     subsys->cntlid_min, subsys->cntlid_max,
			     GFP_KERNEL);
	if (ret < 0) {
		status = NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
		goto out_free_sqs;
	}
	ctrl->cntlid = ret;

	/*
	 * Discovery controllers may use some arbitrary high value
	 * in order to cleanup stale discovery sessions
	 */
	if (nvmet_is_disc_subsys(ctrl->subsys) && !kato)
		kato = NVMET_DISC_KATO_MS;

	/* keep-alive timeout in seconds */
	ctrl->kato = DIV_ROUND_UP(kato, 1000);

	ctrl->err_counter = 0;
	spin_lock_init(&ctrl->error_lock);

	nvmet_start_keep_alive_timer(ctrl);

	mutex_lock(&subsys->lock);
	list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
	nvmet_setup_p2p_ns_map(ctrl, req);
	mutex_unlock(&subsys->lock);

	*ctrlp = ctrl;
	return 0;

out_free_sqs:
	kfree(ctrl->sqs);
out_free_changed_ns_list:
	kfree(ctrl->changed_ns_list);
out_free_ctrl:
	kfree(ctrl);
out_put_subsystem:
	nvmet_subsys_put(subsys);
out:
	return status;
}

static void nvmet_ctrl_free(struct kref *ref)
{
	struct nvmet_ctrl *ctrl = container_of(ref, struct nvmet_ctrl, ref);
	struct nvmet_subsys *subsys = ctrl->subsys;

	mutex_lock(&subsys->lock);
	nvmet_release_p2p_ns_map(ctrl);
	list_del(&ctrl->subsys_entry);
	mutex_unlock(&subsys->lock);

	nvmet_stop_keep_alive_timer(ctrl);

	flush_work(&ctrl->async_event_work);
	cancel_work_sync(&ctrl->fatal_err_work);

	nvmet_destroy_auth(ctrl);

	ida_free(&cntlid_ida, ctrl->cntlid);

	nvmet_async_events_free(ctrl);
	kfree(ctrl->sqs);
	kfree(ctrl->changed_ns_list);
	kfree(ctrl);

	nvmet_subsys_put(subsys);
}

void nvmet_ctrl_put(struct nvmet_ctrl *ctrl)
{
	kref_put(&ctrl->ref, nvmet_ctrl_free);
}

void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
{
	mutex_lock(&ctrl->lock);
	if (!(ctrl->csts & NVME_CSTS_CFS)) {
		ctrl->csts |= NVME_CSTS_CFS;
		queue_work(nvmet_wq, &ctrl->fatal_err_work);
	}
	mutex_unlock(&ctrl->lock);
}
EXPORT_SYMBOL_GPL(nvmet_ctrl_fatal_error);

static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
		const char *subsysnqn)
{
	struct nvmet_subsys_link *p;

	if (!port)
		return NULL;

	if (!strcmp(NVME_DISC_SUBSYS_NAME, subsysnqn)) {
		if (!kref_get_unless_zero(&nvmet_disc_subsys->ref))
			return NULL;
		return nvmet_disc_subsys;
	}

	down_read(&nvmet_config_sem);
	list_for_each_entry(p, &port->subsystems, entry) {
		if (!strncmp(p->subsys->subsysnqn, subsysnqn,
				NVMF_NQN_SIZE)) {
			if (!kref_get_unless_zero(&p->subsys->ref))
				break;
			up_read(&nvmet_config_sem);
			return p->subsys;
		}
	}
	up_read(&nvmet_config_sem);
	return NULL;
}

struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
		enum nvme_subsys_type type)
{
	struct nvmet_subsys *subsys;
	char serial[NVMET_SN_MAX_SIZE / 2];
	int ret;

	subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
	if (!subsys)
		return ERR_PTR(-ENOMEM);

	subsys->ver = NVMET_DEFAULT_VS;
	/* generate a random serial number as our controllers are ephemeral: */
	get_random_bytes(&serial, sizeof(serial));
	bin2hex(subsys->serial, &serial, sizeof(serial));

	subsys->model_number = kstrdup(NVMET_DEFAULT_CTRL_MODEL, GFP_KERNEL);
	if (!subsys->model_number) {
		ret = -ENOMEM;
		goto free_subsys;
	}

	subsys->ieee_oui = 0;

	subsys->firmware_rev = kstrndup(UTS_RELEASE, NVMET_FR_MAX_SIZE, GFP_KERNEL);
	if (!subsys->firmware_rev) {
		ret = -ENOMEM;
		goto free_mn;
	}

	switch (type) {
	case NVME_NQN_NVME:
		subsys->max_qid = NVMET_NR_QUEUES;
		break;
	case NVME_NQN_DISC:
	case NVME_NQN_CURR:
		subsys->max_qid = 0;
		break;
	default:
		pr_err("%s: Unknown Subsystem type - %d\n", __func__, type);
		ret = -EINVAL;
		goto free_fr;
	}
	subsys->type = type;
	subsys->subsysnqn = kstrndup(subsysnqn, NVMF_NQN_SIZE,
			GFP_KERNEL);
	if (!subsys->subsysnqn) {
		ret = -ENOMEM;
		goto free_fr;
	}
	subsys->cntlid_min = NVME_CNTLID_MIN;
	subsys->cntlid_max = NVME_CNTLID_MAX;
	kref_init(&subsys->ref);

	mutex_init(&subsys->lock);
	xa_init(&subsys->namespaces);
	INIT_LIST_HEAD(&subsys->ctrls);
	INIT_LIST_HEAD(&subsys->hosts);

	return subsys;

free_fr:
	kfree(subsys->firmware_rev);
free_mn:
	kfree(subsys->model_number);
free_subsys:
	kfree(subsys);
	return ERR_PTR(ret);
}

static void nvmet_subsys_free(struct kref *ref)
{
	struct nvmet_subsys *subsys =
		container_of(ref, struct nvmet_subsys, ref);

	WARN_ON_ONCE(!xa_empty(&subsys->namespaces));

	xa_destroy(&subsys->namespaces);
	nvmet_passthru_subsys_free(subsys);

	kfree(subsys->subsysnqn);
	kfree(subsys->model_number);
	kfree(subsys->firmware_rev);
	kfree(subsys);
}

void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys)
{
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		ctrl->ops->delete_ctrl(ctrl);
	mutex_unlock(&subsys->lock);
}

void nvmet_subsys_put(struct nvmet_subsys *subsys)
{
	kref_put(&subsys->ref, nvmet_subsys_free);
}

static int __init nvmet_init(void)
{
	int error = -ENOMEM;

	nvmet_ana_group_enabled[NVMET_DEFAULT_ANA_GRPID] = 1;

	nvmet_bvec_cache = kmem_cache_create("nvmet-bvec",
			NVMET_MAX_MPOOL_BVEC * sizeof(struct bio_vec), 0,
			SLAB_HWCACHE_ALIGN, NULL);
	if (!nvmet_bvec_cache)
		return -ENOMEM;

	zbd_wq = alloc_workqueue("nvmet-zbd-wq", WQ_MEM_RECLAIM, 0);
	if (!zbd_wq)
		goto out_destroy_bvec_cache;

	buffered_io_wq = alloc_workqueue("nvmet-buffered-io-wq",
			WQ_MEM_RECLAIM, 0);
	if (!buffered_io_wq)
		goto out_free_zbd_work_queue;

	nvmet_wq = alloc_workqueue("nvmet-wq", WQ_MEM_RECLAIM, 0);
	if (!nvmet_wq)
		goto out_free_buffered_work_queue;

	error = nvmet_init_discovery();
	if (error)
		goto out_free_nvmet_work_queue;

	error = nvmet_init_configfs();
	if (error)
		goto out_exit_discovery;
	return 0;

out_exit_discovery:
	nvmet_exit_discovery();
out_free_nvmet_work_queue:
	destroy_workqueue(nvmet_wq);
out_free_buffered_work_queue:
	destroy_workqueue(buffered_io_wq);
out_free_zbd_work_queue:
	destroy_workqueue(zbd_wq);
out_destroy_bvec_cache:
	kmem_cache_destroy(nvmet_bvec_cache);
	return error;
}

static void __exit nvmet_exit(void)
{
	nvmet_exit_configfs();
	nvmet_exit_discovery();
	ida_destroy(&cntlid_ida);
	destroy_workqueue(nvmet_wq);
	destroy_workqueue(buffered_io_wq);
	destroy_workqueue(zbd_wq);
	kmem_cache_destroy(nvmet_bvec_cache);

	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024);
	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024);
}

module_init(nvmet_init);
module_exit(nvmet_exit);

MODULE_LICENSE("GPL v2");