// SPDX-License-Identifier: GPL-2.0
/*
 * Apple ANS NVM Express device driver
 * Copyright The Asahi Linux Contributors
 *
 * Based on the pci.c NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 * and on the rdma.c NVMe over Fabrics RDMA host code.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */

#include <linux/async.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/jiffies.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/once.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/soc/apple/rtkit.h>
#include <linux/soc/apple/sart.h>
#include <linux/reset.h>
#include <linux/time64.h>

#include "nvme.h"

#define APPLE_ANS_BOOT_TIMEOUT	  USEC_PER_SEC
#define APPLE_ANS_MAX_QUEUE_DEPTH 64

#define APPLE_ANS_COPROC_CPU_CONTROL	 0x44
#define APPLE_ANS_COPROC_CPU_CONTROL_RUN BIT(4)

#define APPLE_ANS_ACQ_DB  0x1004
#define APPLE_ANS_IOCQ_DB 0x100c

#define APPLE_ANS_MAX_PEND_CMDS_CTRL 0x1210

#define APPLE_ANS_BOOT_STATUS	 0x1300
#define APPLE_ANS_BOOT_STATUS_OK 0xde71ce55

#define APPLE_ANS_UNKNOWN_CTRL	 0x24008
#define APPLE_ANS_PRP_NULL_CHECK BIT(11)

#define APPLE_ANS_LINEAR_SQ_CTRL 0x24908
#define APPLE_ANS_LINEAR_SQ_EN	 BIT(0)

#define APPLE_ANS_LINEAR_ASQ_DB	 0x2490c
#define APPLE_ANS_LINEAR_IOSQ_DB 0x24910

#define APPLE_NVMMU_NUM_TCBS	  0x28100
#define APPLE_NVMMU_ASQ_TCB_BASE  0x28108
#define APPLE_NVMMU_IOSQ_TCB_BASE 0x28110
#define APPLE_NVMMU_TCB_INVAL	  0x28118
#define APPLE_NVMMU_TCB_STAT	  0x28120

/*
 * This controller is a bit weird in the way command tags work: both the
 * admin and the IO queue share the same tag space. Additionally, tags
 * cannot be higher than 0x40 which effectively limits the combined
 * queue depth to 0x40. Instead of wasting half of that on the admin queue,
 * which gets much less traffic, we reduce its size here.
 * The controller also doesn't support async events, so no space needs to
 * be reserved for NVME_NR_AEN_COMMANDS.
 */
#define APPLE_NVME_AQ_DEPTH	   2
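/* one admin tag is held back so the admin queue never hits the NVMe full condition */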
#define APPLE_NVME_AQ_MQ_TAG_DEPTH (APPLE_NVME_AQ_DEPTH - 1)

/*
 * These can be higher, but we need to ensure that any command doesn't
 * require an sg allocation that needs more than a page of data.
 */
#define NVME_MAX_KB_SZ 4096
#define NVME_MAX_SEGS  127

/*
 * This controller comes with an embedded IOMMU known as NVMMU.
 * The NVMMU points to an array of TCBs indexed by the command tag.
 * Each command must be configured inside this structure before it's allowed
 * to execute, including commands that don't require DMA transfers.
 *
 * Apple's vendor-specific commands (opcode 0xD8 on the admin queue) are an
 * exception: they must still be added to the NVMMU, but their DMA buffers
 * cannot be represented as PRPs and must instead be allowed using SART.
 *
 * Programming the PRPs to the same values as those in the submission queue
 * looks rather silly at first. This hardware is however designed for a kernel
 * that runs the NVMMU code in a higher exception level than the NVMe driver.
 * In that setting the NVMe driver first programs the submission queue entry
 * and then executes a hypercall to the code that is allowed to program the
 * NVMMU. The NVMMU driver then creates a shadow copy of the PRPs while
 * verifying that they don't point to kernel text, data, pagetables, or similar
 * protected areas before programming the TCB to point to this shadow copy.
 * Since Linux doesn't do any of that we may as well just point both the queue
 * and the TCB PRP pointer to the same memory.
 */
struct apple_nvmmu_tcb {
	u8 opcode;

#define APPLE_ANS_TCB_DMA_FROM_DEVICE BIT(0)
#define APPLE_ANS_TCB_DMA_TO_DEVICE   BIT(1)
	u8 dma_flags;

	u8 command_id;
	u8 _unk0;
	__le16 length;
	u8 _unk1[18];
	__le64 prp1;
	__le64 prp2;
	u8 _unk2[16];
	u8 aes_iv[8];
	u8 _aes_unk[64];
};

/*
 * The Apple NVMe controller only supports a single admin and a single IO queue
 * which are both limited to 64 entries and share a single interrupt.
 *
 * The completion queue works as usual. The submission "queue" instead is
 * an array indexed by the command tag on this hardware. Commands must also be
 * present in the NVMMU's tcb array. They are triggered by writing their tag to
 * an MMIO register.
 */
struct apple_nvme_queue {
	struct nvme_command *sqes;
	struct nvme_completion *cqes;
	struct apple_nvmmu_tcb *tcbs;

	dma_addr_t sq_dma_addr;
	dma_addr_t cq_dma_addr;
	dma_addr_t tcb_dma_addr;

	u32 __iomem *sq_db;
	u32 __iomem *cq_db;

	u16 cq_head;
	u8 cq_phase;

	bool is_adminq;
	bool enabled;
};

/*
 * The apple_nvme_iod describes the data in an I/O.
 *
 * The sg pointer contains the list of PRP chunk allocations in addition
 * to the actual struct scatterlist.
 */
struct apple_nvme_iod {
	struct nvme_request req;
	struct nvme_command cmd;
	struct apple_nvme_queue *q;
	int npages; /* In the PRP list. 0 means small pool in use */
	int nents; /* Used in scatterlist */
	dma_addr_t first_dma;
	unsigned int dma_len; /* length of single DMA segment mapping */
	struct scatterlist *sg;
};

struct apple_nvme {
	struct device *dev;

	void __iomem *mmio_coproc;
	void __iomem *mmio_nvme;

	struct device **pd_dev;
	struct device_link **pd_link;
	int pd_count;

	struct apple_sart *sart;
	struct apple_rtkit *rtk;
	struct reset_control *reset;

	struct dma_pool *prp_page_pool;
	struct dma_pool *prp_small_pool;
	mempool_t *iod_mempool;

	struct nvme_ctrl ctrl;
	struct work_struct remove_work;

	struct apple_nvme_queue adminq;
	struct apple_nvme_queue ioq;

	struct blk_mq_tag_set admin_tagset;
	struct blk_mq_tag_set tagset;

	int irq;
	spinlock_t lock;
};

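/* both layouts are fixed hardware ABI: SQ entries are 64 bytes and TCB slots 128 bytes */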
static_assert(sizeof(struct nvme_command) == 64);
static_assert(sizeof(struct apple_nvmmu_tcb) == 128);

static inline struct apple_nvme *ctrl_to_apple_nvme(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct apple_nvme, ctrl);
}

static inline struct apple_nvme *queue_to_apple_nvme(struct apple_nvme_queue *q)
{
	if (q->is_adminq)
		return container_of(q, struct apple_nvme, adminq);

	return container_of(q, struct apple_nvme, ioq);
}

static unsigned int apple_nvme_queue_depth(struct apple_nvme_queue *q)
{
	if (q->is_adminq)
		return APPLE_NVME_AQ_DEPTH;

	return APPLE_ANS_MAX_QUEUE_DEPTH;
}

static void apple_nvme_rtkit_crashed(void *cookie)
{
	struct apple_nvme *anv = cookie;

	dev_warn(anv->dev, "RTKit crashed; unable to recover without a reboot");
	nvme_reset_ctrl(&anv->ctrl);
}

static int apple_nvme_sart_dma_setup(void *cookie,
				     struct apple_rtkit_shmem *bfr)
{
	struct apple_nvme *anv = cookie;
	int ret;

	if (bfr->iova)
		return -EINVAL;
	if (!bfr->size)
		return -EINVAL;

	bfr->buffer =
		dma_alloc_coherent(anv->dev, bfr->size, &bfr->iova, GFP_KERNEL);
	if (!bfr->buffer)
		return -ENOMEM;

	ret = apple_sart_add_allowed_region(anv->sart, bfr->iova, bfr->size);
	if (ret) {
		dma_free_coherent(anv->dev, bfr->size, bfr->buffer, bfr->iova);
		bfr->buffer = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void apple_nvme_sart_dma_destroy(void *cookie,
					struct apple_rtkit_shmem *bfr)
{
	struct apple_nvme *anv = cookie;

	apple_sart_remove_allowed_region(anv->sart, bfr->iova, bfr->size);
	dma_free_coherent(anv->dev, bfr->size, bfr->buffer, bfr->iova);
}

static const struct apple_rtkit_ops apple_nvme_rtkit_ops = {
	.crashed = apple_nvme_rtkit_crashed,
	.shmem_setup = apple_nvme_sart_dma_setup,
	.shmem_destroy = apple_nvme_sart_dma_destroy,
};

static void apple_nvmmu_inval(struct apple_nvme_queue *q, unsigned int tag)
{
	struct apple_nvme *anv = queue_to_apple_nvme(q);

	writel(tag, anv->mmio_nvme + APPLE_NVMMU_TCB_INVAL);
	if (readl(anv->mmio_nvme + APPLE_NVMMU_TCB_STAT))
		dev_warn_ratelimited(anv->dev,
				     "NVMMU TCB invalidation failed\n");
}

static void apple_nvme_submit_cmd(struct apple_nvme_queue *q,
				  struct nvme_command *cmd)
{
	struct apple_nvme *anv = queue_to_apple_nvme(q);
	u32 tag = nvme_tag_from_cid(cmd->common.command_id);
	struct apple_nvmmu_tcb *tcb = &q->tcbs[tag];

	tcb->opcode = cmd->common.opcode;
	tcb->prp1 = cmd->common.dptr.prp1;
	tcb->prp2 = cmd->common.dptr.prp2;
	tcb->length = cmd->rw.length;
	tcb->command_id = tag;

	if (nvme_is_write(cmd))
		tcb->dma_flags = APPLE_ANS_TCB_DMA_TO_DEVICE;
	else
		tcb->dma_flags = APPLE_ANS_TCB_DMA_FROM_DEVICE;

	memcpy(&q->sqes[tag], cmd, sizeof(*cmd));

	/*
	 * This lock here doesn't make much sense at first glance but
	 * removing it will result in occasional missed completion
	 * interrupts even though the commands still appear on the CQ.
	 * It's unclear why this happens but our best guess is that
	 * there is a bug in the firmware triggered when a new command
	 * is issued while we're inside the irq handler between the
	 * NVMMU invalidation (and making the tag available again)
	 * and the final CQ update.
	 */
	spin_lock_irq(&anv->lock);
	writel(tag, q->sq_db);
	spin_unlock_irq(&anv->lock);
}

/*
 * From pci.c:
 * Will slightly overestimate the number of pages needed. This is OK
 * as it only leads to a small amount of wasted memory for the lifetime of
 * the I/O.
 */
static inline size_t apple_nvme_iod_alloc_size(void)
{
	const unsigned int nprps = DIV_ROUND_UP(
		NVME_MAX_KB_SZ + NVME_CTRL_PAGE_SIZE, NVME_CTRL_PAGE_SIZE);
	const int npages = DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
	const size_t alloc_size = sizeof(__le64 *) * npages +
				  sizeof(struct scatterlist) * NVME_MAX_SEGS;

	return alloc_size;
}

static void **apple_nvme_iod_list(struct request *req)
{
	struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);

	return (void **)(iod->sg + blk_rq_nr_phys_segments(req));
}

static void apple_nvme_free_prps(struct apple_nvme *anv, struct request *req)
{
	const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1;
	struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
	dma_addr_t dma_addr = iod->first_dma;
	int i;

	for (i = 0; i < iod->npages; i++) {
		__le64 *prp_list = apple_nvme_iod_list(req)[i];
		dma_addr_t next_dma_addr = le64_to_cpu(prp_list[last_prp]);

		dma_pool_free(anv->prp_page_pool, prp_list, dma_addr);
		dma_addr = next_dma_addr;
	}
}

static void apple_nvme_unmap_data(struct apple_nvme *anv, struct request *req)
{
	struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);

	if (iod->dma_len) {
		dma_unmap_page(anv->dev, iod->first_dma, iod->dma_len,
			       rq_dma_dir(req));
		return;
	}

	WARN_ON_ONCE(!iod->nents);

	dma_unmap_sg(anv->dev, iod->sg, iod->nents, rq_dma_dir(req));
	if (iod->npages == 0)
		dma_pool_free(anv->prp_small_pool, apple_nvme_iod_list(req)[0],
			      iod->first_dma);
	else
		apple_nvme_free_prps(anv, req);
	mempool_free(iod->sg, anv->iod_mempool);
}

static void apple_nvme_print_sgl(struct scatterlist *sgl, int nents)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, nents, i) {
		dma_addr_t phys = sg_phys(sg);

		pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d dma_address:%pad dma_length:%d\n",
			i, &phys, sg->offset, sg->length, &sg_dma_address(sg),
			sg_dma_len(sg));
	}
}

static blk_status_t apple_nvme_setup_prps(struct apple_nvme *anv,
					  struct request *req,
					  struct nvme_rw_command *cmnd)
{
	struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct dma_pool *pool;
	int length = blk_rq_payload_bytes(req);
	struct scatterlist *sg = iod->sg;
	int dma_len = sg_dma_len(sg);
	u64 dma_addr = sg_dma_address(sg);
	int offset = dma_addr & (NVME_CTRL_PAGE_SIZE - 1);
	__le64 *prp_list;
	void **list = apple_nvme_iod_list(req);
	dma_addr_t prp_dma;
	int nprps, i;

	length -= (NVME_CTRL_PAGE_SIZE - offset);
	if (length <= 0) {
		iod->first_dma = 0;
		goto done;
	}

	dma_len -= (NVME_CTRL_PAGE_SIZE - offset);
	if (dma_len) {
		dma_addr += (NVME_CTRL_PAGE_SIZE - offset);
	} else {
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}

	if (length <= NVME_CTRL_PAGE_SIZE) {
		iod->first_dma = dma_addr;
		goto done;
	}

	nprps = DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE);
	if (nprps <= (256 / 8)) {
		pool = anv->prp_small_pool;
		iod->npages = 0;
	} else {
		pool = anv->prp_page_pool;
		iod->npages = 1;
	}

	prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
	if (!prp_list) {
		iod->first_dma = dma_addr;
		iod->npages = -1;
		return BLK_STS_RESOURCE;
	}
	list[0] = prp_list;
	iod->first_dma = prp_dma;
	i = 0;
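	/*
	 * Fill in the PRP list one entry at a time; whenever a list page
	 * fills up, its last slot is repurposed as a pointer chaining to a
	 * freshly allocated next page.
	 */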
	for (;;) {
		if (i == NVME_CTRL_PAGE_SIZE >> 3) {
			__le64 *old_prp_list = prp_list;

			prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
			if (!prp_list)
				goto free_prps;
			list[iod->npages++] = prp_list;
			prp_list[0] = old_prp_list[i - 1];
			old_prp_list[i - 1] = cpu_to_le64(prp_dma);
			i = 1;
		}
		prp_list[i++] = cpu_to_le64(dma_addr);
		dma_len -= NVME_CTRL_PAGE_SIZE;
		dma_addr += NVME_CTRL_PAGE_SIZE;
		length -= NVME_CTRL_PAGE_SIZE;
		if (length <= 0)
			break;
		if (dma_len > 0)
			continue;
		if (unlikely(dma_len < 0))
			goto bad_sgl;
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}
done:
	cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
	cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma);
	return BLK_STS_OK;
free_prps:
	apple_nvme_free_prps(anv, req);
	return BLK_STS_RESOURCE;
bad_sgl:
	WARN(DO_ONCE(apple_nvme_print_sgl, iod->sg, iod->nents),
	     "Invalid SGL for payload:%d nents:%d\n", blk_rq_payload_bytes(req),
	     iod->nents);
	return BLK_STS_IOERR;
}

static blk_status_t apple_nvme_setup_prp_simple(struct apple_nvme *anv,
						struct request *req,
						struct nvme_rw_command *cmnd,
						struct bio_vec *bv)
{
	struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
	unsigned int offset = bv->bv_offset & (NVME_CTRL_PAGE_SIZE - 1);
	unsigned int first_prp_len = NVME_CTRL_PAGE_SIZE - offset;

	iod->first_dma = dma_map_bvec(anv->dev, bv, rq_dma_dir(req), 0);
	if (dma_mapping_error(anv->dev, iod->first_dma))
		return BLK_STS_RESOURCE;
	iod->dma_len = bv->bv_len;

	cmnd->dptr.prp1 = cpu_to_le64(iod->first_dma);
	if (bv->bv_len > first_prp_len)
		cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma + first_prp_len);
	return BLK_STS_OK;
}

static blk_status_t apple_nvme_map_data(struct apple_nvme *anv,
					struct request *req,
					struct nvme_command *cmnd)
{
	struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
	blk_status_t ret = BLK_STS_RESOURCE;
	int nr_mapped;

	if (blk_rq_nr_phys_segments(req) == 1) {
		struct bio_vec bv = req_bvec(req);

		if (bv.bv_offset + bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2)
			return apple_nvme_setup_prp_simple(anv, req, &cmnd->rw,
							   &bv);
	}

	iod->dma_len = 0;
	iod->sg = mempool_alloc(anv->iod_mempool, GFP_ATOMIC);
	if (!iod->sg)
		return BLK_STS_RESOURCE;
	sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
	iod->nents = blk_rq_map_sg(req->q, req, iod->sg);
	if (!iod->nents)
		goto out_free_sg;

	nr_mapped = dma_map_sg_attrs(anv->dev, iod->sg, iod->nents,
				     rq_dma_dir(req), DMA_ATTR_NO_WARN);
	if (!nr_mapped)
		goto out_free_sg;

	ret = apple_nvme_setup_prps(anv, req, &cmnd->rw);
	if (ret != BLK_STS_OK)
		goto out_unmap_sg;
	return BLK_STS_OK;

out_unmap_sg:
	dma_unmap_sg(anv->dev, iod->sg, iod->nents, rq_dma_dir(req));
out_free_sg:
	mempool_free(iod->sg, anv->iod_mempool);
	return ret;
}

static __always_inline void apple_nvme_unmap_rq(struct request *req)
{
	struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct apple_nvme *anv = queue_to_apple_nvme(iod->q);

	if (blk_rq_nr_phys_segments(req))
		apple_nvme_unmap_data(anv, req);
}

static void apple_nvme_complete_rq(struct request *req)
{
	apple_nvme_unmap_rq(req);
	nvme_complete_rq(req);
}

static void apple_nvme_complete_batch(struct io_comp_batch *iob)
{
	nvme_complete_batch(iob, apple_nvme_unmap_rq);
}

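/* a CQE is new when its phase bit matches the queue's current cq_phase */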
static inline bool apple_nvme_cqe_pending(struct apple_nvme_queue *q)
{
	struct nvme_completion *hcqe = &q->cqes[q->cq_head];

	return (le16_to_cpu(READ_ONCE(hcqe->status)) & 1) == q->cq_phase;
}

static inline struct blk_mq_tags *
apple_nvme_queue_tagset(struct apple_nvme *anv, struct apple_nvme_queue *q)
{
	if (q->is_adminq)
		return anv->admin_tagset.tags[0];
	else
		return anv->tagset.tags[0];
}

static inline void apple_nvme_handle_cqe(struct apple_nvme_queue *q,
					 struct io_comp_batch *iob, u16 idx)
{
	struct apple_nvme *anv = queue_to_apple_nvme(q);
	struct nvme_completion *cqe = &q->cqes[idx];
	__u16 command_id = READ_ONCE(cqe->command_id);
	struct request *req;

	apple_nvmmu_inval(q, command_id);

	req = nvme_find_rq(apple_nvme_queue_tagset(anv, q), command_id);
	if (unlikely(!req)) {
		dev_warn(anv->dev, "invalid id %d completed", command_id);
		return;
	}

	if (!nvme_try_complete_req(req, cqe->status, cqe->result) &&
	    !blk_mq_add_to_batch(req, iob, nvme_req(req)->status,
				 apple_nvme_complete_batch))
		apple_nvme_complete_rq(req);
}

static inline void apple_nvme_update_cq_head(struct apple_nvme_queue *q)
{
	u32 tmp = q->cq_head + 1;

	if (tmp == apple_nvme_queue_depth(q)) {
		q->cq_head = 0;
		q->cq_phase ^= 1;
	} else {
		q->cq_head = tmp;
	}
}

static bool apple_nvme_poll_cq(struct apple_nvme_queue *q,
			       struct io_comp_batch *iob)
{
	bool found = false;

	while (apple_nvme_cqe_pending(q)) {
		found = true;

		/*
		 * load-load control dependency between phase and the rest of
		 * the cqe requires a full read memory barrier
		 */
		dma_rmb();
		apple_nvme_handle_cqe(q, iob, q->cq_head);
		apple_nvme_update_cq_head(q);
	}

	if (found)
		writel(q->cq_head, q->cq_db);

	return found;
}

static bool apple_nvme_handle_cq(struct apple_nvme_queue *q, bool force)
{
	bool found;
	DEFINE_IO_COMP_BATCH(iob);

	if (!READ_ONCE(q->enabled) && !force)
		return false;

	found = apple_nvme_poll_cq(q, &iob);

	if (!rq_list_empty(iob.req_list))
		apple_nvme_complete_batch(&iob);

	return found;
}

static irqreturn_t apple_nvme_irq(int irq, void *data)
{
	struct apple_nvme *anv = data;
	bool handled = false;
	unsigned long flags;

	spin_lock_irqsave(&anv->lock, flags);
	if (apple_nvme_handle_cq(&anv->ioq, false))
		handled = true;
	if (apple_nvme_handle_cq(&anv->adminq, false))
		handled = true;
	spin_unlock_irqrestore(&anv->lock, flags);

	if (handled)
		return IRQ_HANDLED;
	return IRQ_NONE;
}

static int apple_nvme_create_cq(struct apple_nvme *anv)
{
	struct nvme_command c = {};

	/*
	 * Note: we (ab)use the fact that the prp fields survive if no data
	 * is attached to the request.
	 */
	c.create_cq.opcode = nvme_admin_create_cq;
	c.create_cq.prp1 = cpu_to_le64(anv->ioq.cq_dma_addr);
	c.create_cq.cqid = cpu_to_le16(1);
	c.create_cq.qsize = cpu_to_le16(APPLE_ANS_MAX_QUEUE_DEPTH - 1);
	c.create_cq.cq_flags = cpu_to_le16(NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED);
	c.create_cq.irq_vector = cpu_to_le16(0);

	return nvme_submit_sync_cmd(anv->ctrl.admin_q, &c, NULL, 0);
}

static int apple_nvme_remove_cq(struct apple_nvme *anv)
{
	struct nvme_command c = {};

	c.delete_queue.opcode = nvme_admin_delete_cq;
	c.delete_queue.qid = cpu_to_le16(1);

	return nvme_submit_sync_cmd(anv->ctrl.admin_q, &c, NULL, 0);
}

static int apple_nvme_create_sq(struct apple_nvme *anv)
{
	struct nvme_command c = {};

	/*
	 * Note: we (ab)use the fact that the prp fields survive if no data
	 * is attached to the request.
	 */
	c.create_sq.opcode = nvme_admin_create_sq;
	c.create_sq.prp1 = cpu_to_le64(anv->ioq.sq_dma_addr);
	c.create_sq.sqid = cpu_to_le16(1);
	c.create_sq.qsize = cpu_to_le16(APPLE_ANS_MAX_QUEUE_DEPTH - 1);
	c.create_sq.sq_flags = cpu_to_le16(NVME_QUEUE_PHYS_CONTIG);
	c.create_sq.cqid = cpu_to_le16(1);

	return nvme_submit_sync_cmd(anv->ctrl.admin_q, &c, NULL, 0);
}

static int apple_nvme_remove_sq(struct apple_nvme *anv)
{
	struct nvme_command c = {};

	c.delete_queue.opcode = nvme_admin_delete_sq;
	c.delete_queue.qid = cpu_to_le16(1);

	return nvme_submit_sync_cmd(anv->ctrl.admin_q, &c, NULL, 0);
}

static blk_status_t apple_nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
					const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct apple_nvme_queue *q = hctx->driver_data;
	struct apple_nvme *anv = queue_to_apple_nvme(q);
	struct request *req = bd->rq;
	struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct nvme_command *cmnd = &iod->cmd;
	blk_status_t ret;

	iod->npages = -1;
	iod->nents = 0;

	/*
	 * We should not need to do this, but we're still using this to
	 * ensure we can drain requests on a dying queue.
	 */
	if (unlikely(!READ_ONCE(q->enabled)))
		return BLK_STS_IOERR;

	if (!nvme_check_ready(&anv->ctrl, req, true))
		return nvme_fail_nonready_command(&anv->ctrl, req);

	ret = nvme_setup_cmd(ns, req);
	if (ret)
		return ret;

	if (blk_rq_nr_phys_segments(req)) {
		ret = apple_nvme_map_data(anv, req, cmnd);
		if (ret)
			goto out_free_cmd;
	}

	nvme_start_request(req);
	apple_nvme_submit_cmd(q, cmnd);
	return BLK_STS_OK;

out_free_cmd:
	nvme_cleanup_cmd(req);
	return ret;
}

static int apple_nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
				unsigned int hctx_idx)
{
	hctx->driver_data = data;
	return 0;
}

static int apple_nvme_init_request(struct blk_mq_tag_set *set,
				   struct request *req, unsigned int hctx_idx,
				   unsigned int numa_node)
{
	struct apple_nvme_queue *q = set->driver_data;
	struct apple_nvme *anv = queue_to_apple_nvme(q);
	struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct nvme_request *nreq = nvme_req(req);

	iod->q = q;
	nreq->ctrl = &anv->ctrl;
	nreq->cmd = &iod->cmd;

	return 0;
}

static void apple_nvme_disable(struct apple_nvme *anv, bool shutdown)
{
	enum nvme_ctrl_state state = nvme_ctrl_state(&anv->ctrl);
	u32 csts = readl(anv->mmio_nvme + NVME_REG_CSTS);
	bool dead = false, freeze = false;
	unsigned long flags;

	if (apple_rtkit_is_crashed(anv->rtk))
		dead = true;
	if (!(csts & NVME_CSTS_RDY))
		dead = true;
	if (csts & NVME_CSTS_CFS)
		dead = true;

	if (state == NVME_CTRL_LIVE ||
	    state == NVME_CTRL_RESETTING) {
		freeze = true;
		nvme_start_freeze(&anv->ctrl);
	}

	/*
	 * Give the controller a chance to complete all entered requests if
	 * doing a safe shutdown.
	 */
	if (!dead && shutdown && freeze)
		nvme_wait_freeze_timeout(&anv->ctrl, NVME_IO_TIMEOUT);

	nvme_quiesce_io_queues(&anv->ctrl);

	if (!dead) {
		if (READ_ONCE(anv->ioq.enabled)) {
			apple_nvme_remove_sq(anv);
			apple_nvme_remove_cq(anv);
		}

		/*
		 * Always disable the NVMe controller after shutdown.
		 * We need to do this to bring it back up later anyway, and we
		 * can't do it while the firmware is not running (e.g. in the
		 * resume reset path before RTKit is initialized), so for Apple
		 * controllers it makes sense to unconditionally do it here.
		 * Additionally, this sequence of events is reliable, while
		 * others (like disabling after bringing back the firmware on
		 * resume) seem to run into trouble under some circumstances.
		 *
		 * Both U-Boot and m1n1 also use this convention (i.e. an ANS
		 * NVMe controller is handed off with firmware shut down, in an
		 * NVMe disabled state, after a clean shutdown).
		 */
		if (shutdown)
			nvme_disable_ctrl(&anv->ctrl, shutdown);
		nvme_disable_ctrl(&anv->ctrl, false);
	}

	WRITE_ONCE(anv->ioq.enabled, false);
	WRITE_ONCE(anv->adminq.enabled, false);
	mb(); /* ensure that nvme_queue_rq() sees that enabled is cleared */
	nvme_quiesce_admin_queue(&anv->ctrl);

	/* last chance to complete any requests before nvme_cancel_request */
	spin_lock_irqsave(&anv->lock, flags);
	apple_nvme_handle_cq(&anv->ioq, true);
	apple_nvme_handle_cq(&anv->adminq, true);
	spin_unlock_irqrestore(&anv->lock, flags);

	nvme_cancel_tagset(&anv->ctrl);
	nvme_cancel_admin_tagset(&anv->ctrl);

	/*
	 * The driver will not be starting up queues again if shutting down so
	 * must flush all entered requests to their failed completion to avoid
	 * deadlocking blk-mq hot-cpu notifier.
	 */
	if (shutdown) {
		nvme_unquiesce_io_queues(&anv->ctrl);
		nvme_unquiesce_admin_queue(&anv->ctrl);
	}
}

static enum blk_eh_timer_return apple_nvme_timeout(struct request *req)
{
	struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct apple_nvme_queue *q = iod->q;
	struct apple_nvme *anv = queue_to_apple_nvme(q);
	unsigned long flags;
	u32 csts = readl(anv->mmio_nvme + NVME_REG_CSTS);

	if (nvme_ctrl_state(&anv->ctrl) != NVME_CTRL_LIVE) {
		/*
		 * From rdma.c:
		 * If we are resetting, connecting or deleting we should
		 * complete immediately because we may block controller
		 * teardown or setup sequence
		 * - ctrl disable/shutdown fabrics requests
		 * - connect requests
		 * - initialization admin requests
		 * - I/O requests that entered after unquiescing and
		 *   the controller stopped responding
		 *
		 * All other requests should be cancelled by the error
		 * recovery work, so it's fine that we fail it here.
		 */
		dev_warn(anv->dev,
			 "I/O %d(aq:%d) timeout while not in live state\n",
			 req->tag, q->is_adminq);
		if (blk_mq_request_started(req) &&
		    !blk_mq_request_completed(req)) {
			nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD;
			nvme_req(req)->flags |= NVME_REQ_CANCELLED;
			blk_mq_complete_request(req);
		}
		return BLK_EH_DONE;
	}

	/* check if we just missed an interrupt if we're still alive */
	if (!apple_rtkit_is_crashed(anv->rtk) && !(csts & NVME_CSTS_CFS)) {
		spin_lock_irqsave(&anv->lock, flags);
		apple_nvme_handle_cq(q, false);
		spin_unlock_irqrestore(&anv->lock, flags);
		if (blk_mq_request_completed(req)) {
			dev_warn(anv->dev,
				 "I/O %d(aq:%d) timeout: completion polled\n",
				 req->tag, q->is_adminq);
			return BLK_EH_DONE;
		}
	}

	/*
	 * aborting commands isn't supported which leaves a full reset as our
	 * only option here
	 */
	dev_warn(anv->dev, "I/O %d(aq:%d) timeout: resetting controller\n",
		 req->tag, q->is_adminq);
	nvme_req(req)->flags |= NVME_REQ_CANCELLED;
	apple_nvme_disable(anv, false);
	nvme_reset_ctrl(&anv->ctrl);
	return BLK_EH_DONE;
}

static int apple_nvme_poll(struct blk_mq_hw_ctx *hctx,
			   struct io_comp_batch *iob)
{
	struct apple_nvme_queue *q = hctx->driver_data;
	struct apple_nvme *anv = queue_to_apple_nvme(q);
	bool found;
	unsigned long flags;

	spin_lock_irqsave(&anv->lock, flags);
	found = apple_nvme_poll_cq(q, iob);
	spin_unlock_irqrestore(&anv->lock, flags);

	return found;
}

static const struct blk_mq_ops apple_nvme_mq_admin_ops = {
	.queue_rq = apple_nvme_queue_rq,
	.complete = apple_nvme_complete_rq,
	.init_hctx = apple_nvme_init_hctx,
	.init_request = apple_nvme_init_request,
	.timeout = apple_nvme_timeout,
};

static const struct blk_mq_ops apple_nvme_mq_ops = {
	.queue_rq = apple_nvme_queue_rq,
	.complete = apple_nvme_complete_rq,
	.init_hctx = apple_nvme_init_hctx,
	.init_request = apple_nvme_init_request,
	.timeout = apple_nvme_timeout,
	.poll = apple_nvme_poll,
};

static void apple_nvme_init_queue(struct apple_nvme_queue *q)
{
	unsigned int depth = apple_nvme_queue_depth(q);

	q->cq_head = 0;
	q->cq_phase = 1;
	memset(q->tcbs, 0,
	       APPLE_ANS_MAX_QUEUE_DEPTH * sizeof(struct apple_nvmmu_tcb));
	memset(q->cqes, 0, depth * sizeof(struct nvme_completion));
	WRITE_ONCE(q->enabled, true);
	wmb(); /* ensure the first interrupt sees the initialization */
}

static void apple_nvme_reset_work(struct work_struct *work)
{
	unsigned int nr_io_queues = 1;
	int ret;
	u32 boot_status, aqa;
	struct apple_nvme *anv =
		container_of(work, struct apple_nvme, ctrl.reset_work);
	enum nvme_ctrl_state state = nvme_ctrl_state(&anv->ctrl);

	if (state != NVME_CTRL_RESETTING) {
		dev_warn(anv->dev, "ctrl state %d is not RESETTING\n", state);
		ret = -ENODEV;
		goto out;
	}

	/* there's unfortunately no known way to recover if RTKit crashed :( */
	if (apple_rtkit_is_crashed(anv->rtk)) {
		dev_err(anv->dev,
			"RTKit has crashed without any way to recover.");
		ret = -EIO;
		goto out;
	}

	/* RTKit must be shut down cleanly for the (soft)-reset to work */
	if (apple_rtkit_is_running(anv->rtk)) {
		/* reset the controller if it is enabled */
		if (anv->ctrl.ctrl_config & NVME_CC_ENABLE)
			apple_nvme_disable(anv, false);
		dev_dbg(anv->dev, "Trying to shut down RTKit before reset.");
		ret = apple_rtkit_shutdown(anv->rtk);
		if (ret)
			goto out;
	}

	writel(0, anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL);

	ret = reset_control_assert(anv->reset);
	if (ret)
		goto out;

	ret = apple_rtkit_reinit(anv->rtk);
	if (ret)
		goto out;

	ret = reset_control_deassert(anv->reset);
	if (ret)
		goto out;

	writel(APPLE_ANS_COPROC_CPU_CONTROL_RUN,
	       anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL);
	ret = apple_rtkit_boot(anv->rtk);
	if (ret) {
		dev_err(anv->dev, "ANS did not boot");
		goto out;
	}

	ret = readl_poll_timeout(anv->mmio_nvme + APPLE_ANS_BOOT_STATUS,
				 boot_status,
				 boot_status == APPLE_ANS_BOOT_STATUS_OK,
				 USEC_PER_MSEC, APPLE_ANS_BOOT_TIMEOUT);
	if (ret) {
		dev_err(anv->dev, "ANS did not initialize");
		goto out;
	}

	dev_dbg(anv->dev, "ANS booted successfully.");

	/*
	 * Limit the max command size to prevent iod->sg allocations going
	 * over a single page.
	 */
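	/*
	 * NVME_MAX_KB_SZ is in KiB, so << 1 converts it to 512-byte sectors;
	 * >> 9 converts the byte-sized DMA mapping limit likewise.
	 */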
	anv->ctrl.max_hw_sectors = min_t(u32, NVME_MAX_KB_SZ << 1,
					 dma_max_mapping_size(anv->dev) >> 9);
	anv->ctrl.max_segments = NVME_MAX_SEGS;

	dma_set_max_seg_size(anv->dev, 0xffffffff);

	/*
	 * Enable NVMMU and linear submission queues.
	 * While we could keep those disabled and pretend this is a slightly
	 * more common NVMe controller we'd still need some quirks (e.g.
	 * sq entries will be 128 bytes) and Apple might drop support for
	 * that mode in the future.
	 */
	writel(APPLE_ANS_LINEAR_SQ_EN,
	       anv->mmio_nvme + APPLE_ANS_LINEAR_SQ_CTRL);

	/* Allow as many pending commands as possible for both queues */
	writel(APPLE_ANS_MAX_QUEUE_DEPTH | (APPLE_ANS_MAX_QUEUE_DEPTH << 16),
	       anv->mmio_nvme + APPLE_ANS_MAX_PEND_CMDS_CTRL);

	/* Setup the NVMMU for the maximum admin and IO queue depth */
	writel(APPLE_ANS_MAX_QUEUE_DEPTH - 1,
	       anv->mmio_nvme + APPLE_NVMMU_NUM_TCBS);

	/*
	 * This is probably a chicken bit: without it all commands where any PRP
	 * is set to zero (including those that don't use that field) fail and
	 * the co-processor complains about "completed with err BAD_CMD-" or
	 * a "NULL_PRP_PTR_ERR" in the syslog.
	 */
	writel(readl(anv->mmio_nvme + APPLE_ANS_UNKNOWN_CTRL) &
		       ~APPLE_ANS_PRP_NULL_CHECK,
	       anv->mmio_nvme + APPLE_ANS_UNKNOWN_CTRL);

	/* Setup the admin queue */
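	/* AQA: 0's based admin SQ size in bits 11:0, admin CQ size in bits 27:16 */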
	aqa = APPLE_NVME_AQ_DEPTH - 1;
	aqa |= aqa << 16;
	writel(aqa, anv->mmio_nvme + NVME_REG_AQA);
	writeq(anv->adminq.sq_dma_addr, anv->mmio_nvme + NVME_REG_ASQ);
	writeq(anv->adminq.cq_dma_addr, anv->mmio_nvme + NVME_REG_ACQ);

	/* Setup NVMMU for both queues */
	writeq(anv->adminq.tcb_dma_addr,
	       anv->mmio_nvme + APPLE_NVMMU_ASQ_TCB_BASE);
	writeq(anv->ioq.tcb_dma_addr,
	       anv->mmio_nvme + APPLE_NVMMU_IOSQ_TCB_BASE);

	anv->ctrl.sqsize =
		APPLE_ANS_MAX_QUEUE_DEPTH - 1; /* 0's based queue depth */
	anv->ctrl.cap = readq(anv->mmio_nvme + NVME_REG_CAP);

	dev_dbg(anv->dev, "Enabling controller now");
	ret = nvme_enable_ctrl(&anv->ctrl);
	if (ret)
		goto out;

	dev_dbg(anv->dev, "Starting admin queue");
	apple_nvme_init_queue(&anv->adminq);
	nvme_unquiesce_admin_queue(&anv->ctrl);

	if (!nvme_change_ctrl_state(&anv->ctrl, NVME_CTRL_CONNECTING)) {
		dev_warn(anv->ctrl.device,
			 "failed to mark controller CONNECTING\n");
		ret = -ENODEV;
		goto out;
	}

	ret = nvme_init_ctrl_finish(&anv->ctrl, false);
	if (ret)
		goto out;

	dev_dbg(anv->dev, "Creating IOCQ");
	ret = apple_nvme_create_cq(anv);
	if (ret)
		goto out;
	dev_dbg(anv->dev, "Creating IOSQ");
	ret = apple_nvme_create_sq(anv);
	if (ret)
		goto out_remove_cq;

	apple_nvme_init_queue(&anv->ioq);
	nr_io_queues = 1;
	ret = nvme_set_queue_count(&anv->ctrl, &nr_io_queues);
	if (ret)
		goto out_remove_sq;
	if (nr_io_queues != 1) {
		ret = -ENXIO;
		goto out_remove_sq;
	}

	anv->ctrl.queue_count = nr_io_queues + 1;

	nvme_unquiesce_io_queues(&anv->ctrl);
	nvme_wait_freeze(&anv->ctrl);
	blk_mq_update_nr_hw_queues(&anv->tagset, 1);
	nvme_unfreeze(&anv->ctrl);

	if (!nvme_change_ctrl_state(&anv->ctrl, NVME_CTRL_LIVE)) {
		dev_warn(anv->ctrl.device,
			 "failed to mark controller live state\n");
		ret = -ENODEV;
		goto out_remove_sq;
	}

	nvme_start_ctrl(&anv->ctrl);

	dev_dbg(anv->dev, "ANS boot and NVMe init completed.");
	return;

out_remove_sq:
	apple_nvme_remove_sq(anv);
out_remove_cq:
	apple_nvme_remove_cq(anv);
out:
	dev_warn(anv->ctrl.device, "Reset failure status: %d\n", ret);
	nvme_change_ctrl_state(&anv->ctrl, NVME_CTRL_DELETING);
	nvme_get_ctrl(&anv->ctrl);
	apple_nvme_disable(anv, false);
	nvme_mark_namespaces_dead(&anv->ctrl);
	if (!queue_work(nvme_wq, &anv->remove_work))
		nvme_put_ctrl(&anv->ctrl);
}

static void apple_nvme_remove_dead_ctrl_work(struct work_struct *work)
{
	struct apple_nvme *anv =
		container_of(work, struct apple_nvme, remove_work);

	nvme_put_ctrl(&anv->ctrl);
	device_release_driver(anv->dev);
}

static int apple_nvme_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
{
	*val = readl(ctrl_to_apple_nvme(ctrl)->mmio_nvme + off);
	return 0;
}

static int apple_nvme_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
{
	writel(val, ctrl_to_apple_nvme(ctrl)->mmio_nvme + off);
	return 0;
}

static int apple_nvme_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
{
	*val = readq(ctrl_to_apple_nvme(ctrl)->mmio_nvme + off);
	return 0;
}

static int apple_nvme_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
{
	struct device *dev = ctrl_to_apple_nvme(ctrl)->dev;

	return snprintf(buf, size, "%s\n", dev_name(dev));
}

static void apple_nvme_free_ctrl(struct nvme_ctrl *ctrl)
{
	struct apple_nvme *anv = ctrl_to_apple_nvme(ctrl);

	if (anv->ctrl.admin_q)
		blk_put_queue(anv->ctrl.admin_q);
	put_device(anv->dev);
}

static const struct nvme_ctrl_ops nvme_ctrl_ops = {
	.name = "apple-nvme",
	.module = THIS_MODULE,
	.flags = 0,
	.reg_read32 = apple_nvme_reg_read32,
	.reg_write32 = apple_nvme_reg_write32,
	.reg_read64 = apple_nvme_reg_read64,
	.free_ctrl = apple_nvme_free_ctrl,
	.get_address = apple_nvme_get_address,
};

static void apple_nvme_async_probe(void *data, async_cookie_t cookie)
{
	struct apple_nvme *anv = data;

	flush_work(&anv->ctrl.reset_work);
	flush_work(&anv->ctrl.scan_work);
	nvme_put_ctrl(&anv->ctrl);
}

static void devm_apple_nvme_put_tag_set(void *data)
{
	blk_mq_free_tag_set(data);
}

static int apple_nvme_alloc_tagsets(struct apple_nvme *anv)
{
	int ret;

	anv->admin_tagset.ops = &apple_nvme_mq_admin_ops;
	anv->admin_tagset.nr_hw_queues = 1;
	anv->admin_tagset.queue_depth = APPLE_NVME_AQ_MQ_TAG_DEPTH;
	anv->admin_tagset.timeout = NVME_ADMIN_TIMEOUT;
	anv->admin_tagset.numa_node = NUMA_NO_NODE;
	anv->admin_tagset.cmd_size = sizeof(struct apple_nvme_iod);
	anv->admin_tagset.flags = BLK_MQ_F_NO_SCHED;
	anv->admin_tagset.driver_data = &anv->adminq;

	ret = blk_mq_alloc_tag_set(&anv->admin_tagset);
	if (ret)
		return ret;
	ret = devm_add_action_or_reset(anv->dev, devm_apple_nvme_put_tag_set,
				       &anv->admin_tagset);
	if (ret)
		return ret;

	anv->tagset.ops = &apple_nvme_mq_ops;
	anv->tagset.nr_hw_queues = 1;
	anv->tagset.nr_maps = 1;
	/*
	 * Tags are used as an index to the NVMMU and must be unique across
	 * both queues. The admin queue gets the first APPLE_NVME_AQ_DEPTH which
	 * must be marked as reserved in the IO queue.
	 */
	anv->tagset.reserved_tags = APPLE_NVME_AQ_DEPTH;
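	/* presumably one less than the hardware depth so the queues can never be completely full */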
	anv->tagset.queue_depth = APPLE_ANS_MAX_QUEUE_DEPTH - 1;
	anv->tagset.timeout = NVME_IO_TIMEOUT;
	anv->tagset.numa_node = NUMA_NO_NODE;
	anv->tagset.cmd_size = sizeof(struct apple_nvme_iod);
	anv->tagset.flags = BLK_MQ_F_SHOULD_MERGE;
	anv->tagset.driver_data = &anv->ioq;

	ret = blk_mq_alloc_tag_set(&anv->tagset);
	if (ret)
		return ret;
	ret = devm_add_action_or_reset(anv->dev, devm_apple_nvme_put_tag_set,
				       &anv->tagset);
	if (ret)
		return ret;

	anv->ctrl.admin_tagset = &anv->admin_tagset;
	anv->ctrl.tagset = &anv->tagset;

	return 0;
}

static int apple_nvme_queue_alloc(struct apple_nvme *anv,
				  struct apple_nvme_queue *q)
{
	unsigned int depth = apple_nvme_queue_depth(q);

	q->cqes = dmam_alloc_coherent(anv->dev,
				      depth * sizeof(struct nvme_completion),
				      &q->cq_dma_addr, GFP_KERNEL);
	if (!q->cqes)
		return -ENOMEM;

	q->sqes = dmam_alloc_coherent(anv->dev,
				      depth * sizeof(struct nvme_command),
				      &q->sq_dma_addr, GFP_KERNEL);
	if (!q->sqes)
		return -ENOMEM;

	/*
	 * We need the maximum queue depth here because the NVMMU only has a
	 * single depth configuration shared between both queues.
	 */
	q->tcbs = dmam_alloc_coherent(anv->dev,
				      APPLE_ANS_MAX_QUEUE_DEPTH *
					      sizeof(struct apple_nvmmu_tcb),
				      &q->tcb_dma_addr, GFP_KERNEL);
	if (!q->tcbs)
		return -ENOMEM;

	/*
	 * initialize phase to make sure the allocated and empty memory
	 * doesn't look like a full cq already.
	 */
	q->cq_phase = 1;
	return 0;
}

static void apple_nvme_detach_genpd(struct apple_nvme *anv)
{
	int i;

	if (anv->pd_count <= 1)
		return;

	for (i = anv->pd_count - 1; i >= 0; i--) {
		if (anv->pd_link[i])
			device_link_del(anv->pd_link[i]);
		if (!IS_ERR_OR_NULL(anv->pd_dev[i]))
			dev_pm_domain_detach(anv->pd_dev[i], true);
	}
}

static int apple_nvme_attach_genpd(struct apple_nvme *anv)
{
	struct device *dev = anv->dev;
	int i;

	anv->pd_count = of_count_phandle_with_args(
		dev->of_node, "power-domains", "#power-domain-cells");
	if (anv->pd_count <= 1)
		return 0;

	anv->pd_dev = devm_kcalloc(dev, anv->pd_count, sizeof(*anv->pd_dev),
				   GFP_KERNEL);
	if (!anv->pd_dev)
		return -ENOMEM;

	anv->pd_link = devm_kcalloc(dev, anv->pd_count, sizeof(*anv->pd_link),
				    GFP_KERNEL);
	if (!anv->pd_link)
		return -ENOMEM;

	for (i = 0; i < anv->pd_count; i++) {
		anv->pd_dev[i] = dev_pm_domain_attach_by_id(dev, i);
		if (IS_ERR(anv->pd_dev[i])) {
			apple_nvme_detach_genpd(anv);
			return PTR_ERR(anv->pd_dev[i]);
		}

		anv->pd_link[i] = device_link_add(dev, anv->pd_dev[i],
						  DL_FLAG_STATELESS |
						  DL_FLAG_PM_RUNTIME |
						  DL_FLAG_RPM_ACTIVE);
		if (!anv->pd_link[i]) {
			apple_nvme_detach_genpd(anv);
			return -EINVAL;
		}
	}

	return 0;
}

static void devm_apple_nvme_mempool_destroy(void *data)
{
	mempool_destroy(data);
}

static int apple_nvme_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct apple_nvme *anv;
	int ret;

	anv = devm_kzalloc(dev, sizeof(*anv), GFP_KERNEL);
	if (!anv)
		return -ENOMEM;

	anv->dev = get_device(dev);
	anv->adminq.is_adminq = true;
	platform_set_drvdata(pdev, anv);

	ret = apple_nvme_attach_genpd(anv);
	if (ret < 0) {
		dev_err_probe(dev, ret, "Failed to attach power domains");
		goto put_dev;
	}
	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) {
		ret = -ENXIO;
		goto put_dev;
	}

	anv->irq = platform_get_irq(pdev, 0);
	if (anv->irq < 0) {
		ret = anv->irq;
		goto put_dev;
	}
	if (!anv->irq) {
		ret = -ENXIO;
		goto put_dev;
	}

	anv->mmio_coproc = devm_platform_ioremap_resource_byname(pdev, "ans");
	if (IS_ERR(anv->mmio_coproc)) {
		ret = PTR_ERR(anv->mmio_coproc);
		goto put_dev;
	}
	anv->mmio_nvme = devm_platform_ioremap_resource_byname(pdev, "nvme");
	if (IS_ERR(anv->mmio_nvme)) {
		ret = PTR_ERR(anv->mmio_nvme);
		goto put_dev;
	}

	anv->adminq.sq_db = anv->mmio_nvme + APPLE_ANS_LINEAR_ASQ_DB;
	anv->adminq.cq_db = anv->mmio_nvme + APPLE_ANS_ACQ_DB;
	anv->ioq.sq_db = anv->mmio_nvme + APPLE_ANS_LINEAR_IOSQ_DB;
	anv->ioq.cq_db = anv->mmio_nvme + APPLE_ANS_IOCQ_DB;

	anv->sart = devm_apple_sart_get(dev);
	if (IS_ERR(anv->sart)) {
		ret = dev_err_probe(dev, PTR_ERR(anv->sart),
				    "Failed to initialize SART");
		goto put_dev;
	}

	anv->reset = devm_reset_control_array_get_exclusive(anv->dev);
	if (IS_ERR(anv->reset)) {
		ret = dev_err_probe(dev, PTR_ERR(anv->reset),
				    "Failed to get reset control");
		goto put_dev;
	}

	INIT_WORK(&anv->ctrl.reset_work, apple_nvme_reset_work);
	INIT_WORK(&anv->remove_work, apple_nvme_remove_dead_ctrl_work);
	spin_lock_init(&anv->lock);

	ret = apple_nvme_queue_alloc(anv, &anv->adminq);
	if (ret)
		goto put_dev;
	ret = apple_nvme_queue_alloc(anv, &anv->ioq);
	if (ret)
		goto put_dev;

	anv->prp_page_pool = dmam_pool_create("prp list page", anv->dev,
					      NVME_CTRL_PAGE_SIZE,
					      NVME_CTRL_PAGE_SIZE, 0);
	if (!anv->prp_page_pool) {
		ret = -ENOMEM;
		goto put_dev;
	}

	anv->prp_small_pool =
		dmam_pool_create("prp list 256", anv->dev, 256, 256, 0);
	if (!anv->prp_small_pool) {
		ret = -ENOMEM;
		goto put_dev;
	}

	WARN_ON_ONCE(apple_nvme_iod_alloc_size() > PAGE_SIZE);
	anv->iod_mempool =
		mempool_create_kmalloc_pool(1, apple_nvme_iod_alloc_size());
	if (!anv->iod_mempool) {
		ret = -ENOMEM;
		goto put_dev;
	}
	ret = devm_add_action_or_reset(anv->dev,
			devm_apple_nvme_mempool_destroy, anv->iod_mempool);
	if (ret)
		goto put_dev;

	ret = apple_nvme_alloc_tagsets(anv);
	if (ret)
		goto put_dev;

	ret = devm_request_irq(anv->dev, anv->irq, apple_nvme_irq, 0,
			       "nvme-apple", anv);
	if (ret) {
		dev_err_probe(dev, ret, "Failed to request IRQ");
		goto put_dev;
	}

	anv->rtk =
		devm_apple_rtkit_init(dev, anv, NULL, 0, &apple_nvme_rtkit_ops);
	if (IS_ERR(anv->rtk)) {
		ret = dev_err_probe(dev, PTR_ERR(anv->rtk),
				    "Failed to initialize RTKit");
		goto put_dev;
	}

	ret = nvme_init_ctrl(&anv->ctrl, anv->dev, &nvme_ctrl_ops,
			     NVME_QUIRK_SKIP_CID_GEN | NVME_QUIRK_IDENTIFY_CNS);
	if (ret) {
		dev_err_probe(dev, ret, "Failed to initialize nvme_ctrl");
		goto put_dev;
	}

	anv->ctrl.admin_q = blk_mq_alloc_queue(&anv->admin_tagset, NULL, NULL);
	if (IS_ERR(anv->ctrl.admin_q)) {
		ret = -ENOMEM;
		goto put_dev;
	}

	nvme_reset_ctrl(&anv->ctrl);
	async_schedule(apple_nvme_async_probe, anv);

	return 0;

put_dev:
	put_device(anv->dev);
	return ret;
}

static void apple_nvme_remove(struct platform_device *pdev)
{
	struct apple_nvme *anv = platform_get_drvdata(pdev);

	nvme_change_ctrl_state(&anv->ctrl, NVME_CTRL_DELETING);
	flush_work(&anv->ctrl.reset_work);
	nvme_stop_ctrl(&anv->ctrl);
	nvme_remove_namespaces(&anv->ctrl);
	apple_nvme_disable(anv, true);
	nvme_uninit_ctrl(&anv->ctrl);

	if (apple_rtkit_is_running(anv->rtk))
		apple_rtkit_shutdown(anv->rtk);

	apple_nvme_detach_genpd(anv);
}

static void apple_nvme_shutdown(struct platform_device *pdev)
{
	struct apple_nvme *anv = platform_get_drvdata(pdev);

	apple_nvme_disable(anv, true);
	if (apple_rtkit_is_running(anv->rtk))
		apple_rtkit_shutdown(anv->rtk);
}

static int apple_nvme_resume(struct device *dev)
{
	struct apple_nvme *anv = dev_get_drvdata(dev);

	return nvme_reset_ctrl(&anv->ctrl);
}

static int apple_nvme_suspend(struct device *dev)
{
	struct apple_nvme *anv = dev_get_drvdata(dev);
	int ret = 0;

	apple_nvme_disable(anv, true);

	if (apple_rtkit_is_running(anv->rtk))
		ret = apple_rtkit_shutdown(anv->rtk);

	writel(0, anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL);

	return ret;
}

static DEFINE_SIMPLE_DEV_PM_OPS(apple_nvme_pm_ops, apple_nvme_suspend,
				apple_nvme_resume);

static const struct of_device_id apple_nvme_of_match[] = {
	{ .compatible = "apple,nvme-ans2" },
	{},
};
MODULE_DEVICE_TABLE(of, apple_nvme_of_match);

static struct platform_driver apple_nvme_driver = {
	.driver = {
		.name = "nvme-apple",
		.of_match_table = apple_nvme_of_match,
		.pm = pm_sleep_ptr(&apple_nvme_pm_ops),
	},
	.probe = apple_nvme_probe,
	.remove_new = apple_nvme_remove,
	.shutdown = apple_nvme_shutdown,
};
module_platform_driver(apple_nvme_driver);

MODULE_AUTHOR("Sven Peter <sven@svenpeter.dev>");
MODULE_LICENSE("GPL");