// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
 */

#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/platform_device.h>
#include <linux/ktime.h>

#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>

#include "cqhci.h"
#include "cqhci-crypto.h"

#define DCMD_SLOT 31
#define NUM_SLOTS 32

struct cqhci_slot {
	struct mmc_request *mrq;
	unsigned int flags;
#define CQHCI_EXTERNAL_TIMEOUT	BIT(0)
#define CQHCI_COMPLETED		BIT(1)
#define CQHCI_HOST_CRC		BIT(2)
#define CQHCI_HOST_TIMEOUT	BIT(3)
#define CQHCI_HOST_OTHER	BIT(4)
};

static inline u8 *get_desc(struct cqhci_host *cq_host, u8 tag)
{
	return cq_host->desc_base + (tag * cq_host->slot_sz);
}

static inline u8 *get_link_desc(struct cqhci_host *cq_host, u8 tag)
{
	u8 *desc = get_desc(cq_host, tag);

	return desc + cq_host->task_desc_len;
}

static inline size_t get_trans_desc_offset(struct cqhci_host *cq_host, u8 tag)
{
	return cq_host->trans_desc_len * cq_host->mmc->max_segs * tag;
}

static inline dma_addr_t get_trans_desc_dma(struct cqhci_host *cq_host, u8 tag)
{
	size_t offset = get_trans_desc_offset(cq_host, tag);

	return cq_host->trans_desc_dma_base + offset;
}

static inline u8 *get_trans_desc(struct cqhci_host *cq_host, u8 tag)
{
	size_t offset = get_trans_desc_offset(cq_host, tag);

	return cq_host->trans_desc_base + offset;
}
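
/*
 * Worked example (illustrative, with assumed host values): with a 128-bit
 * task descriptor (task_desc_len = 16), a 64-bit link descriptor
 * (link_desc_len = 16) and max_segs = 128, slot_sz is 32, so for tag 3:
 *
 *	get_desc()       -> desc_base + 3 * 32  = desc_base + 96
 *	get_link_desc()  -> desc_base + 96 + 16 = desc_base + 112
 *	get_trans_desc() -> trans_desc_base + 16 * 128 * 3
 *
 * i.e. each slot owns one task+link pair in the descriptor list and a
 * max_segs-sized stretch of the transfer-descriptor area.
 */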

static void setup_trans_desc(struct cqhci_host *cq_host, u8 tag)
{
	u8 *link_temp;
	dma_addr_t trans_temp;

	link_temp = get_link_desc(cq_host, tag);
	trans_temp = get_trans_desc_dma(cq_host, tag);

	memset(link_temp, 0, cq_host->link_desc_len);
	if (cq_host->link_desc_len > 8)
		*(link_temp + 8) = 0;

	if (tag == DCMD_SLOT && (cq_host->mmc->caps2 & MMC_CAP2_CQE_DCMD)) {
		*link_temp = CQHCI_VALID(0) | CQHCI_ACT(0) | CQHCI_END(1);
		return;
	}

	*link_temp = CQHCI_VALID(1) | CQHCI_ACT(0x6) | CQHCI_END(0);

	if (cq_host->dma64) {
		__le64 *data_addr = (__le64 __force *)(link_temp + 4);

		data_addr[0] = cpu_to_le64(trans_temp);
	} else {
		__le32 *data_addr = (__le32 __force *)(link_temp + 4);

		data_addr[0] = cpu_to_le32(trans_temp);
	}
}
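
/*
 * Sketch of the resulting link descriptor (a non-normative reading of the
 * code above, for the 64-bit case): byte 0 carries the attributes
 * (VALID=1, ACT=0x6 "link", END=0) and bytes 4..11 carry the DMA address of
 * that slot's transfer-descriptor list:
 *
 *	byte:    0       1..3      4..11
 *	       attrs    (zero)    trans desc DMA address (LE)
 *
 * The DCMD slot instead gets END=1 and no address, since a DCMD task has no
 * data transfer to link to.
 */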

static void cqhci_set_irqs(struct cqhci_host *cq_host, u32 set)
{
	cqhci_writel(cq_host, set, CQHCI_ISTE);
	cqhci_writel(cq_host, set, CQHCI_ISGE);
}

#define DRV_NAME "cqhci"

#define CQHCI_DUMP(f, x...) \
	pr_err("%s: " DRV_NAME ": " f, mmc_hostname(mmc), ## x)

static void cqhci_dumpregs(struct cqhci_host *cq_host)
{
	struct mmc_host *mmc = cq_host->mmc;

	CQHCI_DUMP("============ CQHCI REGISTER DUMP ===========\n");

	CQHCI_DUMP("Caps:      0x%08x | Version:  0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_CAP),
		   cqhci_readl(cq_host, CQHCI_VER));
	CQHCI_DUMP("Config:    0x%08x | Control:  0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_CFG),
		   cqhci_readl(cq_host, CQHCI_CTL));
	CQHCI_DUMP("Int stat:  0x%08x | Int enab: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_IS),
		   cqhci_readl(cq_host, CQHCI_ISTE));
	CQHCI_DUMP("Int sig:   0x%08x | Int Coal: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_ISGE),
		   cqhci_readl(cq_host, CQHCI_IC));
	CQHCI_DUMP("TDL base:  0x%08x | TDL up32: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_TDLBA),
		   cqhci_readl(cq_host, CQHCI_TDLBAU));
	CQHCI_DUMP("Doorbell:  0x%08x | TCN:      0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_TDBR),
		   cqhci_readl(cq_host, CQHCI_TCN));
	CQHCI_DUMP("Dev queue: 0x%08x | Dev Pend: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_DQS),
		   cqhci_readl(cq_host, CQHCI_DPT));
	CQHCI_DUMP("Task clr:  0x%08x | SSC1:     0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_TCLR),
		   cqhci_readl(cq_host, CQHCI_SSC1));
	CQHCI_DUMP("SSC2:      0x%08x | DCMD rsp: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_SSC2),
		   cqhci_readl(cq_host, CQHCI_CRDCT));
	CQHCI_DUMP("RED mask:  0x%08x | TERRI:    0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_RMEM),
		   cqhci_readl(cq_host, CQHCI_TERRI));
	CQHCI_DUMP("Resp idx:  0x%08x | Resp arg: 0x%08x\n",
		   cqhci_readl(cq_host, CQHCI_CRI),
		   cqhci_readl(cq_host, CQHCI_CRA));

	if (cq_host->ops->dumpregs)
		cq_host->ops->dumpregs(mmc);
	else
		CQHCI_DUMP(": ===========================================\n");
}

/*
 * The allocated descriptor table for task, link & transfer descriptors
 * looks like:
 * |----------|
 * |task desc |  |->|----------|
 * |----------|  |  |trans desc|
 * |link desc-|->|  |----------|
 * |----------|  .
 *      .        .
 *  no. of slots    max-segs
 *      .        |----------|
 * |----------|  |----------|
 * The idea here is to create the [task+trans] table and mark & point the
 * link desc to the transfer desc table on a per slot basis.
 */
static int cqhci_host_alloc_tdl(struct cqhci_host *cq_host)
{
	int i = 0;

	/* task descriptor can be 64/128 bit irrespective of arch */
	if (cq_host->caps & CQHCI_TASK_DESC_SZ_128) {
		cqhci_writel(cq_host, cqhci_readl(cq_host, CQHCI_CFG) |
			     CQHCI_TASK_DESC_SZ, CQHCI_CFG);
		cq_host->task_desc_len = 16;
	} else {
		cq_host->task_desc_len = 8;
	}

	/*
	 * 96 bits length of transfer desc instead of 128 bits which means
	 * ADMA would expect next valid descriptor at the 96th bit
	 * or 128th bit
	 */
	if (cq_host->dma64) {
		if (cq_host->quirks & CQHCI_QUIRK_SHORT_TXFR_DESC_SZ)
			cq_host->trans_desc_len = 12;
		else
			cq_host->trans_desc_len = 16;
		cq_host->link_desc_len = 16;
	} else {
		cq_host->trans_desc_len = 8;
		cq_host->link_desc_len = 8;
	}

	/* total size of a slot: 1 task & 1 transfer (link) */
	cq_host->slot_sz = cq_host->task_desc_len + cq_host->link_desc_len;

	cq_host->desc_size = cq_host->slot_sz * cq_host->num_slots;

	cq_host->data_size = get_trans_desc_offset(cq_host, cq_host->mmc->cqe_qdepth);

	pr_debug("%s: cqhci: desc_size: %zu data_sz: %zu slot-sz: %d\n",
		 mmc_hostname(cq_host->mmc), cq_host->desc_size, cq_host->data_size,
		 cq_host->slot_sz);

	/*
	 * allocate a dma-mapped chunk of memory for the descriptors
	 * allocate a dma-mapped chunk of memory for link descriptors
	 * setup each link-desc memory offset per slot-number to
	 * the descriptor table.
	 */
	cq_host->desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
						 cq_host->desc_size,
						 &cq_host->desc_dma_base,
						 GFP_KERNEL);
	if (!cq_host->desc_base)
		return -ENOMEM;

	cq_host->trans_desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
						       cq_host->data_size,
						       &cq_host->trans_desc_dma_base,
						       GFP_KERNEL);
	if (!cq_host->trans_desc_base) {
		dmam_free_coherent(mmc_dev(cq_host->mmc), cq_host->desc_size,
				   cq_host->desc_base,
				   cq_host->desc_dma_base);
		cq_host->desc_base = NULL;
		cq_host->desc_dma_base = 0;
		return -ENOMEM;
	}

	pr_debug("%s: cqhci: desc-base: 0x%p trans-base: 0x%p\n desc_dma 0x%llx trans_dma: 0x%llx\n",
		 mmc_hostname(cq_host->mmc), cq_host->desc_base, cq_host->trans_desc_base,
		 (unsigned long long)cq_host->desc_dma_base,
		 (unsigned long long)cq_host->trans_desc_dma_base);

	for (; i < cq_host->num_slots; i++)
		setup_trans_desc(cq_host, i);

	return 0;
}
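
/*
 * Back-of-the-envelope sizing (illustrative, using assumed host values):
 * with 128-bit task descriptors, 64-bit DMA, num_slots = 32, cqe_qdepth = 31
 * (DCMD enabled) and max_segs = 128, the above works out to
 *
 *	slot_sz   = 16 + 16        = 32 bytes
 *	desc_size = 32 * 32        = 1 KiB for the task/link list
 *	data_size = 16 * 128 * 31  = 62 KiB for transfer descriptors
 *
 * so the whole TDL fits comfortably in two small coherent allocations.
 */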

static void __cqhci_enable(struct cqhci_host *cq_host)
{
	struct mmc_host *mmc = cq_host->mmc;
	u32 cqcfg;

	cqcfg = cqhci_readl(cq_host, CQHCI_CFG);

	/* Configuration must not be changed while enabled */
	if (cqcfg & CQHCI_ENABLE) {
		cqcfg &= ~CQHCI_ENABLE;
		cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
	}

	cqcfg &= ~(CQHCI_DCMD | CQHCI_TASK_DESC_SZ);

	if (mmc->caps2 & MMC_CAP2_CQE_DCMD)
		cqcfg |= CQHCI_DCMD;

	if (cq_host->caps & CQHCI_TASK_DESC_SZ_128)
		cqcfg |= CQHCI_TASK_DESC_SZ;

	if (mmc->caps2 & MMC_CAP2_CRYPTO)
		cqcfg |= CQHCI_CRYPTO_GENERAL_ENABLE;

	cqhci_writel(cq_host, cqcfg, CQHCI_CFG);

	cqhci_writel(cq_host, lower_32_bits(cq_host->desc_dma_base),
		     CQHCI_TDLBA);
	cqhci_writel(cq_host, upper_32_bits(cq_host->desc_dma_base),
		     CQHCI_TDLBAU);

	cqhci_writel(cq_host, cq_host->rca, CQHCI_SSC2);

	cqhci_set_irqs(cq_host, 0);

	cqcfg |= CQHCI_ENABLE;

	cqhci_writel(cq_host, cqcfg, CQHCI_CFG);

	if (cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT)
		cqhci_writel(cq_host, 0, CQHCI_CTL);

	mmc->cqe_on = true;

	if (cq_host->ops->enable)
		cq_host->ops->enable(mmc);

	/* Ensure all writes are done before interrupts are enabled */
	wmb();

	cqhci_set_irqs(cq_host, CQHCI_IS_MASK);

	cq_host->activated = true;
}

static void __cqhci_disable(struct cqhci_host *cq_host)
{
	u32 cqcfg;

	cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
	cqcfg &= ~CQHCI_ENABLE;
	cqhci_writel(cq_host, cqcfg, CQHCI_CFG);

	cq_host->mmc->cqe_on = false;

	cq_host->activated = false;
}

int cqhci_deactivate(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;

	if (cq_host->enabled && cq_host->activated)
		__cqhci_disable(cq_host);

	return 0;
}
EXPORT_SYMBOL(cqhci_deactivate);

int cqhci_resume(struct mmc_host *mmc)
{
	/* Re-enable is done upon first request */
	return 0;
}
EXPORT_SYMBOL(cqhci_resume);

static int cqhci_enable(struct mmc_host *mmc, struct mmc_card *card)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	int err;

	if (!card->ext_csd.cmdq_en)
		return -EINVAL;

	if (cq_host->enabled)
		return 0;

	cq_host->rca = card->rca;

	err = cqhci_host_alloc_tdl(cq_host);
	if (err) {
		pr_err("%s: Failed to enable CQE, error %d\n",
		       mmc_hostname(mmc), err);
		return err;
	}

	__cqhci_enable(cq_host);

	cq_host->enabled = true;

#ifdef DEBUG
	cqhci_dumpregs(cq_host);
#endif
	return 0;
}

/* CQHCI is idle and should halt immediately, so set a small timeout */
#define CQHCI_OFF_TIMEOUT 100

static u32 cqhci_read_ctl(struct cqhci_host *cq_host)
{
	return cqhci_readl(cq_host, CQHCI_CTL);
}

static void cqhci_off(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	u32 reg;
	int err;

	if (!cq_host->enabled || !mmc->cqe_on || cq_host->recovery_halt)
		return;

	if (cq_host->ops->disable)
		cq_host->ops->disable(mmc, false);

	cqhci_writel(cq_host, CQHCI_HALT, CQHCI_CTL);

	err = readx_poll_timeout(cqhci_read_ctl, cq_host, reg,
				 reg & CQHCI_HALT, 0, CQHCI_OFF_TIMEOUT);
	if (err < 0)
		pr_err("%s: cqhci: CQE stuck on\n", mmc_hostname(mmc));
	else
		pr_debug("%s: cqhci: CQE off\n", mmc_hostname(mmc));

	if (cq_host->ops->post_disable)
		cq_host->ops->post_disable(mmc);

	mmc->cqe_on = false;
}

static void cqhci_disable(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;

	if (!cq_host->enabled)
		return;

	cqhci_off(mmc);

	__cqhci_disable(cq_host);

	dmam_free_coherent(mmc_dev(mmc), cq_host->data_size,
			   cq_host->trans_desc_base,
			   cq_host->trans_desc_dma_base);

	dmam_free_coherent(mmc_dev(mmc), cq_host->desc_size,
			   cq_host->desc_base,
			   cq_host->desc_dma_base);

	cq_host->trans_desc_base = NULL;
	cq_host->desc_base = NULL;

	cq_host->enabled = false;
}

static void cqhci_prep_task_desc(struct mmc_request *mrq,
				 struct cqhci_host *cq_host, int tag)
{
	__le64 *task_desc = (__le64 __force *)get_desc(cq_host, tag);
	u32 req_flags = mrq->data->flags;
	u64 desc0;

	desc0 = CQHCI_VALID(1) |
		CQHCI_END(1) |
		CQHCI_INT(1) |
		CQHCI_ACT(0x5) |
		CQHCI_FORCED_PROG(!!(req_flags & MMC_DATA_FORCED_PRG)) |
		CQHCI_DATA_TAG(!!(req_flags & MMC_DATA_DAT_TAG)) |
		CQHCI_DATA_DIR(!!(req_flags & MMC_DATA_READ)) |
		CQHCI_PRIORITY(!!(req_flags & MMC_DATA_PRIO)) |
		CQHCI_QBAR(!!(req_flags & MMC_DATA_QBR)) |
		CQHCI_REL_WRITE(!!(req_flags & MMC_DATA_REL_WR)) |
		CQHCI_BLK_COUNT(mrq->data->blocks) |
		CQHCI_BLK_ADDR((u64)mrq->data->blk_addr);

	task_desc[0] = cpu_to_le64(desc0);

	if (cq_host->caps & CQHCI_TASK_DESC_SZ_128) {
		u64 desc1 = cqhci_crypto_prep_task_desc(mrq);

		task_desc[1] = cpu_to_le64(desc1);

		pr_debug("%s: cqhci: tag %d task descriptor 0x%016llx%016llx\n",
			 mmc_hostname(mrq->host), mrq->tag, desc1, desc0);
	} else {
		pr_debug("%s: cqhci: tag %d task descriptor 0x%016llx\n",
			 mmc_hostname(mrq->host), mrq->tag, desc0);
	}
}
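
/*
 * Example (hand-decoded from the field macros above; the request itself is
 * hypothetical): a 1-block read of LBA 0x1000 queued on tag 2 would yield
 * roughly
 *
 *	desc0 = VALID | END | INT | ACT=0x5 (task) | DATA_DIR=1 (read)
 *		| BLK_COUNT=1 | BLK_ADDR=0x1000
 *
 * and the doorbell write for bit 2 (see cqhci_request()) then tells the
 * controller to fetch and queue that task.
 */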

static int cqhci_dma_map(struct mmc_host *host, struct mmc_request *mrq)
{
	int sg_count;
	struct mmc_data *data = mrq->data;

	if (!data)
		return -EINVAL;

	sg_count = dma_map_sg(mmc_dev(host), data->sg,
			      data->sg_len,
			      (data->flags & MMC_DATA_WRITE) ?
			      DMA_TO_DEVICE : DMA_FROM_DEVICE);
	if (!sg_count) {
		pr_err("%s: sg-len: %d\n", __func__, data->sg_len);
		return -ENOMEM;
	}

	return sg_count;
}

static void cqhci_set_tran_desc(u8 *desc, dma_addr_t addr, int len, bool end,
				bool dma64)
{
	__le32 *attr = (__le32 __force *)desc;

	*attr = (CQHCI_VALID(1) |
		 CQHCI_END(end ? 1 : 0) |
		 CQHCI_INT(0) |
		 CQHCI_ACT(0x4) |
		 CQHCI_DAT_LENGTH(len));

	if (dma64) {
		__le64 *dataddr = (__le64 __force *)(desc + 4);

		dataddr[0] = cpu_to_le64(addr);
	} else {
		__le32 *dataddr = (__le32 __force *)(desc + 4);

		dataddr[0] = cpu_to_le32(addr);
	}
}
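
/*
 * The transfer descriptors follow an ADMA2-style layout: a 32-bit attribute
 * word (VALID, END, ACT=0x4 "transfer", data length) followed at offset 4 by
 * a 32- or 64-bit buffer address. As an illustrative example (not from a real
 * trace), a final 4 KiB segment at DMA address 0x80000000 in 64-bit mode
 * encodes as attr = VALID | END | ACT=0x4 | DAT_LENGTH=4096, with
 * 0x80000000 in bytes 4..11.
 */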

static int cqhci_prep_tran_desc(struct mmc_request *mrq,
				struct cqhci_host *cq_host, int tag)
{
	struct mmc_data *data = mrq->data;
	int i, sg_count, len;
	bool end = false;
	bool dma64 = cq_host->dma64;
	dma_addr_t addr;
	u8 *desc;
	struct scatterlist *sg;

	sg_count = cqhci_dma_map(mrq->host, mrq);
	if (sg_count < 0) {
		pr_err("%s: %s: unable to map sg lists, %d\n",
		       mmc_hostname(mrq->host), __func__, sg_count);
		return sg_count;
	}

	desc = get_trans_desc(cq_host, tag);

	for_each_sg(data->sg, sg, sg_count, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);

		if ((i + 1) == sg_count)
			end = true;
		cqhci_set_tran_desc(desc, addr, len, end, dma64);
		desc += cq_host->trans_desc_len;
	}

	return 0;
}

static void cqhci_prep_dcmd_desc(struct mmc_host *mmc,
				 struct mmc_request *mrq)
{
	u64 *task_desc = NULL;
	u64 data = 0;
	u8 resp_type;
	u8 *desc;
	__le64 *dataddr;
	struct cqhci_host *cq_host = mmc->cqe_private;
	u8 timing;

	if (!(mrq->cmd->flags & MMC_RSP_PRESENT)) {
		resp_type = 0x0;
		timing = 0x1;
	} else {
		if (mrq->cmd->flags & MMC_RSP_R1B) {
			resp_type = 0x3;
			timing = 0x0;
		} else {
			resp_type = 0x2;
			timing = 0x1;
		}
	}

	task_desc = (__le64 __force *)get_desc(cq_host, cq_host->dcmd_slot);
	memset(task_desc, 0, cq_host->task_desc_len);
	data |= (CQHCI_VALID(1) |
		 CQHCI_END(1) |
		 CQHCI_INT(1) |
		 CQHCI_QBAR(1) |
		 CQHCI_ACT(0x5) |
		 CQHCI_CMD_INDEX(mrq->cmd->opcode) |
		 CQHCI_CMD_TIMING(timing) | CQHCI_RESP_TYPE(resp_type));
	if (cq_host->ops->update_dcmd_desc)
		cq_host->ops->update_dcmd_desc(mmc, mrq, &data);
	*task_desc |= data;
	desc = (u8 *)task_desc;
	pr_debug("%s: cqhci: dcmd: cmd: %d timing: %d resp: %d\n",
		 mmc_hostname(mmc), mrq->cmd->opcode, timing, resp_type);
	dataddr = (__le64 __force *)(desc + 4);
	dataddr[0] = cpu_to_le64((u64)mrq->cmd->arg);
}

static void cqhci_post_req(struct mmc_host *host, struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;

	if (data) {
		dma_unmap_sg(mmc_dev(host), data->sg, data->sg_len,
			     (data->flags & MMC_DATA_READ) ?
			     DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}
}

static inline int cqhci_tag(struct mmc_request *mrq)
{
	return mrq->cmd ? DCMD_SLOT : mrq->tag;
}

static int cqhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	int err = 0;
	int tag = cqhci_tag(mrq);
	struct cqhci_host *cq_host = mmc->cqe_private;
	unsigned long flags;

	if (!cq_host->enabled) {
		pr_err("%s: cqhci: not enabled\n", mmc_hostname(mmc));
		return -EINVAL;
	}

	/* First request after resume has to re-enable */
	if (!cq_host->activated)
		__cqhci_enable(cq_host);

	if (!mmc->cqe_on) {
		if (cq_host->ops->pre_enable)
			cq_host->ops->pre_enable(mmc);

		cqhci_writel(cq_host, 0, CQHCI_CTL);
		mmc->cqe_on = true;
		pr_debug("%s: cqhci: CQE on\n", mmc_hostname(mmc));
		if (cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) {
			pr_err("%s: cqhci: CQE failed to exit halt state\n",
			       mmc_hostname(mmc));
		}
		if (cq_host->ops->enable)
			cq_host->ops->enable(mmc);
	}

	if (mrq->data) {
		cqhci_prep_task_desc(mrq, cq_host, tag);

		err = cqhci_prep_tran_desc(mrq, cq_host, tag);
		if (err) {
			pr_err("%s: cqhci: failed to setup tx desc: %d\n",
			       mmc_hostname(mmc), err);
			return err;
		}
	} else {
		cqhci_prep_dcmd_desc(mmc, mrq);
	}

	spin_lock_irqsave(&cq_host->lock, flags);

	if (cq_host->recovery_halt) {
		err = -EBUSY;
		goto out_unlock;
	}

	cq_host->slot[tag].mrq = mrq;
	cq_host->slot[tag].flags = 0;

	cq_host->qcnt += 1;
	/* Make sure descriptors are ready before ringing the doorbell */
	wmb();
	cqhci_writel(cq_host, 1 << tag, CQHCI_TDBR);
	if (!(cqhci_readl(cq_host, CQHCI_TDBR) & (1 << tag)))
		pr_debug("%s: cqhci: doorbell not set for tag %d\n",
			 mmc_hostname(mmc), tag);
out_unlock:
	spin_unlock_irqrestore(&cq_host->lock, flags);

	if (err)
		cqhci_post_req(mmc, mrq);

	return err;
}
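
/*
 * A note on the doorbell protocol (summarising the code above, not adding
 * new behaviour): software writes 1 << tag to CQHCI_TDBR only after the task
 * and transfer descriptors for that slot are fully written (hence the
 * wmb()); the controller fetches the descriptors and, per the CQHCI model
 * this code assumes, clears the bit when the task completes. The read-back
 * above is just a debug sanity check that the bit latched.
 */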

static void cqhci_recovery_needed(struct mmc_host *mmc, struct mmc_request *mrq,
				  bool notify)
{
	struct cqhci_host *cq_host = mmc->cqe_private;

	if (!cq_host->recovery_halt) {
		cq_host->recovery_halt = true;
		pr_debug("%s: cqhci: recovery needed\n", mmc_hostname(mmc));
		wake_up(&cq_host->wait_queue);
		if (notify && mrq->recovery_notifier)
			mrq->recovery_notifier(mrq);
	}
}

static unsigned int cqhci_error_flags(int error1, int error2)
{
	int error = error1 ? error1 : error2;

	switch (error) {
	case -EILSEQ:
		return CQHCI_HOST_CRC;
	case -ETIMEDOUT:
		return CQHCI_HOST_TIMEOUT;
	default:
		return CQHCI_HOST_OTHER;
	}
}

static void cqhci_error_irq(struct mmc_host *mmc, u32 status, int cmd_error,
			    int data_error)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	struct cqhci_slot *slot;
	u32 terri;
	u32 tdpe;
	int tag;

	spin_lock(&cq_host->lock);

	terri = cqhci_readl(cq_host, CQHCI_TERRI);

	pr_debug("%s: cqhci: error IRQ status: 0x%08x cmd error %d data error %d TERRI: 0x%08x\n",
		 mmc_hostname(mmc), status, cmd_error, data_error, terri);

	/* Forget about errors when recovery has already been triggered */
	if (cq_host->recovery_halt)
		goto out_unlock;

	if (!cq_host->qcnt) {
		WARN_ONCE(1, "%s: cqhci: error when idle. IRQ status: 0x%08x cmd error %d data error %d TERRI: 0x%08x\n",
			  mmc_hostname(mmc), status, cmd_error, data_error,
			  terri);
		goto out_unlock;
	}

	if (CQHCI_TERRI_C_VALID(terri)) {
		tag = CQHCI_TERRI_C_TASK(terri);
		slot = &cq_host->slot[tag];
		if (slot->mrq) {
			slot->flags = cqhci_error_flags(cmd_error, data_error);
			cqhci_recovery_needed(mmc, slot->mrq, true);
		}
	}

	if (CQHCI_TERRI_D_VALID(terri)) {
		tag = CQHCI_TERRI_D_TASK(terri);
		slot = &cq_host->slot[tag];
		if (slot->mrq) {
			slot->flags = cqhci_error_flags(data_error, cmd_error);
			cqhci_recovery_needed(mmc, slot->mrq, true);
		}
	}

	/*
	 * Handle ICCE ("Invalid Crypto Configuration Error"). This should
	 * never happen, since the block layer ensures that all crypto-enabled
	 * I/O requests have a valid keyslot before they reach the driver.
	 *
	 * Note that GCE ("General Crypto Error") is different; it already got
	 * handled above by checking TERRI.
	 */
	if (status & CQHCI_IS_ICCE) {
		tdpe = cqhci_readl(cq_host, CQHCI_TDPE);
		WARN_ONCE(1,
			  "%s: cqhci: invalid crypto configuration error. IRQ status: 0x%08x TDPE: 0x%08x\n",
			  mmc_hostname(mmc), status, tdpe);
		while (tdpe != 0) {
			tag = __ffs(tdpe);
			tdpe &= ~(1 << tag);
			slot = &cq_host->slot[tag];
			if (!slot->mrq)
				continue;
			slot->flags = cqhci_error_flags(data_error, cmd_error);
			cqhci_recovery_needed(mmc, slot->mrq, true);
		}
	}

	if (!cq_host->recovery_halt) {
		/*
		 * The only way to guarantee forward progress is to mark at
		 * least one task in error, so if none is indicated, pick one.
		 */
		for (tag = 0; tag < NUM_SLOTS; tag++) {
			slot = &cq_host->slot[tag];
			if (!slot->mrq)
				continue;
			slot->flags = cqhci_error_flags(data_error, cmd_error);
			cqhci_recovery_needed(mmc, slot->mrq, true);
			break;
		}
	}

out_unlock:
	spin_unlock(&cq_host->lock);
}

static void cqhci_finish_mrq(struct mmc_host *mmc, unsigned int tag)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	struct cqhci_slot *slot = &cq_host->slot[tag];
	struct mmc_request *mrq = slot->mrq;
	struct mmc_data *data;

	if (!mrq) {
		WARN_ONCE(1, "%s: cqhci: spurious TCN for tag %d\n",
			  mmc_hostname(mmc), tag);
		return;
	}

	/* No completions allowed during recovery */
	if (cq_host->recovery_halt) {
		slot->flags |= CQHCI_COMPLETED;
		return;
	}

	slot->mrq = NULL;

	cq_host->qcnt -= 1;

	data = mrq->data;
	if (data) {
		if (data->error)
			data->bytes_xfered = 0;
		else
			data->bytes_xfered = data->blksz * data->blocks;
	}

	mmc_cqe_request_done(mmc, mrq);
}

irqreturn_t cqhci_irq(struct mmc_host *mmc, u32 intmask, int cmd_error,
		      int data_error)
{
	u32 status;
	unsigned long tag = 0, comp_status;
	struct cqhci_host *cq_host = mmc->cqe_private;

	status = cqhci_readl(cq_host, CQHCI_IS);
	cqhci_writel(cq_host, status, CQHCI_IS);

	pr_debug("%s: cqhci: IRQ status: 0x%08x\n", mmc_hostname(mmc), status);

	if ((status & (CQHCI_IS_RED | CQHCI_IS_GCE | CQHCI_IS_ICCE)) ||
	    cmd_error || data_error) {
		if (status & CQHCI_IS_RED)
			mmc_debugfs_err_stats_inc(mmc, MMC_ERR_CMDQ_RED);
		if (status & CQHCI_IS_GCE)
			mmc_debugfs_err_stats_inc(mmc, MMC_ERR_CMDQ_GCE);
		if (status & CQHCI_IS_ICCE)
			mmc_debugfs_err_stats_inc(mmc, MMC_ERR_CMDQ_ICCE);
		cqhci_error_irq(mmc, status, cmd_error, data_error);
	}

	if (status & CQHCI_IS_TCC) {
		/* read TCN and complete the request */
		comp_status = cqhci_readl(cq_host, CQHCI_TCN);
		cqhci_writel(cq_host, comp_status, CQHCI_TCN);
		pr_debug("%s: cqhci: TCN: 0x%08lx\n",
			 mmc_hostname(mmc), comp_status);

		spin_lock(&cq_host->lock);

		for_each_set_bit(tag, &comp_status, cq_host->num_slots) {
			/* complete the corresponding mrq */
			pr_debug("%s: cqhci: completing tag %lu\n",
				 mmc_hostname(mmc), tag);
			cqhci_finish_mrq(mmc, tag);
		}

		if (cq_host->waiting_for_idle && !cq_host->qcnt) {
			cq_host->waiting_for_idle = false;
			wake_up(&cq_host->wait_queue);
		}

		spin_unlock(&cq_host->lock);
	}

	if (status & CQHCI_IS_TCL)
		wake_up(&cq_host->wait_queue);

	if (status & CQHCI_IS_HAC)
		wake_up(&cq_host->wait_queue);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL(cqhci_irq);
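
/*
 * Host drivers are expected to call cqhci_irq() from their own interrupt
 * handler once they have decoded any controller-specific status. A minimal
 * sketch (hypothetical glue code; "my_host" and its fields are illustrative,
 * not part of this API):
 *
 *	static irqreturn_t my_host_irq(int irq, void *dev_id)
 *	{
 *		struct my_host *host = dev_id;
 *		u32 intmask = readl(host->ioaddr + MY_INT_STATUS);
 *
 *		if (host->mmc->cqe_on)
 *			return cqhci_irq(host->mmc, intmask, 0, 0);
 *		// ... non-CQE handling ...
 *		return IRQ_HANDLED;
 *	}
 *
 * cmd_error/data_error let the glue driver feed in errors it detected itself
 * (e.g. CRC or timeout bits), which cqhci_error_irq() then maps onto the
 * affected tags.
 */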

static bool cqhci_is_idle(struct cqhci_host *cq_host, int *ret)
{
	unsigned long flags;
	bool is_idle;

	spin_lock_irqsave(&cq_host->lock, flags);
	is_idle = !cq_host->qcnt || cq_host->recovery_halt;
	*ret = cq_host->recovery_halt ? -EBUSY : 0;
	cq_host->waiting_for_idle = !is_idle;
	spin_unlock_irqrestore(&cq_host->lock, flags);

	return is_idle;
}

static int cqhci_wait_for_idle(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	int ret;

	wait_event(cq_host->wait_queue, cqhci_is_idle(cq_host, &ret));

	return ret;
}

static bool cqhci_timeout(struct mmc_host *mmc, struct mmc_request *mrq,
			  bool *recovery_needed)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	int tag = cqhci_tag(mrq);
	struct cqhci_slot *slot = &cq_host->slot[tag];
	unsigned long flags;
	bool timed_out;

	spin_lock_irqsave(&cq_host->lock, flags);
	timed_out = slot->mrq == mrq;
	if (timed_out) {
		slot->flags |= CQHCI_EXTERNAL_TIMEOUT;
		cqhci_recovery_needed(mmc, mrq, false);
		*recovery_needed = cq_host->recovery_halt;
	}
	spin_unlock_irqrestore(&cq_host->lock, flags);

	if (timed_out) {
		pr_err("%s: cqhci: timeout for tag %d, qcnt %d\n",
		       mmc_hostname(mmc), tag, cq_host->qcnt);
		cqhci_dumpregs(cq_host);
	}

	return timed_out;
}

static bool cqhci_tasks_cleared(struct cqhci_host *cq_host)
{
	return !(cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_CLEAR_ALL_TASKS);
}

static bool cqhci_clear_all_tasks(struct mmc_host *mmc, unsigned int timeout)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	bool ret;
	u32 ctl;

	cqhci_set_irqs(cq_host, CQHCI_IS_TCL);

	ctl = cqhci_readl(cq_host, CQHCI_CTL);
	ctl |= CQHCI_CLEAR_ALL_TASKS;
	cqhci_writel(cq_host, ctl, CQHCI_CTL);

	wait_event_timeout(cq_host->wait_queue, cqhci_tasks_cleared(cq_host),
			   msecs_to_jiffies(timeout) + 1);

	cqhci_set_irqs(cq_host, 0);

	ret = cqhci_tasks_cleared(cq_host);

	if (!ret)
		pr_warn("%s: cqhci: Failed to clear tasks\n",
			mmc_hostname(mmc));

	return ret;
}

static bool cqhci_halted(struct cqhci_host *cq_host)
{
	return cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT;
}

static bool cqhci_halt(struct mmc_host *mmc, unsigned int timeout)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	bool ret;
	u32 ctl;

	if (cqhci_halted(cq_host))
		return true;

	cqhci_set_irqs(cq_host, CQHCI_IS_HAC);

	ctl = cqhci_readl(cq_host, CQHCI_CTL);
	ctl |= CQHCI_HALT;
	cqhci_writel(cq_host, ctl, CQHCI_CTL);

	wait_event_timeout(cq_host->wait_queue, cqhci_halted(cq_host),
			   msecs_to_jiffies(timeout) + 1);

	cqhci_set_irqs(cq_host, 0);

	ret = cqhci_halted(cq_host);

	if (!ret)
		pr_warn("%s: cqhci: Failed to halt\n", mmc_hostname(mmc));

	return ret;
}

/*
 * After halting we expect to be able to use the command line. We interpret the
 * failure to halt to mean the data lines might still be in use (and the upper
 * layers will need to send a STOP command), however failing to halt complicates
 * the recovery, so set a timeout that would reasonably allow I/O to complete.
 */
#define CQHCI_START_HALT_TIMEOUT	500

static void cqhci_recovery_start(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;

	pr_debug("%s: cqhci: %s\n", mmc_hostname(mmc), __func__);

	WARN_ON(!cq_host->recovery_halt);

	cqhci_halt(mmc, CQHCI_START_HALT_TIMEOUT);

	if (cq_host->ops->disable)
		cq_host->ops->disable(mmc, true);

	mmc->cqe_on = false;
}

static int cqhci_error_from_flags(unsigned int flags)
{
	if (!flags)
		return 0;

	/* CRC errors might indicate re-tuning so prefer to report that */
	if (flags & CQHCI_HOST_CRC)
		return -EILSEQ;

	if (flags & (CQHCI_EXTERNAL_TIMEOUT | CQHCI_HOST_TIMEOUT))
		return -ETIMEDOUT;

	return -EIO;
}

static void cqhci_recover_mrq(struct cqhci_host *cq_host, unsigned int tag)
{
	struct cqhci_slot *slot = &cq_host->slot[tag];
	struct mmc_request *mrq = slot->mrq;
	struct mmc_data *data;

	if (!mrq)
		return;

	slot->mrq = NULL;

	cq_host->qcnt -= 1;

	data = mrq->data;
	if (data) {
		data->bytes_xfered = 0;
		data->error = cqhci_error_from_flags(slot->flags);
	} else {
		mrq->cmd->error = cqhci_error_from_flags(slot->flags);
	}

	mmc_cqe_request_done(cq_host->mmc, mrq);
}

static void cqhci_recover_mrqs(struct cqhci_host *cq_host)
{
	int i;

	for (i = 0; i < cq_host->num_slots; i++)
		cqhci_recover_mrq(cq_host, i);
}

/*
 * By now the command and data lines should be unused so there is no reason for
 * CQHCI to take a long time to halt, but if it doesn't halt there could be
 * problems clearing tasks, so be generous.
 */
#define CQHCI_FINISH_HALT_TIMEOUT	20

/* CQHCI could be expected to clear its internal state pretty quickly */
#define CQHCI_CLEAR_TIMEOUT		20

static void cqhci_recovery_finish(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	unsigned long flags;
	u32 cqcfg;
	bool ok;

	pr_debug("%s: cqhci: %s\n", mmc_hostname(mmc), __func__);

	WARN_ON(!cq_host->recovery_halt);

	ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);

	/*
	 * The specification contradicts itself: it says tasks cannot be
	 * cleared if CQHCI does not halt, but also that an unhalted CQHCI
	 * should be disabled and re-enabled, while forbidding disabling
	 * before tasks are cleared. Have a go anyway.
	 */
	if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
		ok = false;

	/* Disable to make sure tasks really are cleared */
	cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
	cqcfg &= ~CQHCI_ENABLE;
	cqhci_writel(cq_host, cqcfg, CQHCI_CFG);

	cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
	cqcfg |= CQHCI_ENABLE;
	cqhci_writel(cq_host, cqcfg, CQHCI_CFG);

	cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);

	if (!ok)
		cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT);

	cqhci_recover_mrqs(cq_host);

	WARN_ON(cq_host->qcnt);

	spin_lock_irqsave(&cq_host->lock, flags);
	cq_host->qcnt = 0;
	cq_host->recovery_halt = false;
	mmc->cqe_on = false;
	spin_unlock_irqrestore(&cq_host->lock, flags);

	/* Ensure all writes are done before interrupts are re-enabled */
	wmb();

	cqhci_writel(cq_host, CQHCI_IS_HAC | CQHCI_IS_TCL, CQHCI_IS);

	cqhci_set_irqs(cq_host, CQHCI_IS_MASK);

	pr_debug("%s: cqhci: recovery done\n", mmc_hostname(mmc));
}

static const struct mmc_cqe_ops cqhci_cqe_ops = {
	.cqe_enable = cqhci_enable,
	.cqe_disable = cqhci_disable,
	.cqe_request = cqhci_request,
	.cqe_post_req = cqhci_post_req,
	.cqe_off = cqhci_off,
	.cqe_wait_for_idle = cqhci_wait_for_idle,
	.cqe_timeout = cqhci_timeout,
	.cqe_recovery_start = cqhci_recovery_start,
	.cqe_recovery_finish = cqhci_recovery_finish,
};

struct cqhci_host *cqhci_pltfm_init(struct platform_device *pdev)
{
	struct cqhci_host *cq_host;
	struct resource *cqhci_memres = NULL;

	/* check and setup CMDQ interface */
	cqhci_memres = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						    "cqhci");
	if (!cqhci_memres) {
		dev_dbg(&pdev->dev, "CMDQ not supported\n");
		return ERR_PTR(-EINVAL);
	}

	cq_host = devm_kzalloc(&pdev->dev, sizeof(*cq_host), GFP_KERNEL);
	if (!cq_host)
		return ERR_PTR(-ENOMEM);
	cq_host->mmio = devm_ioremap(&pdev->dev,
				     cqhci_memres->start,
				     resource_size(cqhci_memres));
	if (!cq_host->mmio) {
		dev_err(&pdev->dev, "failed to remap cqhci regs\n");
		return ERR_PTR(-EBUSY);
	}
	dev_dbg(&pdev->dev, "CMDQ ioremap: done\n");

	return cq_host;
}
EXPORT_SYMBOL(cqhci_pltfm_init);
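
/*
 * cqhci_pltfm_init() only requires that the platform device expose a memory
 * resource named "cqhci". As an illustrative (not normative) device-tree
 * fragment for a controller with a separate CQE register block:
 *
 *	mmc@8804000 {
 *		reg = <0x8804000 0x1000>, <0x8805000 0x100>;
 *		reg-names = "hc_mem", "cqhci";
 *		...
 *	};
 *
 * The glue driver still owns the mapping of its own registers; only the
 * "cqhci" window is mapped here.
 */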

static unsigned int cqhci_ver_major(struct cqhci_host *cq_host)
{
	return CQHCI_VER_MAJOR(cqhci_readl(cq_host, CQHCI_VER));
}

static unsigned int cqhci_ver_minor(struct cqhci_host *cq_host)
{
	u32 ver = cqhci_readl(cq_host, CQHCI_VER);

	return CQHCI_VER_MINOR1(ver) * 10 + CQHCI_VER_MINOR2(ver);
}

int cqhci_init(struct cqhci_host *cq_host, struct mmc_host *mmc,
	       bool dma64)
{
	int err;

	cq_host->dma64 = dma64;
	cq_host->mmc = mmc;
	cq_host->mmc->cqe_private = cq_host;

	cq_host->num_slots = NUM_SLOTS;
	cq_host->dcmd_slot = DCMD_SLOT;

	mmc->cqe_ops = &cqhci_cqe_ops;

	mmc->cqe_qdepth = NUM_SLOTS;
	if (mmc->caps2 & MMC_CAP2_CQE_DCMD)
		mmc->cqe_qdepth -= 1;

	cq_host->slot = devm_kcalloc(mmc_dev(mmc), cq_host->num_slots,
				     sizeof(*cq_host->slot), GFP_KERNEL);
	if (!cq_host->slot) {
		err = -ENOMEM;
		goto out_err;
	}

	err = cqhci_crypto_init(cq_host);
	if (err) {
		pr_err("%s: CQHCI crypto initialization failed\n",
		       mmc_hostname(mmc));
		goto out_err;
	}

	spin_lock_init(&cq_host->lock);

	init_completion(&cq_host->halt_comp);
	init_waitqueue_head(&cq_host->wait_queue);

	pr_info("%s: CQHCI version %u.%02u\n",
		mmc_hostname(mmc), cqhci_ver_major(cq_host),
		cqhci_ver_minor(cq_host));

	return 0;

out_err:
	pr_err("%s: CQHCI version %u.%02u failed to initialize, error %d\n",
	       mmc_hostname(mmc), cqhci_ver_major(cq_host),
	       cqhci_ver_minor(cq_host), err);
	return err;
}
EXPORT_SYMBOL(cqhci_init);
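
/*
 * Putting it together: a glue driver's probe path typically looks roughly
 * like the sketch below (hypothetical; error handling and controller-specific
 * setup omitted, and "dma64" depends on the host's capabilities):
 *
 *	cq_host = cqhci_pltfm_init(pdev);
 *	if (IS_ERR(cq_host))
 *		return PTR_ERR(cq_host);
 *	cq_host->ops = &my_cqhci_host_ops;
 *	mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
 *	ret = cqhci_init(cq_host, mmc, dma64);
 *
 * After this, the mmc core drives the controller through cqhci_cqe_ops, and
 * descriptor memory is only allocated on first use via cqhci_enable().
 */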

MODULE_AUTHOR("Venkat Gopalakrishnan <venkatg@codeaurora.org>");
MODULE_DESCRIPTION("Command Queue Host Controller Interface driver");
MODULE_LICENSE("GPL v2");