// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Synopsys DesignWare Multimedia Card Interface driver
 * (Based on NXP driver for lpc 31xx)
 *
 * Copyright (C) 2009 NXP Semiconductors
 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
 */

#include <linux/blkdev.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/ioport.h>
#include <linux/ktime.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/prandom.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
#include <linux/mmc/sdio.h>
#include <linux/bitops.h>
#include <linux/regulator/consumer.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/mmc/slot-gpio.h>

#include "dw_mmc.h"

/* Common flag combinations */
#define DW_MCI_DATA_ERROR_FLAGS	(SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
				 SDMMC_INT_HTO | SDMMC_INT_SBE  | \
				 SDMMC_INT_EBE | SDMMC_INT_HLE)
#define DW_MCI_CMD_ERROR_FLAGS	(SDMMC_INT_RTO | SDMMC_INT_RCRC | \
				 SDMMC_INT_RESP_ERR | SDMMC_INT_HLE)
#define DW_MCI_ERROR_FLAGS	(DW_MCI_DATA_ERROR_FLAGS | \
				 DW_MCI_CMD_ERROR_FLAGS)
#define DW_MCI_SEND_STATUS	1
#define DW_MCI_RECV_STATUS	2
#define DW_MCI_DMA_THRESHOLD	16

#define DW_MCI_FREQ_MAX	200000000	/* unit: HZ */
#define DW_MCI_FREQ_MIN	100000		/* unit: HZ */

#define IDMAC_INT_CLR		(SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
				 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
				 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
				 SDMMC_IDMAC_INT_TI)

#define DESC_RING_BUF_SZ	PAGE_SIZE

struct idmac_desc_64addr {
	u32		des0;	/* Control Descriptor */
#define IDMAC_OWN_CLR64(x) \
	!((x) & cpu_to_le32(IDMAC_DES0_OWN))

	u32		des1;	/* Reserved */

	u32		des2;	/* Buffer sizes */
#define IDMAC_64ADDR_SET_BUFFER1_SIZE(d, s) \
	((d)->des2 = ((d)->des2 & cpu_to_le32(0x03ffe000)) | \
	 ((cpu_to_le32(s)) & cpu_to_le32(0x1fff)))

	u32		des3;	/* Reserved */

	u32		des4;	/* Lower 32-bits of Buffer Address Pointer 1*/
	u32		des5;	/* Upper 32-bits of Buffer Address Pointer 1*/

	u32		des6;	/* Lower 32-bits of Next Descriptor Address */
	u32		des7;	/* Upper 32-bits of Next Descriptor Address */
};

struct idmac_desc {
	__le32		des0;	/* Control Descriptor */
#define IDMAC_DES0_DIC	BIT(1)
#define IDMAC_DES0_LD	BIT(2)
#define IDMAC_DES0_FD	BIT(3)
#define IDMAC_DES0_CH	BIT(4)
#define IDMAC_DES0_ER	BIT(5)
#define IDMAC_DES0_CES	BIT(30)
#define IDMAC_DES0_OWN	BIT(31)

	__le32		des1;	/* Buffer sizes */
#define IDMAC_SET_BUFFER1_SIZE(d, s) \
	((d)->des1 = ((d)->des1 & cpu_to_le32(0x03ffe000)) | (cpu_to_le32((s) & 0x1fff)))
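	/*
	 * Note: the buffer-1 size lives in des1[12:0], so one descriptor
	 * can name at most 8191 bytes; the driver caps it at 4 KiB
	 * (DW_MCI_DESC_DATA_LENGTH below), presumably to keep each chunk
	 * page-sized.
	 */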

	__le32		des2;	/* buffer 1 physical address */

	__le32		des3;	/* buffer 2 physical address */
};

/* Each descriptor can transfer up to 4KB of data in chained mode */
#define DW_MCI_DESC_DATA_LENGTH	0x1000
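
/*
 * A sizing sketch (assuming 4 KiB pages; PAGE_SIZE may be larger): with
 * DESC_RING_BUF_SZ == PAGE_SIZE, the ring holds 4096/16 = 256 of the 32-bit
 * descriptors or 4096/32 = 128 of the 64-bit ones, i.e. up to 1 MiB
 * (respectively 512 KiB) in flight per DMA transfer.
 */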

#if defined(CONFIG_DEBUG_FS)
static int dw_mci_req_show(struct seq_file *s, void *v)
{
	struct dw_mci_slot *slot = s->private;
	struct mmc_request *mrq;
	struct mmc_command *cmd;
	struct mmc_command *stop;
	struct mmc_data *data;

	/* Make sure we get a consistent snapshot */
	spin_lock_bh(&slot->host->lock);
	mrq = slot->mrq;

	if (mrq) {
		cmd = mrq->cmd;
		data = mrq->data;
		stop = mrq->stop;

		if (cmd)
			seq_printf(s,
				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
				   cmd->opcode, cmd->arg, cmd->flags,
				   cmd->resp[0], cmd->resp[1], cmd->resp[2],
				   cmd->resp[3], cmd->error);
		if (data)
			seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
				   data->bytes_xfered, data->blocks,
				   data->blksz, data->flags, data->error);
		if (stop)
			seq_printf(s,
				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
				   stop->opcode, stop->arg, stop->flags,
				   stop->resp[0], stop->resp[1], stop->resp[2],
				   stop->resp[3], stop->error);
	}

	spin_unlock_bh(&slot->host->lock);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(dw_mci_req);

static int dw_mci_regs_show(struct seq_file *s, void *v)
{
	struct dw_mci *host = s->private;

	pm_runtime_get_sync(host->dev);

	seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS));
	seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS));
	seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD));
	seq_printf(s, "CTRL:\t0x%08x\n", mci_readl(host, CTRL));
	seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK));
	seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA));

	pm_runtime_put_autosuspend(host->dev);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(dw_mci_regs);

static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
{
	struct mmc_host *mmc = slot->mmc;
	struct dw_mci *host = slot->host;
	struct dentry *root;

	root = mmc->debugfs_root;
	if (!root)
		return;

	debugfs_create_file("regs", S_IRUSR, root, host, &dw_mci_regs_fops);
	debugfs_create_file("req", S_IRUSR, root, slot, &dw_mci_req_fops);
	debugfs_create_u32("state", S_IRUSR, root, &host->state);
	debugfs_create_xul("pending_events", S_IRUSR, root,
			   &host->pending_events);
	debugfs_create_xul("completed_events", S_IRUSR, root,
			   &host->completed_events);
#ifdef CONFIG_FAULT_INJECTION
	fault_create_debugfs_attr("fail_data_crc", root, &host->fail_data_crc);
#endif
}
#endif /* defined(CONFIG_DEBUG_FS) */

static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
{
	u32 ctrl;

	ctrl = mci_readl(host, CTRL);
	ctrl |= reset;
	mci_writel(host, CTRL, ctrl);

	/* wait till resets clear */
	if (readl_poll_timeout_atomic(host->regs + SDMMC_CTRL, ctrl,
				      !(ctrl & reset),
				      1, 500 * USEC_PER_MSEC)) {
		dev_err(host->dev,
			"Timeout resetting block (ctrl reset %#x)\n",
			ctrl & reset);
		return false;
	}

	return true;
}

static void dw_mci_wait_while_busy(struct dw_mci *host, u32 cmd_flags)
{
	u32 status;

	/*
	 * Databook says that before issuing a new data transfer command
	 * we need to check to see if the card is busy. Data transfer commands
	 * all have SDMMC_CMD_PRV_DAT_WAIT set, so we'll key off that.
	 *
	 * ...also allow sending for SDMMC_CMD_VOLT_SWITCH where busy is
	 * expected.
	 */
	if ((cmd_flags & SDMMC_CMD_PRV_DAT_WAIT) &&
	    !(cmd_flags & SDMMC_CMD_VOLT_SWITCH)) {
		if (readl_poll_timeout_atomic(host->regs + SDMMC_STATUS,
					      status,
					      !(status & SDMMC_STATUS_BUSY),
					      10, 500 * USEC_PER_MSEC))
			dev_err(host->dev, "Busy; trying anyway\n");
	}
}

static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
{
	struct dw_mci *host = slot->host;
	unsigned int cmd_status = 0;

	mci_writel(host, CMDARG, arg);
	wmb(); /* drain writebuffer */
	dw_mci_wait_while_busy(host, cmd);
	mci_writel(host, CMD, SDMMC_CMD_START | cmd);

	if (readl_poll_timeout_atomic(host->regs + SDMMC_CMD, cmd_status,
				      !(cmd_status & SDMMC_CMD_START),
				      1, 500 * USEC_PER_MSEC))
		dev_err(&slot->mmc->class_dev,
			"Timeout sending command (cmd %#x arg %#x status %#x)\n",
			cmd, arg, cmd_status);
}

static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	u32 cmdr;

	cmd->error = -EINPROGRESS;
	cmdr = cmd->opcode;

	if (cmd->opcode == MMC_STOP_TRANSMISSION ||
	    cmd->opcode == MMC_GO_IDLE_STATE ||
	    cmd->opcode == MMC_GO_INACTIVE_STATE ||
	    (cmd->opcode == SD_IO_RW_DIRECT &&
	     ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
		cmdr |= SDMMC_CMD_STOP;
	else if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
		cmdr |= SDMMC_CMD_PRV_DAT_WAIT;

	if (cmd->opcode == SD_SWITCH_VOLTAGE) {
		u32 clk_en_a;

		/* Special bit makes CMD11 not die */
		cmdr |= SDMMC_CMD_VOLT_SWITCH;

		/* Change state to continue to handle CMD11 weirdness */
		WARN_ON(slot->host->state != STATE_SENDING_CMD);
		slot->host->state = STATE_SENDING_CMD11;

		/*
		 * We need to disable low power mode (automatic clock stop)
		 * while doing voltage switch so we don't confuse the card,
		 * since stopping the clock is a specific part of the UHS
		 * voltage change dance.
		 *
		 * Note that low power mode (SDMMC_CLKEN_LOW_PWR) will be
		 * unconditionally turned back on in dw_mci_setup_bus() if it's
		 * ever called with a non-zero clock. That shouldn't happen
		 * until the voltage change is all done.
		 */
		clk_en_a = mci_readl(host, CLKENA);
		clk_en_a &= ~(SDMMC_CLKEN_LOW_PWR << slot->id);
		mci_writel(host, CLKENA, clk_en_a);
		mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
			     SDMMC_CMD_PRV_DAT_WAIT, 0);
	}

	if (cmd->flags & MMC_RSP_PRESENT) {
		/* We expect a response, so set this bit */
		cmdr |= SDMMC_CMD_RESP_EXP;
		if (cmd->flags & MMC_RSP_136)
			cmdr |= SDMMC_CMD_RESP_LONG;
	}

	if (cmd->flags & MMC_RSP_CRC)
		cmdr |= SDMMC_CMD_RESP_CRC;
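
	/*
	 * For example: an R1 response ends up as RESP_EXP | RESP_CRC, the
	 * 136-bit R2 adds RESP_LONG, and R3 (OCR) sets RESP_EXP without
	 * RESP_CRC since that response carries no valid CRC.
	 */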

	if (cmd->data) {
		cmdr |= SDMMC_CMD_DAT_EXP;
		if (cmd->data->flags & MMC_DATA_WRITE)
			cmdr |= SDMMC_CMD_DAT_WR;
	}

	if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &slot->flags))
		cmdr |= SDMMC_CMD_USE_HOLD_REG;

	return cmdr;
}

static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
{
	struct mmc_command *stop;
	u32 cmdr;

	if (!cmd->data)
		return 0;

	stop = &host->stop_abort;
	cmdr = cmd->opcode;
	memset(stop, 0, sizeof(struct mmc_command));

	if (cmdr == MMC_READ_SINGLE_BLOCK ||
	    cmdr == MMC_READ_MULTIPLE_BLOCK ||
	    cmdr == MMC_WRITE_BLOCK ||
	    cmdr == MMC_WRITE_MULTIPLE_BLOCK ||
	    mmc_op_tuning(cmdr) ||
	    cmdr == MMC_GEN_CMD) {
		stop->opcode = MMC_STOP_TRANSMISSION;
		stop->arg = 0;
		stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
	} else if (cmdr == SD_IO_RW_EXTENDED) {
		stop->opcode = SD_IO_RW_DIRECT;
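		/*
		 * Build the CMD52 (I/O abort) argument: bit 31 selects a
		 * write, bits 30:28 the function number (0 = CCCR), bits
		 * 25:9 the register address (SDIO_CCCR_ABORT), and the
		 * data byte in bits 7:0 names the function to abort,
		 * taken from the function field of the failed CMD53.
		 */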
		stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
			     ((cmd->arg >> 28) & 0x7);
		stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
	} else {
		return 0;
	}

	cmdr = stop->opcode | SDMMC_CMD_STOP |
	       SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;

	if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &host->slot->flags))
		cmdr |= SDMMC_CMD_USE_HOLD_REG;

	return cmdr;
}

static inline void dw_mci_set_cto(struct dw_mci *host)
{
	unsigned int cto_clks;
	unsigned int cto_div;
	unsigned int cto_ms;
	unsigned long irqflags;

	cto_clks = mci_readl(host, TMOUT) & 0xff;
	cto_div = (mci_readl(host, CLKDIV) & 0xff) * 2;
	if (cto_div == 0)
		cto_div = 1;

	cto_ms = DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * cto_clks * cto_div,
				  host->bus_hz);
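
	/*
	 * Worked example with assumed values: TMOUT[7:0] at its reset
	 * maximum of 0xff, CLKDIV = 1 (so cto_div = 2) and bus_hz =
	 * 100 MHz give cto_ms = ceil(1000 * 255 * 2 / 100000000) = 1 ms.
	 */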

	/* add a bit spare time */
	cto_ms += 10;

	/*
	 * The durations we're working with are fairly short so we have to be
	 * extra careful about synchronization here. Specifically in hardware a
	 * command timeout is _at most_ 5.1 ms, so that means we expect an
	 * interrupt (either command done or timeout) to come rather quickly
	 * after the mci_writel. ...but just in case we have a long interrupt
	 * latency let's add a bit of paranoia.
	 *
	 * In general we'll assume that at least an interrupt will be asserted
	 * in hardware by the time the cto_timer runs. ...and if it hasn't
	 * been asserted in hardware by that time then we'll assume it'll never
	 * come.
	 */
	spin_lock_irqsave(&host->irq_lock, irqflags);
	if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
		mod_timer(&host->cto_timer,
			  jiffies + msecs_to_jiffies(cto_ms) + 1);
	spin_unlock_irqrestore(&host->irq_lock, irqflags);
}

static void dw_mci_start_command(struct dw_mci *host,
				 struct mmc_command *cmd, u32 cmd_flags)
{
	host->cmd = cmd;
	dev_vdbg(host->dev,
		 "start command: ARGR=0x%08x CMDR=0x%08x\n",
		 cmd->arg, cmd_flags);

	mci_writel(host, CMDARG, cmd->arg);
	wmb(); /* drain writebuffer */
	dw_mci_wait_while_busy(host, cmd_flags);

	mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);

	/* response expected command only */
	if (cmd_flags & SDMMC_CMD_RESP_EXP)
		dw_mci_set_cto(host);
}

static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
{
	struct mmc_command *stop = &host->stop_abort;

	dw_mci_start_command(host, stop, host->stop_cmdr);
}

/* DMA interface functions */
static void dw_mci_stop_dma(struct dw_mci *host)
{
	if (host->using_dma) {
		host->dma_ops->stop(host);
		host->dma_ops->cleanup(host);
	}

	/* Data transfer was stopped by the interrupt handler */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}

static void dw_mci_dma_cleanup(struct dw_mci *host)
{
	struct mmc_data *data = host->data;

	if (data && data->host_cookie == COOKIE_MAPPED) {
		dma_unmap_sg(host->dev,
			     data->sg,
			     data->sg_len,
			     mmc_get_dma_dir(data));
		data->host_cookie = COOKIE_UNMAPPED;
	}
}

static void dw_mci_idmac_reset(struct dw_mci *host)
{
	u32 bmod = mci_readl(host, BMOD);
	/* Software reset of DMA */
	bmod |= SDMMC_IDMAC_SWRESET;
	mci_writel(host, BMOD, bmod);
}

static void dw_mci_idmac_stop_dma(struct dw_mci *host)
{
	u32 temp;

	/* Disable and reset the IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp &= ~SDMMC_CTRL_USE_IDMAC;
	temp |= SDMMC_CTRL_DMA_RESET;
	mci_writel(host, CTRL, temp);

	/* Stop the IDMAC running */
	temp = mci_readl(host, BMOD);
	temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
	temp |= SDMMC_IDMAC_SWRESET;
	mci_writel(host, BMOD, temp);
}

static void dw_mci_dmac_complete_dma(void *arg)
{
	struct dw_mci *host = arg;
	struct mmc_data *data = host->data;

	dev_vdbg(host->dev, "DMA complete\n");

	if ((host->use_dma == TRANS_MODE_EDMAC) &&
	    data && (data->flags & MMC_DATA_READ))
		/* Invalidate cache after read */
		dma_sync_sg_for_cpu(mmc_dev(host->slot->mmc),
				    data->sg,
				    data->sg_len,
				    DMA_FROM_DEVICE);

	host->dma_ops->cleanup(host);

	/*
	 * If the card was removed, data will be NULL. No point in trying to
	 * send the stop command or waiting for NBUSY in this case.
	 */
	if (data) {
		set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
		tasklet_schedule(&host->tasklet);
	}
}
static int dw_mci_idmac_init(struct dw_mci *host)
{
	int i;

	if (host->dma_64bit_address == 1) {
		struct idmac_desc_64addr *p;
		/* Number of descriptors in the ring buffer */
		host->ring_size =
			DESC_RING_BUF_SZ / sizeof(struct idmac_desc_64addr);

		/* Forward link the descriptor list */
		for (i = 0, p = host->sg_cpu; i < host->ring_size - 1;
		     i++, p++) {
			p->des6 = (host->sg_dma +
				   (sizeof(struct idmac_desc_64addr) *
				    (i + 1))) & 0xffffffff;

			p->des7 = (u64)(host->sg_dma +
					(sizeof(struct idmac_desc_64addr) *
					 (i + 1))) >> 32;
			/* Initialize reserved and buffer size fields to "0" */
			p->des0 = 0;
			p->des1 = 0;
			p->des2 = 0;
			p->des3 = 0;
		}

		/* Set the last descriptor as the end-of-ring descriptor */
		p->des6 = host->sg_dma & 0xffffffff;
		p->des7 = (u64)host->sg_dma >> 32;
		p->des0 = IDMAC_DES0_ER;

	} else {
		struct idmac_desc *p;
		/* Number of descriptors in the ring buffer */
		host->ring_size =
			DESC_RING_BUF_SZ / sizeof(struct idmac_desc);

		/* Forward link the descriptor list */
		for (i = 0, p = host->sg_cpu;
		     i < host->ring_size - 1;
		     i++, p++) {
			p->des3 = cpu_to_le32(host->sg_dma +
					(sizeof(struct idmac_desc) * (i + 1)));
			p->des0 = 0;
			p->des1 = 0;
		}

		/* Set the last descriptor as the end-of-ring descriptor */
		p->des3 = cpu_to_le32(host->sg_dma);
		p->des0 = cpu_to_le32(IDMAC_DES0_ER);
	}

	dw_mci_idmac_reset(host);

	if (host->dma_64bit_address == 1) {
		/* Mask out interrupts - get Tx & Rx complete only */
		mci_writel(host, IDSTS64, IDMAC_INT_CLR);
		mci_writel(host, IDINTEN64, SDMMC_IDMAC_INT_NI |
			   SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);

		/* Set the descriptor base address */
		mci_writel(host, DBADDRL, host->sg_dma & 0xffffffff);
		mci_writel(host, DBADDRU, (u64)host->sg_dma >> 32);

	} else {
		/* Mask out interrupts - get Tx & Rx complete only */
		mci_writel(host, IDSTS, IDMAC_INT_CLR);
		mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI |
			   SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);

		/* Set the descriptor base address */
		mci_writel(host, DBADDR, host->sg_dma);
	}

	return 0;
}

static inline int dw_mci_prepare_desc64(struct dw_mci *host,
					struct mmc_data *data,
					unsigned int sg_len)
{
	unsigned int desc_len;
	struct idmac_desc_64addr *desc_first, *desc_last, *desc;
	u32 val;
	int i;

	desc_first = desc_last = desc = host->sg_cpu;

	for (i = 0; i < sg_len; i++) {
		unsigned int length = sg_dma_len(&data->sg[i]);

		u64 mem_addr = sg_dma_address(&data->sg[i]);

		for ( ; length ; desc++) {
			desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
				   length : DW_MCI_DESC_DATA_LENGTH;

			length -= desc_len;

			/*
			 * Wait for the former clear OWN bit operation
			 * of IDMAC to make sure that this descriptor
			 * isn't still owned by IDMAC as IDMAC's write
			 * ops and CPU's read ops are asynchronous.
			 */
			if (readl_poll_timeout_atomic(&desc->des0, val,
						!(val & IDMAC_DES0_OWN),
						10, 100 * USEC_PER_MSEC))
				goto err_own_bit;

			/*
			 * Set the OWN bit and disable interrupts
			 * for this descriptor
			 */
			desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC |
				     IDMAC_DES0_CH;

			/* Buffer length */
			IDMAC_64ADDR_SET_BUFFER1_SIZE(desc, desc_len);

			/* Physical address to DMA to/from */
			desc->des4 = mem_addr & 0xffffffff;
			desc->des5 = mem_addr >> 32;

			/* Update physical address for the next desc */
			mem_addr += desc_len;

			/* Save pointer to the last descriptor */
			desc_last = desc;
		}
	}

	/* Set first descriptor */
	desc_first->des0 |= IDMAC_DES0_FD;

	/* Set last descriptor */
	desc_last->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
	desc_last->des0 |= IDMAC_DES0_LD;

	return 0;
err_own_bit:
	/* restore the descriptor chain as it's polluted */
	dev_dbg(host->dev, "descriptor is still owned by IDMAC.\n");
	memset(host->sg_cpu, 0, DESC_RING_BUF_SZ);
	dw_mci_idmac_init(host);
	return -EINVAL;
}


static inline int dw_mci_prepare_desc32(struct dw_mci *host,
					struct mmc_data *data,
					unsigned int sg_len)
{
	unsigned int desc_len;
	struct idmac_desc *desc_first, *desc_last, *desc;
	u32 val;
	int i;

	desc_first = desc_last = desc = host->sg_cpu;

	for (i = 0; i < sg_len; i++) {
		unsigned int length = sg_dma_len(&data->sg[i]);

		u32 mem_addr = sg_dma_address(&data->sg[i]);

		for ( ; length ; desc++) {
			desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
				   length : DW_MCI_DESC_DATA_LENGTH;

			length -= desc_len;

			/*
			 * Wait for the former clear OWN bit operation
			 * of IDMAC to make sure that this descriptor
			 * isn't still owned by IDMAC as IDMAC's write
			 * ops and CPU's read ops are asynchronous.
			 */
			if (readl_poll_timeout_atomic(&desc->des0, val,
						      IDMAC_OWN_CLR64(val),
						      10,
						      100 * USEC_PER_MSEC))
				goto err_own_bit;

			/*
			 * Set the OWN bit and disable interrupts
			 * for this descriptor
			 */
			desc->des0 = cpu_to_le32(IDMAC_DES0_OWN |
						 IDMAC_DES0_DIC |
						 IDMAC_DES0_CH);

			/* Buffer length */
			IDMAC_SET_BUFFER1_SIZE(desc, desc_len);

			/* Physical address to DMA to/from */
			desc->des2 = cpu_to_le32(mem_addr);

			/* Update physical address for the next desc */
			mem_addr += desc_len;

			/* Save pointer to the last descriptor */
			desc_last = desc;
		}
	}

	/* Set first descriptor */
	desc_first->des0 |= cpu_to_le32(IDMAC_DES0_FD);

	/* Set last descriptor */
	desc_last->des0 &= cpu_to_le32(~(IDMAC_DES0_CH |
					 IDMAC_DES0_DIC));
	desc_last->des0 |= cpu_to_le32(IDMAC_DES0_LD);

	return 0;
err_own_bit:
	/* restore the descriptor chain as it's polluted */
	dev_dbg(host->dev, "descriptor is still owned by IDMAC.\n");
	memset(host->sg_cpu, 0, DESC_RING_BUF_SZ);
	dw_mci_idmac_init(host);
	return -EINVAL;
}

static int dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
{
	u32 temp;
	int ret;

	if (host->dma_64bit_address == 1)
		ret = dw_mci_prepare_desc64(host, host->data, sg_len);
	else
		ret = dw_mci_prepare_desc32(host, host->data, sg_len);

	if (ret)
		goto out;

	/* drain writebuffer */
	wmb();

	/* Make sure to reset DMA in case we did PIO before this */
	dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);
	dw_mci_idmac_reset(host);

	/* Select IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_USE_IDMAC;
	mci_writel(host, CTRL, temp);

	/* drain writebuffer */
	wmb();

	/* Enable the IDMAC */
	temp = mci_readl(host, BMOD);
	temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
	mci_writel(host, BMOD, temp);

	/* Start it running */
	mci_writel(host, PLDMND, 1);

out:
	return ret;
}

static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
	.init = dw_mci_idmac_init,
	.start = dw_mci_idmac_start_dma,
	.stop = dw_mci_idmac_stop_dma,
	.complete = dw_mci_dmac_complete_dma,
	.cleanup = dw_mci_dma_cleanup,
};

static void dw_mci_edmac_stop_dma(struct dw_mci *host)
{
	dmaengine_terminate_async(host->dms->ch);
}

static int dw_mci_edmac_start_dma(struct dw_mci *host,
				  unsigned int sg_len)
{
	struct dma_slave_config cfg;
	struct dma_async_tx_descriptor *desc = NULL;
	struct scatterlist *sgl = host->data->sg;
	static const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
	u32 sg_elems = host->data->sg_len;
	u32 fifoth_val;
	u32 fifo_offset = host->fifo_reg - host->regs;
	int ret = 0;

	/* Set external dma config: burst size, burst width */
	memset(&cfg, 0, sizeof(cfg));
	cfg.dst_addr = host->phy_regs + fifo_offset;
	cfg.src_addr = cfg.dst_addr;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	/* Match burst msize with external dma config */
	fifoth_val = mci_readl(host, FIFOTH);
	cfg.dst_maxburst = mszs[(fifoth_val >> 28) & 0x7];
	cfg.src_maxburst = cfg.dst_maxburst;
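
	/*
	 * FIFOTH[30:28] holds the DMA multiple-transaction size as an
	 * index into mszs[], e.g. a value of 3 means bursts of 16
	 * transfers, so the slave channel's burst length matches what
	 * the controller was programmed to request.
	 */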

	if (host->data->flags & MMC_DATA_WRITE)
		cfg.direction = DMA_MEM_TO_DEV;
	else
		cfg.direction = DMA_DEV_TO_MEM;

	ret = dmaengine_slave_config(host->dms->ch, &cfg);
	if (ret) {
		dev_err(host->dev, "Failed to config edmac.\n");
		return -EBUSY;
	}

	desc = dmaengine_prep_slave_sg(host->dms->ch, sgl,
				       sg_len, cfg.direction,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dev_err(host->dev, "Can't prepare slave sg.\n");
		return -EBUSY;
	}

	/* Set dw_mci_dmac_complete_dma as callback */
	desc->callback = dw_mci_dmac_complete_dma;
	desc->callback_param = (void *)host;
	dmaengine_submit(desc);

	/* Flush cache before write */
	if (host->data->flags & MMC_DATA_WRITE)
		dma_sync_sg_for_device(mmc_dev(host->slot->mmc), sgl,
				       sg_elems, DMA_TO_DEVICE);

	dma_async_issue_pending(host->dms->ch);

	return 0;
}

static int dw_mci_edmac_init(struct dw_mci *host)
{
	/* Request external dma channel */
	host->dms = kzalloc(sizeof(struct dw_mci_dma_slave), GFP_KERNEL);
	if (!host->dms)
		return -ENOMEM;

	host->dms->ch = dma_request_chan(host->dev, "rx-tx");
	if (IS_ERR(host->dms->ch)) {
		int ret = PTR_ERR(host->dms->ch);

		dev_err(host->dev, "Failed to get external DMA channel.\n");
		kfree(host->dms);
		host->dms = NULL;
		return ret;
	}

	return 0;
}

static void dw_mci_edmac_exit(struct dw_mci *host)
{
	if (host->dms) {
		if (host->dms->ch) {
			dma_release_channel(host->dms->ch);
			host->dms->ch = NULL;
		}
		kfree(host->dms);
		host->dms = NULL;
	}
}

static const struct dw_mci_dma_ops dw_mci_edmac_ops = {
	.init = dw_mci_edmac_init,
	.exit = dw_mci_edmac_exit,
	.start = dw_mci_edmac_start_dma,
	.stop = dw_mci_edmac_stop_dma,
	.complete = dw_mci_dmac_complete_dma,
	.cleanup = dw_mci_dma_cleanup,
};

static int dw_mci_pre_dma_transfer(struct dw_mci *host,
				   struct mmc_data *data,
				   int cookie)
{
	struct scatterlist *sg;
	unsigned int i, sg_len;

	if (data->host_cookie == COOKIE_PRE_MAPPED)
		return data->sg_len;

	/*
	 * We don't do DMA on "complex" transfers, i.e. with
	 * non-word-aligned buffers or lengths. Also, we don't bother
	 * with all the DMA setup overhead for short transfers.
	 */
	if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
		return -EINVAL;

	if (data->blksz & 3)
		return -EINVAL;

	for_each_sg(data->sg, sg, data->sg_len, i) {
		if (sg->offset & 3 || sg->length & 3)
			return -EINVAL;
	}

	sg_len = dma_map_sg(host->dev,
			    data->sg,
			    data->sg_len,
			    mmc_get_dma_dir(data));
	if (sg_len == 0)
		return -EINVAL;

	data->host_cookie = cookie;

	return sg_len;
}

static void dw_mci_pre_req(struct mmc_host *mmc,
			   struct mmc_request *mrq)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	/* This data might be unmapped at this time */
	data->host_cookie = COOKIE_UNMAPPED;

	if (dw_mci_pre_dma_transfer(slot->host, mrq->data,
				    COOKIE_PRE_MAPPED) < 0)
		data->host_cookie = COOKIE_UNMAPPED;
}

static void dw_mci_post_req(struct mmc_host *mmc,
			    struct mmc_request *mrq,
			    int err)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	if (data->host_cookie != COOKIE_UNMAPPED)
		dma_unmap_sg(slot->host->dev,
			     data->sg,
			     data->sg_len,
			     mmc_get_dma_dir(data));
	data->host_cookie = COOKIE_UNMAPPED;
}

static int dw_mci_get_cd(struct mmc_host *mmc)
{
	int present;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	int gpio_cd = mmc_gpio_get_cd(mmc);

	/* Use platform get_cd function, else try onboard card detect */
	if (((mmc->caps & MMC_CAP_NEEDS_POLL)
	     || !mmc_card_is_removable(mmc))) {
		present = 1;

		if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
			if (mmc->caps & MMC_CAP_NEEDS_POLL) {
				dev_info(&mmc->class_dev,
					 "card is polling.\n");
			} else {
				dev_info(&mmc->class_dev,
					 "card is non-removable.\n");
			}
			set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
		}

		return present;
	} else if (gpio_cd >= 0)
		present = gpio_cd;
	else
		present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
			== 0 ? 1 : 0;

	spin_lock_bh(&host->lock);
	if (present && !test_and_set_bit(DW_MMC_CARD_PRESENT, &slot->flags))
		dev_dbg(&mmc->class_dev, "card is present\n");
	else if (!present &&
		 !test_and_clear_bit(DW_MMC_CARD_PRESENT, &slot->flags))
		dev_dbg(&mmc->class_dev, "card is not present\n");
	spin_unlock_bh(&host->lock);

	return present;
}

static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
{
	unsigned int blksz = data->blksz;
	static const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
	u32 fifo_width = 1 << host->data_shift;
	u32 blksz_depth = blksz / fifo_width, fifoth_val;
	u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
	int idx = ARRAY_SIZE(mszs) - 1;
	/* PIO should skip this scenario */
	if (!host->use_dma)
		return;

	tx_wmark = (host->fifo_depth) / 2;
	tx_wmark_invers = host->fifo_depth - tx_wmark;

	/*
	 * MSIZE is '1',
	 * if blksz is not a multiple of the FIFO width
	 */
	if (blksz % fifo_width)
		goto done;

	do {
		if (!((blksz_depth % mszs[idx]) ||
		      (tx_wmark_invers % mszs[idx]))) {
			msize = idx;
			rx_wmark = mszs[idx] - 1;
			break;
		}
	} while (--idx > 0);
	/*
	 * If idx drops to '0', no larger msize matched,
	 * so the initial values are used.
	 */
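	/*
	 * Worked example with assumed values: fifo_depth = 32, fifo_width
	 * = 4 and blksz = 512 give blksz_depth = 128 and tx_wmark_invers
	 * = 16; the largest msize dividing both is 16 (idx 3), so
	 * msize = 3, rx_wmark = 15 and tx_wmark = 16.
	 */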
done:
	fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
	mci_writel(host, FIFOTH, fifoth_val);
}

static void dw_mci_ctrl_thld(struct dw_mci *host, struct mmc_data *data)
{
	unsigned int blksz = data->blksz;
	u32 blksz_depth, fifo_depth;
	u16 thld_size;
	u8 enable;

	/*
	 * CDTHRCTL doesn't exist prior to 240A (in fact that register offset is
	 * in the FIFO region, so we really shouldn't access it).
	 */
	if (host->verid < DW_MMC_240A ||
	    (host->verid < DW_MMC_280A && data->flags & MMC_DATA_WRITE))
		return;

	/*
	 * Card write Threshold is introduced since 2.80a
	 * It's used when HS400 mode is enabled.
	 */
	if (data->flags & MMC_DATA_WRITE &&
	    host->timing != MMC_TIMING_MMC_HS400)
		goto disable;

	if (data->flags & MMC_DATA_WRITE)
		enable = SDMMC_CARD_WR_THR_EN;
	else
		enable = SDMMC_CARD_RD_THR_EN;

	if (host->timing != MMC_TIMING_MMC_HS200 &&
	    host->timing != MMC_TIMING_UHS_SDR104 &&
	    host->timing != MMC_TIMING_MMC_HS400)
		goto disable;

	blksz_depth = blksz / (1 << host->data_shift);
	fifo_depth = host->fifo_depth;
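
	/*
	 * E.g. blksz = 512 on a 32-bit FIFO (data_shift = 2) needs a
	 * blksz_depth of 128 FIFO words, so the threshold is only usable
	 * when the FIFO is at least that deep.
	 */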

	if (blksz_depth > fifo_depth)
		goto disable;

	/*
	 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
	 * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz
	 * Currently just choose blksz.
	 */
	thld_size = blksz;
	mci_writel(host, CDTHRCTL, SDMMC_SET_THLD(thld_size, enable));
	return;

disable:
	mci_writel(host, CDTHRCTL, 0);
}

static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
{
	unsigned long irqflags;
	int sg_len;
	u32 temp;

	host->using_dma = 0;

	/* If we don't have a channel, we can't do DMA */
	if (!host->use_dma)
		return -ENODEV;

	sg_len = dw_mci_pre_dma_transfer(host, data, COOKIE_MAPPED);
	if (sg_len < 0) {
		host->dma_ops->stop(host);
		return sg_len;
	}

	host->using_dma = 1;

	if (host->use_dma == TRANS_MODE_IDMAC)
		dev_vdbg(host->dev,
			 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
			 (unsigned long)host->sg_cpu,
			 (unsigned long)host->sg_dma,
			 sg_len);

	/*
	 * Decide the MSIZE and RX/TX Watermark.
	 * If current block size is same with previous size,
	 * no need to update fifoth.
	 */
	if (host->prev_blksz != data->blksz)
		dw_mci_adjust_fifoth(host, data);

	/* Enable the DMA interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_DMA_ENABLE;
	mci_writel(host, CTRL, temp);

	/* Disable RX/TX IRQs, let DMA handle it */
	spin_lock_irqsave(&host->irq_lock, irqflags);
	temp = mci_readl(host, INTMASK);
	temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
	mci_writel(host, INTMASK, temp);
	spin_unlock_irqrestore(&host->irq_lock, irqflags);

	if (host->dma_ops->start(host, sg_len)) {
		host->dma_ops->stop(host);
		/* We can't do DMA, try PIO for this one */
		dev_dbg(host->dev,
			"%s: fall back to PIO mode for current transfer\n",
			__func__);
		return -ENODEV;
	}

	return 0;
}

static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
{
	unsigned long irqflags;
	int flags = SG_MITER_ATOMIC;
	u32 temp;

	data->error = -EINPROGRESS;

	WARN_ON(host->data);
	host->sg = NULL;
	host->data = data;

	if (data->flags & MMC_DATA_READ)
		host->dir_status = DW_MCI_RECV_STATUS;
	else
		host->dir_status = DW_MCI_SEND_STATUS;

	dw_mci_ctrl_thld(host, data);

	if (dw_mci_submit_data_dma(host, data)) {
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;

		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->sg = data->sg;
		host->part_buf_start = 0;
		host->part_buf_count = 0;

		mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);

		spin_lock_irqsave(&host->irq_lock, irqflags);
		temp = mci_readl(host, INTMASK);
		temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
		mci_writel(host, INTMASK, temp);
		spin_unlock_irqrestore(&host->irq_lock, irqflags);

		temp = mci_readl(host, CTRL);
		temp &= ~SDMMC_CTRL_DMA_ENABLE;
		mci_writel(host, CTRL, temp);

		/*
		 * Use the initial fifoth_val for PIO mode. If wm_aligned
		 * is set, we set the watermark to match the data size.
		 * If the next transfer may be handled by DMA mode,
		 * prev_blksz should be invalidated.
		 */
		if (host->wm_aligned)
			dw_mci_adjust_fifoth(host, data);
		else
			mci_writel(host, FIFOTH, host->fifoth_val);
		host->prev_blksz = 0;
	} else {
		/*
		 * Keep the current block size.
		 * It will be used to decide whether to update
		 * fifoth register next time.
		 */
		host->prev_blksz = data->blksz;
	}
}

static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
{
	struct dw_mci *host = slot->host;
	unsigned int clock = slot->clock;
	u32 div;
	u32 clk_en_a;
	u32 sdmmc_cmd_bits = SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT;

	/* We must continue to set bit 28 in CMD until the change is complete */
	if (host->state == STATE_WAITING_CMD11_DONE)
		sdmmc_cmd_bits |= SDMMC_CMD_VOLT_SWITCH;

	slot->mmc->actual_clock = 0;

	if (!clock) {
		mci_writel(host, CLKENA, 0);
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);
	} else if (clock != host->current_speed || force_clkinit) {
		div = host->bus_hz / clock;
		if (host->bus_hz % clock && host->bus_hz > clock)
			/*
			 * move the + 1 after the divide to prevent
			 * over-clocking the card.
			 */
			div += 1;

		div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
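
		/*
		 * Example with assumed values: bus_hz = 200 MHz and a
		 * 50 MHz request give div = 4 before halving, so CLKDIV =
		 * 2 and the card clock is bus_hz / (2 * CLKDIV) = 50 MHz;
		 * a CLKDIV of 0 bypasses the divider entirely.
		 */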

		if ((clock != slot->__clk_old &&
		     !test_bit(DW_MMC_CARD_NEEDS_POLL, &slot->flags)) ||
		    force_clkinit) {
			/* Silence the verbose log if calling from PM context */
			if (!force_clkinit)
				dev_info(&slot->mmc->class_dev,
					 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHz div = %d)\n",
					 slot->id, host->bus_hz, clock,
					 div ? ((host->bus_hz / div) >> 1) :
					 host->bus_hz, div);

			/*
			 * If card is polling, display the message only
			 * one time at boot time.
			 */
			if (slot->mmc->caps & MMC_CAP_NEEDS_POLL &&
			    slot->mmc->f_min == clock)
				set_bit(DW_MMC_CARD_NEEDS_POLL, &slot->flags);
		}

		/* disable clock */
		mci_writel(host, CLKENA, 0);
		mci_writel(host, CLKSRC, 0);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* set clock to desired speed */
		mci_writel(host, CLKDIV, div);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* enable clock; only low power if no SDIO */
		clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
		if (!test_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags))
			clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
		mci_writel(host, CLKENA, clk_en_a);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* keep the last clock value that was requested from core */
		slot->__clk_old = clock;
		slot->mmc->actual_clock = div ? ((host->bus_hz / div) >> 1) :
					  host->bus_hz;
	}

	host->current_speed = clock;

	/* Set the current slot bus width */
	mci_writel(host, CTYPE, (slot->ctype << slot->id));
}

static void dw_mci_set_data_timeout(struct dw_mci *host,
				    unsigned int timeout_ns)
{
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	u32 clk_div, tmout;
	u64 tmp;

	if (drv_data && drv_data->set_data_timeout)
		return drv_data->set_data_timeout(host, timeout_ns);

	clk_div = (mci_readl(host, CLKDIV) & 0xFF) * 2;
	if (clk_div == 0)
		clk_div = 1;

	tmp = DIV_ROUND_UP_ULL((u64)timeout_ns * host->bus_hz, NSEC_PER_SEC);
	tmp = DIV_ROUND_UP_ULL(tmp, clk_div);
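
	/*
	 * Example with assumed values: timeout_ns = 100 ms, bus_hz =
	 * 50 MHz and CLKDIV = 1 (clk_div = 2) give tmp = 5000000 / 2 =
	 * 2500000 card clocks, well within the 24-bit TMOUT[31:8] field.
	 */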

	/* TMOUT[7:0] (RESPONSE_TIMEOUT) */
	tmout = 0xFF; /* Set maximum */

	/* TMOUT[31:8] (DATA_TIMEOUT) */
	if (!tmp || tmp > 0xFFFFFF)
		tmout |= (0xFFFFFF << 8);
	else
		tmout |= (tmp & 0xFFFFFF) << 8;

	mci_writel(host, TMOUT, tmout);
	dev_dbg(host->dev, "timeout_ns: %u => TMOUT[31:8]: %#08x",
		timeout_ns, tmout >> 8);
}

static void __dw_mci_start_request(struct dw_mci *host,
				   struct dw_mci_slot *slot,
				   struct mmc_command *cmd)
{
	struct mmc_request *mrq;
	struct mmc_data *data;
	u32 cmdflags;

	mrq = slot->mrq;

	host->mrq = mrq;

	host->pending_events = 0;
	host->completed_events = 0;
	host->cmd_status = 0;
	host->data_status = 0;
	host->dir_status = 0;

	data = cmd->data;
	if (data) {
		dw_mci_set_data_timeout(host, data->timeout_ns);
		mci_writel(host, BYTCNT, data->blksz*data->blocks);
		mci_writel(host, BLKSIZ, data->blksz);
	}

	cmdflags = dw_mci_prepare_command(slot->mmc, cmd);

	/* this is the first command, send the initialization clock */
	if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
		cmdflags |= SDMMC_CMD_INIT;

	if (data) {
		dw_mci_submit_data(host, data);
		wmb(); /* drain writebuffer */
	}

	dw_mci_start_command(host, cmd, cmdflags);

	if (cmd->opcode == SD_SWITCH_VOLTAGE) {
		unsigned long irqflags;

		/*
		 * Databook says to fail after 2ms w/ no response, but evidence
		 * shows that sometimes the cmd11 interrupt takes over 130ms.
		 * We'll set to 500ms, plus an extra jiffy just in case jiffies
		 * is just about to roll over.
		 *
		 * We do this whole thing under spinlock and only if the
		 * command hasn't already completed (indicating the irq
		 * already ran so we don't want the timeout).
		 */
		spin_lock_irqsave(&host->irq_lock, irqflags);
		if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
			mod_timer(&host->cmd11_timer,
				  jiffies + msecs_to_jiffies(500) + 1);
		spin_unlock_irqrestore(&host->irq_lock, irqflags);
	}

	host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
}

static void dw_mci_start_request(struct dw_mci *host,
				 struct dw_mci_slot *slot)
{
	struct mmc_request *mrq = slot->mrq;
	struct mmc_command *cmd;

	cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
	__dw_mci_start_request(host, slot, cmd);
}

/* must be called with host->lock held */
static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
				 struct mmc_request *mrq)
{
	dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
		 host->state);

	slot->mrq = mrq;

	if (host->state == STATE_WAITING_CMD11_DONE) {
		dev_warn(&slot->mmc->class_dev,
			 "Voltage change didn't complete\n");
		/*
		 * this case isn't expected to happen, so we can
		 * either crash here or just try to continue on
		 * in the closest possible state
		 */
		host->state = STATE_IDLE;
	}

	if (host->state == STATE_IDLE) {
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		list_add_tail(&slot->queue_node, &host->queue);
	}
}

static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;

	WARN_ON(slot->mrq);

	/*
	 * The check for card presence and queueing of the request must be
	 * atomic, otherwise the card could be removed in between and the
	 * request wouldn't fail until another card was inserted.
	 */

	if (!dw_mci_get_cd(mmc)) {
		mrq->cmd->error = -ENOMEDIUM;
		mmc_request_done(mmc, mrq);
		return;
	}

	spin_lock_bh(&host->lock);

	dw_mci_queue_request(host, slot, mrq);

	spin_unlock_bh(&host->lock);
}

static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
	u32 regs;
	int ret;

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_4:
		slot->ctype = SDMMC_CTYPE_4BIT;
		break;
	case MMC_BUS_WIDTH_8:
		slot->ctype = SDMMC_CTYPE_8BIT;
		break;
	default:
		/* set default 1 bit mode */
		slot->ctype = SDMMC_CTYPE_1BIT;
	}

	regs = mci_readl(slot->host, UHS_REG);

	/* DDR mode set */
	if (ios->timing == MMC_TIMING_MMC_DDR52 ||
	    ios->timing == MMC_TIMING_UHS_DDR50 ||
	    ios->timing == MMC_TIMING_MMC_HS400)
		regs |= ((0x1 << slot->id) << 16);
	else
		regs &= ~((0x1 << slot->id) << 16);

	mci_writel(slot->host, UHS_REG, regs);
	slot->host->timing = ios->timing;

	/*
	 * Use mirror of ios->clock to prevent race with mmc
	 * core ios update when finding the minimum.
	 */
	slot->clock = ios->clock;

	if (drv_data && drv_data->set_ios)
		drv_data->set_ios(slot->host, ios);

	switch (ios->power_mode) {
	case MMC_POWER_UP:
		if (!IS_ERR(mmc->supply.vmmc)) {
			ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
						    ios->vdd);
			if (ret) {
				dev_err(slot->host->dev,
					"failed to enable vmmc regulator\n");
				/* return if we failed to turn on vmmc */
				return;
			}
		}
		set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
		regs = mci_readl(slot->host, PWREN);
		regs |= (1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	case MMC_POWER_ON:
		if (!slot->host->vqmmc_enabled) {
			if (!IS_ERR(mmc->supply.vqmmc)) {
				ret = regulator_enable(mmc->supply.vqmmc);
				if (ret < 0)
					dev_err(slot->host->dev,
						"failed to enable vqmmc\n");
				else
					slot->host->vqmmc_enabled = true;

			} else {
				/* Keep track so we don't reset again */
				slot->host->vqmmc_enabled = true;
			}

			/* Reset our state machine after powering on */
			dw_mci_ctrl_reset(slot->host,
					  SDMMC_CTRL_ALL_RESET_FLAGS);
		}

		/* Adjust clock / bus width after power is up */
		dw_mci_setup_bus(slot, false);

		break;
	case MMC_POWER_OFF:
		/* Turn clock off before power goes down */
		dw_mci_setup_bus(slot, false);

		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

		if (!IS_ERR(mmc->supply.vqmmc) && slot->host->vqmmc_enabled)
			regulator_disable(mmc->supply.vqmmc);
		slot->host->vqmmc_enabled = false;

		regs = mci_readl(slot->host, PWREN);
		regs &= ~(1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	default:
		break;
	}

	if (slot->host->state == STATE_WAITING_CMD11_DONE && ios->clock != 0)
		slot->host->state = STATE_IDLE;
}

static int dw_mci_card_busy(struct mmc_host *mmc)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	u32 status;

	/*
	 * Check the busy bit which is low when DAT[3:0]
	 * (the data lines) are 0000
	 */
	status = mci_readl(slot->host, STATUS);

	return !!(status & SDMMC_STATUS_BUSY);
}

static int dw_mci_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	u32 uhs;
	u32 v18 = SDMMC_UHS_18V << slot->id;
	int ret;

	if (drv_data && drv_data->switch_voltage)
		return drv_data->switch_voltage(mmc, ios);

	/*
	 * Program the voltage. Note that some instances of dw_mmc may use
	 * the UHS_REG for this. For other instances (like exynos) the UHS_REG
	 * does no harm but you need to set the regulator directly. Try both.
	 */
	uhs = mci_readl(host, UHS_REG);
	if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330)
		uhs &= ~v18;
	else
		uhs |= v18;

	if (!IS_ERR(mmc->supply.vqmmc)) {
		ret = mmc_regulator_set_vqmmc(mmc, ios);
		if (ret < 0) {
			dev_dbg(&mmc->class_dev,
				"Regulator set error %d - %s V\n",
				ret, uhs & v18 ? "1.8" : "3.3");
			return ret;
		}
	}
	mci_writel(host, UHS_REG, uhs);

	return 0;
}

static int dw_mci_get_ro(struct mmc_host *mmc)
{
	int read_only;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	int gpio_ro = mmc_gpio_get_ro(mmc);

	/* Use platform get_ro function, else try on board write protect */
	if (gpio_ro >= 0)
		read_only = gpio_ro;
	else
		read_only =
			mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;

	dev_dbg(&mmc->class_dev, "card is %s\n",
		read_only ? "read-only" : "read-write");

	return read_only;
}

static void dw_mci_hw_reset(struct mmc_host *mmc)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	int reset;

	if (host->use_dma == TRANS_MODE_IDMAC)
		dw_mci_idmac_reset(host);

	if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET |
				     SDMMC_CTRL_FIFO_RESET))
		return;

	/*
	 * According to eMMC spec, card reset procedure:
	 * tRstW >= 1us:   RST_n pulse width
	 * tRSCA >= 200us: RST_n to Command time
	 * tRSTH >= 1us:   RST_n high period
	 */
	reset = mci_readl(host, RST_N);
	reset &= ~(SDMMC_RST_HWACTIVE << slot->id);
	mci_writel(host, RST_N, reset);
	usleep_range(1, 2);
	reset |= SDMMC_RST_HWACTIVE << slot->id;
	mci_writel(host, RST_N, reset);
	usleep_range(200, 300);
}
1645 | static void dw_mci_prepare_sdio_irq(struct dw_mci_slot *slot, bool prepare) |
1646 | { |
1647 | struct dw_mci *host = slot->host; |
1648 | const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id; |
1649 | u32 clk_en_a_old; |
1650 | u32 clk_en_a; |
1651 | |
1652 | /* |
1653 | * Low power mode will stop the card clock when idle. According to the |
1654 | * description of the CLKENA register we should disable low power mode |
1655 | * for SDIO cards if we need SDIO interrupts to work. |
1656 | */ |
1657 | |
1658 | clk_en_a_old = mci_readl(host, CLKENA); |
1659 | if (prepare) { |
1660 | set_bit(DW_MMC_CARD_NO_LOW_PWR, addr: &slot->flags); |
1661 | clk_en_a = clk_en_a_old & ~clken_low_pwr; |
1662 | } else { |
1663 | clear_bit(DW_MMC_CARD_NO_LOW_PWR, addr: &slot->flags); |
1664 | clk_en_a = clk_en_a_old | clken_low_pwr; |
1665 | } |
1666 | |
1667 | if (clk_en_a != clk_en_a_old) { |
1668 | mci_writel(host, CLKENA, clk_en_a); |
1669 | mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, |
1670 | arg: 0); |
1671 | } |
1672 | } |
1673 | |
1674 | static void __dw_mci_enable_sdio_irq(struct dw_mci_slot *slot, int enb) |
1675 | { |
1676 | struct dw_mci *host = slot->host; |
1677 | unsigned long irqflags; |
1678 | u32 int_mask; |
1679 | |
1680 | spin_lock_irqsave(&host->irq_lock, irqflags); |
1681 | |
1682 | /* Enable/disable Slot Specific SDIO interrupt */ |
1683 | int_mask = mci_readl(host, INTMASK); |
1684 | if (enb) |
1685 | int_mask |= SDMMC_INT_SDIO(slot->sdio_id); |
1686 | else |
1687 | int_mask &= ~SDMMC_INT_SDIO(slot->sdio_id); |
1688 | mci_writel(host, INTMASK, int_mask); |
1689 | |
1690 | spin_unlock_irqrestore(lock: &host->irq_lock, flags: irqflags); |
1691 | } |
1692 | |
1693 | static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb) |
1694 | { |
1695 | struct dw_mci_slot *slot = mmc_priv(host: mmc); |
1696 | struct dw_mci *host = slot->host; |
1697 | |
1698 | dw_mci_prepare_sdio_irq(slot, prepare: enb); |
1699 | __dw_mci_enable_sdio_irq(slot, enb); |
1700 | |
1701 | /* Avoid runtime suspending the device when SDIO IRQ is enabled */ |
1702 | if (enb) |
1703 | pm_runtime_get_noresume(dev: host->dev); |
1704 | else |
1705 | pm_runtime_put_noidle(host->dev);
1706 | } |
1707 | |
1708 | static void dw_mci_ack_sdio_irq(struct mmc_host *mmc) |
1709 | { |
1710 | struct dw_mci_slot *slot = mmc_priv(mmc);
1711 | |
1712 | __dw_mci_enable_sdio_irq(slot, 1);
1713 | } |
1714 | |
1715 | static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode) |
1716 | { |
1717 | struct dw_mci_slot *slot = mmc_priv(mmc);
1718 | struct dw_mci *host = slot->host; |
1719 | const struct dw_mci_drv_data *drv_data = host->drv_data; |
1720 | int err = -EINVAL; |
1721 | |
1722 | if (drv_data && drv_data->execute_tuning) |
1723 | err = drv_data->execute_tuning(slot, opcode); |
1724 | return err; |
1725 | } |
1726 | |
1727 | static int dw_mci_prepare_hs400_tuning(struct mmc_host *mmc, |
1728 | struct mmc_ios *ios) |
1729 | { |
1730 | struct dw_mci_slot *slot = mmc_priv(mmc);
1731 | struct dw_mci *host = slot->host; |
1732 | const struct dw_mci_drv_data *drv_data = host->drv_data; |
1733 | |
1734 | if (drv_data && drv_data->prepare_hs400_tuning) |
1735 | return drv_data->prepare_hs400_tuning(host, ios); |
1736 | |
1737 | return 0; |
1738 | } |
1739 | |
1740 | static bool dw_mci_reset(struct dw_mci *host) |
1741 | { |
1742 | u32 flags = SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET; |
1743 | bool ret = false; |
1744 | u32 status = 0; |
1745 | |
1746 | /*
1747 | * Resetting generates a block interrupt, so we clear the
1748 | * scatter-gather pointer beforehand.
1749 | */
1750 | if (host->sg) { |
1751 | sg_miter_stop(&host->sg_miter);
1752 | host->sg = NULL; |
1753 | } |
1754 | |
1755 | if (host->use_dma) |
1756 | flags |= SDMMC_CTRL_DMA_RESET; |
1757 | |
1758 | if (dw_mci_ctrl_reset(host, flags)) {
1759 | /* |
1760 | * In all cases we clear the RAWINTS |
1761 | * register to clear any interrupts. |
1762 | */ |
1763 | mci_writel(host, RINTSTS, 0xFFFFFFFF); |
1764 | |
1765 | if (!host->use_dma) { |
1766 | ret = true; |
1767 | goto ciu_out; |
1768 | } |
1769 | |
1770 | /* Wait for dma_req to be cleared */ |
1771 | if (readl_poll_timeout_atomic(host->regs + SDMMC_STATUS, |
1772 | status, |
1773 | !(status & SDMMC_STATUS_DMA_REQ), |
1774 | 1, 500 * USEC_PER_MSEC)) { |
1775 | dev_err(host->dev, |
1776 | "%s: Timeout waiting for dma_req to be cleared\n" , |
1777 | __func__); |
1778 | goto ciu_out; |
1779 | } |
1780 | |
1781 | /* when using DMA, next we reset the fifo again */
1782 | if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET)) |
1783 | goto ciu_out; |
1784 | } else { |
1785 | /* if the controller reset bit did clear, then set clock regs */ |
1786 | if (!(mci_readl(host, CTRL) & SDMMC_CTRL_RESET)) { |
1787 | dev_err(host->dev, |
1788 | "%s: fifo/dma reset bits didn't clear but ciu was reset, doing clock update\n" , |
1789 | __func__); |
1790 | goto ciu_out; |
1791 | } |
1792 | } |
1793 | |
1794 | if (host->use_dma == TRANS_MODE_IDMAC) |
1795 | /* It is also required that we reinit idmac */ |
1796 | dw_mci_idmac_init(host); |
1797 | |
1798 | ret = true; |
1799 | |
1800 | ciu_out: |
1801 | /* After a CTRL reset we need to have CIU set clock registers */ |
1802 | mci_send_cmd(host->slot, SDMMC_CMD_UPD_CLK, 0);
1803 | |
1804 | return ret; |
1805 | } |
1806 | |
1807 | static const struct mmc_host_ops dw_mci_ops = { |
1808 | .request = dw_mci_request, |
1809 | .pre_req = dw_mci_pre_req, |
1810 | .post_req = dw_mci_post_req, |
1811 | .set_ios = dw_mci_set_ios, |
1812 | .get_ro = dw_mci_get_ro, |
1813 | .get_cd = dw_mci_get_cd, |
1814 | .card_hw_reset = dw_mci_hw_reset, |
1815 | .enable_sdio_irq = dw_mci_enable_sdio_irq, |
1816 | .ack_sdio_irq = dw_mci_ack_sdio_irq, |
1817 | .execute_tuning = dw_mci_execute_tuning, |
1818 | .card_busy = dw_mci_card_busy, |
1819 | .start_signal_voltage_switch = dw_mci_switch_voltage, |
1820 | .prepare_hs400_tuning = dw_mci_prepare_hs400_tuning, |
1821 | }; |
1822 | |
1823 | #ifdef CONFIG_FAULT_INJECTION |
1824 | static enum hrtimer_restart dw_mci_fault_timer(struct hrtimer *t) |
1825 | { |
1826 | struct dw_mci *host = container_of(t, struct dw_mci, fault_timer); |
1827 | unsigned long flags; |
1828 | |
1829 | spin_lock_irqsave(&host->irq_lock, flags); |
1830 | |
1831 | /* |
1832 | * Only inject an error if we haven't already got an error or data over |
1833 | * interrupt. |
1834 | */ |
1835 | if (!host->data_status) { |
1836 | host->data_status = SDMMC_INT_DCRC; |
1837 | set_bit(EVENT_DATA_ERROR, &host->pending_events);
1838 | tasklet_schedule(&host->tasklet);
1839 | } |
1840 | |
1841 | spin_unlock_irqrestore(&host->irq_lock, flags);
1842 | |
1843 | return HRTIMER_NORESTART; |
1844 | } |
1845 | |
1846 | static void dw_mci_start_fault_timer(struct dw_mci *host) |
1847 | { |
1848 | struct mmc_data *data = host->data; |
1849 | |
1850 | if (!data || data->blocks <= 1) |
1851 | return; |
1852 | |
1853 | if (!should_fail(&host->fail_data_crc, 1))
1854 | return; |
1855 | |
1856 | /* |
1857 | * Try to inject the error at random points during the data transfer. |
1858 | */ |
1859 | hrtimer_start(&host->fault_timer,
1860 | ms_to_ktime(get_random_u32_below(25)),
1861 | HRTIMER_MODE_REL);
1862 | } |
1863 | |
1864 | static void dw_mci_stop_fault_timer(struct dw_mci *host) |
1865 | { |
1866 | hrtimer_cancel(&host->fault_timer);
1867 | } |
1868 | |
1869 | static void dw_mci_init_fault(struct dw_mci *host) |
1870 | { |
1871 | host->fail_data_crc = (struct fault_attr) FAULT_ATTR_INITIALIZER; |
1872 | |
1873 | hrtimer_init(&host->fault_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1874 | host->fault_timer.function = dw_mci_fault_timer; |
1875 | } |
1876 | #else |
1877 | static void dw_mci_init_fault(struct dw_mci *host) |
1878 | { |
1879 | } |
1880 | |
1881 | static void dw_mci_start_fault_timer(struct dw_mci *host) |
1882 | { |
1883 | } |
1884 | |
1885 | static void dw_mci_stop_fault_timer(struct dw_mci *host) |
1886 | { |
1887 | } |
1888 | #endif |
1889 | |
1890 | static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq) |
1891 | __releases(&host->lock) |
1892 | __acquires(&host->lock) |
1893 | { |
1894 | struct dw_mci_slot *slot; |
1895 | struct mmc_host *prev_mmc = host->slot->mmc; |
1896 | |
1897 | WARN_ON(host->cmd || host->data); |
1898 | |
1899 | host->slot->mrq = NULL; |
1900 | host->mrq = NULL; |
1901 | if (!list_empty(&host->queue)) {
1902 | slot = list_entry(host->queue.next, |
1903 | struct dw_mci_slot, queue_node); |
1904 | list_del(&slot->queue_node);
1905 | dev_vdbg(host->dev, "list not empty: %s is next\n",
1906 | mmc_hostname(slot->mmc)); |
1907 | host->state = STATE_SENDING_CMD; |
1908 | dw_mci_start_request(host, slot); |
1909 | } else { |
1910 | dev_vdbg(host->dev, "list empty\n" ); |
1911 | |
1912 | if (host->state == STATE_SENDING_CMD11) |
1913 | host->state = STATE_WAITING_CMD11_DONE; |
1914 | else |
1915 | host->state = STATE_IDLE; |
1916 | } |
1917 | |
1918 | spin_unlock(&host->lock);
1919 | mmc_request_done(prev_mmc, mrq); |
1920 | spin_lock(&host->lock);
1921 | } |
1922 | |
1923 | static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd) |
1924 | { |
1925 | u32 status = host->cmd_status; |
1926 | |
1927 | host->cmd_status = 0; |
1928 | |
1929 | /* Read the response from the card (up to 16 bytes) */ |
1930 | if (cmd->flags & MMC_RSP_PRESENT) { |
1931 | if (cmd->flags & MMC_RSP_136) { |
1932 | cmd->resp[3] = mci_readl(host, RESP0); |
1933 | cmd->resp[2] = mci_readl(host, RESP1); |
1934 | cmd->resp[1] = mci_readl(host, RESP2); |
1935 | cmd->resp[0] = mci_readl(host, RESP3); |
1936 | } else { |
1937 | cmd->resp[0] = mci_readl(host, RESP0); |
1938 | cmd->resp[1] = 0; |
1939 | cmd->resp[2] = 0; |
1940 | cmd->resp[3] = 0; |
1941 | } |
1942 | } |
1943 | |
1944 | if (status & SDMMC_INT_RTO) |
1945 | cmd->error = -ETIMEDOUT; |
1946 | else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC)) |
1947 | cmd->error = -EILSEQ; |
1948 | else if (status & SDMMC_INT_RESP_ERR) |
1949 | cmd->error = -EIO; |
1950 | else |
1951 | cmd->error = 0; |
1952 | |
1953 | return cmd->error; |
1954 | } |
1955 | |
1956 | static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data) |
1957 | { |
1958 | u32 status = host->data_status; |
1959 | |
1960 | if (status & DW_MCI_DATA_ERROR_FLAGS) { |
1961 | if (status & SDMMC_INT_DRTO) { |
1962 | data->error = -ETIMEDOUT; |
1963 | } else if (status & SDMMC_INT_DCRC) { |
1964 | data->error = -EILSEQ; |
1965 | } else if (status & SDMMC_INT_EBE) { |
1966 | if (host->dir_status == |
1967 | DW_MCI_SEND_STATUS) { |
1968 | /* |
1969 | * No data CRC status was returned. |
1970 | * The number of bytes transferred |
1971 | * will be exaggerated in PIO mode. |
1972 | */ |
1973 | data->bytes_xfered = 0; |
1974 | data->error = -ETIMEDOUT; |
1975 | } else if (host->dir_status == |
1976 | DW_MCI_RECV_STATUS) { |
1977 | data->error = -EILSEQ; |
1978 | } |
1979 | } else { |
1980 | /* SDMMC_INT_SBE is included */ |
1981 | data->error = -EILSEQ; |
1982 | } |
1983 | |
1984 | dev_dbg(host->dev, "data error, status 0x%08x\n" , status); |
1985 | |
1986 | /* |
1987 | * After an error, there may be data lingering |
1988 | * in the FIFO |
1989 | */ |
1990 | dw_mci_reset(host); |
1991 | } else { |
1992 | data->bytes_xfered = data->blocks * data->blksz; |
1993 | data->error = 0; |
1994 | } |
1995 | |
1996 | return data->error; |
1997 | } |
1998 | |
1999 | static void dw_mci_set_drto(struct dw_mci *host) |
2000 | { |
2001 | const struct dw_mci_drv_data *drv_data = host->drv_data; |
2002 | unsigned int drto_clks; |
2003 | unsigned int drto_div; |
2004 | unsigned int drto_ms; |
2005 | unsigned long irqflags; |
2006 | |
2007 | if (drv_data && drv_data->get_drto_clks) |
2008 | drto_clks = drv_data->get_drto_clks(host); |
2009 | else |
2010 | drto_clks = mci_readl(host, TMOUT) >> 8; |
2011 | drto_div = (mci_readl(host, CLKDIV) & 0xff) * 2; |
2012 | if (drto_div == 0) |
2013 | drto_div = 1; |
2014 | |
2015 | drto_ms = DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * drto_clks * drto_div, |
2016 | host->bus_hz); |
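     | /*
     | * Worked example (illustrative values only): drto_clks = 0xffffff,
     | * CLKDIV = 1 (so drto_div = 2) and bus_hz = 50 MHz give
     | * drto_ms = DIV_ROUND_UP(1000 * 16777215 * 2, 50000000) = 672 ms.
     | */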
2017 | |
2018 | dev_dbg(host->dev, "drto_ms: %u\n" , drto_ms); |
2019 | |
2020 | /* add a bit spare time */ |
2021 | drto_ms += 10; |
2022 | |
2023 | spin_lock_irqsave(&host->irq_lock, irqflags); |
2024 | if (!test_bit(EVENT_DATA_COMPLETE, &host->pending_events)) |
2025 | mod_timer(&host->dto_timer,
2026 | jiffies + msecs_to_jiffies(drto_ms));
2027 | spin_unlock_irqrestore(&host->irq_lock, irqflags);
2028 | } |
2029 | |
2030 | static bool dw_mci_clear_pending_cmd_complete(struct dw_mci *host) |
2031 | { |
2032 | if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events)) |
2033 | return false; |
2034 | |
2035 | /* |
2036 | * Really be certain that the timer has stopped. This is a bit of |
2037 | * paranoia and could only really happen if we had really bad |
2038 | * interrupt latency and the interrupt routine and timeout were |
2039 | * running concurrently so that the del_timer() in the interrupt |
2040 | * handler couldn't run. |
2041 | */ |
2042 | WARN_ON(del_timer_sync(&host->cto_timer)); |
2043 | clear_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2044 | |
2045 | return true; |
2046 | } |
2047 | |
2048 | static bool dw_mci_clear_pending_data_complete(struct dw_mci *host) |
2049 | { |
2050 | if (!test_bit(EVENT_DATA_COMPLETE, &host->pending_events)) |
2051 | return false; |
2052 | |
2053 | /* Extra paranoia just like dw_mci_clear_pending_cmd_complete() */ |
2054 | WARN_ON(del_timer_sync(&host->dto_timer)); |
2055 | clear_bit(EVENT_DATA_COMPLETE, &host->pending_events);
2056 | |
2057 | return true; |
2058 | } |
2059 | |
2060 | static void dw_mci_tasklet_func(struct tasklet_struct *t) |
2061 | { |
2062 | struct dw_mci *host = from_tasklet(host, t, tasklet); |
2063 | struct mmc_data *data; |
2064 | struct mmc_command *cmd; |
2065 | struct mmc_request *mrq; |
2066 | enum dw_mci_state state; |
2067 | enum dw_mci_state prev_state; |
2068 | unsigned int err; |
2069 | |
2070 | spin_lock(&host->lock);
2071 | |
2072 | state = host->state; |
2073 | data = host->data; |
2074 | mrq = host->mrq; |
2075 | |
2076 | do { |
2077 | prev_state = state; |
2078 | |
2079 | switch (state) { |
2080 | case STATE_IDLE: |
2081 | case STATE_WAITING_CMD11_DONE: |
2082 | break; |
2083 | |
2084 | case STATE_SENDING_CMD11: |
2085 | case STATE_SENDING_CMD: |
2086 | if (!dw_mci_clear_pending_cmd_complete(host)) |
2087 | break; |
2088 | |
2089 | cmd = host->cmd; |
2090 | host->cmd = NULL; |
2091 | set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
2092 | err = dw_mci_command_complete(host, cmd); |
2093 | if (cmd == mrq->sbc && !err) { |
2094 | __dw_mci_start_request(host, host->slot,
2095 | mrq->cmd);
2096 | goto unlock; |
2097 | } |
2098 | |
2099 | if (cmd->data && err) { |
2100 | /* |
2101 | * During UHS tuning sequence, sending the stop |
2102 | * command after the response CRC error would |
2103 | * throw the system into a confused state |
2104 | * causing all future tuning phases to report |
2105 | * failure. |
2106 | * |
2107 | * In such case controller will move into a data |
2108 | * transfer state after a response error or |
2109 | * response CRC error. Let's let that finish |
2110 | * before trying to send a stop, so we'll go to |
2111 | * STATE_SENDING_DATA. |
2112 | * |
2113 | * Although letting the data transfer take place |
2114 | * will waste a bit of time (we already know |
2115 | * the command was bad), it can't cause any |
2116 | * errors since it's possible it would have |
2117 | * taken place anyway if this tasklet got |
2118 | * delayed. Allowing the transfer to take place |
2119 | * avoids races and keeps things simple. |
2120 | */ |
2121 | if (err != -ETIMEDOUT && |
2122 | host->dir_status == DW_MCI_RECV_STATUS) { |
2123 | state = STATE_SENDING_DATA; |
2124 | continue; |
2125 | } |
2126 | |
2127 | send_stop_abort(host, data); |
2128 | dw_mci_stop_dma(host); |
2129 | state = STATE_SENDING_STOP; |
2130 | break; |
2131 | } |
2132 | |
2133 | if (!cmd->data || err) { |
2134 | dw_mci_request_end(host, mrq); |
2135 | goto unlock; |
2136 | } |
2137 | |
2138 | prev_state = state = STATE_SENDING_DATA; |
2139 | fallthrough; |
2140 | |
2141 | case STATE_SENDING_DATA: |
2142 | /* |
2143 | * We could get a data error and never a transfer |
2144 | * complete so we'd better check for it here. |
2145 | * |
2146 | * Note that we don't really care if we also got a |
2147 | * transfer complete; stopping the DMA and sending an |
2148 | * abort won't hurt. |
2149 | */ |
2150 | if (test_and_clear_bit(EVENT_DATA_ERROR,
2151 | &host->pending_events)) {
2152 | if (!(host->data_status & (SDMMC_INT_DRTO | |
2153 | SDMMC_INT_EBE))) |
2154 | send_stop_abort(host, data); |
2155 | dw_mci_stop_dma(host); |
2156 | state = STATE_DATA_ERROR; |
2157 | break; |
2158 | } |
2159 | |
2160 | if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
2161 | &host->pending_events)) {
2162 | /*
2163 | * Not all data-related interrupts arrived within the given
2164 | * time while reading data; arm the data read timeout timer.
2165 | */
2166 | if (host->dir_status == DW_MCI_RECV_STATUS) |
2167 | dw_mci_set_drto(host); |
2168 | break; |
2169 | } |
2170 | |
2171 | set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
2172 | |
2173 | /* |
2174 | * Handle an EVENT_DATA_ERROR that might have shown up |
2175 | * before the transfer completed. This might not have |
2176 | * been caught by the check above because the interrupt |
2177 | * could have gone off between the previous check and |
2178 | * the check for transfer complete. |
2179 | * |
2180 | * Technically this ought not be needed assuming we |
2181 | * get a DATA_COMPLETE eventually (we'll notice the |
2182 | * error and end the request), but it shouldn't hurt. |
2183 | * |
2184 | * This has the advantage of sending the stop command. |
2185 | */ |
2186 | if (test_and_clear_bit(EVENT_DATA_ERROR,
2187 | &host->pending_events)) {
2188 | if (!(host->data_status & (SDMMC_INT_DRTO | |
2189 | SDMMC_INT_EBE))) |
2190 | send_stop_abort(host, data); |
2191 | dw_mci_stop_dma(host); |
2192 | state = STATE_DATA_ERROR; |
2193 | break; |
2194 | } |
2195 | prev_state = state = STATE_DATA_BUSY; |
2196 | |
2197 | fallthrough; |
2198 | |
2199 | case STATE_DATA_BUSY: |
2200 | if (!dw_mci_clear_pending_data_complete(host)) { |
2201 | /*
2202 | * A data error interrupt came, but the data over interrupt
2203 | * didn't arrive within the given time while reading data;
2204 | * arm the data read timeout timer.
2205 | */
2206 | if (host->dir_status == DW_MCI_RECV_STATUS) |
2207 | dw_mci_set_drto(host); |
2208 | break; |
2209 | } |
2210 | |
2211 | dw_mci_stop_fault_timer(host); |
2212 | host->data = NULL; |
2213 | set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
2214 | err = dw_mci_data_complete(host, data); |
2215 | |
2216 | if (!err) { |
2217 | if (!data->stop || mrq->sbc) { |
2218 | if (mrq->sbc && data->stop) |
2219 | data->stop->error = 0; |
2220 | dw_mci_request_end(host, mrq); |
2221 | goto unlock; |
2222 | } |
2223 | |
2224 | /* stop command for open-ended transfer */
2225 | if (data->stop) |
2226 | send_stop_abort(host, data); |
2227 | } else { |
2228 | /* |
2229 | * If we don't have a command complete now we'll |
2230 | * never get one since we just reset everything; |
2231 | * better end the request. |
2232 | * |
2233 | * If we do have a command complete we'll fall |
2234 | * through to the SENDING_STOP command and |
2235 | * everything will be peachy keen. |
2236 | */ |
2237 | if (!test_bit(EVENT_CMD_COMPLETE, |
2238 | &host->pending_events)) { |
2239 | host->cmd = NULL; |
2240 | dw_mci_request_end(host, mrq); |
2241 | goto unlock; |
2242 | } |
2243 | } |
2244 | |
2245 | /* |
2246 | * If err is non-zero, the stop-abort command has
2247 | * already been issued.
2248 | */ |
2249 | prev_state = state = STATE_SENDING_STOP; |
2250 | |
2251 | fallthrough; |
2252 | |
2253 | case STATE_SENDING_STOP: |
2254 | if (!dw_mci_clear_pending_cmd_complete(host)) |
2255 | break; |
2256 | |
2257 | /* CMD error in data command */ |
2258 | if (mrq->cmd->error && mrq->data) |
2259 | dw_mci_reset(host); |
2260 | |
2261 | dw_mci_stop_fault_timer(host); |
2262 | host->cmd = NULL; |
2263 | host->data = NULL; |
2264 | |
2265 | if (!mrq->sbc && mrq->stop) |
2266 | dw_mci_command_complete(host, mrq->stop);
2267 | else |
2268 | host->cmd_status = 0; |
2269 | |
2270 | dw_mci_request_end(host, mrq); |
2271 | goto unlock; |
2272 | |
2273 | case STATE_DATA_ERROR: |
2274 | if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
2275 | &host->pending_events))
2276 | break; |
2277 | |
2278 | state = STATE_DATA_BUSY; |
2279 | break; |
2280 | } |
2281 | } while (state != prev_state); |
2282 | |
2283 | host->state = state; |
2284 | unlock: |
2285 | spin_unlock(&host->lock);
2286 | |
2287 | } |
2288 | |
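     | /*
     | * The PIO helpers below stage bytes through host->part_buf so that
     | * FIFO accesses are always full-width (16, 32 or 64 bits). For
     | * example, pushing 5 bytes through a 32-bit FIFO writes one full
     | * word and parks the fifth byte in part_buf until more data arrives
     | * or the transfer reaches its expected length.
     | */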
2289 | /* push final bytes to part_buf, only use during push */ |
2290 | static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt) |
2291 | { |
2292 | memcpy((void *)&host->part_buf, buf, cnt); |
2293 | host->part_buf_count = cnt; |
2294 | } |
2295 | |
2296 | /* append bytes to part_buf, only use during push */ |
2297 | static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt) |
2298 | { |
2299 | cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count); |
2300 | memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt); |
2301 | host->part_buf_count += cnt; |
2302 | return cnt; |
2303 | } |
2304 | |
2305 | /* pull first bytes from part_buf, only use during pull */ |
2306 | static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt) |
2307 | { |
2308 | cnt = min_t(int, cnt, host->part_buf_count); |
2309 | if (cnt) { |
2310 | memcpy(buf, (void *)&host->part_buf + host->part_buf_start, |
2311 | cnt); |
2312 | host->part_buf_count -= cnt; |
2313 | host->part_buf_start += cnt; |
2314 | } |
2315 | return cnt; |
2316 | } |
2317 | |
2318 | /* pull final bytes from the part_buf, assuming it's just been filled */ |
2319 | static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt) |
2320 | { |
2321 | memcpy(buf, &host->part_buf, cnt); |
2322 | host->part_buf_start = cnt; |
2323 | host->part_buf_count = (1 << host->data_shift) - cnt; |
2324 | } |
2325 | |
2326 | static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt) |
2327 | { |
2328 | struct mmc_data *data = host->data; |
2329 | int init_cnt = cnt; |
2330 | |
2331 | /* try and push anything in the part_buf */ |
2332 | if (unlikely(host->part_buf_count)) { |
2333 | int len = dw_mci_push_part_bytes(host, buf, cnt); |
2334 | |
2335 | buf += len; |
2336 | cnt -= len; |
2337 | if (host->part_buf_count == 2) { |
2338 | mci_fifo_writew(host->fifo_reg, host->part_buf16); |
2339 | host->part_buf_count = 0; |
2340 | } |
2341 | } |
2342 | #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS |
2343 | if (unlikely((unsigned long)buf & 0x1)) { |
2344 | while (cnt >= 2) { |
2345 | u16 aligned_buf[64]; |
2346 | int len = min(cnt & -2, (int)sizeof(aligned_buf)); |
2347 | int items = len >> 1; |
2348 | int i; |
2349 | /* memcpy from input buffer into aligned buffer */ |
2350 | memcpy(aligned_buf, buf, len); |
2351 | buf += len; |
2352 | cnt -= len; |
2353 | /* push data from aligned buffer into fifo */ |
2354 | for (i = 0; i < items; ++i) |
2355 | mci_fifo_writew(host->fifo_reg, aligned_buf[i]); |
2356 | } |
2357 | } else |
2358 | #endif |
2359 | { |
2360 | u16 *pdata = buf; |
2361 | |
2362 | for (; cnt >= 2; cnt -= 2) |
2363 | mci_fifo_writew(host->fifo_reg, *pdata++); |
2364 | buf = pdata; |
2365 | } |
2366 | /* put anything remaining in the part_buf */ |
2367 | if (cnt) { |
2368 | dw_mci_set_part_bytes(host, buf, cnt); |
2369 | /* Push data if we have reached the expected data length */ |
2370 | if ((data->bytes_xfered + init_cnt) == |
2371 | (data->blksz * data->blocks)) |
2372 | mci_fifo_writew(host->fifo_reg, host->part_buf16); |
2373 | } |
2374 | } |
2375 | |
2376 | static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt) |
2377 | { |
2378 | #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS |
2379 | if (unlikely((unsigned long)buf & 0x1)) { |
2380 | while (cnt >= 2) { |
2381 | /* pull data from fifo into aligned buffer */ |
2382 | u16 aligned_buf[64]; |
2383 | int len = min(cnt & -2, (int)sizeof(aligned_buf)); |
2384 | int items = len >> 1; |
2385 | int i; |
2386 | |
2387 | for (i = 0; i < items; ++i) |
2388 | aligned_buf[i] = mci_fifo_readw(host->fifo_reg); |
2389 | /* memcpy from aligned buffer into output buffer */ |
2390 | memcpy(buf, aligned_buf, len); |
2391 | buf += len; |
2392 | cnt -= len; |
2393 | } |
2394 | } else |
2395 | #endif |
2396 | { |
2397 | u16 *pdata = buf; |
2398 | |
2399 | for (; cnt >= 2; cnt -= 2) |
2400 | *pdata++ = mci_fifo_readw(host->fifo_reg); |
2401 | buf = pdata; |
2402 | } |
2403 | if (cnt) { |
2404 | host->part_buf16 = mci_fifo_readw(host->fifo_reg); |
2405 | dw_mci_pull_final_bytes(host, buf, cnt); |
2406 | } |
2407 | } |
2408 | |
2409 | static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt) |
2410 | { |
2411 | struct mmc_data *data = host->data; |
2412 | int init_cnt = cnt; |
2413 | |
2414 | /* try and push anything in the part_buf */ |
2415 | if (unlikely(host->part_buf_count)) { |
2416 | int len = dw_mci_push_part_bytes(host, buf, cnt); |
2417 | |
2418 | buf += len; |
2419 | cnt -= len; |
2420 | if (host->part_buf_count == 4) { |
2421 | mci_fifo_writel(host->fifo_reg, host->part_buf32); |
2422 | host->part_buf_count = 0; |
2423 | } |
2424 | } |
2425 | #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS |
2426 | if (unlikely((unsigned long)buf & 0x3)) { |
2427 | while (cnt >= 4) { |
2428 | u32 aligned_buf[32]; |
2429 | int len = min(cnt & -4, (int)sizeof(aligned_buf)); |
2430 | int items = len >> 2; |
2431 | int i; |
2432 | /* memcpy from input buffer into aligned buffer */ |
2433 | memcpy(aligned_buf, buf, len); |
2434 | buf += len; |
2435 | cnt -= len; |
2436 | /* push data from aligned buffer into fifo */ |
2437 | for (i = 0; i < items; ++i) |
2438 | mci_fifo_writel(host->fifo_reg, aligned_buf[i]); |
2439 | } |
2440 | } else |
2441 | #endif |
2442 | { |
2443 | u32 *pdata = buf; |
2444 | |
2445 | for (; cnt >= 4; cnt -= 4) |
2446 | mci_fifo_writel(host->fifo_reg, *pdata++); |
2447 | buf = pdata; |
2448 | } |
2449 | /* put anything remaining in the part_buf */ |
2450 | if (cnt) { |
2451 | dw_mci_set_part_bytes(host, buf, cnt); |
2452 | /* Push data if we have reached the expected data length */ |
2453 | if ((data->bytes_xfered + init_cnt) == |
2454 | (data->blksz * data->blocks)) |
2455 | mci_fifo_writel(host->fifo_reg, host->part_buf32); |
2456 | } |
2457 | } |
2458 | |
2459 | static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt) |
2460 | { |
2461 | #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS |
2462 | if (unlikely((unsigned long)buf & 0x3)) { |
2463 | while (cnt >= 4) { |
2464 | /* pull data from fifo into aligned buffer */ |
2465 | u32 aligned_buf[32]; |
2466 | int len = min(cnt & -4, (int)sizeof(aligned_buf)); |
2467 | int items = len >> 2; |
2468 | int i; |
2469 | |
2470 | for (i = 0; i < items; ++i) |
2471 | aligned_buf[i] = mci_fifo_readl(host->fifo_reg); |
2472 | /* memcpy from aligned buffer into output buffer */ |
2473 | memcpy(buf, aligned_buf, len); |
2474 | buf += len; |
2475 | cnt -= len; |
2476 | } |
2477 | } else |
2478 | #endif |
2479 | { |
2480 | u32 *pdata = buf; |
2481 | |
2482 | for (; cnt >= 4; cnt -= 4) |
2483 | *pdata++ = mci_fifo_readl(host->fifo_reg); |
2484 | buf = pdata; |
2485 | } |
2486 | if (cnt) { |
2487 | host->part_buf32 = mci_fifo_readl(host->fifo_reg); |
2488 | dw_mci_pull_final_bytes(host, buf, cnt); |
2489 | } |
2490 | } |
2491 | |
2492 | static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt) |
2493 | { |
2494 | struct mmc_data *data = host->data; |
2495 | int init_cnt = cnt; |
2496 | |
2497 | /* try and push anything in the part_buf */ |
2498 | if (unlikely(host->part_buf_count)) { |
2499 | int len = dw_mci_push_part_bytes(host, buf, cnt); |
2500 | |
2501 | buf += len; |
2502 | cnt -= len; |
2503 | |
2504 | if (host->part_buf_count == 8) { |
2505 | mci_fifo_writeq(host->fifo_reg, host->part_buf); |
2506 | host->part_buf_count = 0; |
2507 | } |
2508 | } |
2509 | #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS |
2510 | if (unlikely((unsigned long)buf & 0x7)) { |
2511 | while (cnt >= 8) { |
2512 | u64 aligned_buf[16]; |
2513 | int len = min(cnt & -8, (int)sizeof(aligned_buf)); |
2514 | int items = len >> 3; |
2515 | int i; |
2516 | /* memcpy from input buffer into aligned buffer */ |
2517 | memcpy(aligned_buf, buf, len); |
2518 | buf += len; |
2519 | cnt -= len; |
2520 | /* push data from aligned buffer into fifo */ |
2521 | for (i = 0; i < items; ++i) |
2522 | mci_fifo_writeq(host->fifo_reg, aligned_buf[i]); |
2523 | } |
2524 | } else |
2525 | #endif |
2526 | { |
2527 | u64 *pdata = buf; |
2528 | |
2529 | for (; cnt >= 8; cnt -= 8) |
2530 | mci_fifo_writeq(host->fifo_reg, *pdata++); |
2531 | buf = pdata; |
2532 | } |
2533 | /* put anything remaining in the part_buf */ |
2534 | if (cnt) { |
2535 | dw_mci_set_part_bytes(host, buf, cnt); |
2536 | /* Push data if we have reached the expected data length */ |
2537 | if ((data->bytes_xfered + init_cnt) == |
2538 | (data->blksz * data->blocks)) |
2539 | mci_fifo_writeq(host->fifo_reg, host->part_buf); |
2540 | } |
2541 | } |
2542 | |
2543 | static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt) |
2544 | { |
2545 | #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS |
2546 | if (unlikely((unsigned long)buf & 0x7)) { |
2547 | while (cnt >= 8) { |
2548 | /* pull data from fifo into aligned buffer */ |
2549 | u64 aligned_buf[16]; |
2550 | int len = min(cnt & -8, (int)sizeof(aligned_buf)); |
2551 | int items = len >> 3; |
2552 | int i; |
2553 | |
2554 | for (i = 0; i < items; ++i) |
2555 | aligned_buf[i] = mci_fifo_readq(host->fifo_reg); |
2556 | |
2557 | /* memcpy from aligned buffer into output buffer */ |
2558 | memcpy(buf, aligned_buf, len); |
2559 | buf += len; |
2560 | cnt -= len; |
2561 | } |
2562 | } else |
2563 | #endif |
2564 | { |
2565 | u64 *pdata = buf; |
2566 | |
2567 | for (; cnt >= 8; cnt -= 8) |
2568 | *pdata++ = mci_fifo_readq(host->fifo_reg); |
2569 | buf = pdata; |
2570 | } |
2571 | if (cnt) { |
2572 | host->part_buf = mci_fifo_readq(host->fifo_reg); |
2573 | dw_mci_pull_final_bytes(host, buf, cnt); |
2574 | } |
2575 | } |
2576 | |
2577 | static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt) |
2578 | { |
2579 | int len; |
2580 | |
2581 | /* get remaining partial bytes */ |
2582 | len = dw_mci_pull_part_bytes(host, buf, cnt); |
2583 | if (unlikely(len == cnt)) |
2584 | return; |
2585 | buf += len; |
2586 | cnt -= len; |
2587 | |
2588 | /* get the rest of the data */ |
2589 | host->pull_data(host, buf, cnt); |
2590 | } |
2591 | |
2592 | static void dw_mci_read_data_pio(struct dw_mci *host, bool dto) |
2593 | { |
2594 | struct sg_mapping_iter *sg_miter = &host->sg_miter; |
2595 | void *buf; |
2596 | unsigned int offset; |
2597 | struct mmc_data *data = host->data; |
2598 | int shift = host->data_shift; |
2599 | u32 status; |
2600 | unsigned int len; |
2601 | unsigned int remain, fcnt; |
2602 | |
2603 | do { |
2604 | if (!sg_miter_next(sg_miter))
2605 | goto done; |
2606 | |
2607 | host->sg = sg_miter->piter.sg; |
2608 | buf = sg_miter->addr; |
2609 | remain = sg_miter->length; |
2610 | offset = 0; |
2611 | |
2612 | do { |
2613 | fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS)) |
2614 | << shift) + host->part_buf_count; |
2615 | len = min(remain, fcnt); |
2616 | if (!len) |
2617 | break; |
2618 | dw_mci_pull_data(host, (void *)(buf + offset), len);
2619 | data->bytes_xfered += len; |
2620 | offset += len; |
2621 | remain -= len; |
2622 | } while (remain); |
2623 | |
2624 | sg_miter->consumed = offset; |
2625 | status = mci_readl(host, MINTSTS); |
2626 | mci_writel(host, RINTSTS, SDMMC_INT_RXDR); |
2627 | /* if the RXDR is ready read again */ |
2628 | } while ((status & SDMMC_INT_RXDR) || |
2629 | (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS)))); |
2630 | |
2631 | if (!remain) { |
2632 | if (!sg_miter_next(sg_miter))
2633 | goto done; |
2634 | sg_miter->consumed = 0; |
2635 | } |
2636 | sg_miter_stop(sg_miter);
2637 | return; |
2638 | |
2639 | done: |
2640 | sg_miter_stop(sg_miter);
2641 | host->sg = NULL; |
2642 | smp_wmb(); /* drain writebuffer */ |
2643 | set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
2644 | } |
2645 | |
2646 | static void dw_mci_write_data_pio(struct dw_mci *host) |
2647 | { |
2648 | struct sg_mapping_iter *sg_miter = &host->sg_miter; |
2649 | void *buf; |
2650 | unsigned int offset; |
2651 | struct mmc_data *data = host->data; |
2652 | int shift = host->data_shift; |
2653 | u32 status; |
2654 | unsigned int len; |
2655 | unsigned int fifo_depth = host->fifo_depth; |
2656 | unsigned int remain, fcnt; |
2657 | |
2658 | do { |
2659 | if (!sg_miter_next(sg_miter))
2660 | goto done; |
2661 | |
2662 | host->sg = sg_miter->piter.sg; |
2663 | buf = sg_miter->addr; |
2664 | remain = sg_miter->length; |
2665 | offset = 0; |
2666 | |
2667 | do { |
2668 | fcnt = ((fifo_depth - |
2669 | SDMMC_GET_FCNT(mci_readl(host, STATUS))) |
2670 | << shift) - host->part_buf_count; |
2671 | len = min(remain, fcnt); |
2672 | if (!len) |
2673 | break; |
2674 | host->push_data(host, (void *)(buf + offset), len); |
2675 | data->bytes_xfered += len; |
2676 | offset += len; |
2677 | remain -= len; |
2678 | } while (remain); |
2679 | |
2680 | sg_miter->consumed = offset; |
2681 | status = mci_readl(host, MINTSTS); |
2682 | mci_writel(host, RINTSTS, SDMMC_INT_TXDR); |
2683 | } while (status & SDMMC_INT_TXDR); /* if TXDR write again */ |
2684 | |
2685 | if (!remain) { |
2686 | if (!sg_miter_next(sg_miter))
2687 | goto done; |
2688 | sg_miter->consumed = 0; |
2689 | } |
2690 | sg_miter_stop(sg_miter);
2691 | return; |
2692 | |
2693 | done: |
2694 | sg_miter_stop(sg_miter);
2695 | host->sg = NULL; |
2696 | smp_wmb(); /* drain writebuffer */ |
2697 | set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
2698 | } |
2699 | |
2700 | static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status) |
2701 | { |
2702 | del_timer(&host->cto_timer);
2703 | |
2704 | if (!host->cmd_status) |
2705 | host->cmd_status = status; |
2706 | |
2707 | smp_wmb(); /* drain writebuffer */ |
2708 | |
2709 | set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2710 | tasklet_schedule(&host->tasklet);
2711 | |
2712 | dw_mci_start_fault_timer(host); |
2713 | } |
2714 | |
2715 | static void dw_mci_handle_cd(struct dw_mci *host) |
2716 | { |
2717 | struct dw_mci_slot *slot = host->slot; |
2718 | |
2719 | mmc_detect_change(slot->mmc, |
2720 | msecs_to_jiffies(host->pdata->detect_delay_ms));
2721 | } |
2722 | |
2723 | static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) |
2724 | { |
2725 | struct dw_mci *host = dev_id; |
2726 | u32 pending; |
2727 | struct dw_mci_slot *slot = host->slot; |
2728 | |
2729 | pending = mci_readl(host, MINTSTS); /* read-only mask reg */ |
2730 | |
2731 | if (pending) { |
2732 | /* Check volt switch first, since it can look like an error */ |
2733 | if ((host->state == STATE_SENDING_CMD11) && |
2734 | (pending & SDMMC_INT_VOLT_SWITCH)) { |
2735 | mci_writel(host, RINTSTS, SDMMC_INT_VOLT_SWITCH); |
2736 | pending &= ~SDMMC_INT_VOLT_SWITCH; |
2737 | |
2738 | /* |
2739 | * Hold the lock; we know cmd11_timer can't be kicked |
2740 | * off after the lock is released, so safe to delete. |
2741 | */ |
2742 | spin_lock(&host->irq_lock);
2743 | dw_mci_cmd_interrupt(host, pending);
2744 | spin_unlock(&host->irq_lock);
2745 | 
2746 | del_timer(&host->cmd11_timer);
2747 | } |
2748 | |
2749 | if (pending & DW_MCI_CMD_ERROR_FLAGS) { |
2750 | spin_lock(&host->irq_lock);
2751 | |
2752 | del_timer(&host->cto_timer);
2753 | mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS); |
2754 | host->cmd_status = pending; |
2755 | smp_wmb(); /* drain writebuffer */ |
2756 | set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2757 | |
2758 | spin_unlock(&host->irq_lock);
2759 | } |
2760 | |
2761 | if (pending & DW_MCI_DATA_ERROR_FLAGS) { |
2762 | spin_lock(&host->irq_lock);
2763 | |
2764 | if (host->quirks & DW_MMC_QUIRK_EXTENDED_TMOUT) |
2765 | del_timer(&host->dto_timer);
2766 | |
2767 | /* if there is an error report DATA_ERROR */ |
2768 | mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS); |
2769 | host->data_status = pending; |
2770 | smp_wmb(); /* drain writebuffer */ |
2771 | set_bit(EVENT_DATA_ERROR, &host->pending_events);
2772 | |
2773 | if (host->quirks & DW_MMC_QUIRK_EXTENDED_TMOUT) |
2774 | /* In case of error, we cannot expect a DTO */ |
2775 | set_bit(EVENT_DATA_COMPLETE,
2776 | &host->pending_events);
2777 | |
2778 | tasklet_schedule(&host->tasklet);
2779 | |
2780 | spin_unlock(&host->irq_lock);
2781 | } |
2782 | |
2783 | if (pending & SDMMC_INT_DATA_OVER) { |
2784 | spin_lock(&host->irq_lock);
2785 | |
2786 | del_timer(&host->dto_timer);
2787 | |
2788 | mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER); |
2789 | if (!host->data_status) |
2790 | host->data_status = pending; |
2791 | smp_wmb(); /* drain writebuffer */ |
2792 | if (host->dir_status == DW_MCI_RECV_STATUS) { |
2793 | if (host->sg != NULL) |
2794 | dw_mci_read_data_pio(host, true);
2795 | } |
2796 | set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
2797 | tasklet_schedule(&host->tasklet);
2798 | |
2799 | spin_unlock(&host->irq_lock);
2800 | } |
2801 | |
2802 | if (pending & SDMMC_INT_RXDR) { |
2803 | mci_writel(host, RINTSTS, SDMMC_INT_RXDR); |
2804 | if (host->dir_status == DW_MCI_RECV_STATUS && host->sg) |
2805 | dw_mci_read_data_pio(host, false);
2806 | } |
2807 | |
2808 | if (pending & SDMMC_INT_TXDR) { |
2809 | mci_writel(host, RINTSTS, SDMMC_INT_TXDR); |
2810 | if (host->dir_status == DW_MCI_SEND_STATUS && host->sg) |
2811 | dw_mci_write_data_pio(host); |
2812 | } |
2813 | |
2814 | if (pending & SDMMC_INT_CMD_DONE) { |
2815 | spin_lock(&host->irq_lock);
2816 | |
2817 | mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE); |
2818 | dw_mci_cmd_interrupt(host, pending);
2819 | |
2820 | spin_unlock(&host->irq_lock);
2821 | } |
2822 | |
2823 | if (pending & SDMMC_INT_CD) { |
2824 | mci_writel(host, RINTSTS, SDMMC_INT_CD); |
2825 | dw_mci_handle_cd(host); |
2826 | } |
2827 | |
2828 | if (pending & SDMMC_INT_SDIO(slot->sdio_id)) { |
2829 | mci_writel(host, RINTSTS, |
2830 | SDMMC_INT_SDIO(slot->sdio_id)); |
2831 | __dw_mci_enable_sdio_irq(slot, 0);
2832 | sdio_signal_irq(slot->mmc);
2833 | } |
2834 | |
2835 | } |
2836 | |
2837 | if (host->use_dma != TRANS_MODE_IDMAC) |
2838 | return IRQ_HANDLED; |
2839 | |
2840 | /* Handle IDMA interrupts */ |
2841 | if (host->dma_64bit_address == 1) { |
2842 | pending = mci_readl(host, IDSTS64); |
2843 | if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) { |
2844 | mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_TI | |
2845 | SDMMC_IDMAC_INT_RI); |
2846 | mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_NI); |
2847 | if (!test_bit(EVENT_DATA_ERROR, &host->pending_events)) |
2848 | host->dma_ops->complete((void *)host); |
2849 | } |
2850 | } else { |
2851 | pending = mci_readl(host, IDSTS); |
2852 | if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) { |
2853 | mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | |
2854 | SDMMC_IDMAC_INT_RI); |
2855 | mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI); |
2856 | if (!test_bit(EVENT_DATA_ERROR, &host->pending_events)) |
2857 | host->dma_ops->complete((void *)host); |
2858 | } |
2859 | } |
2860 | |
2861 | return IRQ_HANDLED; |
2862 | } |
2863 | |
2864 | static int dw_mci_init_slot_caps(struct dw_mci_slot *slot) |
2865 | { |
2866 | struct dw_mci *host = slot->host; |
2867 | const struct dw_mci_drv_data *drv_data = host->drv_data; |
2868 | struct mmc_host *mmc = slot->mmc; |
2869 | int ctrl_id; |
2870 | |
2871 | if (host->pdata->caps) |
2872 | mmc->caps = host->pdata->caps; |
2873 | |
2874 | if (host->pdata->pm_caps) |
2875 | mmc->pm_caps = host->pdata->pm_caps; |
2876 | |
2877 | if (drv_data) |
2878 | mmc->caps |= drv_data->common_caps; |
2879 | |
2880 | if (host->dev->of_node) { |
2881 | ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
2882 | if (ctrl_id < 0) |
2883 | ctrl_id = 0; |
2884 | } else { |
2885 | ctrl_id = to_platform_device(host->dev)->id; |
2886 | } |
2887 | |
2888 | if (drv_data && drv_data->caps) { |
2889 | if (ctrl_id >= drv_data->num_caps) { |
2890 | dev_err(host->dev, "invalid controller id %d\n" , |
2891 | ctrl_id); |
2892 | return -EINVAL; |
2893 | } |
2894 | mmc->caps |= drv_data->caps[ctrl_id]; |
2895 | } |
2896 | |
2897 | if (host->pdata->caps2) |
2898 | mmc->caps2 = host->pdata->caps2; |
2899 | |
2900 | /* if the host has set a minimum frequency, we should respect it */
2901 | if (host->minimum_speed) |
2902 | mmc->f_min = host->minimum_speed; |
2903 | else |
2904 | mmc->f_min = DW_MCI_FREQ_MIN; |
2905 | |
2906 | if (!mmc->f_max) |
2907 | mmc->f_max = DW_MCI_FREQ_MAX; |
2908 | |
2909 | /* Process SDIO IRQs through the sdio_irq_work. */ |
2910 | if (mmc->caps & MMC_CAP_SDIO_IRQ) |
2911 | mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD; |
2912 | |
2913 | return 0; |
2914 | } |
2915 | |
2916 | static int dw_mci_init_slot(struct dw_mci *host) |
2917 | { |
2918 | struct mmc_host *mmc; |
2919 | struct dw_mci_slot *slot; |
2920 | int ret; |
2921 | |
2922 | mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
2923 | if (!mmc) |
2924 | return -ENOMEM; |
2925 | |
2926 | slot = mmc_priv(mmc);
2927 | slot->id = 0; |
2928 | slot->sdio_id = host->sdio_id0 + slot->id; |
2929 | slot->mmc = mmc; |
2930 | slot->host = host; |
2931 | host->slot = slot; |
2932 | |
2933 | mmc->ops = &dw_mci_ops; |
2934 | |
2935 | /* if there are external regulators, get them */
2936 | ret = mmc_regulator_get_supply(mmc); |
2937 | if (ret) |
2938 | goto err_host_allocated; |
2939 | |
2940 | if (!mmc->ocr_avail) |
2941 | mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; |
2942 | |
2943 | ret = mmc_of_parse(mmc);
2944 | if (ret) |
2945 | goto err_host_allocated; |
2946 | |
2947 | ret = dw_mci_init_slot_caps(slot); |
2948 | if (ret) |
2949 | goto err_host_allocated; |
2950 | |
2951 | /* Useful defaults if platform data is unset. */ |
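     | /*
     | * Illustrative sizing, assuming 4 KiB pages and 16-byte 32-bit IDMAC
     | * descriptors (a 256-entry ring): the IDMAC limits below then allow
     | * requests of up to 0x1000 * 256 = 1 MiB.
     | */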
2952 | if (host->use_dma == TRANS_MODE_IDMAC) { |
2953 | mmc->max_segs = host->ring_size; |
2954 | mmc->max_blk_size = 65535; |
2955 | mmc->max_seg_size = 0x1000; |
2956 | mmc->max_req_size = mmc->max_seg_size * host->ring_size; |
2957 | mmc->max_blk_count = mmc->max_req_size / 512; |
2958 | } else if (host->use_dma == TRANS_MODE_EDMAC) { |
2959 | mmc->max_segs = 64; |
2960 | mmc->max_blk_size = 65535; |
2961 | mmc->max_blk_count = 65535; |
2962 | mmc->max_req_size = |
2963 | mmc->max_blk_size * mmc->max_blk_count; |
2964 | mmc->max_seg_size = mmc->max_req_size; |
2965 | } else { |
2966 | /* TRANS_MODE_PIO */ |
2967 | mmc->max_segs = 64; |
2968 | mmc->max_blk_size = 65535; /* BLKSIZ is 16 bits */ |
2969 | mmc->max_blk_count = 512; |
2970 | mmc->max_req_size = mmc->max_blk_size * |
2971 | mmc->max_blk_count; |
2972 | mmc->max_seg_size = mmc->max_req_size; |
2973 | } |
2974 | |
2975 | dw_mci_get_cd(mmc); |
2976 | |
2977 | ret = mmc_add_host(mmc); |
2978 | if (ret) |
2979 | goto err_host_allocated; |
2980 | |
2981 | #if defined(CONFIG_DEBUG_FS) |
2982 | dw_mci_init_debugfs(slot); |
2983 | #endif |
2984 | |
2985 | return 0; |
2986 | |
2987 | err_host_allocated: |
2988 | mmc_free_host(mmc); |
2989 | return ret; |
2990 | } |
2991 | |
2992 | static void dw_mci_cleanup_slot(struct dw_mci_slot *slot) |
2993 | { |
2994 | /* Debugfs stuff is cleaned up by mmc core */ |
2995 | mmc_remove_host(slot->mmc); |
2996 | slot->host->slot = NULL; |
2997 | mmc_free_host(slot->mmc); |
2998 | } |
2999 | |
3000 | static void dw_mci_init_dma(struct dw_mci *host) |
3001 | { |
3002 | int addr_config; |
3003 | struct device *dev = host->dev; |
3004 | |
3005 | /* |
3006 | * Check transfer mode from HCON[17:16]
3007 | * Clarify the ambiguous description in the dw_mmc databook:
3008 | * 2b'00: No DMA Interface -> Actually means using Internal DMA block |
3009 | * 2b'01: DesignWare DMA Interface -> Synopsys DW-DMA block |
3010 | * 2b'10: Generic DMA Interface -> non-Synopsys generic DMA block |
3011 | * 2b'11: Non DW DMA Interface -> pio only |
3012 | * Compared to DesignWare DMA Interface, Generic DMA Interface has a |
3013 | * simpler request/acknowledge handshake mechanism and both of them |
3014 | * are regarded as external dma master for dw_mmc. |
3015 | */ |
3016 | host->use_dma = SDMMC_GET_TRANS_MODE(mci_readl(host, HCON)); |
3017 | if (host->use_dma == DMA_INTERFACE_IDMA) { |
3018 | host->use_dma = TRANS_MODE_IDMAC; |
3019 | } else if (host->use_dma == DMA_INTERFACE_DWDMA || |
3020 | host->use_dma == DMA_INTERFACE_GDMA) { |
3021 | host->use_dma = TRANS_MODE_EDMAC; |
3022 | } else { |
3023 | goto no_dma; |
3024 | } |
3025 | |
3026 | /* Determine which DMA interface to use */ |
3027 | if (host->use_dma == TRANS_MODE_IDMAC) { |
3028 | /* |
3029 | * Check ADDR_CONFIG bit in HCON to find |
3030 | * IDMAC address bus width |
3031 | */ |
3032 | addr_config = SDMMC_GET_ADDR_CONFIG(mci_readl(host, HCON)); |
3033 | |
3034 | if (addr_config == 1) { |
3035 | /* host supports IDMAC in 64-bit address mode */ |
3036 | host->dma_64bit_address = 1; |
3037 | dev_info(host->dev, |
3038 | "IDMAC supports 64-bit address mode.\n" ); |
3039 | if (!dma_set_mask(dev: host->dev, DMA_BIT_MASK(64))) |
3040 | dma_set_coherent_mask(dev: host->dev, |
3041 | DMA_BIT_MASK(64)); |
3042 | } else { |
3043 | /* host supports IDMAC in 32-bit address mode */ |
3044 | host->dma_64bit_address = 0; |
3045 | dev_info(host->dev, |
3046 | "IDMAC supports 32-bit address mode.\n" ); |
3047 | } |
3048 | |
3049 | /* Alloc memory for sg translation */ |
3050 | host->sg_cpu = dmam_alloc_coherent(host->dev,
3051 | DESC_RING_BUF_SZ,
3052 | &host->sg_dma, GFP_KERNEL);
3053 | if (!host->sg_cpu) { |
3054 | dev_err(host->dev, |
3055 | "%s: could not alloc DMA memory\n" , |
3056 | __func__); |
3057 | goto no_dma; |
3058 | } |
3059 | |
3060 | host->dma_ops = &dw_mci_idmac_ops; |
3061 | dev_info(host->dev, "Using internal DMA controller.\n" ); |
3062 | } else { |
3063 | /* TRANS_MODE_EDMAC: check dma bindings again */ |
3064 | if ((device_property_string_array_count(dev, "dma-names") < 0) ||
3065 | !device_property_present(dev, "dmas")) {
3066 | goto no_dma; |
3067 | } |
3068 | host->dma_ops = &dw_mci_edmac_ops; |
3069 | dev_info(host->dev, "Using external DMA controller.\n" ); |
3070 | } |
3071 | |
3072 | if (host->dma_ops->init && host->dma_ops->start && |
3073 | host->dma_ops->stop && host->dma_ops->cleanup) { |
3074 | if (host->dma_ops->init(host)) { |
3075 | dev_err(host->dev, "%s: Unable to initialize DMA Controller.\n" , |
3076 | __func__); |
3077 | goto no_dma; |
3078 | } |
3079 | } else { |
3080 | dev_err(host->dev, "DMA initialization not found.\n" ); |
3081 | goto no_dma; |
3082 | } |
3083 | |
3084 | return; |
3085 | |
3086 | no_dma: |
3087 | dev_info(host->dev, "Using PIO mode.\n" ); |
3088 | host->use_dma = TRANS_MODE_PIO; |
3089 | } |
3090 | |
3091 | static void dw_mci_cmd11_timer(struct timer_list *t) |
3092 | { |
3093 | struct dw_mci *host = from_timer(host, t, cmd11_timer); |
3094 | |
3095 | if (host->state != STATE_SENDING_CMD11) { |
3096 | dev_warn(host->dev, "Unexpected CMD11 timeout\n" ); |
3097 | return; |
3098 | } |
3099 | |
3100 | host->cmd_status = SDMMC_INT_RTO; |
3101 | set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
3102 | tasklet_schedule(&host->tasklet);
3103 | } |
3104 | |
3105 | static void dw_mci_cto_timer(struct timer_list *t) |
3106 | { |
3107 | struct dw_mci *host = from_timer(host, t, cto_timer); |
3108 | unsigned long irqflags; |
3109 | u32 pending; |
3110 | |
3111 | spin_lock_irqsave(&host->irq_lock, irqflags); |
3112 | |
3113 | /* |
3114 | * If somehow we have very bad interrupt latency it's remotely possible |
3115 | * that the timer could fire while the interrupt is still pending or |
3116 | * while the interrupt is midway through running. Let's be paranoid |
3117 | * and detect those two cases. Note that this paranoia is somewhat
3118 | * justified because in this function we don't actually cancel the |
3119 | * pending command in the controller--we just assume it will never come. |
3120 | */ |
3121 | pending = mci_readl(host, MINTSTS); /* read-only mask reg */ |
3122 | if (pending & (DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_CMD_DONE)) { |
3123 | /* The interrupt should fire; no need to act but we can warn */ |
3124 | dev_warn(host->dev, "Unexpected interrupt latency\n" ); |
3125 | goto exit; |
3126 | } |
3127 | if (test_bit(EVENT_CMD_COMPLETE, &host->pending_events)) { |
3128 | /* Presumably interrupt handler couldn't delete the timer */ |
3129 | dev_warn(host->dev, "CTO timeout when already completed\n" ); |
3130 | goto exit; |
3131 | } |
3132 | |
3133 | /* |
3134 | * Continued paranoia to make sure we're in the state we expect. |
3135 | * This paranoia isn't really justified but it seems good to be safe. |
3136 | */ |
3137 | switch (host->state) { |
3138 | case STATE_SENDING_CMD11: |
3139 | case STATE_SENDING_CMD: |
3140 | case STATE_SENDING_STOP: |
3141 | /* |
3142 | * If CMD_DONE interrupt does NOT come in sending command |
3143 | * state, we should notify the driver to terminate current |
3144 | * transfer and report a command timeout to the core. |
3145 | */ |
3146 | host->cmd_status = SDMMC_INT_RTO; |
3147 | set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
3148 | tasklet_schedule(&host->tasklet);
3149 | break; |
3150 | default: |
3151 | dev_warn(host->dev, "Unexpected command timeout, state %d\n" , |
3152 | host->state); |
3153 | break; |
3154 | } |
3155 | |
3156 | exit: |
3157 | spin_unlock_irqrestore(&host->irq_lock, irqflags);
3158 | } |
3159 | |
3160 | static void dw_mci_dto_timer(struct timer_list *t) |
3161 | { |
3162 | struct dw_mci *host = from_timer(host, t, dto_timer); |
3163 | unsigned long irqflags; |
3164 | u32 pending; |
3165 | |
3166 | spin_lock_irqsave(&host->irq_lock, irqflags); |
3167 | |
3168 | /* |
3169 | * The DTO timer is much longer than the CTO timer, so it's even less |
3170 | * likely that we'll hit these cases, but it pays to be paranoid.
3171 | */ |
3172 | pending = mci_readl(host, MINTSTS); /* read-only mask reg */ |
3173 | if (pending & SDMMC_INT_DATA_OVER) { |
3174 | /* The interrupt should fire; no need to act but we can warn */ |
3175 | dev_warn(host->dev, "Unexpected data interrupt latency\n" ); |
3176 | goto exit; |
3177 | } |
3178 | if (test_bit(EVENT_DATA_COMPLETE, &host->pending_events)) { |
3179 | /* Presumably interrupt handler couldn't delete the timer */ |
3180 | dev_warn(host->dev, "DTO timeout when already completed\n" ); |
3181 | goto exit; |
3182 | } |
3183 | |
3184 | /* |
3185 | * Continued paranoia to make sure we're in the state we expect. |
3186 | * This paranoia isn't really justified but it seems good to be safe. |
3187 | */ |
3188 | switch (host->state) { |
3189 | case STATE_SENDING_DATA: |
3190 | case STATE_DATA_BUSY: |
3191 | /* |
3192 | * If DTO interrupt does NOT come in sending data state, |
3193 | * we should notify the driver to terminate current transfer |
3194 | * and report a data timeout to the core. |
3195 | */ |
3196 | host->data_status = SDMMC_INT_DRTO; |
3197 | set_bit(EVENT_DATA_ERROR, &host->pending_events);
3198 | set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
3199 | tasklet_schedule(&host->tasklet);
3200 | break; |
3201 | default: |
3202 | dev_warn(host->dev, "Unexpected data timeout, state %d\n" , |
3203 | host->state); |
3204 | break; |
3205 | } |
3206 | |
3207 | exit: |
3208 | spin_unlock_irqrestore(&host->irq_lock, irqflags);
3209 | } |
3210 | |
3211 | #ifdef CONFIG_OF |
3212 | static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host) |
3213 | { |
3214 | struct dw_mci_board *pdata; |
3215 | struct device *dev = host->dev; |
3216 | const struct dw_mci_drv_data *drv_data = host->drv_data; |
3217 | int ret; |
3218 | u32 clock_frequency; |
3219 | |
3220 | pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
3221 | if (!pdata) |
3222 | return ERR_PTR(-ENOMEM);
3223 | |
3224 | /* find the reset controller if one exists */
3225 | pdata->rstc = devm_reset_control_get_optional_exclusive(dev, "reset");
3226 | if (IS_ERR(pdata->rstc))
3227 | return ERR_CAST(pdata->rstc);
3228 | |
3229 | if (device_property_read_u32(dev, propname: "fifo-depth" , val: &pdata->fifo_depth)) |
3230 | dev_info(dev, |
3231 | "fifo-depth property not found, using value of FIFOTH register as default\n" ); |
3232 | |
3233 | device_property_read_u32(dev, propname: "card-detect-delay" , |
3234 | val: &pdata->detect_delay_ms); |
3235 | |
3236 | device_property_read_u32(dev, propname: "data-addr" , val: &host->data_addr_override); |
3237 | |
3238 | if (device_property_present(dev, propname: "fifo-watermark-aligned" )) |
3239 | host->wm_aligned = true; |
3240 | |
3241 | if (!device_property_read_u32(dev, propname: "clock-frequency" , val: &clock_frequency)) |
3242 | pdata->bus_hz = clock_frequency; |
3243 | |
3244 | if (drv_data && drv_data->parse_dt) { |
3245 | ret = drv_data->parse_dt(host); |
3246 | if (ret) |
3247 | return ERR_PTR(ret);
3248 | } |
3249 | |
3250 | return pdata; |
3251 | } |
3252 | |
3253 | #else /* CONFIG_OF */ |
3254 | static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host) |
3255 | { |
3256 | return ERR_PTR(-EINVAL); |
3257 | } |
3258 | #endif /* CONFIG_OF */ |
3259 | |
3260 | static void dw_mci_enable_cd(struct dw_mci *host) |
3261 | { |
3262 | unsigned long irqflags; |
3263 | u32 temp; |
3264 | |
3265 | /*
3266 | * The controller's card-detect interrupt is only needed when card
3267 | * detection is neither polled nor handled by a card-detect GPIO.
3268 | */
3269 | if (host->slot->mmc->caps & MMC_CAP_NEEDS_POLL) |
3270 | return; |
3271 | |
3272 | if (mmc_gpio_get_cd(host->slot->mmc) < 0) {
3273 | spin_lock_irqsave(&host->irq_lock, irqflags); |
3274 | temp = mci_readl(host, INTMASK); |
3275 | temp |= SDMMC_INT_CD; |
3276 | mci_writel(host, INTMASK, temp); |
3277 | spin_unlock_irqrestore(&host->irq_lock, irqflags);
3278 | } |
3279 | } |
3280 | |
3281 | int dw_mci_probe(struct dw_mci *host) |
3282 | { |
3283 | const struct dw_mci_drv_data *drv_data = host->drv_data; |
3284 | int width, i, ret = 0; |
3285 | u32 fifo_size; |
3286 | |
3287 | if (!host->pdata) { |
3288 | host->pdata = dw_mci_parse_dt(host); |
3289 | if (IS_ERR(host->pdata))
3290 | return dev_err_probe(host->dev, PTR_ERR(host->pdata),
3291 | "platform data not available\n");
3292 | } |
3293 | |
3294 | host->biu_clk = devm_clk_get(host->dev, "biu");
3295 | if (IS_ERR(host->biu_clk)) {
3296 | dev_dbg(host->dev, "biu clock not available\n");
3297 | } else { |
3298 | ret = clk_prepare_enable(host->biu_clk);
3299 | if (ret) { |
3300 | dev_err(host->dev, "failed to enable biu clock\n" ); |
3301 | return ret; |
3302 | } |
3303 | } |
3304 | |
3305 | host->ciu_clk = devm_clk_get(host->dev, "ciu");
3306 | if (IS_ERR(host->ciu_clk)) {
3307 | dev_dbg(host->dev, "ciu clock not available\n");
3308 | host->bus_hz = host->pdata->bus_hz; |
3309 | } else { |
3310 | ret = clk_prepare_enable(host->ciu_clk);
3311 | if (ret) { |
3312 | dev_err(host->dev, "failed to enable ciu clock\n" ); |
3313 | goto err_clk_biu; |
3314 | } |
3315 | |
3316 | if (host->pdata->bus_hz) { |
3317 | ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz);
3318 | if (ret) |
3319 | dev_warn(host->dev, |
3320 | "Unable to set bus rate to %uHz\n" , |
3321 | host->pdata->bus_hz); |
3322 | } |
3323 | host->bus_hz = clk_get_rate(host->ciu_clk);
3324 | } |
3325 | |
3326 | if (!host->bus_hz) { |
3327 | dev_err(host->dev, |
3328 | "Platform data must supply bus speed\n" ); |
3329 | ret = -ENODEV; |
3330 | goto err_clk_ciu; |
3331 | } |
3332 | |
3333 | if (host->pdata->rstc) { |
3334 | reset_control_assert(host->pdata->rstc);
3335 | usleep_range(10, 50);
3336 | reset_control_deassert(host->pdata->rstc);
3337 | } |
3338 | |
3339 | if (drv_data && drv_data->init) { |
3340 | ret = drv_data->init(host); |
3341 | if (ret) { |
3342 | dev_err(host->dev, |
3343 | "implementation specific init failed\n" ); |
3344 | goto err_clk_ciu; |
3345 | } |
3346 | } |
3347 | |
3348 | timer_setup(&host->cmd11_timer, dw_mci_cmd11_timer, 0); |
3349 | timer_setup(&host->cto_timer, dw_mci_cto_timer, 0); |
3350 | timer_setup(&host->dto_timer, dw_mci_dto_timer, 0); |
3351 | |
3352 | spin_lock_init(&host->lock); |
3353 | spin_lock_init(&host->irq_lock); |
3354 | INIT_LIST_HEAD(&host->queue);
3355 | |
3356 | dw_mci_init_fault(host); |
3357 | |
3358 | /* |
3359 | * Get the host data width - this assumes that HCON has been set with |
3360 | * the correct values. |
3361 | */ |
3362 | i = SDMMC_GET_HDATA_WIDTH(mci_readl(host, HCON)); |
3363 | if (!i) { |
3364 | host->push_data = dw_mci_push_data16; |
3365 | host->pull_data = dw_mci_pull_data16; |
3366 | width = 16; |
3367 | host->data_shift = 1; |
3368 | } else if (i == 2) { |
3369 | host->push_data = dw_mci_push_data64; |
3370 | host->pull_data = dw_mci_pull_data64; |
3371 | width = 64; |
3372 | host->data_shift = 3; |
3373 | } else { |
3374 | /* Check for a reserved value, and warn if it is */ |
3375 | WARN((i != 1), |
3376 | "HCON reports a reserved host data width!\n" |
3377 | "Defaulting to 32-bit access.\n" ); |
3378 | host->push_data = dw_mci_push_data32; |
3379 | host->pull_data = dw_mci_pull_data32; |
3380 | width = 32; |
3381 | host->data_shift = 2; |
3382 | } |
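     | /* data_shift converts FIFO words to bytes: bytes = words << data_shift. */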
3383 | |
3384 | /* Reset all blocks */ |
3385 | if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) { |
3386 | ret = -ENODEV; |
3387 | goto err_clk_ciu; |
3388 | } |
3389 | |
3390 | host->dma_ops = host->pdata->dma_ops; |
3391 | dw_mci_init_dma(host); |
3392 | |
3393 | /* Clear the interrupts for the host controller */ |
3394 | mci_writel(host, RINTSTS, 0xFFFFFFFF); |
3395 | mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */ |
3396 | |
3397 | /* Put in max timeout */ |
3398 | mci_writel(host, TMOUT, 0xFFFFFFFF); |
3399 | |
3400 | /* |
3401 | * FIFO threshold settings RxMark = fifo_size / 2 - 1, |
3402 | * Tx Mark = fifo_size / 2 DMA Size = 8 |
3403 | */ |
3404 | if (!host->pdata->fifo_depth) { |
3405 | /* |
3406 | * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may |
3407 | * have been overwritten by the bootloader, just like we're |
3408 | * about to do, so if you know the value for your hardware, you |
3409 | * should put it in the platform data. |
3410 | */ |
3411 | fifo_size = mci_readl(host, FIFOTH); |
3412 | fifo_size = 1 + ((fifo_size >> 16) & 0xfff); |
3413 | } else { |
3414 | fifo_size = host->pdata->fifo_depth; |
3415 | } |
3416 | host->fifo_depth = fifo_size; |
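/*
 * FIFOTH layout, per the DesignWare databook: bits [30:28] hold the
 * DMA multiple-transaction size (0x2 encodes bursts of 8 transfers),
 * bits [27:16] the RX watermark and bits [11:0] the TX watermark.
 * For example, a 32-word FIFO yields SDMMC_SET_FIFOTH(0x2, 15, 16),
 * i.e. 0x200f0010.
 */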
3417 | host->fifoth_val = |
3418 | SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2); |
3419 | mci_writel(host, FIFOTH, host->fifoth_val); |
3420 | |
3421 | /* disable clock to CIU */ |
3422 | mci_writel(host, CLKENA, 0); |
3423 | mci_writel(host, CLKSRC, 0); |
3424 | |
3425 | /* |
3426 | * In 2.40a spec, Data offset is changed. |
3427 | * Need to check the version-id and set data-offset for DATA register. |
3428 | */ |
3429 | host->verid = SDMMC_GET_VERID(mci_readl(host, VERID)); |
dev_info(host->dev, "Version ID is %04x\n", host->verid);
3431 | |
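/*
 * Cores older than 2.40a expose the FIFO at DATA_OFFSET; 2.40a and
 * later moved it to DATA_240A_OFFSET. Platform glue may also override
 * the offset outright via data_addr_override.
 */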
3432 | if (host->data_addr_override) |
3433 | host->fifo_reg = host->regs + host->data_addr_override; |
3434 | else if (host->verid < DW_MMC_240A) |
3435 | host->fifo_reg = host->regs + DATA_OFFSET; |
3436 | else |
3437 | host->fifo_reg = host->regs + DATA_240A_OFFSET; |
3438 | |
tasklet_setup(&host->tasklet, dw_mci_tasklet_func);
ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
host->irq_flags, "dw-mci", host);
3442 | if (ret) |
3443 | goto err_dmaunmap; |
3444 | |
3445 | /* |
3446 | * Enable interrupts for command done, data over, data empty, |
3447 | * receive ready and error such as transmit, receive timeout, crc error |
3448 | */ |
3449 | mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | |
3450 | SDMMC_INT_TXDR | SDMMC_INT_RXDR | |
3451 | DW_MCI_ERROR_FLAGS); |
3452 | /* Enable mci interrupt */ |
3453 | mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); |
3454 | |
3455 | dev_info(host->dev, |
3456 | "DW MMC controller at irq %d,%d bit host data width,%u deep fifo\n" , |
3457 | host->irq, width, fifo_size); |
3458 | |
3459 | /* We need at least one slot to succeed */ |
3460 | ret = dw_mci_init_slot(host); |
3461 | if (ret) { |
dev_dbg(host->dev, "slot %d init failed\n", i);
3463 | goto err_dmaunmap; |
3464 | } |
3465 | |
3466 | /* Now that slots are all setup, we can enable card detect */ |
3467 | dw_mci_enable_cd(host); |
3468 | |
3469 | return 0; |
3470 | |
3471 | err_dmaunmap: |
3472 | if (host->use_dma && host->dma_ops->exit) |
3473 | host->dma_ops->exit(host); |
3474 | |
reset_control_assert(host->pdata->rstc);
3476 | |
3477 | err_clk_ciu: |
clk_disable_unprepare(host->ciu_clk);
3479 | |
3480 | err_clk_biu: |
clk_disable_unprepare(host->biu_clk);
3482 | |
3483 | return ret; |
3484 | } |
3485 | EXPORT_SYMBOL(dw_mci_probe); |
3486 | |
3487 | void dw_mci_remove(struct dw_mci *host) |
3488 | { |
dev_dbg(host->dev, "remove slot\n");
if (host->slot)
dw_mci_cleanup_slot(host->slot);
3492 | |
3493 | mci_writel(host, RINTSTS, 0xFFFFFFFF); |
3494 | mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */ |
3495 | |
3496 | /* disable clock to CIU */ |
3497 | mci_writel(host, CLKENA, 0); |
3498 | mci_writel(host, CLKSRC, 0); |
3499 | |
3500 | if (host->use_dma && host->dma_ops->exit) |
3501 | host->dma_ops->exit(host); |
3502 | |
reset_control_assert(host->pdata->rstc);

clk_disable_unprepare(host->ciu_clk);
clk_disable_unprepare(host->biu_clk);
3507 | } |
3508 | EXPORT_SYMBOL(dw_mci_remove); |
3509 | |
3512 | #ifdef CONFIG_PM |
3513 | int dw_mci_runtime_suspend(struct device *dev) |
3514 | { |
3515 | struct dw_mci *host = dev_get_drvdata(dev); |
3516 | |
3517 | if (host->use_dma && host->dma_ops->exit) |
3518 | host->dma_ops->exit(host); |
3519 | |
clk_disable_unprepare(host->ciu_clk);
3521 | |
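/*
 * The BIU clock can only be gated when card detection does not rely
 * on the controller itself, i.e. when a dedicated GPIO reports card
 * presence or the card is non-removable.
 */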
if (host->slot &&
(mmc_can_gpio_cd(host->slot->mmc) ||
!mmc_card_is_removable(host->slot->mmc)))
clk_disable_unprepare(host->biu_clk);
3526 | |
3527 | return 0; |
3528 | } |
3529 | EXPORT_SYMBOL(dw_mci_runtime_suspend); |
3530 | |
3531 | int dw_mci_runtime_resume(struct device *dev) |
3532 | { |
3533 | int ret = 0; |
3534 | struct dw_mci *host = dev_get_drvdata(dev); |
3535 | |
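/*
 * Re-enable the BIU clock under the same condition used to gate it
 * in dw_mci_runtime_suspend().
 */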
if (host->slot &&
(mmc_can_gpio_cd(host->slot->mmc) ||
!mmc_card_is_removable(host->slot->mmc))) {
ret = clk_prepare_enable(host->biu_clk);
if (ret)
return ret;
}

ret = clk_prepare_enable(host->ciu_clk);
3545 | if (ret) |
3546 | goto err; |
3547 | |
3548 | if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) { |
clk_disable_unprepare(host->ciu_clk);
3550 | ret = -ENODEV; |
3551 | goto err; |
3552 | } |
3553 | |
3554 | if (host->use_dma && host->dma_ops->init) |
3555 | host->dma_ops->init(host); |
3556 | |
3557 | /* |
3558 | * Restore the initial value at FIFOTH register |
3559 | * And Invalidate the prev_blksz with zero |
3560 | */ |
3561 | mci_writel(host, FIFOTH, host->fifoth_val); |
3562 | host->prev_blksz = 0; |
3563 | |
3564 | /* Put in max timeout */ |
3565 | mci_writel(host, TMOUT, 0xFFFFFFFF); |
3566 | |
3567 | mci_writel(host, RINTSTS, 0xFFFFFFFF); |
3568 | mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | |
3569 | SDMMC_INT_TXDR | SDMMC_INT_RXDR | |
3570 | DW_MCI_ERROR_FLAGS); |
3571 | mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); |
3573 | |
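/*
 * If the card kept power across suspend (MMC_PM_KEEP_POWER), replay
 * the last ios settings so the controller clocking and bus width
 * match the card's retained state.
 */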
if (host->slot && host->slot->mmc->pm_flags & MMC_PM_KEEP_POWER)
dw_mci_set_ios(host->slot->mmc, &host->slot->mmc->ios);
3576 | |
3577 | /* Force setup bus to guarantee available clock output */ |
dw_mci_setup_bus(host->slot, true);
3579 | |
3580 | /* Re-enable SDIO interrupts. */ |
if (sdio_irq_claimed(host->slot->mmc))
__dw_mci_enable_sdio_irq(host->slot, 1);
3583 | |
3584 | /* Now that slots are all setup, we can enable card detect */ |
3585 | dw_mci_enable_cd(host); |
3586 | |
3587 | return 0; |
3588 | |
3589 | err: |
if (host->slot &&
(mmc_can_gpio_cd(host->slot->mmc) ||
!mmc_card_is_removable(host->slot->mmc)))
clk_disable_unprepare(host->biu_clk);
3594 | |
3595 | return ret; |
3596 | } |
3597 | EXPORT_SYMBOL(dw_mci_runtime_resume); |
3598 | #endif /* CONFIG_PM */ |
3599 | |
3600 | static int __init dw_mci_init(void) |
3601 | { |
3602 | pr_info("Synopsys Designware Multimedia Card Interface Driver\n" ); |
3603 | return 0; |
3604 | } |
3605 | |
3606 | static void __exit dw_mci_exit(void) |
3607 | { |
3608 | } |
3609 | |
3610 | module_init(dw_mci_init); |
3611 | module_exit(dw_mci_exit); |
3612 | |
MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
MODULE_AUTHOR("NXP Semiconductor VietNam");
MODULE_AUTHOR("Imagination Technologies Ltd");
MODULE_LICENSE("GPL v2");
3617 | |