1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
4 *
5 * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
6 * Copyright (C) 2010 ST-Ericsson SA
7 */
8#include <linux/module.h>
9#include <linux/moduleparam.h>
10#include <linux/init.h>
11#include <linux/ioport.h>
12#include <linux/device.h>
13#include <linux/io.h>
14#include <linux/interrupt.h>
15#include <linux/kernel.h>
16#include <linux/slab.h>
17#include <linux/delay.h>
18#include <linux/err.h>
19#include <linux/highmem.h>
20#include <linux/log2.h>
21#include <linux/mmc/mmc.h>
22#include <linux/mmc/pm.h>
23#include <linux/mmc/host.h>
24#include <linux/mmc/card.h>
25#include <linux/mmc/sd.h>
26#include <linux/mmc/slot-gpio.h>
27#include <linux/amba/bus.h>
28#include <linux/clk.h>
29#include <linux/scatterlist.h>
30#include <linux/of.h>
31#include <linux/regulator/consumer.h>
32#include <linux/dmaengine.h>
33#include <linux/dma-mapping.h>
34#include <linux/amba/mmci.h>
35#include <linux/pm_runtime.h>
36#include <linux/types.h>
37#include <linux/pinctrl/consumer.h>
38#include <linux/reset.h>
39#include <linux/gpio/consumer.h>
40#include <linux/workqueue.h>
41
42#include <asm/div64.h>
43#include <asm/io.h>
44
45#include "mmci.h"
46
47#define DRIVER_NAME "mmci-pl18x"
48
49static void mmci_variant_init(struct mmci_host *host);
50static void ux500_variant_init(struct mmci_host *host);
51static void ux500v2_variant_init(struct mmci_host *host);
52
53static unsigned int fmax = 515633;
54
55static struct variant_data variant_arm = {
56 .fifosize = 16 * 4,
57 .fifohalfsize = 8 * 4,
58 .cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
59 .cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
60 .cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
61 .cmdreg_srsp = MCI_CPSM_RESPONSE,
62 .datalength_bits = 16,
63 .datactrl_blocksz = 11,
64 .pwrreg_powerup = MCI_PWR_UP,
65 .f_max = 100000000,
66 .reversed_irq_handling = true,
67 .mmcimask1 = true,
68 .irq_pio_mask = MCI_IRQ_PIO_MASK,
69 .start_err = MCI_STARTBITERR,
70 .opendrain = MCI_ROD,
71 .init = mmci_variant_init,
72};
73
74static struct variant_data variant_arm_extended_fifo = {
75 .fifosize = 128 * 4,
76 .fifohalfsize = 64 * 4,
77 .cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
78 .cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
79 .cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
80 .cmdreg_srsp = MCI_CPSM_RESPONSE,
81 .datalength_bits = 16,
82 .datactrl_blocksz = 11,
83 .pwrreg_powerup = MCI_PWR_UP,
84 .f_max = 100000000,
85 .mmcimask1 = true,
86 .irq_pio_mask = MCI_IRQ_PIO_MASK,
87 .start_err = MCI_STARTBITERR,
88 .opendrain = MCI_ROD,
89 .init = mmci_variant_init,
90};
91
92static struct variant_data variant_arm_extended_fifo_hwfc = {
93 .fifosize = 128 * 4,
94 .fifohalfsize = 64 * 4,
95 .clkreg_enable = MCI_ARM_HWFCEN,
96 .cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
97 .cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
98 .cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
99 .cmdreg_srsp = MCI_CPSM_RESPONSE,
100 .datalength_bits = 16,
101 .datactrl_blocksz = 11,
102 .pwrreg_powerup = MCI_PWR_UP,
103 .f_max = 100000000,
104 .mmcimask1 = true,
105 .irq_pio_mask = MCI_IRQ_PIO_MASK,
106 .start_err = MCI_STARTBITERR,
107 .opendrain = MCI_ROD,
108 .init = mmci_variant_init,
109};
110
111static struct variant_data variant_u300 = {
112 .fifosize = 16 * 4,
113 .fifohalfsize = 8 * 4,
114 .clkreg_enable = MCI_ST_U300_HWFCEN,
115 .clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
116 .cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
117 .cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
118 .cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
119 .cmdreg_srsp = MCI_CPSM_RESPONSE,
120 .datalength_bits = 16,
121 .datactrl_blocksz = 11,
122 .datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
123 .st_sdio = true,
124 .pwrreg_powerup = MCI_PWR_ON,
125 .f_max = 100000000,
126 .signal_direction = true,
127 .pwrreg_clkgate = true,
128 .pwrreg_nopower = true,
129 .mmcimask1 = true,
130 .irq_pio_mask = MCI_IRQ_PIO_MASK,
131 .start_err = MCI_STARTBITERR,
132 .opendrain = MCI_OD,
133 .init = mmci_variant_init,
134};
135
136static struct variant_data variant_nomadik = {
137 .fifosize = 16 * 4,
138 .fifohalfsize = 8 * 4,
139 .clkreg = MCI_CLK_ENABLE,
140 .clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
141 .cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
142 .cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
143 .cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
144 .cmdreg_srsp = MCI_CPSM_RESPONSE,
145 .datalength_bits = 24,
146 .datactrl_blocksz = 11,
147 .datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
148 .st_sdio = true,
149 .st_clkdiv = true,
150 .pwrreg_powerup = MCI_PWR_ON,
151 .f_max = 100000000,
152 .signal_direction = true,
153 .pwrreg_clkgate = true,
154 .pwrreg_nopower = true,
155 .mmcimask1 = true,
156 .irq_pio_mask = MCI_IRQ_PIO_MASK,
157 .start_err = MCI_STARTBITERR,
158 .opendrain = MCI_OD,
159 .init = mmci_variant_init,
160};
161
162static struct variant_data variant_ux500 = {
163 .fifosize = 30 * 4,
164 .fifohalfsize = 8 * 4,
165 .clkreg = MCI_CLK_ENABLE,
166 .clkreg_enable = MCI_ST_UX500_HWFCEN,
167 .clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
168 .clkreg_neg_edge_enable = MCI_ST_UX500_NEG_EDGE,
169 .cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
170 .cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
171 .cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
172 .cmdreg_srsp = MCI_CPSM_RESPONSE,
173 .datalength_bits = 24,
174 .datactrl_blocksz = 11,
175 .datactrl_any_blocksz = true,
176 .dma_power_of_2 = true,
177 .datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
178 .st_sdio = true,
179 .st_clkdiv = true,
180 .pwrreg_powerup = MCI_PWR_ON,
181 .f_max = 100000000,
182 .signal_direction = true,
183 .pwrreg_clkgate = true,
184 .busy_detect = true,
185 .busy_dpsm_flag = MCI_DPSM_ST_BUSYMODE,
186 .busy_detect_flag = MCI_ST_CARDBUSY,
187 .busy_detect_mask = MCI_ST_BUSYENDMASK,
188 .pwrreg_nopower = true,
189 .mmcimask1 = true,
190 .irq_pio_mask = MCI_IRQ_PIO_MASK,
191 .start_err = MCI_STARTBITERR,
192 .opendrain = MCI_OD,
193 .init = ux500_variant_init,
194};
195
196static struct variant_data variant_ux500v2 = {
197 .fifosize = 30 * 4,
198 .fifohalfsize = 8 * 4,
199 .clkreg = MCI_CLK_ENABLE,
200 .clkreg_enable = MCI_ST_UX500_HWFCEN,
201 .clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
202 .clkreg_neg_edge_enable = MCI_ST_UX500_NEG_EDGE,
203 .cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
204 .cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
205 .cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
206 .cmdreg_srsp = MCI_CPSM_RESPONSE,
207 .datactrl_mask_ddrmode = MCI_DPSM_ST_DDRMODE,
208 .datalength_bits = 24,
209 .datactrl_blocksz = 11,
210 .datactrl_any_blocksz = true,
211 .dma_power_of_2 = true,
212 .datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
213 .st_sdio = true,
214 .st_clkdiv = true,
215 .pwrreg_powerup = MCI_PWR_ON,
216 .f_max = 100000000,
217 .signal_direction = true,
218 .pwrreg_clkgate = true,
219 .busy_detect = true,
220 .busy_dpsm_flag = MCI_DPSM_ST_BUSYMODE,
221 .busy_detect_flag = MCI_ST_CARDBUSY,
222 .busy_detect_mask = MCI_ST_BUSYENDMASK,
223 .pwrreg_nopower = true,
224 .mmcimask1 = true,
225 .irq_pio_mask = MCI_IRQ_PIO_MASK,
226 .start_err = MCI_STARTBITERR,
227 .opendrain = MCI_OD,
228 .init = ux500v2_variant_init,
229};
230
231static struct variant_data variant_stm32 = {
232 .fifosize = 32 * 4,
233 .fifohalfsize = 8 * 4,
234 .clkreg = MCI_CLK_ENABLE,
235 .clkreg_enable = MCI_ST_UX500_HWFCEN,
236 .clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
237 .clkreg_neg_edge_enable = MCI_ST_UX500_NEG_EDGE,
238 .cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
239 .cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
240 .cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
241 .cmdreg_srsp = MCI_CPSM_RESPONSE,
242 .irq_pio_mask = MCI_IRQ_PIO_MASK,
243 .datalength_bits = 24,
244 .datactrl_blocksz = 11,
245 .datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
246 .st_sdio = true,
247 .st_clkdiv = true,
248 .pwrreg_powerup = MCI_PWR_ON,
249 .f_max = 48000000,
250 .pwrreg_clkgate = true,
251 .pwrreg_nopower = true,
252 .dma_flow_controller = true,
253 .init = mmci_variant_init,
254};
255
256static struct variant_data variant_stm32_sdmmc = {
257 .fifosize = 16 * 4,
258 .fifohalfsize = 8 * 4,
259 .f_max = 208000000,
260 .stm32_clkdiv = true,
261 .cmdreg_cpsm_enable = MCI_CPSM_STM32_ENABLE,
262 .cmdreg_lrsp_crc = MCI_CPSM_STM32_LRSP_CRC,
263 .cmdreg_srsp_crc = MCI_CPSM_STM32_SRSP_CRC,
264 .cmdreg_srsp = MCI_CPSM_STM32_SRSP,
265 .cmdreg_stop = MCI_CPSM_STM32_CMDSTOP,
266 .data_cmd_enable = MCI_CPSM_STM32_CMDTRANS,
267 .irq_pio_mask = MCI_IRQ_PIO_STM32_MASK,
268 .datactrl_first = true,
269 .datacnt_useless = true,
270 .datalength_bits = 25,
271 .datactrl_blocksz = 14,
272 .datactrl_any_blocksz = true,
273 .datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
274 .stm32_idmabsize_mask = GENMASK(12, 5),
275 .stm32_idmabsize_align = BIT(5),
276 .busy_timeout = true,
277 .busy_detect = true,
278 .busy_detect_flag = MCI_STM32_BUSYD0,
279 .busy_detect_mask = MCI_STM32_BUSYD0ENDMASK,
280 .init = sdmmc_variant_init,
281};
282
283static struct variant_data variant_stm32_sdmmcv2 = {
284 .fifosize = 16 * 4,
285 .fifohalfsize = 8 * 4,
286 .f_max = 267000000,
287 .stm32_clkdiv = true,
288 .cmdreg_cpsm_enable = MCI_CPSM_STM32_ENABLE,
289 .cmdreg_lrsp_crc = MCI_CPSM_STM32_LRSP_CRC,
290 .cmdreg_srsp_crc = MCI_CPSM_STM32_SRSP_CRC,
291 .cmdreg_srsp = MCI_CPSM_STM32_SRSP,
292 .cmdreg_stop = MCI_CPSM_STM32_CMDSTOP,
293 .data_cmd_enable = MCI_CPSM_STM32_CMDTRANS,
294 .irq_pio_mask = MCI_IRQ_PIO_STM32_MASK,
295 .datactrl_first = true,
296 .datacnt_useless = true,
297 .datalength_bits = 25,
298 .datactrl_blocksz = 14,
299 .datactrl_any_blocksz = true,
300 .datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
301 .stm32_idmabsize_mask = GENMASK(16, 5),
302 .stm32_idmabsize_align = BIT(5),
303 .dma_lli = true,
304 .busy_timeout = true,
305 .busy_detect = true,
306 .busy_detect_flag = MCI_STM32_BUSYD0,
307 .busy_detect_mask = MCI_STM32_BUSYD0ENDMASK,
308 .init = sdmmc_variant_init,
309};
310
311static struct variant_data variant_stm32_sdmmcv3 = {
312 .fifosize = 256 * 4,
313 .fifohalfsize = 128 * 4,
314 .f_max = 267000000,
315 .stm32_clkdiv = true,
316 .cmdreg_cpsm_enable = MCI_CPSM_STM32_ENABLE,
317 .cmdreg_lrsp_crc = MCI_CPSM_STM32_LRSP_CRC,
318 .cmdreg_srsp_crc = MCI_CPSM_STM32_SRSP_CRC,
319 .cmdreg_srsp = MCI_CPSM_STM32_SRSP,
320 .cmdreg_stop = MCI_CPSM_STM32_CMDSTOP,
321 .data_cmd_enable = MCI_CPSM_STM32_CMDTRANS,
322 .irq_pio_mask = MCI_IRQ_PIO_STM32_MASK,
323 .datactrl_first = true,
324 .datacnt_useless = true,
325 .datalength_bits = 25,
326 .datactrl_blocksz = 14,
327 .datactrl_any_blocksz = true,
328 .datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
329 .stm32_idmabsize_mask = GENMASK(16, 6),
330 .stm32_idmabsize_align = BIT(6),
331 .dma_lli = true,
332 .busy_timeout = true,
333 .busy_detect = true,
334 .busy_detect_flag = MCI_STM32_BUSYD0,
335 .busy_detect_mask = MCI_STM32_BUSYD0ENDMASK,
336 .init = sdmmc_variant_init,
337};
338
339static struct variant_data variant_qcom = {
340 .fifosize = 16 * 4,
341 .fifohalfsize = 8 * 4,
342 .clkreg = MCI_CLK_ENABLE,
343 .clkreg_enable = MCI_QCOM_CLK_FLOWENA |
344 MCI_QCOM_CLK_SELECT_IN_FBCLK,
345 .clkreg_8bit_bus_enable = MCI_QCOM_CLK_WIDEBUS_8,
346 .datactrl_mask_ddrmode = MCI_QCOM_CLK_SELECT_IN_DDR_MODE,
347 .cmdreg_cpsm_enable = MCI_CPSM_ENABLE,
348 .cmdreg_lrsp_crc = MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
349 .cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
350 .cmdreg_srsp = MCI_CPSM_RESPONSE,
351 .data_cmd_enable = MCI_CPSM_QCOM_DATCMD,
352 .datalength_bits = 24,
353 .datactrl_blocksz = 11,
354 .datactrl_any_blocksz = true,
355 .pwrreg_powerup = MCI_PWR_UP,
356 .f_max = 208000000,
357 .explicit_mclk_control = true,
358 .qcom_fifo = true,
359 .qcom_dml = true,
360 .mmcimask1 = true,
361 .irq_pio_mask = MCI_IRQ_PIO_MASK,
362 .start_err = MCI_STARTBITERR,
363 .opendrain = MCI_ROD,
364 .init = qcom_variant_init,
365};
366
367/* Busy detection for the ST Micro variant */
368static int mmci_card_busy(struct mmc_host *mmc)
369{
370 struct mmci_host *host = mmc_priv(mmc);
371 unsigned long flags;
372 int busy = 0;
373
374 spin_lock_irqsave(&host->lock, flags);
375 if (readl(host->base + MMCISTATUS) & host->variant->busy_detect_flag)
376 busy = 1;
377 spin_unlock_irqrestore(&host->lock, flags);
378
379 return busy;
380}
381
382static void mmci_reg_delay(struct mmci_host *host)
383{
384 /*
385 * According to the spec, at least three feedback clock cycles
386 * of max 52 MHz must pass between two writes to the MMCICLOCK reg.
387 * Three MCLK clock cycles must pass between two MMCIPOWER reg writes.
388 * Worst delay time during card init is at 100 kHz => 30 us.
389 * Worst delay time when up and running is at 25 MHz => 120 ns.
390 */
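/*
 * E.g. three cycles at 100 kHz take 3 / 100000 s = 30 us and three
 * cycles at 25 MHz take 3 / 25000000 s = 120 ns, which matches the
 * delays used below.
 */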
391 if (host->cclk < 25000000)
392 udelay(30);
393 else
394 ndelay(120);
395}
396
397/*
398 * This must be called with host->lock held
399 */
400void mmci_write_clkreg(struct mmci_host *host, u32 clk)
401{
402 if (host->clk_reg != clk) {
403 host->clk_reg = clk;
404 writel(clk, host->base + MMCICLOCK);
405 }
406}
407
408/*
409 * This must be called with host->lock held
410 */
411void mmci_write_pwrreg(struct mmci_host *host, u32 pwr)
412{
413 if (host->pwr_reg != pwr) {
414 host->pwr_reg = pwr;
415 writel(pwr, host->base + MMCIPOWER);
416 }
417}
418
419/*
420 * This must be called with host->lock held
421 */
422static void mmci_write_datactrlreg(struct mmci_host *host, u32 datactrl)
423{
424 /* Keep busy mode in DPSM if enabled */
425 datactrl |= host->datactrl_reg & host->variant->busy_dpsm_flag;
426
427 if (host->datactrl_reg != datactrl) {
428 host->datactrl_reg = datactrl;
429 writel(datactrl, host->base + MMCIDATACTRL);
430 }
431}
432
433/*
434 * This must be called with host->lock held
435 */
436static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
437{
438 struct variant_data *variant = host->variant;
439 u32 clk = variant->clkreg;
440
441 /* Make sure cclk reflects the current calculated clock */
442 host->cclk = 0;
443
444 if (desired) {
445 if (variant->explicit_mclk_control) {
446 host->cclk = host->mclk;
447 } else if (desired >= host->mclk) {
448 clk = MCI_CLK_BYPASS;
449 if (variant->st_clkdiv)
450 clk |= MCI_ST_UX500_NEG_EDGE;
451 host->cclk = host->mclk;
452 } else if (variant->st_clkdiv) {
453 /*
454 * DB8500 TRM says f = mclk / (clkdiv + 2)
455 * => clkdiv = (mclk / f) - 2
456 * Round the divider up so we don't exceed the max
457 * frequency
458 */
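/*
 * E.g. mclk = 100 MHz and desired = 400 kHz gives
 * clkdiv = DIV_ROUND_UP(100000000, 400000) - 2 = 248 and
 * cclk = 100000000 / (248 + 2) = 400 kHz.
 */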
459 clk = DIV_ROUND_UP(host->mclk, desired) - 2;
460 if (clk >= 256)
461 clk = 255;
462 host->cclk = host->mclk / (clk + 2);
463 } else {
464 /*
465 * PL180 TRM says f = mclk / (2 * (clkdiv + 1))
466 * => clkdiv = mclk / (2 * f) - 1
467 */
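/*
 * E.g. mclk = 100 MHz and desired = 25 MHz gives
 * clkdiv = 100000000 / (2 * 25000000) - 1 = 1 and
 * cclk = 100000000 / (2 * (1 + 1)) = 25 MHz.
 */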
468 clk = host->mclk / (2 * desired) - 1;
469 if (clk >= 256)
470 clk = 255;
471 host->cclk = host->mclk / (2 * (clk + 1));
472 }
473
474 clk |= variant->clkreg_enable;
475 clk |= MCI_CLK_ENABLE;
476 /* This hasn't proven to be worthwhile */
477 /* clk |= MCI_CLK_PWRSAVE; */
478 }
479
480 /* Set actual clock for debug */
481 host->mmc->actual_clock = host->cclk;
482
483 if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
484 clk |= MCI_4BIT_BUS;
485 if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
486 clk |= variant->clkreg_8bit_bus_enable;
487
488 if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50 ||
489 host->mmc->ios.timing == MMC_TIMING_MMC_DDR52)
490 clk |= variant->clkreg_neg_edge_enable;
491
492 mmci_write_clkreg(host, clk);
493}
494
495static void mmci_dma_release(struct mmci_host *host)
496{
497 if (host->ops && host->ops->dma_release)
498 host->ops->dma_release(host);
499
500 host->use_dma = false;
501}
502
503static void mmci_dma_setup(struct mmci_host *host)
504{
505 if (!host->ops || !host->ops->dma_setup)
506 return;
507
508 if (host->ops->dma_setup(host))
509 return;
510
511 /* initialize pre request cookie */
512 host->next_cookie = 1;
513
514 host->use_dma = true;
515}
516
517/*
518 * Validate mmc prerequisites
519 */
520static int mmci_validate_data(struct mmci_host *host,
521 struct mmc_data *data)
522{
523 struct variant_data *variant = host->variant;
524
525 if (!data)
526 return 0;
527 if (!is_power_of_2(data->blksz) && !variant->datactrl_any_blocksz) {
528 dev_err(mmc_dev(host->mmc),
529 "unsupported block size (%d bytes)\n", data->blksz);
530 return -EINVAL;
531 }
532
533 if (host->ops && host->ops->validate_data)
534 return host->ops->validate_data(host, data);
535
536 return 0;
537}
538
539static int mmci_prep_data(struct mmci_host *host, struct mmc_data *data, bool next)
540{
541 int err;
542
543 if (!host->ops || !host->ops->prep_data)
544 return 0;
545
546 err = host->ops->prep_data(host, data, next);
547
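/*
 * A non-zero host_cookie tags data that has been prepared; 0 means
 * "not prepared". If the counter wraps negative, fall back to 1 so
 * the cookie stays non-zero.
 */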
548 if (next && !err)
549 data->host_cookie = ++host->next_cookie < 0 ?
550 1 : host->next_cookie;
551
552 return err;
553}
554
555static void mmci_unprep_data(struct mmci_host *host, struct mmc_data *data,
556 int err)
557{
558 if (host->ops && host->ops->unprep_data)
559 host->ops->unprep_data(host, data, err);
560
561 data->host_cookie = 0;
562}
563
564static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
565{
566 WARN_ON(data->host_cookie && data->host_cookie != host->next_cookie);
567
568 if (host->ops && host->ops->get_next_data)
569 host->ops->get_next_data(host, data);
570}
571
572static int mmci_dma_start(struct mmci_host *host, unsigned int datactrl)
573{
574 struct mmc_data *data = host->data;
575 int ret;
576
577 if (!host->use_dma)
578 return -EINVAL;
579
580 ret = mmci_prep_data(host, data, false);
581 if (ret)
582 return ret;
583
584 if (!host->ops || !host->ops->dma_start)
585 return -EINVAL;
586
587 /* Okay, go for it. */
588 dev_vdbg(mmc_dev(host->mmc),
589 "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
590 data->sg_len, data->blksz, data->blocks, data->flags);
591
592 ret = host->ops->dma_start(host, &datactrl);
593 if (ret)
594 return ret;
595
596 /* Trigger the DMA transfer */
597 mmci_write_datactrlreg(host, datactrl);
598
599 /*
600 * Let the MMCI say when the data is ended and it's time
601 * to fire next DMA request. When that happens, MMCI will
602 * call mmci_data_end()
603 */
604 writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
605 host->base + MMCIMASK0);
606 return 0;
607}
608
609static void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
610{
611 if (!host->use_dma)
612 return;
613
614 if (host->ops && host->ops->dma_finalize)
615 host->ops->dma_finalize(host, data);
616}
617
618static void mmci_dma_error(struct mmci_host *host)
619{
620 if (!host->use_dma)
621 return;
622
623 if (host->ops && host->ops->dma_error)
624 host->ops->dma_error(host);
625}
626
627static void
628mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
629{
630 writel(0, host->base + MMCICOMMAND);
631
632 BUG_ON(host->data);
633
634 host->mrq = NULL;
635 host->cmd = NULL;
636
637 mmc_request_done(host->mmc, mrq);
638}
639
640static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
641{
642 void __iomem *base = host->base;
643 struct variant_data *variant = host->variant;
644
645 if (host->singleirq) {
646 unsigned int mask0 = readl(base + MMCIMASK0);
647
648 mask0 &= ~variant->irq_pio_mask;
649 mask0 |= mask;
650
651 writel(mask0, base + MMCIMASK0);
652 }
653
654 if (variant->mmcimask1)
655 writel(mask, base + MMCIMASK1);
656
657 host->mask1_reg = mask;
658}
659
660static void mmci_stop_data(struct mmci_host *host)
661{
662 mmci_write_datactrlreg(host, 0);
663 mmci_set_mask1(host, 0);
664 host->data = NULL;
665}
666
667static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
668{
669 unsigned int flags = SG_MITER_ATOMIC;
670
671 if (data->flags & MMC_DATA_READ)
672 flags |= SG_MITER_TO_SG;
673 else
674 flags |= SG_MITER_FROM_SG;
675
676 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
677}
678
679static u32 mmci_get_dctrl_cfg(struct mmci_host *host)
680{
681 return MCI_DPSM_ENABLE | mmci_dctrl_blksz(host);
682}
683
684static u32 ux500v2_get_dctrl_cfg(struct mmci_host *host)
685{
686 return MCI_DPSM_ENABLE | (host->data->blksz << 16);
687}
688
689static void ux500_busy_clear_mask_done(struct mmci_host *host)
690{
691 void __iomem *base = host->base;
692
693 writel(host->variant->busy_detect_mask, base + MMCICLEAR);
694 writel(readl(base + MMCIMASK0) &
695 ~host->variant->busy_detect_mask, base + MMCIMASK0);
696 host->busy_state = MMCI_BUSY_DONE;
697 host->busy_status = 0;
698}
699
700/*
701 * ux500_busy_complete() - this will wait until the busy status
702 * goes off, saving any status bits that occur in the meantime into
703 * host->busy_status until we know the card is not busy any more.
704 * The function returns true when the busy detection is ended
705 * and we should continue processing the command.
706 *
707 * The Ux500 typically fires two IRQs over a busy cycle like this:
708 *
709 * DAT0 busy +-----------------+
710 * | |
711 * DAT0 not busy ----+ +--------
712 *
713 * ^ ^
714 * | |
715 * IRQ1 IRQ2
716 */
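/*
 * The state thus normally moves MMCI_BUSY_DONE ->
 * MMCI_BUSY_WAITING_FOR_START_IRQ (command sent, busy seen on DAT0) ->
 * MMCI_BUSY_WAITING_FOR_END_IRQ (at IRQ1) -> MMCI_BUSY_DONE (at IRQ2).
 */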
717static bool ux500_busy_complete(struct mmci_host *host, struct mmc_command *cmd,
718 u32 status, u32 err_msk)
719{
720 void __iomem *base = host->base;
721 int retries = 10;
722
723 if (status & err_msk) {
724 /* Stop any ongoing busy detection if an error occurs */
725 ux500_busy_clear_mask_done(host);
726 goto out_ret_state;
727 }
728
729 /*
730 * The state transitions are encoded in a state machine crossing
731 * the edges in this switch statement.
732 */
733 switch (host->busy_state) {
734
735 /*
736 * Before unmasking for the busy end IRQ, confirm that the
737 * command was sent successfully. To keep track of having a
738 * command in-progress, waiting for busy signaling to end,
739 * store the status in host->busy_status.
740 *
741 * Note that, the card may need a couple of clock cycles before
742 * it starts signaling busy on DAT0, hence re-read the
743 * MMCISTATUS register here, to allow the busy bit to be set.
744 */
745 case MMCI_BUSY_DONE:
746 /*
747 * Save the first status register read to be sure to catch
748 * all bits that may be lost while retrying. If the command
749 * is still busy this will result in assigning 0 to
750 * host->busy_status, which is what it should be in IDLE.
751 */
752 host->busy_status = status & (MCI_CMDSENT | MCI_CMDRESPEND);
753 while (retries) {
754 status = readl(base + MMCISTATUS);
755 /* Keep accumulating status bits */
756 host->busy_status |= status & (MCI_CMDSENT | MCI_CMDRESPEND);
757 if (status & host->variant->busy_detect_flag) {
758 writel(readl(base + MMCIMASK0) |
759 host->variant->busy_detect_mask,
760 base + MMCIMASK0);
761 host->busy_state = MMCI_BUSY_WAITING_FOR_START_IRQ;
762 schedule_delayed_work(&host->ux500_busy_timeout_work,
763 msecs_to_jiffies(cmd->busy_timeout));
764 goto out_ret_state;
765 }
766 retries--;
767 }
768 dev_dbg(mmc_dev(host->mmc),
769 "no busy signalling in time CMD%02x\n", cmd->opcode);
770 ux500_busy_clear_mask_done(host);
771 break;
772
773 /*
774 * If there is a command in-progress that has been successfully
775 * sent, then bail out if busy status is set and wait for the
776 * busy end IRQ.
777 *
778 * Note that, the HW triggers an IRQ on both edges while
779 * monitoring DAT0 for busy completion, but there is only one
780 * status bit in MMCISTATUS for the busy state. Therefore
781 * both the start and the end interrupts need to be cleared,
782 * one after the other. So, clear the busy start IRQ here.
783 */
784 case MMCI_BUSY_WAITING_FOR_START_IRQ:
785 if (status & host->variant->busy_detect_flag) {
786 host->busy_status |= status & (MCI_CMDSENT | MCI_CMDRESPEND);
787 writel(host->variant->busy_detect_mask, base + MMCICLEAR);
788 host->busy_state = MMCI_BUSY_WAITING_FOR_END_IRQ;
789 } else {
790 dev_dbg(mmc_dev(host->mmc),
791 "lost busy status when waiting for busy start IRQ CMD%02x\n",
792 cmd->opcode);
793 cancel_delayed_work(&host->ux500_busy_timeout_work);
794 ux500_busy_clear_mask_done(host);
795 }
796 break;
797
798 case MMCI_BUSY_WAITING_FOR_END_IRQ:
799 if (!(status & host->variant->busy_detect_flag)) {
800 host->busy_status |= status & (MCI_CMDSENT | MCI_CMDRESPEND);
801 writel(host->variant->busy_detect_mask, base + MMCICLEAR);
802 cancel_delayed_work(&host->ux500_busy_timeout_work);
803 ux500_busy_clear_mask_done(host);
804 } else {
805 dev_dbg(mmc_dev(host->mmc),
806 "busy status still asserted when handling busy end IRQ - will keep waiting CMD%02x\n",
807 cmd->opcode);
808 }
809 break;
810
811 default:
812 dev_dbg(mmc_dev(host->mmc), "fell through on state %d, CMD%02x\n",
813 host->busy_state, cmd->opcode);
814 break;
815 }
816
817out_ret_state:
818 return (host->busy_state == MMCI_BUSY_DONE);
819}
820
821/*
822 * All the DMA operation mode stuff goes inside this ifdef.
823 * This assumes that you have a generic DMA device interface,
824 * no custom DMA interfaces are supported.
825 */
826#ifdef CONFIG_DMA_ENGINE
827struct mmci_dmae_next {
828 struct dma_async_tx_descriptor *desc;
829 struct dma_chan *chan;
830};
831
832struct mmci_dmae_priv {
833 struct dma_chan *cur;
834 struct dma_chan *rx_channel;
835 struct dma_chan *tx_channel;
836 struct dma_async_tx_descriptor *desc_current;
837 struct mmci_dmae_next next_data;
838};
839
840int mmci_dmae_setup(struct mmci_host *host)
841{
842 const char *rxname, *txname;
843 struct mmci_dmae_priv *dmae;
844
845 dmae = devm_kzalloc(mmc_dev(host->mmc), sizeof(*dmae), GFP_KERNEL);
846 if (!dmae)
847 return -ENOMEM;
848
849 host->dma_priv = dmae;
850
851 dmae->rx_channel = dma_request_chan(mmc_dev(host->mmc), "rx");
852 if (IS_ERR(dmae->rx_channel)) {
853 int ret = PTR_ERR(dmae->rx_channel);
854 dmae->rx_channel = NULL;
855 return ret;
856 }
857
858 dmae->tx_channel = dma_request_chan(mmc_dev(host->mmc), "tx");
859 if (IS_ERR(dmae->tx_channel)) {
860 if (PTR_ERR(dmae->tx_channel) == -EPROBE_DEFER)
861 dev_warn(mmc_dev(host->mmc),
862 "Deferred probe for TX channel ignored\n");
863 dmae->tx_channel = NULL;
864 }
865
866 /*
867 * If only an RX channel is specified, the driver will
868 * attempt to use it bidirectionally; however, if it
869 * is specified but cannot be located, DMA will be disabled.
870 */
871 if (dmae->rx_channel && !dmae->tx_channel)
872 dmae->tx_channel = dmae->rx_channel;
873
874 if (dmae->rx_channel)
875 rxname = dma_chan_name(dmae->rx_channel);
876 else
877 rxname = "none";
878
879 if (dmae->tx_channel)
880 txname = dma_chan_name(dmae->tx_channel);
881 else
882 txname = "none";
883
884 dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n",
885 rxname, txname);
886
887 /*
888 * Limit the maximum segment size in any SG entry according to
889 * the parameters of the DMA engine device.
890 */
891 if (dmae->tx_channel) {
892 struct device *dev = dmae->tx_channel->device->dev;
893 unsigned int max_seg_size = dma_get_max_seg_size(dev);
894
895 if (max_seg_size < host->mmc->max_seg_size)
896 host->mmc->max_seg_size = max_seg_size;
897 }
898 if (dmae->rx_channel) {
899 struct device *dev = dmae->rx_channel->device->dev;
900 unsigned int max_seg_size = dma_get_max_seg_size(dev);
901
902 if (max_seg_size < host->mmc->max_seg_size)
903 host->mmc->max_seg_size = max_seg_size;
904 }
905
906 if (!dmae->tx_channel || !dmae->rx_channel) {
907 mmci_dmae_release(host);
908 return -EINVAL;
909 }
910
911 return 0;
912}
913
914/*
915 * This is only used from the DMA setup and release paths, so it
916 * could be inlined and discarded along with them.
917 */
918void mmci_dmae_release(struct mmci_host *host)
919{
920 struct mmci_dmae_priv *dmae = host->dma_priv;
921
922 if (dmae->rx_channel)
923 dma_release_channel(dmae->rx_channel);
924 if (dmae->tx_channel)
925 dma_release_channel(dmae->tx_channel);
926 dmae->rx_channel = dmae->tx_channel = NULL;
927}
928
929static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
930{
931 struct mmci_dmae_priv *dmae = host->dma_priv;
932 struct dma_chan *chan;
933
934 if (data->flags & MMC_DATA_READ)
935 chan = dmae->rx_channel;
936 else
937 chan = dmae->tx_channel;
938
939 dma_unmap_sg(chan->device->dev, data->sg, data->sg_len,
940 mmc_get_dma_dir(data));
941}
942
943void mmci_dmae_error(struct mmci_host *host)
944{
945 struct mmci_dmae_priv *dmae = host->dma_priv;
946
947 if (!dma_inprogress(host))
948 return;
949
950 dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
951 dmaengine_terminate_all(dmae->cur);
952 host->dma_in_progress = false;
953 dmae->cur = NULL;
954 dmae->desc_current = NULL;
955 host->data->host_cookie = 0;
956
957 mmci_dma_unmap(host, host->data);
958}
959
960void mmci_dmae_finalize(struct mmci_host *host, struct mmc_data *data)
961{
962 struct mmci_dmae_priv *dmae = host->dma_priv;
963 u32 status;
964 int i;
965
966 if (!dma_inprogress(host))
967 return;
968
969 /* Wait up to 1ms for the DMA to complete */
970 for (i = 0; ; i++) {
971 status = readl(host->base + MMCISTATUS);
972 if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100)
973 break;
974 udelay(10);
975 }
976
977 /*
978 * Check to see whether we still have some data left in the FIFO -
979 * this catches DMA controllers which are unable to monitor the
980 * DMALBREQ and DMALSREQ signals while allowing us to DMA to non-
981 * contiguous buffers. On TX, we'll get a FIFO underrun error.
982 */
983 if (status & MCI_RXDATAAVLBLMASK) {
984 mmci_dma_error(host);
985 if (!data->error)
986 data->error = -EIO;
987 } else if (!data->host_cookie) {
988 mmci_dma_unmap(host, data);
989 }
990
991 /*
992 * Use of DMA with scatter-gather is impossible.
993 * Give up on DMA and switch back to PIO mode.
994 */
995 if (status & MCI_RXDATAAVLBLMASK) {
996 dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
997 mmci_dma_release(host);
998 }
999
1000 host->dma_in_progress = false;
1001 dmae->cur = NULL;
1002 dmae->desc_current = NULL;
1003}
1004
1005/* prepares DMA channel and DMA descriptor, returns non-zero on failure */
1006static int _mmci_dmae_prep_data(struct mmci_host *host, struct mmc_data *data,
1007 struct dma_chan **dma_chan,
1008 struct dma_async_tx_descriptor **dma_desc)
1009{
1010 struct mmci_dmae_priv *dmae = host->dma_priv;
1011 struct variant_data *variant = host->variant;
1012 struct dma_slave_config conf = {
1013 .src_addr = host->phybase + MMCIFIFO,
1014 .dst_addr = host->phybase + MMCIFIFO,
1015 .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
1016 .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
1017 .src_maxburst = variant->fifohalfsize >> 2, /* # of words */
1018 .dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
1019 .device_fc = variant->dma_flow_controller,
1020 };
1021 struct dma_chan *chan;
1022 struct dma_device *device;
1023 struct dma_async_tx_descriptor *desc;
1024 int nr_sg;
1025 unsigned long flags = DMA_CTRL_ACK;
1026
1027 if (data->flags & MMC_DATA_READ) {
1028 conf.direction = DMA_DEV_TO_MEM;
1029 chan = dmae->rx_channel;
1030 } else {
1031 conf.direction = DMA_MEM_TO_DEV;
1032 chan = dmae->tx_channel;
1033 }
1034
1035 /* If there's no DMA channel, fall back to PIO */
1036 if (!chan)
1037 return -EINVAL;
1038
1039 /* If less than or equal to the fifo size, don't bother with DMA */
1040 if (data->blksz * data->blocks <= variant->fifosize)
1041 return -EINVAL;
1042
1043 /*
1044 * This is necessary to get SDIO working on the Ux500. We do not yet
1045 * know if this is a bug in:
1046 * - The Ux500 DMA controller (DMA40)
1047 * - The MMCI DMA interface on the Ux500
1048 * Some power-of-two block sizes (such as 64 bytes) are sent regularly
1049 * during SDIO traffic and those work fine, so for these we enable DMA
1050 * transfers.
1051 */
1052 if (host->variant->dma_power_of_2 && !is_power_of_2(data->blksz))
1053 return -EINVAL;
1054
1055 device = chan->device;
1056 nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len,
1057 mmc_get_dma_dir(data));
1058 if (nr_sg == 0)
1059 return -EINVAL;
1060
1061 if (host->variant->qcom_dml)
1062 flags |= DMA_PREP_INTERRUPT;
1063
1064 dmaengine_slave_config(chan, &conf);
1065 desc = dmaengine_prep_slave_sg(chan, data->sg, nr_sg,
1066 conf.direction, flags);
1067 if (!desc)
1068 goto unmap_exit;
1069
1070 *dma_chan = chan;
1071 *dma_desc = desc;
1072
1073 return 0;
1074
1075 unmap_exit:
1076 dma_unmap_sg(device->dev, data->sg, data->sg_len,
1077 mmc_get_dma_dir(data));
1078 return -ENOMEM;
1079}
1080
1081int mmci_dmae_prep_data(struct mmci_host *host,
1082 struct mmc_data *data,
1083 bool next)
1084{
1085 struct mmci_dmae_priv *dmae = host->dma_priv;
1086 struct mmci_dmae_next *nd = &dmae->next_data;
1087
1088 if (!host->use_dma)
1089 return -EINVAL;
1090
1091 if (next)
1092 return _mmci_dmae_prep_data(host, data, &nd->chan, &nd->desc);
1093 /* Check if next job is already prepared. */
1094 if (dmae->cur && dmae->desc_current)
1095 return 0;
1096
1097 /* No job was prepared, thus do it now. */
1098 return _mmci_dmae_prep_data(host, data, &dmae->cur,
1099 &dmae->desc_current);
1100}
1101
1102int mmci_dmae_start(struct mmci_host *host, unsigned int *datactrl)
1103{
1104 struct mmci_dmae_priv *dmae = host->dma_priv;
1105 int ret;
1106
1107 host->dma_in_progress = true;
1108 ret = dma_submit_error(dmaengine_submit(dmae->desc_current));
1109 if (ret < 0) {
1110 host->dma_in_progress = false;
1111 return ret;
1112 }
1113 dma_async_issue_pending(dmae->cur);
1114
1115 *datactrl |= MCI_DPSM_DMAENABLE;
1116
1117 return 0;
1118}
1119
1120void mmci_dmae_get_next_data(struct mmci_host *host, struct mmc_data *data)
1121{
1122 struct mmci_dmae_priv *dmae = host->dma_priv;
1123 struct mmci_dmae_next *next = &dmae->next_data;
1124
1125 if (!host->use_dma)
1126 return;
1127
1128 WARN_ON(!data->host_cookie && (next->desc || next->chan));
1129
1130 dmae->desc_current = next->desc;
1131 dmae->cur = next->chan;
1132 next->desc = NULL;
1133 next->chan = NULL;
1134}
1135
1136void mmci_dmae_unprep_data(struct mmci_host *host,
1137 struct mmc_data *data, int err)
1138
1139{
1140 struct mmci_dmae_priv *dmae = host->dma_priv;
1141
1142 if (!host->use_dma)
1143 return;
1144
1145 mmci_dma_unmap(host, data);
1146
1147 if (err) {
1148 struct mmci_dmae_next *next = &dmae->next_data;
1149 struct dma_chan *chan;
1150 if (data->flags & MMC_DATA_READ)
1151 chan = dmae->rx_channel;
1152 else
1153 chan = dmae->tx_channel;
1154 dmaengine_terminate_all(chan);
1155
1156 if (dmae->desc_current == next->desc)
1157 dmae->desc_current = NULL;
1158
1159 if (dmae->cur == next->chan) {
1160 host->dma_in_progress = false;
1161 dmae->cur = NULL;
1162 }
1163
1164 next->desc = NULL;
1165 next->chan = NULL;
1166 }
1167}
1168
1169static struct mmci_host_ops mmci_variant_ops = {
1170 .prep_data = mmci_dmae_prep_data,
1171 .unprep_data = mmci_dmae_unprep_data,
1172 .get_datactrl_cfg = mmci_get_dctrl_cfg,
1173 .get_next_data = mmci_dmae_get_next_data,
1174 .dma_setup = mmci_dmae_setup,
1175 .dma_release = mmci_dmae_release,
1176 .dma_start = mmci_dmae_start,
1177 .dma_finalize = mmci_dmae_finalize,
1178 .dma_error = mmci_dmae_error,
1179};
1180#else
1181static struct mmci_host_ops mmci_variant_ops = {
1182 .get_datactrl_cfg = mmci_get_dctrl_cfg,
1183};
1184#endif
1185
1186static void mmci_variant_init(struct mmci_host *host)
1187{
1188 host->ops = &mmci_variant_ops;
1189}
1190
1191static void ux500_variant_init(struct mmci_host *host)
1192{
1193 host->ops = &mmci_variant_ops;
1194 host->ops->busy_complete = ux500_busy_complete;
1195}
1196
1197static void ux500v2_variant_init(struct mmci_host *host)
1198{
1199 host->ops = &mmci_variant_ops;
1200 host->ops->busy_complete = ux500_busy_complete;
1201 host->ops->get_datactrl_cfg = ux500v2_get_dctrl_cfg;
1202}
1203
1204static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq)
1205{
1206 struct mmci_host *host = mmc_priv(mmc);
1207 struct mmc_data *data = mrq->data;
1208
1209 if (!data)
1210 return;
1211
1212 WARN_ON(data->host_cookie);
1213
1214 if (mmci_validate_data(host, data))
1215 return;
1216
1217 mmci_prep_data(host, data, true);
1218}
1219
1220static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
1221 int err)
1222{
1223 struct mmci_host *host = mmc_priv(mmc);
1224 struct mmc_data *data = mrq->data;
1225
1226 if (!data || !data->host_cookie)
1227 return;
1228
1229 mmci_unprep_data(host, data, err);
1230}
1231
1232static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
1233{
1234 struct variant_data *variant = host->variant;
1235 unsigned int datactrl, timeout, irqmask;
1236 unsigned long long clks;
1237 void __iomem *base;
1238
1239 dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n",
1240 data->blksz, data->blocks, data->flags);
1241
1242 host->data = data;
1243 host->size = data->blksz * data->blocks;
1244 data->bytes_xfered = 0;
1245
1246 clks = (unsigned long long)data->timeout_ns * host->cclk;
1247 do_div(clks, NSEC_PER_SEC);
1248
1249 timeout = data->timeout_clks + (unsigned int)clks;
1250
1251 base = host->base;
1252 writel(timeout, base + MMCIDATATIMER);
1253 writel(host->size, base + MMCIDATALENGTH);
1254
1255 datactrl = host->ops->get_datactrl_cfg(host);
1256 datactrl |= host->data->flags & MMC_DATA_READ ? MCI_DPSM_DIRECTION : 0;
1257
1258 if (host->mmc->card && mmc_card_sdio(host->mmc->card)) {
1259 u32 clk;
1260
1261 datactrl |= variant->datactrl_mask_sdio;
1262
1263 /*
1264 * For small SDIO write transfers, the ST Micro variant
1265 * needs to have clock H/W flow control disabled,
1266 * otherwise the transfer will not start. The threshold
1267 * depends on the rate of MCLK.
1268 */
1269 if (variant->st_sdio && data->flags & MMC_DATA_WRITE &&
1270 (host->size < 8 ||
1271 (host->size <= 8 && host->mclk > 50000000)))
1272 clk = host->clk_reg & ~variant->clkreg_enable;
1273 else
1274 clk = host->clk_reg | variant->clkreg_enable;
1275
1276 mmci_write_clkreg(host, clk);
1277 }
1278
1279 if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50 ||
1280 host->mmc->ios.timing == MMC_TIMING_MMC_DDR52)
1281 datactrl |= variant->datactrl_mask_ddrmode;
1282
1283 /*
1284 * Attempt to use DMA operation mode; if this
1285 * should fail, fall back to PIO mode
1286 */
1287 if (!mmci_dma_start(host, datactrl))
1288 return;
1289
1290 /* IRQ mode, map the SG list for CPU reading/writing */
1291 mmci_init_sg(host, data);
1292
1293 if (data->flags & MMC_DATA_READ) {
1294 irqmask = MCI_RXFIFOHALFFULLMASK;
1295
1296 /*
1297 * If we have less than the fifo 'half-full' threshold to
1298 * transfer, trigger a PIO interrupt as soon as any data
1299 * is available.
1300 */
1301 if (host->size < variant->fifohalfsize)
1302 irqmask |= MCI_RXDATAAVLBLMASK;
1303 } else {
1304 /*
1305 * We don't actually need to include "FIFO empty" here
1306 * since it's implicit in "FIFO half empty".
1307 */
1308 irqmask = MCI_TXFIFOHALFEMPTYMASK;
1309 }
1310
1311 mmci_write_datactrlreg(host, datactrl);
1312 writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
1313 mmci_set_mask1(host, irqmask);
1314}
1315
1316static void
1317mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
1318{
1319 void __iomem *base = host->base;
1320 bool busy_resp = cmd->flags & MMC_RSP_BUSY;
1321 unsigned long long clks;
1322
1323 dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n",
1324 cmd->opcode, cmd->arg, cmd->flags);
1325
1326 if (readl(base + MMCICOMMAND) & host->variant->cmdreg_cpsm_enable) {
1327 writel(0, base + MMCICOMMAND);
1328 mmci_reg_delay(host);
1329 }
1330
1331 if (host->variant->cmdreg_stop &&
1332 cmd->opcode == MMC_STOP_TRANSMISSION)
1333 c |= host->variant->cmdreg_stop;
1334
1335 c |= cmd->opcode | host->variant->cmdreg_cpsm_enable;
1336 if (cmd->flags & MMC_RSP_PRESENT) {
1337 if (cmd->flags & MMC_RSP_136)
1338 c |= host->variant->cmdreg_lrsp_crc;
1339 else if (cmd->flags & MMC_RSP_CRC)
1340 c |= host->variant->cmdreg_srsp_crc;
1341 else
1342 c |= host->variant->cmdreg_srsp;
1343 }
1344
1345 host->busy_status = 0;
1346 host->busy_state = MMCI_BUSY_DONE;
1347
1348 /* Assign a default timeout if the core does not provide one */
1349 if (busy_resp && !cmd->busy_timeout)
1350 cmd->busy_timeout = 10 * MSEC_PER_SEC;
1351
1352 if (busy_resp && host->variant->busy_timeout) {
1353 if (cmd->busy_timeout > host->mmc->max_busy_timeout)
1354 clks = (unsigned long long)host->mmc->max_busy_timeout * host->cclk;
1355 else
1356 clks = (unsigned long long)cmd->busy_timeout * host->cclk;
1357
1358 do_div(clks, MSEC_PER_SEC);
1359 writel_relaxed(clks, host->base + MMCIDATATIMER);
1360 }
1361
1362 if (host->ops->pre_sig_volt_switch && cmd->opcode == SD_SWITCH_VOLTAGE)
1363 host->ops->pre_sig_volt_switch(host);
1364
1365 if (/*interrupt*/0)
1366 c |= MCI_CPSM_INTERRUPT;
1367
1368 if (mmc_cmd_type(cmd) == MMC_CMD_ADTC)
1369 c |= host->variant->data_cmd_enable;
1370
1371 host->cmd = cmd;
1372
1373 writel(cmd->arg, base + MMCIARGUMENT);
1374 writel(c, base + MMCICOMMAND);
1375}
1376
1377static void mmci_stop_command(struct mmci_host *host)
1378{
1379 host->stop_abort.error = 0;
1380 mmci_start_command(host, &host->stop_abort, 0);
1381}
1382
1383static void
1384mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
1385 unsigned int status)
1386{
1387 unsigned int status_err;
1388
1389 /* Make sure we have data to handle */
1390 if (!data)
1391 return;
1392
1393 /* First check for errors */
1394 status_err = status & (host->variant->start_err |
1395 MCI_DATACRCFAIL | MCI_DATATIMEOUT |
1396 MCI_TXUNDERRUN | MCI_RXOVERRUN);
1397
1398 if (status_err) {
1399 u32 remain, success;
1400
1401 /* Terminate the DMA transfer */
1402 mmci_dma_error(host);
1403
1404 /*
1405 * Calculate how far we are into the transfer. Note that
1406 * the data counter gives the number of bytes transferred
1407 * on the MMC bus, not on the host side. On reads, this
1408 * can be as much as a FIFO-worth of data ahead. This
1409 * matters for FIFO overruns only.
1410 */
1411 if (!host->variant->datacnt_useless) {
1412 remain = readl(host->base + MMCIDATACNT);
1413 success = data->blksz * data->blocks - remain;
1414 } else {
1415 success = 0;
1416 }
1417
1418 dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n",
1419 status_err, success);
1420 if (status_err & MCI_DATACRCFAIL) {
1421 /* Last block was not successful */
1422 success -= 1;
1423 data->error = -EILSEQ;
1424 } else if (status_err & MCI_DATATIMEOUT) {
1425 data->error = -ETIMEDOUT;
1426 } else if (status_err & MCI_STARTBITERR) {
1427 data->error = -ECOMM;
1428 } else if (status_err & MCI_TXUNDERRUN) {
1429 data->error = -EIO;
1430 } else if (status_err & MCI_RXOVERRUN) {
1431 if (success > host->variant->fifosize)
1432 success -= host->variant->fifosize;
1433 else
1434 success = 0;
1435 data->error = -EIO;
1436 }
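/*
 * Only whole blocks are reported as transferred: e.g. 5000
 * successfully transferred bytes with a 512-byte block size are
 * reported as 4608 bytes (9 complete blocks).
 */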
1437 data->bytes_xfered = round_down(success, data->blksz);
1438 }
1439
1440 if (status & MCI_DATABLOCKEND)
1441 dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");
1442
1443 if (status & MCI_DATAEND || data->error) {
1444 mmci_dma_finalize(host, data);
1445
1446 mmci_stop_data(host);
1447
1448 if (!data->error)
1449 /* The error clause is handled above, success! */
1450 data->bytes_xfered = data->blksz * data->blocks;
1451
1452 if (!data->stop) {
1453 if (host->variant->cmdreg_stop && data->error)
1454 mmci_stop_command(host);
1455 else
1456 mmci_request_end(host, data->mrq);
1457 } else if (host->mrq->sbc && !data->error) {
1458 mmci_request_end(host, data->mrq);
1459 } else {
1460 mmci_start_command(host, data->stop, 0);
1461 }
1462 }
1463}
1464
1465static void
1466mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
1467 unsigned int status)
1468{
1469 u32 err_msk = MCI_CMDCRCFAIL | MCI_CMDTIMEOUT;
1470 void __iomem *base = host->base;
1471 bool sbc, busy_resp;
1472
1473 if (!cmd)
1474 return;
1475
1476 sbc = (cmd == host->mrq->sbc);
1477 busy_resp = !!(cmd->flags & MMC_RSP_BUSY);
1478
1479 /*
1480 * The status needs to include one of these interrupts for the
1481 * command to be considered worth handling. Note that we tag on any
1482 * latent IRQs postponed due to waiting for busy status.
1483 */
1484 if (host->variant->busy_timeout && busy_resp)
1485 err_msk |= MCI_DATATIMEOUT;
1486
1487 if (!((status | host->busy_status) &
1488 (err_msk | MCI_CMDSENT | MCI_CMDRESPEND)))
1489 return;
1490
1491 /* Handle busy detection on DAT0 if the variant supports it. */
1492 if (busy_resp && host->variant->busy_detect)
1493 if (!host->ops->busy_complete(host, cmd, status, err_msk))
1494 return;
1495
1496 host->cmd = NULL;
1497
1498 if (status & MCI_CMDTIMEOUT) {
1499 cmd->error = -ETIMEDOUT;
1500 } else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
1501 cmd->error = -EILSEQ;
1502 } else if (host->variant->busy_timeout && busy_resp &&
1503 status & MCI_DATATIMEOUT) {
1504 cmd->error = -ETIMEDOUT;
1505 /*
1506 * This will wake up mmci_irq_thread() which will issue
1507 * a hardware reset of the MMCI block.
1508 */
1509 host->irq_action = IRQ_WAKE_THREAD;
1510 } else {
1511 cmd->resp[0] = readl(base + MMCIRESPONSE0);
1512 cmd->resp[1] = readl(base + MMCIRESPONSE1);
1513 cmd->resp[2] = readl(base + MMCIRESPONSE2);
1514 cmd->resp[3] = readl(base + MMCIRESPONSE3);
1515 }
1516
1517 if ((!sbc && !cmd->data) || cmd->error) {
1518 if (host->data) {
1519 /* Terminate the DMA transfer */
1520 mmci_dma_error(host);
1521
1522 mmci_stop_data(host);
1523 if (host->variant->cmdreg_stop && cmd->error) {
1524 mmci_stop_command(host);
1525 return;
1526 }
1527 }
1528
1529 if (host->irq_action != IRQ_WAKE_THREAD)
1530 mmci_request_end(host, host->mrq);
1531
1532 } else if (sbc) {
1533 mmci_start_command(host, host->mrq->cmd, 0);
1534 } else if (!host->variant->datactrl_first &&
1535 !(cmd->data->flags & MMC_DATA_READ)) {
1536 mmci_start_data(host, cmd->data);
1537 }
1538}
1539
1540static char *ux500_state_str(struct mmci_host *host)
1541{
1542 switch (host->busy_state) {
1543 case MMCI_BUSY_WAITING_FOR_START_IRQ:
1544 return "waiting for start IRQ";
1545 case MMCI_BUSY_WAITING_FOR_END_IRQ:
1546 return "waiting for end IRQ";
1547 case MMCI_BUSY_DONE:
1548 return "not waiting for IRQs";
1549 default:
1550 return "unknown";
1551 }
1552}
1553
1554/*
1555 * This busy timeout worker is used to "kick" the command IRQ if a
1556 * busy detect IRQ fails to appear in reasonable time. Only used on
1557 * variants with busy detection IRQ delivery.
1558 */
1559static void ux500_busy_timeout_work(struct work_struct *work)
1560{
1561 struct mmci_host *host = container_of(work, struct mmci_host,
1562 ux500_busy_timeout_work.work);
1563 unsigned long flags;
1564 u32 status;
1565
1566 spin_lock_irqsave(&host->lock, flags);
1567
1568 if (host->cmd) {
1569 /* If we are still busy let's tag on a cmd-timeout error. */
1570 status = readl(host->base + MMCISTATUS);
1571 if (status & host->variant->busy_detect_flag) {
1572 status |= MCI_CMDTIMEOUT;
1573 dev_err(mmc_dev(host->mmc),
1574 "timeout in state %s still busy with CMD%02x\n",
1575 ux500_state_str(host), host->cmd->opcode);
1576 } else {
1577 dev_err(mmc_dev(host->mmc),
1578 "timeout in state %s waiting for busy CMD%02x\n",
1579 ux500_state_str(host), host->cmd->opcode);
1580 }
1581
1582 mmci_cmd_irq(host, host->cmd, status);
1583 }
1584
1585 spin_unlock_irqrestore(&host->lock, flags);
1586}
1587
1588static int mmci_get_rx_fifocnt(struct mmci_host *host, u32 status, int remain)
1589{
1590 return remain - (readl(host->base + MMCIFIFOCNT) << 2);
1591}
1592
1593static int mmci_qcom_get_rx_fifocnt(struct mmci_host *host, u32 status, int r)
1594{
1595 /*
1596 * On Qcom SDCC4, only 8 words are used in each burst, so only 8 addresses
1597 * from the FIFO range should be used.
1598 */
1599 if (status & MCI_RXFIFOHALFFULL)
1600 return host->variant->fifohalfsize;
1601 else if (status & MCI_RXDATAAVLBL)
1602 return 4;
1603
1604 return 0;
1605}
1606
1607static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain)
1608{
1609 void __iomem *base = host->base;
1610 char *ptr = buffer;
1611 u32 status = readl(host->base + MMCISTATUS);
1612 int host_remain = host->size;
1613
1614 do {
1615 int count = host->get_rx_fifocnt(host, status, host_remain);
1616
1617 if (count > remain)
1618 count = remain;
1619
1620 if (count <= 0)
1621 break;
1622
1623 /*
1624 * SDIO especially may want to send something that is
1625 * not divisible by 4 (as opposed to card sectors
1626 * etc). Therefore make sure to always read the last bytes
1627 * while only doing full 32-bit reads towards the FIFO.
1628 */
1629 if (unlikely(count & 0x3)) {
1630 if (count < 4) {
1631 unsigned char buf[4];
1632 ioread32_rep(base + MMCIFIFO, buf, 1);
1633 memcpy(ptr, buf, count);
1634 } else {
1635 ioread32_rep(base + MMCIFIFO, ptr, count >> 2);
1636 count &= ~0x3;
1637 }
1638 } else {
1639 ioread32_rep(base + MMCIFIFO, ptr, count >> 2);
1640 }
1641
1642 ptr += count;
1643 remain -= count;
1644 host_remain -= count;
1645
1646 if (remain == 0)
1647 break;
1648
1649 status = readl(base + MMCISTATUS);
1650 } while (status & MCI_RXDATAAVLBL);
1651
1652 return ptr - buffer;
1653}
1654
1655static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status)
1656{
1657 struct variant_data *variant = host->variant;
1658 void __iomem *base = host->base;
1659 char *ptr = buffer;
1660
1661 do {
1662 unsigned int count, maxcnt;
1663
1664 maxcnt = status & MCI_TXFIFOEMPTY ?
1665 variant->fifosize : variant->fifohalfsize;
1666 count = min(remain, maxcnt);
1667
1668 /*
1669 * SDIO especially may want to send something that is
1670 * not divisible by 4 (as opposed to card sectors
1671 * etc), and the FIFO only accepts full 32-bit writes.
1672 * So compensate by adding +3 on the count: a single
1673 * byte becomes a 32-bit write, 7 bytes become two
1674 * 32-bit writes, etc.
1675 */
1676 iowrite32_rep(base + MMCIFIFO, ptr, (count + 3) >> 2);
1677
1678 ptr += count;
1679 remain -= count;
1680
1681 if (remain == 0)
1682 break;
1683
1684 status = readl(base + MMCISTATUS);
1685 } while (status & MCI_TXFIFOHALFEMPTY);
1686
1687 return ptr - buffer;
1688}
1689
1690/*
1691 * PIO data transfer IRQ handler.
1692 */
1693static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
1694{
1695 struct mmci_host *host = dev_id;
1696 struct sg_mapping_iter *sg_miter = &host->sg_miter;
1697 struct variant_data *variant = host->variant;
1698 void __iomem *base = host->base;
1699 u32 status;
1700
1701 status = readl(base + MMCISTATUS);
1702
1703 dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status);
1704
1705 do {
1706 unsigned int remain, len;
1707 char *buffer;
1708
1709 /*
1710 * For write, we only need to test the half-empty flag
1711 * here - if the FIFO is completely empty, then by
1712 * definition it is more than half empty.
1713 *
1714 * For read, check for data available.
1715 */
1716 if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL)))
1717 break;
1718
1719 if (!sg_miter_next(sg_miter))
1720 break;
1721
1722 buffer = sg_miter->addr;
1723 remain = sg_miter->length;
1724
1725 len = 0;
1726 if (status & MCI_RXACTIVE)
1727 len = mmci_pio_read(host, buffer, remain);
1728 if (status & MCI_TXACTIVE)
1729 len = mmci_pio_write(host, buffer, remain, status);
1730
1731 sg_miter->consumed = len;
1732
1733 host->size -= len;
1734 remain -= len;
1735
1736 if (remain)
1737 break;
1738
1739 status = readl(base + MMCISTATUS);
1740 } while (1);
1741
1742 sg_miter_stop(sg_miter);
1743
1744 /*
1745 * If we have less than the fifo 'half-full' threshold to transfer,
1746 * trigger a PIO interrupt as soon as any data is available.
1747 */
1748 if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize)
1749 mmci_set_mask1(host, MCI_RXDATAAVLBLMASK);
1750
1751 /*
1752 * If we run out of data, disable the data IRQs; this
1753 * prevents a race where the FIFO becomes empty before
1754 * the chip itself has disabled the data path, and
1755 * stops us racing with our data end IRQ.
1756 */
1757 if (host->size == 0) {
1758 mmci_set_mask1(host, 0);
1759 writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
1760 }
1761
1762 return IRQ_HANDLED;
1763}
1764
1765/*
1766 * Handle completion of command and data transfers.
1767 */
1768static irqreturn_t mmci_irq(int irq, void *dev_id)
1769{
1770 struct mmci_host *host = dev_id;
1771 u32 status;
1772
1773 spin_lock(&host->lock);
1774 host->irq_action = IRQ_HANDLED;
1775
1776 do {
1777 status = readl(host->base + MMCISTATUS);
1778 if (!status)
1779 break;
1780
1781 if (host->singleirq) {
1782 if (status & host->mask1_reg)
1783 mmci_pio_irq(irq, dev_id);
1784
1785 status &= ~host->variant->irq_pio_mask;
1786 }
1787
1788 /*
1789 * Busy detection is managed by mmci_cmd_irq(), including
1790 * clearing the corresponding IRQ.
1791 */
1792 status &= readl(host->base + MMCIMASK0);
1793 if (host->variant->busy_detect)
1794 writel(status & ~host->variant->busy_detect_mask,
1795 host->base + MMCICLEAR);
1796 else
1797 writel(status, host->base + MMCICLEAR);
1798
1799 dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);
1800
1801 if (host->variant->reversed_irq_handling) {
1802 mmci_data_irq(host, host->data, status);
1803 mmci_cmd_irq(host, host->cmd, status);
1804 } else {
1805 mmci_cmd_irq(host, host->cmd, status);
1806 mmci_data_irq(host, host->data, status);
1807 }
1808
1809 /*
1810 * Busy detection has been handled by mmci_cmd_irq() above.
1811 * Clear the status bit to prevent polling in IRQ context.
1812 */
1813 if (host->variant->busy_detect_flag)
1814 status &= ~host->variant->busy_detect_flag;
1815
1816 } while (status);
1817
1818 spin_unlock(&host->lock);
1819
1820 return host->irq_action;
1821}
1822
1823/*
1824 * mmci_irq_thread() - A threaded IRQ handler that manages a reset of the HW.
1825 *
1826 * A reset is needed for some variants, where a datatimeout for an R1B request
1827 * causes the DPSM to stay busy (non-functional).
1828 */
1829static irqreturn_t mmci_irq_thread(int irq, void *dev_id)
1830{
1831 struct mmci_host *host = dev_id;
1832 unsigned long flags;
1833
1834 if (host->rst) {
1835 reset_control_assert(host->rst);
1836 udelay(2);
1837 reset_control_deassert(host->rst);
1838 }
1839
1840 spin_lock_irqsave(&host->lock, flags);
1841 writel(host->clk_reg, host->base + MMCICLOCK);
1842 writel(host->pwr_reg, host->base + MMCIPOWER);
1843 writel(MCI_IRQENABLE | host->variant->start_err,
1844 host->base + MMCIMASK0);
1845
1846 host->irq_action = IRQ_HANDLED;
1847 mmci_request_end(host, host->mrq);
1848 spin_unlock_irqrestore(&host->lock, flags);
1849
1850 return host->irq_action;
1851}
1852
1853static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1854{
1855 struct mmci_host *host = mmc_priv(mmc);
1856 unsigned long flags;
1857
1858 WARN_ON(host->mrq != NULL);
1859
1860 mrq->cmd->error = mmci_validate_data(host, mrq->data);
1861 if (mrq->cmd->error) {
1862 mmc_request_done(mmc, mrq);
1863 return;
1864 }
1865
1866 spin_lock_irqsave(&host->lock, flags);
1867
1868 host->mrq = mrq;
1869
1870 if (mrq->data)
1871 mmci_get_next_data(host, mrq->data);
1872
1873 if (mrq->data &&
1874 (host->variant->datactrl_first || mrq->data->flags & MMC_DATA_READ))
1875 mmci_start_data(host, mrq->data);
1876
1877 if (mrq->sbc)
1878 mmci_start_command(host, mrq->sbc, 0);
1879 else
1880 mmci_start_command(host, mrq->cmd, 0);
1881
1882 spin_unlock_irqrestore(&host->lock, flags);
1883}
1884
1885static void mmci_set_max_busy_timeout(struct mmc_host *mmc)
1886{
1887 struct mmci_host *host = mmc_priv(mmc);
1888 u32 max_busy_timeout = 0;
1889
1890 if (!host->variant->busy_detect)
1891 return;
1892
1893 if (host->variant->busy_timeout && mmc->actual_clock)
1894 max_busy_timeout = U32_MAX / DIV_ROUND_UP(mmc->actual_clock,
1895 MSEC_PER_SEC);
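/*
 * For example, at an actual_clock of 100 MHz the timer counts
 * 100000 cycles per millisecond, so the longest representable busy
 * timeout is U32_MAX / 100000, roughly 43 seconds.
 */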
1896
1897 mmc->max_busy_timeout = max_busy_timeout;
1898}
1899
1900static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1901{
1902 struct mmci_host *host = mmc_priv(mmc);
1903 struct variant_data *variant = host->variant;
1904 u32 pwr = 0;
1905 unsigned long flags;
1906 int ret;
1907
1908 switch (ios->power_mode) {
1909 case MMC_POWER_OFF:
1910 if (!IS_ERR(mmc->supply.vmmc))
1911 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
1912
1913 if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) {
1914 regulator_disable(mmc->supply.vqmmc);
1915 host->vqmmc_enabled = false;
1916 }
1917
1918 break;
1919 case MMC_POWER_UP:
1920 if (!IS_ERR(mmc->supply.vmmc))
1921 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
1922
1923 /*
1924 * The ST Micro variant doesn't have the PL180s MCI_PWR_UP
1925 * and instead uses MCI_PWR_ON so apply whatever value is
1926 * configured in the variant data.
1927 */
1928 pwr |= variant->pwrreg_powerup;
1929
1930 break;
1931 case MMC_POWER_ON:
1932 if (!IS_ERR(ptr: mmc->supply.vqmmc) && !host->vqmmc_enabled) {
1933 ret = regulator_enable(regulator: mmc->supply.vqmmc);
1934 if (ret < 0)
1935 dev_err(mmc_dev(mmc),
1936 "failed to enable vqmmc regulator\n");
1937 else
1938 host->vqmmc_enabled = true;
1939 }
1940
1941 pwr |= MCI_PWR_ON;
1942 break;
1943 }
1944
1945 if (variant->signal_direction && ios->power_mode != MMC_POWER_OFF) {
1946 /*
1947 * The ST Micro variant has some additional bits
1948 * indicating signal direction for the signals in
1949 * the SD/MMC bus and feedback-clock usage.
1950 */
1951 pwr |= host->pwr_reg_add;
1952
1953 if (ios->bus_width == MMC_BUS_WIDTH_4)
1954 pwr &= ~MCI_ST_DATA74DIREN;
1955 else if (ios->bus_width == MMC_BUS_WIDTH_1)
1956 pwr &= (~MCI_ST_DATA74DIREN &
1957 ~MCI_ST_DATA31DIREN &
1958 ~MCI_ST_DATA2DIREN);
1959 }
1960
1961 if (variant->opendrain) {
1962 if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
1963 pwr |= variant->opendrain;
1964 } else {
1965 /*
1966 * If the variant cannot configure the pads by its own, then we
1967 * expect the pinctrl to be able to do that for us
1968 */
1969 if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
1970 pinctrl_select_state(p: host->pinctrl, s: host->pins_opendrain);
1971 else
1972 pinctrl_select_default_state(mmc_dev(mmc));
1973 }
1974
1975 /*
1976 * If clock = 0 and the variant requires the MMCIPOWER to be used for
1977 * gating the clock, the MCI_PWR_ON bit is cleared.
1978 */
1979 if (!ios->clock && variant->pwrreg_clkgate)
1980 pwr &= ~MCI_PWR_ON;
1981
1982 if (host->variant->explicit_mclk_control &&
1983 ios->clock != host->clock_cache) {
1984 ret = clk_set_rate(clk: host->clk, rate: ios->clock);
1985 if (ret < 0)
1986 dev_err(mmc_dev(host->mmc),
1987 "Error setting clock rate (%d)\n", ret);
1988 else
1989 host->mclk = clk_get_rate(clk: host->clk);
1990 }
1991 host->clock_cache = ios->clock;
1992
1993 spin_lock_irqsave(&host->lock, flags);
1994
1995 if (host->ops && host->ops->set_clkreg)
1996 host->ops->set_clkreg(host, ios->clock);
1997 else
1998 mmci_set_clkreg(host, desired: ios->clock);
1999
2000 mmci_set_max_busy_timeout(mmc);
2001
2002 if (host->ops && host->ops->set_pwrreg)
2003 host->ops->set_pwrreg(host, pwr);
2004 else
2005 mmci_write_pwrreg(host, pwr);
2006
2007 mmci_reg_delay(host);
2008
2009 spin_unlock_irqrestore(lock: &host->lock, flags);
2010}
2011
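/*
 * mmci_get_cd() - report card presence from the CD GPIO, falling back to the
 * platform data status() callback, or assume the card is always present.
 */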
static int mmci_get_cd(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmci_platform_data *plat = host->plat;
	unsigned int status = mmc_gpio_get_cd(mmc);

	if (status == -ENOSYS) {
		if (!plat->status)
			return 1; /* Assume always present */

		status = plat->status(mmc_dev(host->mmc));
	}
	return status;
}

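/*
 * Switch the I/O signal voltage via the vqmmc regulator and, where the
 * variant provides one, run its post_sig_volt_switch() hook afterwards.
 */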
static int mmci_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmci_host *host = mmc_priv(mmc);
	int ret;

	ret = mmc_regulator_set_vqmmc(mmc, ios);

	if (!ret && host->ops && host->ops->post_sig_volt_switch)
		ret = host->ops->post_sig_volt_switch(host, ios);
	else if (ret)
		ret = 0;

	if (ret < 0)
		dev_warn(mmc_dev(mmc), "Voltage switch failed\n");

	return ret;
}

static struct mmc_host_ops mmci_ops = {
	.request = mmci_request,
	.pre_req = mmci_pre_request,
	.post_req = mmci_post_request,
	.set_ios = mmci_set_ios,
	.get_ro = mmc_gpio_get_ro,
	.get_cd = mmci_get_cd,
	.start_signal_voltage_switch = mmci_sig_volt_switch,
};

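/*
 * Check for an external level translator on "st,use-ckin" setups by driving
 * the CK pad via a GPIO and sampling it back on CKIN; if the signal does not
 * propagate, the feedback clock selection is disabled again.
 */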
static void mmci_probe_level_translator(struct mmc_host *mmc)
{
	struct device *dev = mmc_dev(mmc);
	struct mmci_host *host = mmc_priv(mmc);
	struct gpio_desc *cmd_gpio;
	struct gpio_desc *ck_gpio;
	struct gpio_desc *ckin_gpio;
	int clk_hi, clk_lo;

	/*
	 * Assume the level translator is present if st,use-ckin is set.
	 * This is to cater for DTs which do not implement this test.
	 */
	host->clk_reg_add |= MCI_STM32_CLK_SELCKIN;

	cmd_gpio = gpiod_get(dev, "st,cmd", GPIOD_OUT_HIGH);
	if (IS_ERR(cmd_gpio))
		goto exit_cmd;

	ck_gpio = gpiod_get(dev, "st,ck", GPIOD_OUT_HIGH);
	if (IS_ERR(ck_gpio))
		goto exit_ck;

	ckin_gpio = gpiod_get(dev, "st,ckin", GPIOD_IN);
	if (IS_ERR(ckin_gpio))
		goto exit_ckin;

	/* All GPIOs are valid, test whether level translator works */

	/* Sample CKIN */
	clk_hi = !!gpiod_get_value(ckin_gpio);

	/* Set CK low */
	gpiod_set_value(ck_gpio, 0);

	/* Sample CKIN */
	clk_lo = !!gpiod_get_value(ckin_gpio);

	/* Tristate all */
	gpiod_direction_input(cmd_gpio);
	gpiod_direction_input(ck_gpio);

	/* Level translator is present if CK signal is propagated to CKIN */
	if (!clk_hi || clk_lo) {
		host->clk_reg_add &= ~MCI_STM32_CLK_SELCKIN;
		dev_warn(dev,
			 "Level translator inoperable, CK signal not detected on CKIN, disabling.\n");
	}

	gpiod_put(ckin_gpio);

exit_ckin:
	gpiod_put(ck_gpio);
exit_ck:
	gpiod_put(cmd_gpio);
exit_cmd:
	pinctrl_select_default_state(dev);
}

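/*
 * Parse the MMCI specific DT properties (signal direction, feedback clock,
 * clock edge and highspeed capabilities) on top of the generic mmc_of_parse().
 */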
static int mmci_of_parse(struct device_node *np, struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);
	int ret = mmc_of_parse(mmc);

	if (ret)
		return ret;

	if (of_property_read_bool(np, "st,sig-dir-dat0"))
		host->pwr_reg_add |= MCI_ST_DATA0DIREN;
	if (of_property_read_bool(np, "st,sig-dir-dat2"))
		host->pwr_reg_add |= MCI_ST_DATA2DIREN;
	if (of_property_read_bool(np, "st,sig-dir-dat31"))
		host->pwr_reg_add |= MCI_ST_DATA31DIREN;
	if (of_property_read_bool(np, "st,sig-dir-dat74"))
		host->pwr_reg_add |= MCI_ST_DATA74DIREN;
	if (of_property_read_bool(np, "st,sig-dir-cmd"))
		host->pwr_reg_add |= MCI_ST_CMDDIREN;
	if (of_property_read_bool(np, "st,sig-pin-fbclk"))
		host->pwr_reg_add |= MCI_ST_FBCLKEN;
	if (of_property_read_bool(np, "st,sig-dir"))
		host->pwr_reg_add |= MCI_STM32_DIRPOL;
	if (of_property_read_bool(np, "st,neg-edge"))
		host->clk_reg_add |= MCI_STM32_CLK_NEGEDGE;
	if (of_property_read_bool(np, "st,use-ckin"))
		mmci_probe_level_translator(mmc);

	if (of_property_read_bool(np, "mmc-cap-mmc-highspeed"))
		mmc->caps |= MMC_CAP_MMC_HIGHSPEED;
	if (of_property_read_bool(np, "mmc-cap-sd-highspeed"))
		mmc->caps |= MMC_CAP_SD_HIGHSPEED;

	return 0;
}

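/*
 * mmci_probe() - the AMBA bus probe routine. Parse platform data/DT, claim
 * pinctrl, clock and MMIO resources, derive f_min/f_max from the variant
 * data, set up capabilities, request the IRQs and register the mmc host.
 */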
static int mmci_probe(struct amba_device *dev,
	const struct amba_id *id)
{
	struct mmci_platform_data *plat = dev->dev.platform_data;
	struct device_node *np = dev->dev.of_node;
	struct variant_data *variant = id->data;
	struct mmci_host *host;
	struct mmc_host *mmc;
	int ret;

	/* Must have platform data or Device Tree. */
	if (!plat && !np) {
		dev_err(&dev->dev, "No plat data or DT found\n");
		return -EINVAL;
	}

	if (!plat) {
		plat = devm_kzalloc(&dev->dev, sizeof(*plat), GFP_KERNEL);
		if (!plat)
			return -ENOMEM;
	}

	mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
	if (!mmc)
		return -ENOMEM;

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->mmc_ops = &mmci_ops;
	mmc->ops = &mmci_ops;

	ret = mmci_of_parse(np, mmc);
	if (ret)
		goto host_free;

	/*
	 * Some variants (e.g. STM32) don't have an opendrain bit; nevertheless
	 * the pins can be configured accordingly using pinctrl.
	 */
	if (!variant->opendrain) {
		host->pinctrl = devm_pinctrl_get(&dev->dev);
		if (IS_ERR(host->pinctrl)) {
			dev_err(&dev->dev, "failed to get pinctrl");
			ret = PTR_ERR(host->pinctrl);
			goto host_free;
		}

		host->pins_opendrain = pinctrl_lookup_state(host->pinctrl,
							    MMCI_PINCTRL_STATE_OPENDRAIN);
		if (IS_ERR(host->pins_opendrain)) {
			dev_err(mmc_dev(mmc), "Can't select opendrain pins\n");
			ret = PTR_ERR(host->pins_opendrain);
			goto host_free;
		}
	}

	host->hw_designer = amba_manf(dev);
	host->hw_revision = amba_rev(dev);
	dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer);
	dev_dbg(mmc_dev(mmc), "revision = 0x%01x\n", host->hw_revision);

	host->clk = devm_clk_get(&dev->dev, NULL);
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		goto host_free;
	}

	ret = clk_prepare_enable(host->clk);
	if (ret)
		goto host_free;

	if (variant->qcom_fifo)
		host->get_rx_fifocnt = mmci_qcom_get_rx_fifocnt;
	else
		host->get_rx_fifocnt = mmci_get_rx_fifocnt;

	host->plat = plat;
	host->variant = variant;
	host->mclk = clk_get_rate(host->clk);
	/*
	 * According to the spec, mclk is max 100 MHz,
	 * so we try to adjust the clock down to this
	 * (if possible).
	 */
	if (host->mclk > variant->f_max) {
		ret = clk_set_rate(host->clk, variant->f_max);
		if (ret < 0)
			goto clk_disable;
		host->mclk = clk_get_rate(host->clk);
		dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n",
			host->mclk);
	}

	host->phybase = dev->res.start;
	host->base = devm_ioremap_resource(&dev->dev, &dev->res);
	if (IS_ERR(host->base)) {
		ret = PTR_ERR(host->base);
		goto clk_disable;
	}

	if (variant->init)
		variant->init(host);

	/*
	 * The ARM and ST versions of the block have slightly different
	 * clock divider equations which means that the minimum divider
	 * differs too.
	 * On Qualcomm-like controllers, get the nearest minimum clock to
	 * 100 kHz.
	 */
	if (variant->st_clkdiv)
		mmc->f_min = DIV_ROUND_UP(host->mclk, 257);
	else if (variant->stm32_clkdiv)
		mmc->f_min = DIV_ROUND_UP(host->mclk, 2046);
	else if (variant->explicit_mclk_control)
		mmc->f_min = clk_round_rate(host->clk, 100000);
	else
		mmc->f_min = DIV_ROUND_UP(host->mclk, 512);
	/*
	 * If no maximum operating frequency is supplied, fall back to use
	 * the module parameter, which has a (low) default value in case it
	 * is not specified. Either value must not exceed the clock rate into
	 * the block, of course.
	 */
	if (mmc->f_max)
		mmc->f_max = variant->explicit_mclk_control ?
			min(variant->f_max, mmc->f_max) :
			min(host->mclk, mmc->f_max);
	else
		mmc->f_max = variant->explicit_mclk_control ?
			fmax : min(host->mclk, fmax);

	dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);

	host->rst = devm_reset_control_get_optional_exclusive(&dev->dev, NULL);
	if (IS_ERR(host->rst)) {
		ret = PTR_ERR(host->rst);
		goto clk_disable;
	}
	ret = reset_control_deassert(host->rst);
	if (ret)
		dev_err(mmc_dev(mmc), "failed to de-assert reset\n");

	/* Get regulators and the supported OCR mask */
	ret = mmc_regulator_get_supply(mmc);
	if (ret)
		goto clk_disable;

	if (!mmc->ocr_avail)
		mmc->ocr_avail = plat->ocr_mask;
	else if (plat->ocr_mask)
		dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");

	/* We support these capabilities. */
	mmc->caps |= MMC_CAP_CMD23;

	/*
	 * Enable busy detection.
	 */
	if (variant->busy_detect) {
		mmci_ops.card_busy = mmci_card_busy;
		/*
		 * Not all variants have a flag to enable busy detection
		 * in the DPSM, but if they do, set it here.
		 */
		if (variant->busy_dpsm_flag)
			mmci_write_datactrlreg(host,
					       host->variant->busy_dpsm_flag);
		mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
	}

	/* Variants with mandatory busy timeout in HW need R1B responses. */
	if (variant->busy_timeout)
		mmc->caps |= MMC_CAP_NEED_RSP_BUSY;

	/* Prepare a CMD12 - needed to clear the DPSM on some variants. */
	host->stop_abort.opcode = MMC_STOP_TRANSMISSION;
	host->stop_abort.arg = 0;
	host->stop_abort.flags = MMC_RSP_R1B | MMC_CMD_AC;

	/* We support these PM capabilities. */
	mmc->pm_caps |= MMC_PM_KEEP_POWER;

	/*
	 * We can do scatter/gather I/O (SGIO).
	 */
	mmc->max_segs = NR_SG;

	/*
	 * Since only a certain number of bits are valid in the data length
	 * register, we must ensure that we don't exceed 2^num-1 bytes in a
	 * single request.
	 */
	mmc->max_req_size = (1 << variant->datalength_bits) - 1;

	/*
	 * Set the maximum segment size. Since we aren't doing DMA
	 * (yet) we are only limited by the data length register.
	 */
	mmc->max_seg_size = mmc->max_req_size;

	/*
	 * Block size can be up to 2048 bytes, but must be a power of two.
	 */
	mmc->max_blk_size = 1 << variant->datactrl_blocksz;

	/*
	 * Limit the number of blocks transferred so that we don't overflow
	 * the maximum request size.
	 */
	mmc->max_blk_count = mmc->max_req_size >> variant->datactrl_blocksz;

	spin_lock_init(&host->lock);

	writel(0, host->base + MMCIMASK0);

	if (variant->mmcimask1)
		writel(0, host->base + MMCIMASK1);

	writel(0xfff, host->base + MMCICLEAR);

	/*
	 * If we are not using DT but using a descriptor table, or if we are
	 * using a table of descriptors alongside DT, look up the descriptors
	 * named "cd" and "wp" right here and fail silently if they do not
	 * exist.
	 */
	if (!np) {
		ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0);
		if (ret == -EPROBE_DEFER)
			goto clk_disable;

		ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0);
		if (ret == -EPROBE_DEFER)
			goto clk_disable;
	}

	ret = devm_request_threaded_irq(&dev->dev, dev->irq[0], mmci_irq,
					mmci_irq_thread, IRQF_SHARED,
					DRIVER_NAME " (cmd)", host);
	if (ret)
		goto clk_disable;

	if (!dev->irq[1])
		host->singleirq = true;
	else {
		ret = devm_request_irq(&dev->dev, dev->irq[1], mmci_pio_irq,
				       IRQF_SHARED, DRIVER_NAME " (pio)", host);
		if (ret)
			goto clk_disable;
	}

	if (host->variant->busy_detect)
		INIT_DELAYED_WORK(&host->ux500_busy_timeout_work,
				  ux500_busy_timeout_work);

	writel(MCI_IRQENABLE | variant->start_err, host->base + MMCIMASK0);

	amba_set_drvdata(dev, mmc);

	dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n",
		 mmc_hostname(mmc), amba_part(dev), amba_manf(dev),
		 amba_rev(dev), (unsigned long long)dev->res.start,
		 dev->irq[0], dev->irq[1]);

	mmci_dma_setup(host);

	pm_runtime_set_autosuspend_delay(&dev->dev, 50);
	pm_runtime_use_autosuspend(&dev->dev);

	ret = mmc_add_host(mmc);
	if (ret)
		goto clk_disable;

	pm_runtime_put(&dev->dev);
	return 0;

 clk_disable:
	clk_disable_unprepare(host->clk);
 host_free:
	mmc_free_host(mmc);
	return ret;
}

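/*
 * mmci_remove() - undo probe: take a runtime PM reference, unregister the
 * mmc host, mask and clear the controller registers, then release the DMA
 * channels, clock and host structure.
 */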
static void mmci_remove(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);
		struct variant_data *variant = host->variant;

		/*
		 * Undo pm_runtime_put() in probe. We use the _sync
		 * version here so that we can access the primecell.
		 */
		pm_runtime_get_sync(&dev->dev);

		mmc_remove_host(mmc);

		writel(0, host->base + MMCIMASK0);

		if (variant->mmcimask1)
			writel(0, host->base + MMCIMASK1);

		writel(0, host->base + MMCICOMMAND);
		writel(0, host->base + MMCIDATACTRL);

		mmci_dma_release(host);
		clk_disable_unprepare(host->clk);
		mmc_free_host(mmc);
	}
}

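/*
 * Runtime PM helpers: mmci_save() masks interrupts and, for variants that
 * lose register state when unpowered (pwrreg_nopower), clears the data
 * control, power and clock registers; mmci_restore() re-programs them from
 * the cached values and re-enables the interrupt mask.
 */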
#ifdef CONFIG_PM
static void mmci_save(struct mmci_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	writel(0, host->base + MMCIMASK0);
	if (host->variant->pwrreg_nopower) {
		writel(0, host->base + MMCIDATACTRL);
		writel(0, host->base + MMCIPOWER);
		writel(0, host->base + MMCICLOCK);
	}
	mmci_reg_delay(host);

	spin_unlock_irqrestore(&host->lock, flags);
}

static void mmci_restore(struct mmci_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	if (host->variant->pwrreg_nopower) {
		writel(host->clk_reg, host->base + MMCICLOCK);
		writel(host->datactrl_reg, host->base + MMCIDATACTRL);
		writel(host->pwr_reg, host->base + MMCIPOWER);
	}
	writel(MCI_IRQENABLE | host->variant->start_err,
	       host->base + MMCIMASK0);
	mmci_reg_delay(host);

	spin_unlock_irqrestore(&host->lock, flags);
}

static int mmci_runtime_suspend(struct device *dev)
{
	struct amba_device *adev = to_amba_device(dev);
	struct mmc_host *mmc = amba_get_drvdata(adev);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);
		pinctrl_pm_select_sleep_state(dev);
		mmci_save(host);
		clk_disable_unprepare(host->clk);
	}

	return 0;
}

static int mmci_runtime_resume(struct device *dev)
{
	struct amba_device *adev = to_amba_device(dev);
	struct mmc_host *mmc = amba_get_drvdata(adev);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);
		clk_prepare_enable(host->clk);
		mmci_restore(host);
		pinctrl_select_default_state(dev);
	}

	return 0;
}
#endif

static const struct dev_pm_ops mmci_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(mmci_runtime_suspend, mmci_runtime_resume, NULL)
};

static const struct amba_id mmci_ids[] = {
	{
		.id = 0x00041180,
		.mask = 0xff0fffff,
		.data = &variant_arm,
	},
	{
		.id = 0x01041180,
		.mask = 0xff0fffff,
		.data = &variant_arm_extended_fifo,
	},
	{
		.id = 0x02041180,
		.mask = 0xff0fffff,
		.data = &variant_arm_extended_fifo_hwfc,
	},
	{
		.id = 0x00041181,
		.mask = 0x000fffff,
		.data = &variant_arm,
	},
	/* ST Micro variants */
	{
		.id = 0x00180180,
		.mask = 0x00ffffff,
		.data = &variant_u300,
	},
	{
		.id = 0x10180180,
		.mask = 0xf0ffffff,
		.data = &variant_nomadik,
	},
	{
		.id = 0x00280180,
		.mask = 0x00ffffff,
		.data = &variant_nomadik,
	},
	{
		.id = 0x00480180,
		.mask = 0xf0ffffff,
		.data = &variant_ux500,
	},
	{
		.id = 0x10480180,
		.mask = 0xf0ffffff,
		.data = &variant_ux500v2,
	},
	{
		.id = 0x00880180,
		.mask = 0x00ffffff,
		.data = &variant_stm32,
	},
	{
		.id = 0x10153180,
		.mask = 0xf0ffffff,
		.data = &variant_stm32_sdmmc,
	},
	{
		.id = 0x00253180,
		.mask = 0xf0ffffff,
		.data = &variant_stm32_sdmmcv2,
	},
	{
		.id = 0x20253180,
		.mask = 0xf0ffffff,
		.data = &variant_stm32_sdmmcv2,
	},
	{
		.id = 0x00353180,
		.mask = 0xf0ffffff,
		.data = &variant_stm32_sdmmcv3,
	},
	/* Qualcomm variants */
	{
		.id = 0x00051180,
		.mask = 0x000fffff,
		.data = &variant_qcom,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, mmci_ids);

static struct amba_driver mmci_driver = {
	.drv = {
		.name = DRIVER_NAME,
		.pm = &mmci_dev_pm_ops,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
	.probe = mmci_probe,
	.remove = mmci_remove,
	.id_table = mmci_ids,
};

module_amba_driver(mmci_driver);

module_param(fmax, uint, 0444);

MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver");
MODULE_LICENSE("GPL");
