// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) STMicroelectronics 2018 - All Rights Reserved
 * Author: Ludovic.barre@st.com for STMicroelectronics.
 */
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/iopoll.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/of_address.h>
#include <linux/reset.h>
#include <linux/scatterlist.h>
#include "mmci.h"

#define SDMMC_LLI_BUF_LEN	PAGE_SIZE

#define DLYB_CR			0x0
#define DLYB_CR_DEN		BIT(0)
#define DLYB_CR_SEN		BIT(1)

#define DLYB_CFGR		0x4
#define DLYB_CFGR_SEL_MASK	GENMASK(3, 0)
#define DLYB_CFGR_UNIT_MASK	GENMASK(14, 8)
#define DLYB_CFGR_LNG_MASK	GENMASK(27, 16)
#define DLYB_CFGR_LNGF		BIT(31)

#define DLYB_NB_DELAY		11
#define DLYB_CFGR_SEL_MAX	(DLYB_NB_DELAY + 1)
#define DLYB_CFGR_UNIT_MAX	127

#define DLYB_LNG_TIMEOUT_US	1000
#define SDMMC_VSWEND_TIMEOUT_US	10000

#define SYSCFG_DLYBSD_CR	0x0
#define DLYBSD_CR_EN		BIT(0)
#define DLYBSD_CR_RXTAPSEL_MASK	GENMASK(6, 1)
#define DLYBSD_TAPSEL_NB	32
#define DLYBSD_BYP_EN		BIT(16)
#define DLYBSD_BYP_CMD		GENMASK(21, 17)
#define DLYBSD_ANTIGLITCH_EN	BIT(22)

#define SYSCFG_DLYBSD_SR	0x4
#define DLYBSD_SR_LOCK		BIT(0)
#define DLYBSD_SR_RXTAPSEL_ACK	BIT(1)

#define DLYBSD_TIMEOUT_1S_IN_US	1000000

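/*
 * One IDMA linked-list item: idmalar holds the link address plus the
 * ULA/ULS/ABR control bits, idmabase the DMA address of the buffer and
 * idmasize its length, mirroring the MMCI_STM32_IDMA{LAR,BASE0R,BSIZER}
 * registers programmed in sdmmc_idma_start().
 */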
struct sdmmc_lli_desc {
	u32 idmalar;
	u32 idmabase;
	u32 idmasize;
};

struct sdmmc_idma {
	dma_addr_t sg_dma;
	void *sg_cpu;
	dma_addr_t bounce_dma_addr;
	void *bounce_buf;
	bool use_bounce_buffer;
};

struct sdmmc_dlyb;

struct sdmmc_tuning_ops {
	int (*dlyb_enable)(struct sdmmc_dlyb *dlyb);
	void (*set_input_ck)(struct sdmmc_dlyb *dlyb);
	int (*tuning_prepare)(struct mmci_host *host);
	int (*set_cfg)(struct sdmmc_dlyb *dlyb, int unit __maybe_unused,
		       int phase, bool sampler __maybe_unused);
};

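/*
 * Delay block state: 'unit' is the delay unit selected while scanning,
 * 'max' the highest phase (mp15) or RX tap (mp25) tried during tuning.
 */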
struct sdmmc_dlyb {
	void __iomem *base;
	u32 unit;
	u32 max;
	struct sdmmc_tuning_ops *ops;
};

static int sdmmc_idma_validate_data(struct mmci_host *host,
				    struct mmc_data *data)
{
	struct sdmmc_idma *idma = host->dma_priv;
	struct device *dev = mmc_dev(host->mmc);
	struct scatterlist *sg;
	int i;

	/*
	 * idma has constraints on idmabase & idmasize for each element,
	 * except the last one, which has no constraint on idmasize.
	 */
	idma->use_bounce_buffer = false;
	for_each_sg(data->sg, sg, data->sg_len - 1, i) {
		if (!IS_ALIGNED(sg->offset, sizeof(u32)) ||
		    !IS_ALIGNED(sg->length,
				host->variant->stm32_idmabsize_align)) {
			dev_dbg(mmc_dev(host->mmc),
				"unaligned scatterlist: ofst:%x length:%d\n",
				sg->offset, sg->length);
			goto use_bounce_buffer;
		}
	}

	if (!IS_ALIGNED(sg->offset, sizeof(u32))) {
		dev_dbg(mmc_dev(host->mmc),
			"unaligned last scatterlist: ofst:%x length:%d\n",
			sg->offset, sg->length);
		goto use_bounce_buffer;
	}

	return 0;

use_bounce_buffer:
	if (!idma->bounce_buf) {
		idma->bounce_buf = dmam_alloc_coherent(dev,
						       host->mmc->max_req_size,
						       &idma->bounce_dma_addr,
						       GFP_KERNEL);
		if (!idma->bounce_buf) {
			dev_err(dev, "Unable to allocate DMA bounce buffer.\n");
			return -ENOMEM;
		}
	}

	idma->use_bounce_buffer = true;

	return 0;
}

static int _sdmmc_idma_prep_data(struct mmci_host *host,
				 struct mmc_data *data)
{
	struct sdmmc_idma *idma = host->dma_priv;

	if (idma->use_bounce_buffer) {
		if (data->flags & MMC_DATA_WRITE) {
			unsigned int xfer_bytes = data->blksz * data->blocks;

			sg_copy_to_buffer(data->sg, data->sg_len,
					  idma->bounce_buf, xfer_bytes);
			dma_wmb();
		}
	} else {
		int n_elem;

		n_elem = dma_map_sg(mmc_dev(host->mmc),
				    data->sg,
				    data->sg_len,
				    mmc_get_dma_dir(data));

		if (!n_elem) {
			dev_err(mmc_dev(host->mmc), "dma_map_sg failed\n");
			return -EINVAL;
		}
	}
	return 0;
}

static int sdmmc_idma_prep_data(struct mmci_host *host,
				struct mmc_data *data, bool next)
{
	/* Check if job is already prepared. */
	if (!next && data->host_cookie == host->next_cookie)
		return 0;

	return _sdmmc_idma_prep_data(host, data);
}

static void sdmmc_idma_unprep_data(struct mmci_host *host,
				   struct mmc_data *data, int err)
{
	struct sdmmc_idma *idma = host->dma_priv;

	if (idma->use_bounce_buffer) {
		if (data->flags & MMC_DATA_READ) {
			unsigned int xfer_bytes = data->blksz * data->blocks;

			sg_copy_from_buffer(data->sg, data->sg_len,
					    idma->bounce_buf, xfer_bytes);
		}
	} else {
		dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
			     mmc_get_dma_dir(data));
	}
}

static int sdmmc_idma_setup(struct mmci_host *host)
{
	struct sdmmc_idma *idma;
	struct device *dev = mmc_dev(host->mmc);

	idma = devm_kzalloc(dev, sizeof(*idma), GFP_KERNEL);
	if (!idma)
		return -ENOMEM;

	host->dma_priv = idma;

	if (host->variant->dma_lli) {
		idma->sg_cpu = dmam_alloc_coherent(dev, SDMMC_LLI_BUF_LEN,
						   &idma->sg_dma, GFP_KERNEL);
		if (!idma->sg_cpu) {
			dev_err(dev, "Failed to alloc IDMA descriptor\n");
			return -ENOMEM;
		}
		host->mmc->max_segs = SDMMC_LLI_BUF_LEN /
			sizeof(struct sdmmc_lli_desc);
		host->mmc->max_seg_size = host->variant->stm32_idmabsize_mask;

		host->mmc->max_req_size = SZ_1M;
	} else {
		host->mmc->max_segs = 1;
		host->mmc->max_seg_size = host->mmc->max_req_size;
	}

	return dma_set_max_seg_size(dev, host->mmc->max_seg_size);
}
static int sdmmc_idma_start(struct mmci_host *host, unsigned int *datactrl)
{
	struct sdmmc_idma *idma = host->dma_priv;
	struct sdmmc_lli_desc *desc = (struct sdmmc_lli_desc *)idma->sg_cpu;
	struct mmc_data *data = host->data;
	struct scatterlist *sg;
	int i;

	if (!host->variant->dma_lli || data->sg_len == 1 ||
	    idma->use_bounce_buffer) {
		u32 dma_addr;

		if (idma->use_bounce_buffer)
			dma_addr = idma->bounce_dma_addr;
		else
			dma_addr = sg_dma_address(data->sg);

		writel_relaxed(dma_addr,
			       host->base + MMCI_STM32_IDMABASE0R);
		writel_relaxed(MMCI_STM32_IDMAEN,
			       host->base + MMCI_STM32_IDMACTRLR);
		return 0;
	}

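	/*
	 * Build one linked-list descriptor per scatterlist element: each
	 * entry links to the following descriptor and sets the ULA/ULS/ABR
	 * control bits; clearing ULA on the last entry marks the end of
	 * the chain (see below).
	 */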
	for_each_sg(data->sg, sg, data->sg_len, i) {
		desc[i].idmalar = (i + 1) * sizeof(struct sdmmc_lli_desc);
		desc[i].idmalar |= MMCI_STM32_ULA | MMCI_STM32_ULS
			| MMCI_STM32_ABR;
		desc[i].idmabase = sg_dma_address(sg);
		desc[i].idmasize = sg_dma_len(sg);
	}

	/* mark the end of the link list */
	desc[data->sg_len - 1].idmalar &= ~MMCI_STM32_ULA;

	dma_wmb();
	writel_relaxed(idma->sg_dma, host->base + MMCI_STM32_IDMABAR);
	writel_relaxed(desc[0].idmalar, host->base + MMCI_STM32_IDMALAR);
	writel_relaxed(desc[0].idmabase, host->base + MMCI_STM32_IDMABASE0R);
	writel_relaxed(desc[0].idmasize, host->base + MMCI_STM32_IDMABSIZER);
	writel_relaxed(MMCI_STM32_IDMAEN | MMCI_STM32_IDMALLIEN,
		       host->base + MMCI_STM32_IDMACTRLR);

	return 0;
}

static void sdmmc_idma_finalize(struct mmci_host *host, struct mmc_data *data)
{
	writel_relaxed(0, host->base + MMCI_STM32_IDMACTRLR);

	if (!data->host_cookie)
		sdmmc_idma_unprep_data(host, data, 0);
}

static void mmci_sdmmc_set_clkreg(struct mmci_host *host, unsigned int desired)
{
	unsigned int clk = 0, ddr = 0;

	if (host->mmc->ios.timing == MMC_TIMING_MMC_DDR52 ||
	    host->mmc->ios.timing == MMC_TIMING_UHS_DDR50)
		ddr = MCI_STM32_CLK_DDR;

	/*
	 * cclk = mclk / (2 * clkdiv)
	 * clkdiv 0 => bypass
	 * in ddr mode bypass is not possible
	 */
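	/*
	 * Worked example (hypothetical values): with mclk = 100 MHz and a
	 * desired rate of 25 MHz, clkdiv = DIV_ROUND_UP(100M, 2 * 25M) = 2
	 * and cclk = 100M / (2 * 2) = 25 MHz.
	 */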
	if (desired) {
		if (desired >= host->mclk && !ddr) {
			host->cclk = host->mclk;
		} else {
			clk = DIV_ROUND_UP(host->mclk, 2 * desired);
			if (clk > MCI_STM32_CLK_CLKDIV_MSK)
				clk = MCI_STM32_CLK_CLKDIV_MSK;
			host->cclk = host->mclk / (2 * clk);
		}
	} else {
		/*
		 * During the power-on phase the clock can't be set to 0;
		 * only power-off and power-cycle deactivate the clock.
		 * If the desired clock is 0, set the maximum divider.
		 */
		clk = MCI_STM32_CLK_CLKDIV_MSK;
		host->cclk = host->mclk / (2 * clk);
	}

	/* Set actual clock for debug */
	if (host->mmc->ios.power_mode == MMC_POWER_ON)
		host->mmc->actual_clock = host->cclk;
	else
		host->mmc->actual_clock = 0;

	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
		clk |= MCI_STM32_CLK_WIDEBUS_4;
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
		clk |= MCI_STM32_CLK_WIDEBUS_8;

	clk |= MCI_STM32_CLK_HWFCEN;
	clk |= host->clk_reg_add;
	clk |= ddr;

	if (host->mmc->ios.timing >= MMC_TIMING_UHS_SDR50)
		clk |= MCI_STM32_CLK_BUSSPEED;

	mmci_write_clkreg(host, clk);
}

static void sdmmc_dlyb_mp15_input_ck(struct sdmmc_dlyb *dlyb)
{
	if (!dlyb || !dlyb->base)
		return;

	/* Output clock = Input clock */
	writel_relaxed(0, dlyb->base + DLYB_CR);
}

static void mmci_sdmmc_set_pwrreg(struct mmci_host *host, unsigned int pwr)
{
	struct mmc_ios ios = host->mmc->ios;
	struct sdmmc_dlyb *dlyb = host->variant_priv;

	/* adds OF options */
	pwr = host->pwr_reg_add;

	if (dlyb && dlyb->ops->set_input_ck)
		dlyb->ops->set_input_ck(dlyb);

	if (ios.power_mode == MMC_POWER_OFF) {
		/* Only a reset could power-off sdmmc */
		reset_control_assert(host->rst);
		udelay(2);
		reset_control_deassert(host->rst);

		/*
		 * Set the SDMMC in Power-cycle state.
		 * This drives the SDMMC_D[7:0], SDMMC_CMD and SDMMC_CK
		 * lines low, to prevent the card from being supplied
		 * through the signal lines.
		 */
		mmci_write_pwrreg(host, MCI_STM32_PWR_CYC | pwr);
	} else if (ios.power_mode == MMC_POWER_ON) {
		/*
		 * After power-off (reset), the irq mask defined in the
		 * probe function is lost; the default irq mask (set in
		 * probe) must be restored.
		 */
		writel(MCI_IRQENABLE | host->variant->start_err,
		       host->base + MMCIMASK0);

		/* preserves voltage switch bits */
		pwr |= host->pwr_reg & (MCI_STM32_VSWITCHEN |
					MCI_STM32_VSWITCH);

		/*
		 * After a power-cycle state, we must set the SDMMC in
		 * Power-off: the SDMMC_D[7:0], SDMMC_CMD and SDMMC_CK lines
		 * are driven high. Then we can set the SDMMC to the
		 * Power-on state.
		 */
		mmci_write_pwrreg(host, MCI_PWR_OFF | pwr);
		mdelay(1);
		mmci_write_pwrreg(host, MCI_PWR_ON | pwr);
	}
}

static u32 sdmmc_get_dctrl_cfg(struct mmci_host *host)
{
	u32 datactrl;

	datactrl = mmci_dctrl_blksz(host);

	if (host->hw_revision >= 3) {
		u32 thr = 0;

		if (host->mmc->ios.timing == MMC_TIMING_UHS_SDR104 ||
		    host->mmc->ios.timing == MMC_TIMING_MMC_HS200) {
			thr = ffs(min_t(unsigned int, host->data->blksz,
					host->variant->fifosize));
			thr = min_t(u32, thr, MMCI_STM32_THR_MASK);
		}

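		/*
		 * Worked example (hypothetical values): with blksz = 512
		 * and fifosize >= 512, thr = ffs(512) = 10, then capped by
		 * MMCI_STM32_THR_MASK before being programmed below.
		 */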
		writel_relaxed(thr, host->base + MMCI_STM32_FIFOTHRR);
	}

	if (host->mmc->card && mmc_card_sdio(host->mmc->card) &&
	    host->data->blocks == 1)
		datactrl |= MCI_DPSM_STM32_MODE_SDIO;
	else if (host->data->stop && !host->mrq->sbc)
		datactrl |= MCI_DPSM_STM32_MODE_BLOCK_STOP;
	else
		datactrl |= MCI_DPSM_STM32_MODE_BLOCK;

	return datactrl;
}

static bool sdmmc_busy_complete(struct mmci_host *host, struct mmc_command *cmd,
				u32 status, u32 err_msk)
{
	void __iomem *base = host->base;
	u32 busy_d0, busy_d0end, mask, sdmmc_status;

	mask = readl_relaxed(base + MMCIMASK0);
	sdmmc_status = readl_relaxed(base + MMCISTATUS);
	busy_d0end = sdmmc_status & MCI_STM32_BUSYD0END;
	busy_d0 = sdmmc_status & MCI_STM32_BUSYD0;

	/* complete if there is an error or busy_d0end */
	if ((status & err_msk) || busy_d0end)
		goto complete;

	/*
	 * On response, busy signalling is reflected in the BUSYD0 flag.
	 * If busy_d0 is in progress, we must activate the busyd0end
	 * interrupt to wait for its completion. Otherwise this request
	 * has no busy step.
	 */
	if (busy_d0) {
		if (!host->busy_status) {
			writel_relaxed(mask | host->variant->busy_detect_mask,
				       base + MMCIMASK0);
			host->busy_status = status &
				(MCI_CMDSENT | MCI_CMDRESPEND);
		}
		return false;
	}

complete:
	if (host->busy_status) {
		writel_relaxed(mask & ~host->variant->busy_detect_mask,
			       base + MMCIMASK0);
		host->busy_status = 0;
	}

	writel_relaxed(host->variant->busy_detect_mask, base + MMCICLEAR);

	return true;
}

static int sdmmc_dlyb_mp15_enable(struct sdmmc_dlyb *dlyb)
{
	writel_relaxed(DLYB_CR_DEN, dlyb->base + DLYB_CR);

	return 0;
}

static int sdmmc_dlyb_mp15_set_cfg(struct sdmmc_dlyb *dlyb,
				   int unit, int phase, bool sampler)
{
	u32 cfgr;

	writel_relaxed(DLYB_CR_SEN | DLYB_CR_DEN, dlyb->base + DLYB_CR);

	cfgr = FIELD_PREP(DLYB_CFGR_UNIT_MASK, unit) |
	       FIELD_PREP(DLYB_CFGR_SEL_MASK, phase);
	writel_relaxed(cfgr, dlyb->base + DLYB_CFGR);

	if (!sampler)
		writel_relaxed(DLYB_CR_DEN, dlyb->base + DLYB_CR);

	return 0;
}

static int sdmmc_dlyb_mp15_prepare(struct mmci_host *host)
{
	struct sdmmc_dlyb *dlyb = host->variant_priv;
	u32 cfgr;
	int i, lng, ret;

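	/*
	 * Sweep the delay unit: for each candidate, wait for LNGF and
	 * accept the first unit whose delay-line length (LNG) is non-zero
	 * with its highest tap (bit DLYB_NB_DELAY) still clear.
	 */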
	for (i = 0; i <= DLYB_CFGR_UNIT_MAX; i++) {
		dlyb->ops->set_cfg(dlyb, i, DLYB_CFGR_SEL_MAX, true);

		ret = readl_relaxed_poll_timeout(dlyb->base + DLYB_CFGR, cfgr,
						 (cfgr & DLYB_CFGR_LNGF),
						 1, DLYB_LNG_TIMEOUT_US);
		if (ret) {
			dev_warn(mmc_dev(host->mmc),
				 "delay line cfg timeout unit:%d cfgr:%d\n",
				 i, cfgr);
			continue;
		}

		lng = FIELD_GET(DLYB_CFGR_LNG_MASK, cfgr);
		if (lng < BIT(DLYB_NB_DELAY) && lng > 0)
			break;
	}

	if (i > DLYB_CFGR_UNIT_MAX)
		return -EINVAL;

	dlyb->unit = i;
	dlyb->max = __fls(lng);

	return 0;
}

static int sdmmc_dlyb_mp25_enable(struct sdmmc_dlyb *dlyb)
{
	u32 cr, sr;

	cr = readl_relaxed(dlyb->base + SYSCFG_DLYBSD_CR);
	cr |= DLYBSD_CR_EN;

	writel_relaxed(cr, dlyb->base + SYSCFG_DLYBSD_CR);

	return readl_relaxed_poll_timeout(dlyb->base + SYSCFG_DLYBSD_SR,
					  sr, sr & DLYBSD_SR_LOCK, 1,
					  DLYBSD_TIMEOUT_1S_IN_US);
}

static int sdmmc_dlyb_mp25_set_cfg(struct sdmmc_dlyb *dlyb,
				   int unit __maybe_unused, int phase,
				   bool sampler __maybe_unused)
{
	u32 cr, sr;

	cr = readl_relaxed(dlyb->base + SYSCFG_DLYBSD_CR);
	cr &= ~DLYBSD_CR_RXTAPSEL_MASK;
	cr |= FIELD_PREP(DLYBSD_CR_RXTAPSEL_MASK, phase);

	writel_relaxed(cr, dlyb->base + SYSCFG_DLYBSD_CR);

	return readl_relaxed_poll_timeout(dlyb->base + SYSCFG_DLYBSD_SR,
					  sr, sr & DLYBSD_SR_RXTAPSEL_ACK, 1,
					  DLYBSD_TIMEOUT_1S_IN_US);
}

static int sdmmc_dlyb_mp25_prepare(struct mmci_host *host)
{
	struct sdmmc_dlyb *dlyb = host->variant_priv;

	dlyb->max = DLYBSD_TAPSEL_NB;

	return 0;
}

static int sdmmc_dlyb_phase_tuning(struct mmci_host *host, u32 opcode)
{
	struct sdmmc_dlyb *dlyb = host->variant_priv;
	int cur_len = 0, max_len = 0, end_of_len = 0;
	int phase, ret;

	for (phase = 0; phase <= dlyb->max; phase++) {
		ret = dlyb->ops->set_cfg(dlyb, dlyb->unit, phase, false);
		if (ret) {
			dev_err(mmc_dev(host->mmc), "tuning config failed\n");
			return ret;
		}

		if (mmc_send_tuning(host->mmc, opcode, NULL)) {
			cur_len = 0;
		} else {
			cur_len++;
			if (cur_len > max_len) {
				max_len = cur_len;
				end_of_len = phase;
			}
		}
	}

	if (!max_len) {
		dev_err(mmc_dev(host->mmc), "no tuning point found\n");
		return -EINVAL;
	}

	if (dlyb->ops->set_input_ck)
		dlyb->ops->set_input_ck(dlyb);

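	/* Select the phase in the middle of the longest passing window. */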
	phase = end_of_len - max_len / 2;
	ret = dlyb->ops->set_cfg(dlyb, dlyb->unit, phase, false);
	if (ret) {
		dev_err(mmc_dev(host->mmc), "tuning reconfig failed\n");
		return ret;
	}

	dev_dbg(mmc_dev(host->mmc), "unit:%d max_dly:%d phase:%d\n",
		dlyb->unit, dlyb->max, phase);

	return 0;
}

static int sdmmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct sdmmc_dlyb *dlyb = host->variant_priv;
	u32 clk;
	int ret;

	if ((host->mmc->ios.timing != MMC_TIMING_UHS_SDR104 &&
	     host->mmc->ios.timing != MMC_TIMING_MMC_HS200) ||
	    host->mmc->actual_clock <= 50000000)
		return 0;

	if (!dlyb || !dlyb->base)
		return -EINVAL;

	ret = dlyb->ops->dlyb_enable(dlyb);
	if (ret)
		return ret;

	/*
	 * SDMMC_FBCK is selected when an external Delay Block is needed
	 * with SDR104 or HS200.
	 */
	clk = host->clk_reg;
	clk &= ~MCI_STM32_CLK_SEL_MSK;
	clk |= MCI_STM32_CLK_SELFBCK;
	mmci_write_clkreg(host, clk);

	ret = dlyb->ops->tuning_prepare(host);
	if (ret)
		return ret;

	return sdmmc_dlyb_phase_tuning(host, opcode);
}

static void sdmmc_pre_sig_volt_vswitch(struct mmci_host *host)
{
	/* clear the voltage switch completion flag */
	writel_relaxed(MCI_STM32_VSWENDC, host->base + MMCICLEAR);
	/* enable Voltage switch procedure */
	mmci_write_pwrreg(host, host->pwr_reg | MCI_STM32_VSWITCHEN);
}

static int sdmmc_post_sig_volt_switch(struct mmci_host *host,
				      struct mmc_ios *ios)
{
	unsigned long flags;
	u32 status;
	int ret = 0;

	spin_lock_irqsave(&host->lock, flags);
	if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180 &&
	    host->pwr_reg & MCI_STM32_VSWITCHEN) {
		mmci_write_pwrreg(host, host->pwr_reg | MCI_STM32_VSWITCH);
		spin_unlock_irqrestore(&host->lock, flags);

		/* wait up to 10 ms for voltage switch completion */
		ret = readl_relaxed_poll_timeout(host->base + MMCISTATUS,
						 status,
						 (status & MCI_STM32_VSWEND),
						 10, SDMMC_VSWEND_TIMEOUT_US);

		writel_relaxed(MCI_STM32_VSWENDC | MCI_STM32_CKSTOPC,
			       host->base + MMCICLEAR);
		spin_lock_irqsave(&host->lock, flags);
		mmci_write_pwrreg(host, host->pwr_reg &
				  ~(MCI_STM32_VSWITCHEN | MCI_STM32_VSWITCH));
	}
	spin_unlock_irqrestore(&host->lock, flags);

	return ret;
}

static struct mmci_host_ops sdmmc_variant_ops = {
	.validate_data = sdmmc_idma_validate_data,
	.prep_data = sdmmc_idma_prep_data,
	.unprep_data = sdmmc_idma_unprep_data,
	.get_datactrl_cfg = sdmmc_get_dctrl_cfg,
	.dma_setup = sdmmc_idma_setup,
	.dma_start = sdmmc_idma_start,
	.dma_finalize = sdmmc_idma_finalize,
	.set_clkreg = mmci_sdmmc_set_clkreg,
	.set_pwrreg = mmci_sdmmc_set_pwrreg,
	.busy_complete = sdmmc_busy_complete,
	.pre_sig_volt_switch = sdmmc_pre_sig_volt_vswitch,
	.post_sig_volt_switch = sdmmc_post_sig_volt_switch,
};

static struct sdmmc_tuning_ops dlyb_tuning_mp15_ops = {
	.dlyb_enable = sdmmc_dlyb_mp15_enable,
	.set_input_ck = sdmmc_dlyb_mp15_input_ck,
	.tuning_prepare = sdmmc_dlyb_mp15_prepare,
	.set_cfg = sdmmc_dlyb_mp15_set_cfg,
};

static struct sdmmc_tuning_ops dlyb_tuning_mp25_ops = {
	.dlyb_enable = sdmmc_dlyb_mp25_enable,
	.tuning_prepare = sdmmc_dlyb_mp25_prepare,
	.set_cfg = sdmmc_dlyb_mp25_set_cfg,
};

void sdmmc_variant_init(struct mmci_host *host)
{
	struct device_node *np = host->mmc->parent->of_node;
	void __iomem *base_dlyb;
	struct sdmmc_dlyb *dlyb;

	host->ops = &sdmmc_variant_ops;
	host->pwr_reg = readl_relaxed(host->base + MMCIPOWER);

	base_dlyb = devm_of_iomap(mmc_dev(host->mmc), np, 1, NULL);
	if (IS_ERR(base_dlyb))
		return;

	dlyb = devm_kzalloc(mmc_dev(host->mmc), sizeof(*dlyb), GFP_KERNEL);
	if (!dlyb)
		return;

	dlyb->base = base_dlyb;
	if (of_device_is_compatible(np, "st,stm32mp25-sdmmc2"))
		dlyb->ops = &dlyb_tuning_mp25_ops;
	else
		dlyb->ops = &dlyb_tuning_mp15_ops;

	host->variant_priv = dlyb;
	host->mmc_ops->execute_tuning = sdmmc_execute_tuning;
}