// SPDX-License-Identifier: GPL-2.0
/*
 * Freescale eSDHC i.MX controller driver for the platform bus.
 *
 * derived from the OF-version.
 *
 * Copyright (c) 2010 Pengutronix e.K.
 *   Author: Wolfram Sang <kernel@pengutronix.de>
 */

#include <linux/bitfield.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pm_qos.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_data/mmc-esdhc-imx.h>
#include <linux/pm_runtime.h>
#include "sdhci-pltfm.h"
#include "sdhci-esdhc.h"
#include "cqhci.h"

#define ESDHC_SYS_CTRL_DTOCV_MASK	0x0f
#define ESDHC_CTRL_D3CD			0x08
#define ESDHC_BURST_LEN_EN_INCR		(1 << 27)
/* VENDOR SPEC register */
#define ESDHC_VENDOR_SPEC		0xc0
#define ESDHC_VENDOR_SPEC_SDIO_QUIRK	(1 << 1)
#define ESDHC_VENDOR_SPEC_VSELECT	(1 << 1)
#define ESDHC_VENDOR_SPEC_FRC_SDCLK_ON	(1 << 8)
#define ESDHC_DEBUG_SEL_AND_STATUS_REG	0xc2
#define ESDHC_DEBUG_SEL_REG		0xc3
#define ESDHC_DEBUG_SEL_MASK		0xf
#define ESDHC_DEBUG_SEL_CMD_STATE	1
#define ESDHC_DEBUG_SEL_DATA_STATE	2
#define ESDHC_DEBUG_SEL_TRANS_STATE	3
#define ESDHC_DEBUG_SEL_DMA_STATE	4
#define ESDHC_DEBUG_SEL_ADMA_STATE	5
#define ESDHC_DEBUG_SEL_FIFO_STATE	6
#define ESDHC_DEBUG_SEL_ASYNC_FIFO_STATE 7
#define ESDHC_WTMK_LVL			0x44
#define ESDHC_WTMK_DEFAULT_VAL		0x10401040
#define ESDHC_WTMK_LVL_RD_WML_MASK	0x000000FF
#define ESDHC_WTMK_LVL_RD_WML_SHIFT	0
#define ESDHC_WTMK_LVL_WR_WML_MASK	0x00FF0000
#define ESDHC_WTMK_LVL_WR_WML_SHIFT	16
#define ESDHC_WTMK_LVL_WML_VAL_DEF	64
#define ESDHC_WTMK_LVL_WML_VAL_MAX	128
#define ESDHC_MIX_CTRL			0x48
#define ESDHC_MIX_CTRL_DDREN		(1 << 3)
#define ESDHC_MIX_CTRL_AC23EN		(1 << 7)
#define ESDHC_MIX_CTRL_EXE_TUNE		(1 << 22)
#define ESDHC_MIX_CTRL_SMPCLK_SEL	(1 << 23)
#define ESDHC_MIX_CTRL_AUTO_TUNE_EN	(1 << 24)
#define ESDHC_MIX_CTRL_FBCLK_SEL	(1 << 25)
#define ESDHC_MIX_CTRL_HS400_EN		(1 << 26)
#define ESDHC_MIX_CTRL_HS400_ES_EN	(1 << 27)
/* Bits 3 and 6 are not SDHCI standard definitions */
#define ESDHC_MIX_CTRL_SDHCI_MASK	0xb7
/* Tuning bits */
#define ESDHC_MIX_CTRL_TUNING_MASK	0x03c00000

/* dll control register */
#define ESDHC_DLL_CTRL			0x60
#define ESDHC_DLL_OVERRIDE_VAL_SHIFT	9
#define ESDHC_DLL_OVERRIDE_EN_SHIFT	8

/* tune control register */
#define ESDHC_TUNE_CTRL_STATUS		0x68
#define ESDHC_TUNE_CTRL_STEP		1
#define ESDHC_TUNE_CTRL_MIN		0
#define ESDHC_TUNE_CTRL_MAX		((1 << 7) - 1)

/* strobe dll register */
#define ESDHC_STROBE_DLL_CTRL		0x70
#define ESDHC_STROBE_DLL_CTRL_ENABLE	(1 << 0)
#define ESDHC_STROBE_DLL_CTRL_RESET	(1 << 1)
#define ESDHC_STROBE_DLL_CTRL_SLV_DLY_TARGET_DEFAULT	0x7
#define ESDHC_STROBE_DLL_CTRL_SLV_DLY_TARGET_SHIFT	3
#define ESDHC_STROBE_DLL_CTRL_SLV_UPDATE_INT_DEFAULT	(4 << 20)

#define ESDHC_STROBE_DLL_STATUS		0x74
#define ESDHC_STROBE_DLL_STS_REF_LOCK	(1 << 1)
#define ESDHC_STROBE_DLL_STS_SLV_LOCK	0x1

#define ESDHC_VEND_SPEC2		0xc8
#define ESDHC_VEND_SPEC2_EN_BUSY_IRQ	(1 << 8)

#define ESDHC_TUNING_CTRL		0xcc
#define ESDHC_STD_TUNING_EN		(1 << 24)
/* NOTE: the minimum valid tuning start tap for mx6sl is 1 */
#define ESDHC_TUNING_START_TAP_DEFAULT	0x1
#define ESDHC_TUNING_START_TAP_MASK	0x7f
#define ESDHC_TUNING_CMD_CRC_CHECK_DISABLE	(1 << 7)
#define ESDHC_TUNING_STEP_MASK		0x00070000
#define ESDHC_TUNING_STEP_SHIFT		16

/* pinctrl state */
#define ESDHC_PINCTRL_STATE_100MHZ	"state_100mhz"
#define ESDHC_PINCTRL_STATE_200MHZ	"state_200mhz"

/*
 * Our interpretation of the SDHCI_HOST_CONTROL register
 */
#define ESDHC_CTRL_4BITBUS		(0x1 << 1)
#define ESDHC_CTRL_8BITBUS		(0x2 << 1)
#define ESDHC_CTRL_BUSWIDTH_MASK	(0x3 << 1)

/*
 * There is an INT DMA ERR mismatch between eSDHC and the standard SDHC spec:
 * bit25 is used in the standard spec but is reserved in the fsl eSDHC design,
 * while bit28 is used as the INT DMA ERR in the fsl eSDHC design.
 * Define this macro for the fsl eSDHC DMA error interrupt.
 */
#define ESDHC_INT_VENDOR_SPEC_DMA_ERR	(1 << 28)

/* the address offset of CQHCI */
#define ESDHC_CQHCI_ADDR_OFFSET		0x100

/*
 * The CMDTYPE of the CMD register (offset 0xE) should be set to
 * "11" when the STOP CMD12 is issued on imx53 to abort one
 * open ended multi-blk IO. Otherwise the TC INT wouldn't
 * be generated.
 * In exact block transfer, the controller doesn't complete the
 * operations automatically as required at the end of the
 * transfer and remains on hold if the abort command is not sent.
 * As a result, the TC flag is not asserted and software receives a
 * timeout exception. Bit1 of the Vendor Spec register is used to fix it.
 */
#define ESDHC_FLAG_MULTIBLK_NO_INT	BIT(1)
/*
 * This flag tells that the ESDHC controller is a USDHC block that is
 * integrated on the i.MX6 series.
 */
#define ESDHC_FLAG_USDHC		BIT(3)
/* The IP supports manual tuning process */
#define ESDHC_FLAG_MAN_TUNING		BIT(4)
/* The IP supports standard tuning process */
#define ESDHC_FLAG_STD_TUNING		BIT(5)
/* The IP has SDHCI_CAPABILITIES_1 register */
#define ESDHC_FLAG_HAVE_CAP1		BIT(6)
/*
 * The IP has erratum ERR004536
 * uSDHC: ADMA Length Mismatch Error occurs if the AHB read access is slow,
 * when reading data from the card
 * This flag is also set for i.MX25 and i.MX35 in order to get
 * SDHCI_QUIRK_BROKEN_ADMA, but for different reasons (ADMA capability bits).
 */
#define ESDHC_FLAG_ERR004536		BIT(7)
/* The IP supports HS200 mode */
#define ESDHC_FLAG_HS200		BIT(8)
/* The IP supports HS400 mode */
#define ESDHC_FLAG_HS400		BIT(9)
/*
 * The IP has erratum ERR010450
 * uSDHC: Due to the I/O timing limit, for SDR mode, SD card clock can't
 * exceed 150MHz, for DDR mode, SD card clock can't exceed 45MHz.
 */
#define ESDHC_FLAG_ERR010450		BIT(10)
/* The IP supports HS400ES mode */
#define ESDHC_FLAG_HS400_ES		BIT(11)
/* The IP has Host Controller Interface for Command Queuing */
#define ESDHC_FLAG_CQHCI		BIT(12)
/* need request pmqos during low power */
#define ESDHC_FLAG_PMQOS		BIT(13)
/* The IP state got lost in low power mode */
#define ESDHC_FLAG_STATE_LOST_IN_LPMODE		BIT(14)
/* The IP lost clock rate in PM_RUNTIME */
#define ESDHC_FLAG_CLK_RATE_LOST_IN_PM_RUNTIME	BIT(15)
/*
 * The IP does not support the ACMD23 feature completely when using ADMA mode.
 * In ADMA mode, it only uses the 16-bit block count of register 0x4
 * (BLOCK_ATT) as the CMD23 argument for ACMD23 mode, which means it ignores
 * the upper 16 bits of the CMD23 argument. This blocks reliable write
 * operations in RPMB, because an RPMB reliable write needs to set bit31 of
 * the CMD23 argument.
 * imx6qpdl/imx6sx/imx6sl/imx7d have this limitation only for ADMA mode; SDMA
 * does not. So when these SoCs use ADMA mode, the ACMD23 feature needs to be
 * disabled.
 */
#define ESDHC_FLAG_BROKEN_AUTO_CMD23	BIT(16)

struct esdhc_soc_data {
	u32 flags;
};

static const struct esdhc_soc_data esdhc_imx25_data = {
	.flags = ESDHC_FLAG_ERR004536,
};

static const struct esdhc_soc_data esdhc_imx35_data = {
	.flags = ESDHC_FLAG_ERR004536,
};

static const struct esdhc_soc_data esdhc_imx51_data = {
	.flags = 0,
};

static const struct esdhc_soc_data esdhc_imx53_data = {
	.flags = ESDHC_FLAG_MULTIBLK_NO_INT,
};

static const struct esdhc_soc_data usdhc_imx6q_data = {
	.flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_MAN_TUNING
			| ESDHC_FLAG_BROKEN_AUTO_CMD23,
};

static const struct esdhc_soc_data usdhc_imx6sl_data = {
	.flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
			| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_ERR004536
			| ESDHC_FLAG_HS200
			| ESDHC_FLAG_BROKEN_AUTO_CMD23,
};

static const struct esdhc_soc_data usdhc_imx6sll_data = {
	.flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
			| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
			| ESDHC_FLAG_HS400
			| ESDHC_FLAG_STATE_LOST_IN_LPMODE,
};

static const struct esdhc_soc_data usdhc_imx6sx_data = {
	.flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
			| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
			| ESDHC_FLAG_STATE_LOST_IN_LPMODE
			| ESDHC_FLAG_BROKEN_AUTO_CMD23,
};

static const struct esdhc_soc_data usdhc_imx6ull_data = {
	.flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
			| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
			| ESDHC_FLAG_ERR010450
			| ESDHC_FLAG_STATE_LOST_IN_LPMODE,
};

static const struct esdhc_soc_data usdhc_imx7d_data = {
	.flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
			| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
			| ESDHC_FLAG_HS400
			| ESDHC_FLAG_STATE_LOST_IN_LPMODE
			| ESDHC_FLAG_BROKEN_AUTO_CMD23,
};

static struct esdhc_soc_data usdhc_imx7ulp_data = {
	.flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
			| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
			| ESDHC_FLAG_PMQOS | ESDHC_FLAG_HS400
			| ESDHC_FLAG_STATE_LOST_IN_LPMODE,
};

static struct esdhc_soc_data usdhc_imx8qxp_data = {
	.flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
			| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
			| ESDHC_FLAG_HS400 | ESDHC_FLAG_HS400_ES
			| ESDHC_FLAG_STATE_LOST_IN_LPMODE
			| ESDHC_FLAG_CLK_RATE_LOST_IN_PM_RUNTIME,
};

static struct esdhc_soc_data usdhc_imx8mm_data = {
	.flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
			| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
			| ESDHC_FLAG_HS400 | ESDHC_FLAG_HS400_ES
			| ESDHC_FLAG_STATE_LOST_IN_LPMODE,
};

struct pltfm_imx_data {
	u32 scratchpad;
	struct pinctrl *pinctrl;
	struct pinctrl_state *pins_100mhz;
	struct pinctrl_state *pins_200mhz;
	const struct esdhc_soc_data *socdata;
	struct esdhc_platform_data boarddata;
	struct clk *clk_ipg;
	struct clk *clk_ahb;
	struct clk *clk_per;
	unsigned int actual_clock;
	enum {
		NO_CMD_PENDING,      /* no multiblock command pending */
		MULTIBLK_IN_PROCESS, /* exact multiblock cmd in process */
		WAIT_FOR_INT,        /* sent CMD12, waiting for response INT */
	} multiblock_status;
	u32 is_ddr;
	struct pm_qos_request pm_qos_req;
};

static const struct platform_device_id imx_esdhc_devtype[] = {
	{
		.name = "sdhci-esdhc-imx25",
		.driver_data = (kernel_ulong_t) &esdhc_imx25_data,
	}, {
		.name = "sdhci-esdhc-imx35",
		.driver_data = (kernel_ulong_t) &esdhc_imx35_data,
	}, {
		.name = "sdhci-esdhc-imx51",
		.driver_data = (kernel_ulong_t) &esdhc_imx51_data,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(platform, imx_esdhc_devtype);

static const struct of_device_id imx_esdhc_dt_ids[] = {
	{ .compatible = "fsl,imx25-esdhc", .data = &esdhc_imx25_data, },
	{ .compatible = "fsl,imx35-esdhc", .data = &esdhc_imx35_data, },
	{ .compatible = "fsl,imx51-esdhc", .data = &esdhc_imx51_data, },
	{ .compatible = "fsl,imx53-esdhc", .data = &esdhc_imx53_data, },
	{ .compatible = "fsl,imx6sx-usdhc", .data = &usdhc_imx6sx_data, },
	{ .compatible = "fsl,imx6sl-usdhc", .data = &usdhc_imx6sl_data, },
	{ .compatible = "fsl,imx6sll-usdhc", .data = &usdhc_imx6sll_data, },
	{ .compatible = "fsl,imx6q-usdhc", .data = &usdhc_imx6q_data, },
	{ .compatible = "fsl,imx6ull-usdhc", .data = &usdhc_imx6ull_data, },
	{ .compatible = "fsl,imx7d-usdhc", .data = &usdhc_imx7d_data, },
	{ .compatible = "fsl,imx7ulp-usdhc", .data = &usdhc_imx7ulp_data, },
	{ .compatible = "fsl,imx8qxp-usdhc", .data = &usdhc_imx8qxp_data, },
	{ .compatible = "fsl,imx8mm-usdhc", .data = &usdhc_imx8mm_data, },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, imx_esdhc_dt_ids);

static inline int is_imx25_esdhc(struct pltfm_imx_data *data)
{
	return data->socdata == &esdhc_imx25_data;
}

static inline int is_imx53_esdhc(struct pltfm_imx_data *data)
{
	return data->socdata == &esdhc_imx53_data;
}

static inline int is_imx6q_usdhc(struct pltfm_imx_data *data)
{
	return data->socdata == &usdhc_imx6q_data;
}

static inline int esdhc_is_usdhc(struct pltfm_imx_data *data)
{
	return !!(data->socdata->flags & ESDHC_FLAG_USDHC);
}

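/*
 * The eSDHC/uSDHC register file only supports 32-bit accesses, so byte and
 * word wide SDHCI accesses are emulated: read the containing 32-bit word,
 * replace the addressed byte lane(s) and write the word back. For example,
 * esdhc_clrset_le(host, 0xff, val, reg) updates just the byte at 'reg'.
 */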
351static inline void esdhc_clrset_le(struct sdhci_host *host, u32 mask, u32 val, int reg)
352{
353 void __iomem *base = host->ioaddr + (reg & ~0x3);
354 u32 shift = (reg & 0x3) * 8;
355
356 writel(((readl(base) & ~(mask << shift)) | (val << shift)), base);
357}
358
Olivier Deprez157378f2022-04-04 15:47:50 +0200359#define DRIVER_NAME "sdhci-esdhc-imx"
360#define ESDHC_IMX_DUMP(f, x...) \
361 pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)
362static void esdhc_dump_debug_regs(struct sdhci_host *host)
363{
364 int i;
365 char *debug_status[7] = {
366 "cmd debug status",
367 "data debug status",
368 "trans debug status",
369 "dma debug status",
370 "adma debug status",
371 "fifo debug status",
372 "async fifo debug status"
373 };
374
375 ESDHC_IMX_DUMP("========= ESDHC IMX DEBUG STATUS DUMP =========\n");
376 for (i = 0; i < 7; i++) {
377 esdhc_clrset_le(host, ESDHC_DEBUG_SEL_MASK,
378 ESDHC_DEBUG_SEL_CMD_STATE + i, ESDHC_DEBUG_SEL_REG);
379 ESDHC_IMX_DUMP("%s: 0x%04x\n", debug_status[i],
380 readw(host->ioaddr + ESDHC_DEBUG_SEL_AND_STATUS_REG));
381 }
382
383 esdhc_clrset_le(host, ESDHC_DEBUG_SEL_MASK, 0, ESDHC_DEBUG_SEL_REG);
384
385}
386
387static inline void esdhc_wait_for_card_clock_gate_off(struct sdhci_host *host)
388{
389 u32 present_state;
390 int ret;
391
392 ret = readl_poll_timeout(host->ioaddr + ESDHC_PRSSTAT, present_state,
393 (present_state & ESDHC_CLOCK_GATE_OFF), 2, 100);
394 if (ret == -ETIMEDOUT)
395 dev_warn(mmc_dev(host->mmc), "%s: card clock still not gate off in 100us!.\n", __func__);
396}
397
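/*
 * Register read shim: translate the eSDHC/uSDHC register layout into the
 * standard SDHCI layout the core expects (present-state bit positions,
 * capability bits, vendor-specific DMA error interrupt, ...).
 */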
static u32 esdhc_readl_le(struct sdhci_host *host, int reg)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
	u32 val = readl(host->ioaddr + reg);

	if (unlikely(reg == SDHCI_PRESENT_STATE)) {
		u32 fsl_prss = val;
		/* keep the lower 20 bits */
		val = fsl_prss & 0x000FFFFF;
		/* move dat[0-3] bits */
		val |= (fsl_prss & 0x0F000000) >> 4;
		/* move cmd line bit */
		val |= (fsl_prss & 0x00800000) << 1;
	}

	if (unlikely(reg == SDHCI_CAPABILITIES)) {
		/* ignore bit[0-15] as it stores cap_1 register val for mx6sl */
		if (imx_data->socdata->flags & ESDHC_FLAG_HAVE_CAP1)
			val &= 0xffff0000;

		/* In the FSL esdhc IP, only bit20 is used to indicate the
		 * ADMA2 capability of esdhc, but this bit is messed up on
		 * some SoCs (e.g. on MX25, MX35 this bit is set, but they
		 * don't actually support ADMA2). So set the BROKEN_ADMA
		 * quirk on MX25/35 platforms.
		 */

		if (val & SDHCI_CAN_DO_ADMA1) {
			val &= ~SDHCI_CAN_DO_ADMA1;
			val |= SDHCI_CAN_DO_ADMA2;
		}
	}

	if (unlikely(reg == SDHCI_CAPABILITIES_1)) {
		if (esdhc_is_usdhc(imx_data)) {
			if (imx_data->socdata->flags & ESDHC_FLAG_HAVE_CAP1)
				val = readl(host->ioaddr + SDHCI_CAPABILITIES) & 0xFFFF;
			else
				/* imx6q/dl does not have cap_1 register, fake one */
				val = SDHCI_SUPPORT_DDR50 | SDHCI_SUPPORT_SDR104
					| SDHCI_SUPPORT_SDR50
					| SDHCI_USE_SDR50_TUNING
					| FIELD_PREP(SDHCI_RETUNING_MODE_MASK,
						     SDHCI_TUNING_MODE_3);

			if (imx_data->socdata->flags & ESDHC_FLAG_HS400)
				val |= SDHCI_SUPPORT_HS400;

			/*
			 * Do not advertise faster UHS modes if there are no
			 * pinctrl states for 100MHz/200MHz.
			 */
			if (IS_ERR_OR_NULL(imx_data->pins_100mhz) ||
			    IS_ERR_OR_NULL(imx_data->pins_200mhz))
				val &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_DDR50
					 | SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_HS400);
		}
	}

	if (unlikely(reg == SDHCI_MAX_CURRENT) && esdhc_is_usdhc(imx_data)) {
		val = 0;
		val |= FIELD_PREP(SDHCI_MAX_CURRENT_330_MASK, 0xFF);
		val |= FIELD_PREP(SDHCI_MAX_CURRENT_300_MASK, 0xFF);
		val |= FIELD_PREP(SDHCI_MAX_CURRENT_180_MASK, 0xFF);
	}

	if (unlikely(reg == SDHCI_INT_STATUS)) {
		if (val & ESDHC_INT_VENDOR_SPEC_DMA_ERR) {
			val &= ~ESDHC_INT_VENDOR_SPEC_DMA_ERR;
			val |= SDHCI_INT_ADMA_ERROR;
		}

		/*
		 * mask off the interrupt we get in response to the manually
		 * sent CMD12
		 */
		if ((imx_data->multiblock_status == WAIT_FOR_INT) &&
		    ((val & SDHCI_INT_RESPONSE) == SDHCI_INT_RESPONSE)) {
			val &= ~SDHCI_INT_RESPONSE;
			writel(SDHCI_INT_RESPONSE, host->ioaddr +
						   SDHCI_INT_STATUS);
			imx_data->multiblock_status = NO_CMD_PENDING;
		}
	}

	return val;
}

static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
	u32 data;

	if (unlikely(reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE ||
			reg == SDHCI_INT_STATUS)) {
		if ((val & SDHCI_INT_CARD_INT) && !esdhc_is_usdhc(imx_data)) {
			/*
			 * Clear and then set the D3CD bit to avoid missing the
			 * card interrupt. This is an eSDHC controller problem,
			 * so we need to apply the following workaround:
			 * clearing and setting the D3CD bit makes the eSDHC
			 * re-sample the card interrupt. In case a card
			 * interrupt was lost, re-sample it by these steps.
			 */
			data = readl(host->ioaddr + SDHCI_HOST_CONTROL);
			data &= ~ESDHC_CTRL_D3CD;
			writel(data, host->ioaddr + SDHCI_HOST_CONTROL);
			data |= ESDHC_CTRL_D3CD;
			writel(data, host->ioaddr + SDHCI_HOST_CONTROL);
		}

		if (val & SDHCI_INT_ADMA_ERROR) {
			val &= ~SDHCI_INT_ADMA_ERROR;
			val |= ESDHC_INT_VENDOR_SPEC_DMA_ERR;
		}
	}

	if (unlikely((imx_data->socdata->flags & ESDHC_FLAG_MULTIBLK_NO_INT)
				&& (reg == SDHCI_INT_STATUS)
				&& (val & SDHCI_INT_DATA_END))) {
		u32 v;
		v = readl(host->ioaddr + ESDHC_VENDOR_SPEC);
		v &= ~ESDHC_VENDOR_SPEC_SDIO_QUIRK;
		writel(v, host->ioaddr + ESDHC_VENDOR_SPEC);

		if (imx_data->multiblock_status == MULTIBLK_IN_PROCESS) {
			/* send a manual CMD12 with RESPTYP=none */
			data = MMC_STOP_TRANSMISSION << 24 |
			       SDHCI_CMD_ABORTCMD << 16;
			writel(data, host->ioaddr + SDHCI_TRANSFER_MODE);
			imx_data->multiblock_status = WAIT_FOR_INT;
		}
	}

	writel(val, host->ioaddr + reg);
}

static u16 esdhc_readw_le(struct sdhci_host *host, int reg)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
	u16 ret = 0;
	u32 val;

	if (unlikely(reg == SDHCI_HOST_VERSION)) {
		reg ^= 2;
		if (esdhc_is_usdhc(imx_data)) {
			/*
			 * The usdhc register returns a wrong host version.
			 * Correct it here.
			 */
			return SDHCI_SPEC_300;
		}
	}

	if (unlikely(reg == SDHCI_HOST_CONTROL2)) {
		val = readl(host->ioaddr + ESDHC_VENDOR_SPEC);
		if (val & ESDHC_VENDOR_SPEC_VSELECT)
			ret |= SDHCI_CTRL_VDD_180;

		if (esdhc_is_usdhc(imx_data)) {
			if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING)
				val = readl(host->ioaddr + ESDHC_MIX_CTRL);
			else if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING)
				/* the std tuning bits are in ACMD12_ERR for imx6sl */
				val = readl(host->ioaddr + SDHCI_AUTO_CMD_STATUS);
		}

		if (val & ESDHC_MIX_CTRL_EXE_TUNE)
			ret |= SDHCI_CTRL_EXEC_TUNING;
		if (val & ESDHC_MIX_CTRL_SMPCLK_SEL)
			ret |= SDHCI_CTRL_TUNED_CLK;

		ret &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;

		return ret;
	}

	if (unlikely(reg == SDHCI_TRANSFER_MODE)) {
		if (esdhc_is_usdhc(imx_data)) {
			u32 m = readl(host->ioaddr + ESDHC_MIX_CTRL);
			ret = m & ESDHC_MIX_CTRL_SDHCI_MASK;
			/* Swap AC23 bit */
			if (m & ESDHC_MIX_CTRL_AC23EN) {
				ret &= ~ESDHC_MIX_CTRL_AC23EN;
				ret |= SDHCI_TRNS_AUTO_CMD23;
			}
		} else {
			ret = readw(host->ioaddr + SDHCI_TRANSFER_MODE);
		}

		return ret;
	}

	return readw(host->ioaddr + reg);
}

static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
	u32 new_val = 0;

	switch (reg) {
	case SDHCI_CLOCK_CONTROL:
		new_val = readl(host->ioaddr + ESDHC_VENDOR_SPEC);
		if (val & SDHCI_CLOCK_CARD_EN)
			new_val |= ESDHC_VENDOR_SPEC_FRC_SDCLK_ON;
		else
			new_val &= ~ESDHC_VENDOR_SPEC_FRC_SDCLK_ON;
		writel(new_val, host->ioaddr + ESDHC_VENDOR_SPEC);
		if (!(new_val & ESDHC_VENDOR_SPEC_FRC_SDCLK_ON))
			esdhc_wait_for_card_clock_gate_off(host);
		return;
	case SDHCI_HOST_CONTROL2:
		new_val = readl(host->ioaddr + ESDHC_VENDOR_SPEC);
		if (val & SDHCI_CTRL_VDD_180)
			new_val |= ESDHC_VENDOR_SPEC_VSELECT;
		else
			new_val &= ~ESDHC_VENDOR_SPEC_VSELECT;
		writel(new_val, host->ioaddr + ESDHC_VENDOR_SPEC);
		if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) {
			new_val = readl(host->ioaddr + ESDHC_MIX_CTRL);
			if (val & SDHCI_CTRL_TUNED_CLK) {
				new_val |= ESDHC_MIX_CTRL_SMPCLK_SEL;
				new_val |= ESDHC_MIX_CTRL_AUTO_TUNE_EN;
			} else {
				new_val &= ~ESDHC_MIX_CTRL_SMPCLK_SEL;
				new_val &= ~ESDHC_MIX_CTRL_AUTO_TUNE_EN;
			}
			writel(new_val, host->ioaddr + ESDHC_MIX_CTRL);
		} else if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) {
			u32 v = readl(host->ioaddr + SDHCI_AUTO_CMD_STATUS);
			u32 m = readl(host->ioaddr + ESDHC_MIX_CTRL);
			if (val & SDHCI_CTRL_TUNED_CLK) {
				v |= ESDHC_MIX_CTRL_SMPCLK_SEL;
			} else {
				v &= ~ESDHC_MIX_CTRL_SMPCLK_SEL;
				m &= ~ESDHC_MIX_CTRL_FBCLK_SEL;
				m &= ~ESDHC_MIX_CTRL_AUTO_TUNE_EN;
			}

			if (val & SDHCI_CTRL_EXEC_TUNING) {
				v |= ESDHC_MIX_CTRL_EXE_TUNE;
				m |= ESDHC_MIX_CTRL_FBCLK_SEL;
				m |= ESDHC_MIX_CTRL_AUTO_TUNE_EN;
			} else {
				v &= ~ESDHC_MIX_CTRL_EXE_TUNE;
			}

			writel(v, host->ioaddr + SDHCI_AUTO_CMD_STATUS);
			writel(m, host->ioaddr + ESDHC_MIX_CTRL);
		}
		return;
	case SDHCI_TRANSFER_MODE:
		if ((imx_data->socdata->flags & ESDHC_FLAG_MULTIBLK_NO_INT)
				&& (host->cmd->opcode == SD_IO_RW_EXTENDED)
				&& (host->cmd->data->blocks > 1)
				&& (host->cmd->data->flags & MMC_DATA_READ)) {
			u32 v;
			v = readl(host->ioaddr + ESDHC_VENDOR_SPEC);
			v |= ESDHC_VENDOR_SPEC_SDIO_QUIRK;
			writel(v, host->ioaddr + ESDHC_VENDOR_SPEC);
		}

		if (esdhc_is_usdhc(imx_data)) {
			u32 wml;
			u32 m = readl(host->ioaddr + ESDHC_MIX_CTRL);
			/* Swap AC23 bit */
			if (val & SDHCI_TRNS_AUTO_CMD23) {
				val &= ~SDHCI_TRNS_AUTO_CMD23;
				val |= ESDHC_MIX_CTRL_AC23EN;
			}
			m = val | (m & ~ESDHC_MIX_CTRL_SDHCI_MASK);
			writel(m, host->ioaddr + ESDHC_MIX_CTRL);

			/* Set watermark levels for PIO access to maximum value
			 * (128 words) to accommodate a full 512 byte buffer.
			 * For DMA access restore the levels to the default value.
			 */
			m = readl(host->ioaddr + ESDHC_WTMK_LVL);
			if (val & SDHCI_TRNS_DMA) {
				wml = ESDHC_WTMK_LVL_WML_VAL_DEF;
			} else {
				u8 ctrl;
				wml = ESDHC_WTMK_LVL_WML_VAL_MAX;

				/*
				 * DMA mode is disabled here, so DMASEL must be
				 * cleared as well. Otherwise, during standard
				 * tuning, the uSDHC would still prefetch the
				 * ADMA script from a stale DMA address when the
				 * tuning command is sent, and the IOMMU would
				 * report errors about a missing TLB mapping.
				 */
				ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
				ctrl &= ~SDHCI_CTRL_DMA_MASK;
				sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
			}
			m &= ~(ESDHC_WTMK_LVL_RD_WML_MASK |
			       ESDHC_WTMK_LVL_WR_WML_MASK);
			m |= (wml << ESDHC_WTMK_LVL_RD_WML_SHIFT) |
			     (wml << ESDHC_WTMK_LVL_WR_WML_SHIFT);
			writel(m, host->ioaddr + ESDHC_WTMK_LVL);
		} else {
			/*
			 * Postpone this write, we must do it together with a
			 * command write that is down below.
			 */
			imx_data->scratchpad = val;
		}
		return;
	case SDHCI_COMMAND:
		if (host->cmd->opcode == MMC_STOP_TRANSMISSION)
			val |= SDHCI_CMD_ABORTCMD;

		if ((host->cmd->opcode == MMC_SET_BLOCK_COUNT) &&
		    (imx_data->socdata->flags & ESDHC_FLAG_MULTIBLK_NO_INT))
			imx_data->multiblock_status = MULTIBLK_IN_PROCESS;

		if (esdhc_is_usdhc(imx_data))
			writel(val << 16,
			       host->ioaddr + SDHCI_TRANSFER_MODE);
		else
			writel(val << 16 | imx_data->scratchpad,
			       host->ioaddr + SDHCI_TRANSFER_MODE);
		return;
	case SDHCI_BLOCK_SIZE:
		val &= ~SDHCI_MAKE_BLKSZ(0x7, 0);
		break;
	}
	esdhc_clrset_le(host, 0xffff, val, reg);
}

static u8 esdhc_readb_le(struct sdhci_host *host, int reg)
{
	u8 ret;
	u32 val;

	switch (reg) {
	case SDHCI_HOST_CONTROL:
		val = readl(host->ioaddr + reg);

		ret = val & SDHCI_CTRL_LED;
		ret |= (val >> 5) & SDHCI_CTRL_DMA_MASK;
		ret |= (val & ESDHC_CTRL_4BITBUS);
		ret |= (val & ESDHC_CTRL_8BITBUS) << 3;
		return ret;
	}

	return readb(host->ioaddr + reg);
}

static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
	u32 new_val = 0;
	u32 mask;

	switch (reg) {
	case SDHCI_POWER_CONTROL:
		/*
		 * FSL put some DMA bits here
		 * If your board has a regulator, code should be here
		 */
		return;
	case SDHCI_HOST_CONTROL:
		/* FSL messed up here, so we need to manually compose it. */
		new_val = val & SDHCI_CTRL_LED;
		/* ensure the endianness */
		new_val |= ESDHC_HOST_CONTROL_LE;
		/* bits 8&9 are reserved on mx25 */
		if (!is_imx25_esdhc(imx_data)) {
			/* DMA mode bits are shifted */
			new_val |= (val & SDHCI_CTRL_DMA_MASK) << 5;
		}

		/*
		 * Do not touch buswidth bits here. This is done in
		 * esdhc_pltfm_bus_width.
		 * Do not touch the D3CD bit either, which is used for the
		 * SDIO interrupt erratum workaround.
		 */
		mask = 0xffff & ~(ESDHC_CTRL_BUSWIDTH_MASK | ESDHC_CTRL_D3CD);

		esdhc_clrset_le(host, mask, new_val, reg);
		return;
	case SDHCI_SOFTWARE_RESET:
		if (val & SDHCI_RESET_DATA)
			new_val = readl(host->ioaddr + SDHCI_HOST_CONTROL);
		break;
	}
	esdhc_clrset_le(host, 0xff, val, reg);

	if (reg == SDHCI_SOFTWARE_RESET) {
		if (val & SDHCI_RESET_ALL) {
			/*
			 * The esdhc has a design violation of the SDHC spec,
			 * which says that a software reset should not affect
			 * the card detection circuit. The esdhc clears its
			 * SYSCTL register bits [0..2] during the software
			 * reset, which stops the clocks the card detection
			 * circuit relies on. To work around it, we turn those
			 * clocks back on to keep card detection functional.
			 */
			esdhc_clrset_le(host, 0x7, 0x7, ESDHC_SYSTEM_CONTROL);
			/*
			 * The reset on usdhc fails to clear MIX_CTRL register.
			 * Do it manually here.
			 */
			if (esdhc_is_usdhc(imx_data)) {
				/*
				 * the tuning bits should be kept during reset
				 */
				new_val = readl(host->ioaddr + ESDHC_MIX_CTRL);
				writel(new_val & ESDHC_MIX_CTRL_TUNING_MASK,
						host->ioaddr + ESDHC_MIX_CTRL);
				imx_data->is_ddr = 0;
			}
		} else if (val & SDHCI_RESET_DATA) {
			/*
			 * The eSDHC DAT line software reset clears at least the
			 * data transfer width on i.MX25, so make sure that the
			 * Host Control register is unaffected.
			 */
			esdhc_clrset_le(host, 0xff, new_val,
					SDHCI_HOST_CONTROL);
		}
	}
}

static unsigned int esdhc_pltfm_get_max_clock(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);

	return pltfm_host->clock;
}

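/*
 * The lowest SD clock the divider chain can produce: the base clock divided
 * by the maximum prescaler (256) and the maximum divisor (16).
 */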
static unsigned int esdhc_pltfm_get_min_clock(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);

	return pltfm_host->clock / 256 / 16;
}

static inline void esdhc_pltfm_set_clock(struct sdhci_host *host,
					 unsigned int clock)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
	unsigned int host_clock = pltfm_host->clock;
	int ddr_pre_div = imx_data->is_ddr ? 2 : 1;
	int pre_div = 1;
	int div = 1;
	int ret;
	u32 temp, val;

	if (esdhc_is_usdhc(imx_data)) {
		val = readl(host->ioaddr + ESDHC_VENDOR_SPEC);
		writel(val & ~ESDHC_VENDOR_SPEC_FRC_SDCLK_ON,
			host->ioaddr + ESDHC_VENDOR_SPEC);
		esdhc_wait_for_card_clock_gate_off(host);
	}

	if (clock == 0) {
		host->mmc->actual_clock = 0;
		return;
	}

	/* For i.MX53 eSDHCv3, SYSCTL.SDCLKFS may not be set to 0. */
	if (is_imx53_esdhc(imx_data)) {
		/*
		 * According to the i.MX53 reference manual, if DLLCTRL[10] can
		 * be set, then the controller is eSDHCv3, else it is eSDHCv2.
		 */
		val = readl(host->ioaddr + ESDHC_DLL_CTRL);
		writel(val | BIT(10), host->ioaddr + ESDHC_DLL_CTRL);
		temp = readl(host->ioaddr + ESDHC_DLL_CTRL);
		writel(val, host->ioaddr + ESDHC_DLL_CTRL);
		if (temp & BIT(10))
			pre_div = 2;
	}

	temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
	temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN
		| ESDHC_CLOCK_MASK);
	sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);

	if (imx_data->socdata->flags & ESDHC_FLAG_ERR010450) {
		unsigned int max_clock;

		max_clock = imx_data->is_ddr ? 45000000 : 150000000;

		clock = min(clock, max_clock);
	}

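	/*
	 * Pick the smallest prescaler/divisor pair that does not overshoot
	 * the requested rate: SDCLK = host_clock / (pre_div * div), with an
	 * extra fixed /2 in DDR mode.
	 */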
	while (host_clock / (16 * pre_div * ddr_pre_div) > clock &&
			pre_div < 256)
		pre_div *= 2;

	while (host_clock / (div * pre_div * ddr_pre_div) > clock && div < 16)
		div++;

	host->mmc->actual_clock = host_clock / (div * pre_div * ddr_pre_div);
	dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n",
		clock, host->mmc->actual_clock);

	pre_div >>= 1;
	div--;

	temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
	temp |= (ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN
		| (div << ESDHC_DIVIDER_SHIFT)
		| (pre_div << ESDHC_PREDIV_SHIFT));
	sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);

	/* wait for PRSSTAT bit 3 to be set, to make sure the card clock is stable */
	ret = readl_poll_timeout(host->ioaddr + ESDHC_PRSSTAT, temp,
				 (temp & ESDHC_CLOCK_STABLE), 2, 100);
	if (ret == -ETIMEDOUT)
		dev_warn(mmc_dev(host->mmc), "card clock still not stable after 100us!\n");

	if (esdhc_is_usdhc(imx_data)) {
		val = readl(host->ioaddr + ESDHC_VENDOR_SPEC);
		writel(val | ESDHC_VENDOR_SPEC_FRC_SDCLK_ON,
			host->ioaddr + ESDHC_VENDOR_SPEC);
	}
}

static unsigned int esdhc_pltfm_get_ro(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
	struct esdhc_platform_data *boarddata = &imx_data->boarddata;

	switch (boarddata->wp_type) {
	case ESDHC_WP_GPIO:
		return mmc_gpio_get_ro(host->mmc);
	case ESDHC_WP_CONTROLLER:
		return !(readl(host->ioaddr + SDHCI_PRESENT_STATE) &
			 SDHCI_WRITE_PROTECT);
	case ESDHC_WP_NONE:
		break;
	}

	return -ENOSYS;
}

static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width)
{
	u32 ctrl;

	switch (width) {
	case MMC_BUS_WIDTH_8:
		ctrl = ESDHC_CTRL_8BITBUS;
		break;
	case MMC_BUS_WIDTH_4:
		ctrl = ESDHC_CTRL_4BITBUS;
		break;
	default:
		ctrl = 0;
		break;
	}

	esdhc_clrset_le(host, ESDHC_CTRL_BUSWIDTH_MASK, ctrl,
			SDHCI_HOST_CONTROL);
}

static int usdhc_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct sdhci_host *host = mmc_priv(mmc);

	/*
	 * The i.MX uSDHC internally already uses a fixed, optimized timing
	 * for DDR50, so it normally does not require tuning for DDR50 mode.
	 */
	if (host->timing == MMC_TIMING_UHS_DDR50)
		return 0;

	return sdhci_execute_tuning(mmc, opcode);
}

static void esdhc_prepare_tuning(struct sdhci_host *host, u32 val)
{
	u32 reg;
	u8 sw_rst;
	int ret;

	/* FIXME: delay a bit for card to be ready for next tuning due to errors */
	mdelay(1);

	/* reset the USDHC before every tuning command, as suggested by the IC vendor */
	esdhc_clrset_le(host, 0xff, SDHCI_RESET_ALL, SDHCI_SOFTWARE_RESET);
	ret = readb_poll_timeout(host->ioaddr + SDHCI_SOFTWARE_RESET, sw_rst,
				 !(sw_rst & SDHCI_RESET_ALL), 10, 100);
	if (ret == -ETIMEDOUT)
		dev_warn(mmc_dev(host->mmc),
			 "warning! RESET_ALL did not complete before sending the tuning command\n");

	reg = readl(host->ioaddr + ESDHC_MIX_CTRL);
	reg |= ESDHC_MIX_CTRL_EXE_TUNE | ESDHC_MIX_CTRL_SMPCLK_SEL |
			ESDHC_MIX_CTRL_FBCLK_SEL;
	writel(reg, host->ioaddr + ESDHC_MIX_CTRL);
	writel(val << 8, host->ioaddr + ESDHC_TUNE_CTRL_STATUS);
	dev_dbg(mmc_dev(host->mmc),
		"tuning with delay 0x%x ESDHC_TUNE_CTRL_STATUS 0x%x\n",
			val, readl(host->ioaddr + ESDHC_TUNE_CTRL_STATUS));
}

static void esdhc_post_tuning(struct sdhci_host *host)
{
	u32 reg;

	reg = readl(host->ioaddr + ESDHC_MIX_CTRL);
	reg &= ~ESDHC_MIX_CTRL_EXE_TUNE;
	reg |= ESDHC_MIX_CTRL_AUTO_TUNE_EN;
	writel(reg, host->ioaddr + ESDHC_MIX_CTRL);
}

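/*
 * Manual tuning: sweep the delay taps upward to find the first tap that
 * passes and the last consecutive tap that still passes, then program the
 * midpoint of that passing window as the sampling delay.
 */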
static int esdhc_executing_tuning(struct sdhci_host *host, u32 opcode)
{
	int min, max, avg, ret;

	/* find the minimum delay which can pass tuning */
	min = ESDHC_TUNE_CTRL_MIN;
	while (min < ESDHC_TUNE_CTRL_MAX) {
		esdhc_prepare_tuning(host, min);
		if (!mmc_send_tuning(host->mmc, opcode, NULL))
			break;
		min += ESDHC_TUNE_CTRL_STEP;
	}

	/* find the maximum delay which can still pass tuning */
	max = min + ESDHC_TUNE_CTRL_STEP;
	while (max < ESDHC_TUNE_CTRL_MAX) {
		esdhc_prepare_tuning(host, max);
		if (mmc_send_tuning(host->mmc, opcode, NULL)) {
			max -= ESDHC_TUNE_CTRL_STEP;
			break;
		}
		max += ESDHC_TUNE_CTRL_STEP;
	}

	/* use the average delay to get the best timing */
	avg = (min + max) / 2;
	esdhc_prepare_tuning(host, avg);
	ret = mmc_send_tuning(host->mmc, opcode, NULL);
	esdhc_post_tuning(host);

	dev_dbg(mmc_dev(host->mmc), "tuning %s at 0x%x ret %d\n",
		ret ? "failed" : "passed", avg, ret);

	return ret;
}

static void esdhc_hs400_enhanced_strobe(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u32 m;

	m = readl(host->ioaddr + ESDHC_MIX_CTRL);
	if (ios->enhanced_strobe)
		m |= ESDHC_MIX_CTRL_HS400_ES_EN;
	else
		m &= ~ESDHC_MIX_CTRL_HS400_ES_EN;
	writel(m, host->ioaddr + ESDHC_MIX_CTRL);
}

static int esdhc_change_pinstate(struct sdhci_host *host,
				 unsigned int uhs)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
	struct pinctrl_state *pinctrl;

	dev_dbg(mmc_dev(host->mmc), "change pinctrl state for uhs %d\n", uhs);

	if (IS_ERR(imx_data->pinctrl) ||
		IS_ERR(imx_data->pins_100mhz) ||
		IS_ERR(imx_data->pins_200mhz))
		return -EINVAL;

	switch (uhs) {
	case MMC_TIMING_UHS_SDR50:
	case MMC_TIMING_UHS_DDR50:
		pinctrl = imx_data->pins_100mhz;
		break;
	case MMC_TIMING_UHS_SDR104:
	case MMC_TIMING_MMC_HS200:
	case MMC_TIMING_MMC_HS400:
		pinctrl = imx_data->pins_200mhz;
		break;
	default:
		/* back to default state for other legacy timing */
		return pinctrl_select_default_state(mmc_dev(host->mmc));
	}

	return pinctrl_select_state(imx_data->pinctrl, pinctrl);
}

/*
 * For HS400 eMMC, there is a data_strobe line. This signal is generated
 * by the device and used for data output and CRC status response output
 * in HS400 mode. The frequency of this signal follows the frequency of
 * the CLK generated by the host. The host receives the data aligned to the
 * edge of the data_strobe line. Due to the time delay between the CLK line
 * and the data_strobe line, if the delay is larger than one clock cycle,
 * CLK and data_strobe will be misaligned and read errors show up.
 */
static void esdhc_set_strobe_dll(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
	u32 strobe_delay;
	u32 v;
	int ret;

	/* disable clock before enabling strobe dll */
	writel(readl(host->ioaddr + ESDHC_VENDOR_SPEC) &
		~ESDHC_VENDOR_SPEC_FRC_SDCLK_ON,
		host->ioaddr + ESDHC_VENDOR_SPEC);
	esdhc_wait_for_card_clock_gate_off(host);

	/* force a reset on strobe dll */
	writel(ESDHC_STROBE_DLL_CTRL_RESET,
		host->ioaddr + ESDHC_STROBE_DLL_CTRL);
	/* clear the reset bit on strobe dll before any setting */
	writel(0, host->ioaddr + ESDHC_STROBE_DLL_CTRL);

	/*
	 * enable strobe dll ctrl and adjust the delay target
	 * for the uSDHC loopback read clock
	 */
	if (imx_data->boarddata.strobe_dll_delay_target)
		strobe_delay = imx_data->boarddata.strobe_dll_delay_target;
	else
		strobe_delay = ESDHC_STROBE_DLL_CTRL_SLV_DLY_TARGET_DEFAULT;
	v = ESDHC_STROBE_DLL_CTRL_ENABLE |
		ESDHC_STROBE_DLL_CTRL_SLV_UPDATE_INT_DEFAULT |
		(strobe_delay << ESDHC_STROBE_DLL_CTRL_SLV_DLY_TARGET_SHIFT);
	writel(v, host->ioaddr + ESDHC_STROBE_DLL_CTRL);

	/* wait max 50us to get the REF/SLV lock */
	ret = readl_poll_timeout(host->ioaddr + ESDHC_STROBE_DLL_STATUS, v,
		((v & ESDHC_STROBE_DLL_STS_REF_LOCK) && (v & ESDHC_STROBE_DLL_STS_SLV_LOCK)), 1, 50);
	if (ret == -ETIMEDOUT)
		dev_warn(mmc_dev(host->mmc),
			 "warning! HS400 strobe DLL REF/SLV not locked within 50us, STROBE DLL status is %x!\n", v);
}

static void esdhc_reset_tuning(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
	u32 ctrl;
	int ret;

	/* Reset the tuning circuit */
	if (esdhc_is_usdhc(imx_data)) {
		if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) {
			ctrl = readl(host->ioaddr + ESDHC_MIX_CTRL);
			ctrl &= ~ESDHC_MIX_CTRL_SMPCLK_SEL;
			ctrl &= ~ESDHC_MIX_CTRL_FBCLK_SEL;
			writel(ctrl, host->ioaddr + ESDHC_MIX_CTRL);
			writel(0, host->ioaddr + ESDHC_TUNE_CTRL_STATUS);
		} else if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) {
			ctrl = readl(host->ioaddr + SDHCI_AUTO_CMD_STATUS);
			ctrl &= ~ESDHC_MIX_CTRL_SMPCLK_SEL;
			ctrl &= ~ESDHC_MIX_CTRL_EXE_TUNE;
			writel(ctrl, host->ioaddr + SDHCI_AUTO_CMD_STATUS);
			/* Make sure ESDHC_MIX_CTRL_EXE_TUNE is cleared */
			ret = readl_poll_timeout(host->ioaddr + SDHCI_AUTO_CMD_STATUS,
				ctrl, !(ctrl & ESDHC_MIX_CTRL_EXE_TUNE), 1, 50);
			if (ret == -ETIMEDOUT)
				dev_warn(mmc_dev(host->mmc),
					 "Warning! clearing the execute tuning bit failed\n");
			/*
			 * SDHCI_INT_DATA_AVAIL is a W1C bit; setting it clears
			 * the usdhc IP internal flag
			 * execute_tuning_with_clr_buf, which finally makes sure
			 * the normal data transfer logic is correct.
			 */
			ctrl = readl(host->ioaddr + SDHCI_INT_STATUS);
			ctrl |= SDHCI_INT_DATA_AVAIL;
			writel(ctrl, host->ioaddr + SDHCI_INT_STATUS);
		}
	}
}

static void esdhc_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
{
	u32 m;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
	struct esdhc_platform_data *boarddata = &imx_data->boarddata;

	/* disable ddr mode and disable HS400 mode */
	m = readl(host->ioaddr + ESDHC_MIX_CTRL);
	m &= ~(ESDHC_MIX_CTRL_DDREN | ESDHC_MIX_CTRL_HS400_EN);
	imx_data->is_ddr = 0;

	switch (timing) {
	case MMC_TIMING_UHS_SDR12:
	case MMC_TIMING_UHS_SDR25:
	case MMC_TIMING_UHS_SDR50:
	case MMC_TIMING_UHS_SDR104:
	case MMC_TIMING_MMC_HS:
	case MMC_TIMING_MMC_HS200:
		writel(m, host->ioaddr + ESDHC_MIX_CTRL);
		break;
	case MMC_TIMING_UHS_DDR50:
	case MMC_TIMING_MMC_DDR52:
		m |= ESDHC_MIX_CTRL_DDREN;
		writel(m, host->ioaddr + ESDHC_MIX_CTRL);
		imx_data->is_ddr = 1;
		if (boarddata->delay_line) {
			u32 v;
			v = boarddata->delay_line <<
				ESDHC_DLL_OVERRIDE_VAL_SHIFT |
				(1 << ESDHC_DLL_OVERRIDE_EN_SHIFT);
			if (is_imx53_esdhc(imx_data))
				v <<= 1;
			writel(v, host->ioaddr + ESDHC_DLL_CTRL);
		}
		break;
	case MMC_TIMING_MMC_HS400:
		m |= ESDHC_MIX_CTRL_DDREN | ESDHC_MIX_CTRL_HS400_EN;
		writel(m, host->ioaddr + ESDHC_MIX_CTRL);
		imx_data->is_ddr = 1;
		/* update clock after enabling DDR for strobe DLL lock */
		host->ops->set_clock(host, host->clock);
		esdhc_set_strobe_dll(host);
		break;
	case MMC_TIMING_LEGACY:
	default:
		esdhc_reset_tuning(host);
		break;
	}

	esdhc_change_pinstate(host, timing);
}

static void esdhc_reset(struct sdhci_host *host, u8 mask)
{
	sdhci_reset(host, mask);

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static unsigned int esdhc_get_max_timeout_count(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);

	/* Doc Erratum: the uSDHC actual maximum timeout count is 1 << 29 */
	return esdhc_is_usdhc(imx_data) ? 1 << 29 : 1 << 27;
}

static void esdhc_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);

	/* use maximum timeout counter */
	esdhc_clrset_le(host, ESDHC_SYS_CTRL_DTOCV_MASK,
			esdhc_is_usdhc(imx_data) ? 0xF : 0xE,
			SDHCI_TIMEOUT_CONTROL);
}

static u32 esdhc_cqhci_irq(struct sdhci_host *host, u32 intmask)
{
	int cmd_error = 0;
	int data_error = 0;

	if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
		return intmask;

	cqhci_irq(host->mmc, intmask, cmd_error, data_error);

	return 0;
}

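/*
 * These ops wire the generic SDHCI core to the eSDHC/uSDHC register shims
 * and the clocking/tuning helpers defined above.
 */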
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001285static struct sdhci_ops sdhci_esdhc_ops = {
1286 .read_l = esdhc_readl_le,
1287 .read_w = esdhc_readw_le,
1288 .read_b = esdhc_readb_le,
1289 .write_l = esdhc_writel_le,
1290 .write_w = esdhc_writew_le,
1291 .write_b = esdhc_writeb_le,
1292 .set_clock = esdhc_pltfm_set_clock,
1293 .get_max_clock = esdhc_pltfm_get_max_clock,
1294 .get_min_clock = esdhc_pltfm_get_min_clock,
1295 .get_max_timeout_count = esdhc_get_max_timeout_count,
1296 .get_ro = esdhc_pltfm_get_ro,
1297 .set_timeout = esdhc_set_timeout,
1298 .set_bus_width = esdhc_pltfm_set_bus_width,
1299 .set_uhs_signaling = esdhc_set_uhs_signaling,
1300 .reset = esdhc_reset,
David Brazdil0f672f62019-12-10 10:32:29 +00001301 .irq = esdhc_cqhci_irq,
Olivier Deprez157378f2022-04-04 15:47:50 +02001302 .dump_vendor_regs = esdhc_dump_debug_regs,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001303};
1304
1305static const struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = {
1306 .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_NO_HISPD_BIT
1307 | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC
1308 | SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC
1309 | SDHCI_QUIRK_BROKEN_CARD_DETECTION,
1310 .ops = &sdhci_esdhc_ops,
1311};
1312
1313static void sdhci_esdhc_imx_hwinit(struct sdhci_host *host)
1314{
1315 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1316 struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
Olivier Deprez157378f2022-04-04 15:47:50 +02001317 struct cqhci_host *cq_host = host->mmc->cqe_private;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001318 int tmp;
1319
1320 if (esdhc_is_usdhc(imx_data)) {
1321 /*
1322 * The imx6q ROM code will change the default watermark
1323 * level setting to something insane. Change it back here.
1324 */
1325 writel(ESDHC_WTMK_DEFAULT_VAL, host->ioaddr + ESDHC_WTMK_LVL);
1326
1327 /*
1328 * ROM code will change the bit burst_length_enable setting
1329 * to zero if this usdhc is chosen to boot system. Change
1330 * it back here, otherwise it will impact the performance a
1331 * lot. This bit is used to enable/disable the burst length
1332 * for the external AHB2AXI bridge. It's useful especially
1333 * for INCR transfer because without burst length indicator,
1334 * the AHB2AXI bridge does not know the burst length in
1335 * advance. And without burst length indicator, AHB INCR
1336 * transfer can only be converted to singles on the AXI side.
1337 */
1338 writel(readl(host->ioaddr + SDHCI_HOST_CONTROL)
1339 | ESDHC_BURST_LEN_EN_INCR,
1340 host->ioaddr + SDHCI_HOST_CONTROL);
David Brazdil0f672f62019-12-10 10:32:29 +00001341
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001342 /*
David Brazdil0f672f62019-12-10 10:32:29 +00001343 * erratum ESDHC_FLAG_ERR004536 fix for MX6Q TO1.2 and MX6DL
1344 * TO1.1, it's harmless for MX6SL
1345 */
1346 writel(readl(host->ioaddr + 0x6c) & ~BIT(7),
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001347 host->ioaddr + 0x6c);
1348
1349 /* disable DLL_CTRL delay line settings */
1350 writel(0x0, host->ioaddr + ESDHC_DLL_CTRL);
1351
David Brazdil0f672f62019-12-10 10:32:29 +00001352 /*
1353 * For the case of command with busy, if set the bit
1354 * ESDHC_VEND_SPEC2_EN_BUSY_IRQ, USDHC will generate a
1355 * transfer complete interrupt when busy is deasserted.
1356 * When CQHCI use DCMD to send a CMD need R1b respons,
1357 * CQHCI require to set ESDHC_VEND_SPEC2_EN_BUSY_IRQ,
1358 * otherwise DCMD will always meet timeout waiting for
1359 * hardware interrupt issue.
1360 */
1361 if (imx_data->socdata->flags & ESDHC_FLAG_CQHCI) {
1362 tmp = readl(host->ioaddr + ESDHC_VEND_SPEC2);
1363 tmp |= ESDHC_VEND_SPEC2_EN_BUSY_IRQ;
1364 writel(tmp, host->ioaddr + ESDHC_VEND_SPEC2);
1365
1366 host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ;
1367 }
1368
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001369 if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) {
1370 tmp = readl(host->ioaddr + ESDHC_TUNING_CTRL);
1371 tmp |= ESDHC_STD_TUNING_EN |
1372 ESDHC_TUNING_START_TAP_DEFAULT;
1373 if (imx_data->boarddata.tuning_start_tap) {
1374 tmp &= ~ESDHC_TUNING_START_TAP_MASK;
1375 tmp |= imx_data->boarddata.tuning_start_tap;
1376 }
1377
1378 if (imx_data->boarddata.tuning_step) {
1379 tmp &= ~ESDHC_TUNING_STEP_MASK;
1380 tmp |= imx_data->boarddata.tuning_step
1381 << ESDHC_TUNING_STEP_SHIFT;
1382 }
Olivier Deprez157378f2022-04-04 15:47:50 +02001383
1384 /* Disable the CMD CRC check for tuning, if not, need to
1385 * add some delay after every tuning command, because
1386 * hardware standard tuning logic will directly go to next
1387 * step once it detect the CMD CRC error, will not wait for
1388 * the card side to finally send out the tuning data, trigger
1389 * the buffer read ready interrupt immediately. If usdhc send
1390 * the next tuning command some eMMC card will stuck, can't
1391 * response, block the tuning procedure or the first command
1392 * after the whole tuning procedure always can't get any response.
1393 */
1394 tmp |= ESDHC_TUNING_CMD_CRC_CHECK_DISABLE;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001395 writel(tmp, host->ioaddr + ESDHC_TUNING_CTRL);
David Brazdil0f672f62019-12-10 10:32:29 +00001396 } else if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) {
1397 /*
1398 * ESDHC_STD_TUNING_EN may be configed in bootloader
1399 * or ROM code, so clear this bit here to make sure
1400 * the manual tuning can work.
1401 */
1402 tmp = readl(host->ioaddr + ESDHC_TUNING_CTRL);
1403 tmp &= ~ESDHC_STD_TUNING_EN;
1404 writel(tmp, host->ioaddr + ESDHC_TUNING_CTRL);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001405 }
Olivier Deprez157378f2022-04-04 15:47:50 +02001406
1407 /*
1408 * On i.MX8MM, we are running Dual Linux OS, with 1st Linux using SD Card
1409 * as rootfs storage, 2nd Linux using eMMC as rootfs storage. We let the
1410 * the 1st linux configure power/clock for the 2nd Linux.
1411 *
1412 * When the 2nd Linux is booting into rootfs stage, we let the 1st Linux
1413 * to destroy the 2nd linux, then restart the 2nd linux, we met SDHCI dump.
1414 * After we clear the pending interrupt and halt CQCTL, issue gone.
1415 */
1416 if (cq_host) {
1417 tmp = cqhci_readl(cq_host, CQHCI_IS);
1418 cqhci_writel(cq_host, tmp, CQHCI_IS);
1419 cqhci_writel(cq_host, CQHCI_HALT, CQHCI_CTL);
1420 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001421 }
1422}
1423
David Brazdil0f672f62019-12-10 10:32:29 +00001424static void esdhc_cqe_enable(struct mmc_host *mmc)
1425{
1426 struct sdhci_host *host = mmc_priv(mmc);
1427 struct cqhci_host *cq_host = mmc->cqe_private;
1428 u32 reg;
1429 u16 mode;
1430 int count = 10;
1431
1432 /*
1433 * CQE gets stuck if it sees Buffer Read Enable bit set, which can be
1434 * the case after tuning, so ensure the buffer is drained.
1435 */
1436 reg = sdhci_readl(host, SDHCI_PRESENT_STATE);
1437 while (reg & SDHCI_DATA_AVAILABLE) {
1438 sdhci_readl(host, SDHCI_BUFFER);
1439 reg = sdhci_readl(host, SDHCI_PRESENT_STATE);
1440 if (count-- == 0) {
1441 dev_warn(mmc_dev(host->mmc),
1442 "CQE may get stuck because the Buffer Read Enable bit is set\n");
1443 break;
1444 }
1445 mdelay(1);
1446 }
1447
1448 /*
1449 * Runtime resume will reset the entire host controller, which
1450 * will also clear the DMAEN/BCEN of register ESDHC_MIX_CTRL.
1451 * Here set DMAEN and BCEN when enable CMDQ.
1452 */
1453 mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
1454 if (host->flags & SDHCI_REQ_USE_DMA)
1455 mode |= SDHCI_TRNS_DMA;
1456 if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE))
1457 mode |= SDHCI_TRNS_BLK_CNT_EN;
1458 sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
1459
1460 /*
1461 * Though Runtime resume reset the entire host controller,
1462 * but do not impact the CQHCI side, need to clear the
1463 * HALT bit, avoid CQHCI stuck in the first request when
1464 * system resume back.
1465 */
1466 cqhci_writel(cq_host, 0, CQHCI_CTL);
1467	if (cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT)
1468		dev_err(mmc_dev(host->mmc),
1469			"failed to exit halt state when enabling CQE\n");
1470
1472 sdhci_cqe_enable(mmc);
1473}
1474
1475static void esdhc_sdhci_dumpregs(struct mmc_host *mmc)
1476{
1477 sdhci_dumpregs(mmc_priv(mmc));
1478}
1479
1480static const struct cqhci_host_ops esdhc_cqhci_ops = {
1481 .enable = esdhc_cqe_enable,
1482 .disable = sdhci_cqe_disable,
1483 .dumpregs = esdhc_sdhci_dumpregs,
1484};
1485
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001486#ifdef CONFIG_OF
1487static int
1488sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
1489 struct sdhci_host *host,
1490 struct pltfm_imx_data *imx_data)
1491{
1492 struct device_node *np = pdev->dev.of_node;
1493 struct esdhc_platform_data *boarddata = &imx_data->boarddata;
1494 int ret;
1495
1496 if (of_get_property(np, "fsl,wp-controller", NULL))
1497 boarddata->wp_type = ESDHC_WP_CONTROLLER;
1498
David Brazdil0f672f62019-12-10 10:32:29 +00001499 /*
1500	 * If this property is present, activate the WP check.
1501	 * Retrieving and requesting the actual WP GPIO will happen
1502 * in the call to mmc_of_parse().
1503 */
1504 if (of_property_read_bool(np, "wp-gpios"))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001505 boarddata->wp_type = ESDHC_WP_GPIO;
1506
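	/*
	 * The tuning properties are optional: of_property_read_u32() leaves
	 * the zero-initialised boarddata fields untouched when a property is
	 * absent, and a zero value falls back to the driver defaults later.
	 */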
1507 of_property_read_u32(np, "fsl,tuning-step", &boarddata->tuning_step);
1508 of_property_read_u32(np, "fsl,tuning-start-tap",
1509 &boarddata->tuning_start_tap);
1510
Olivier Deprez157378f2022-04-04 15:47:50 +02001511 of_property_read_u32(np, "fsl,strobe-dll-delay-target",
1512 &boarddata->strobe_dll_delay_target);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001513 if (of_find_property(np, "no-1-8-v", NULL))
1514 host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
1515
1516 if (of_property_read_u32(np, "fsl,delay-line", &boarddata->delay_line))
1517 boarddata->delay_line = 0;
1518
1519 mmc_of_parse_voltage(np, &host->ocr_mask);
1520
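	/*
	 * Optional pad configurations for 100MHz and 200MHz operation; the
	 * driver switches to them when running the faster UHS-I, HS200 and
	 * HS400 timings.
	 */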
Olivier Deprez157378f2022-04-04 15:47:50 +02001521 if (esdhc_is_usdhc(imx_data) && !IS_ERR(imx_data->pinctrl)) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001522 imx_data->pins_100mhz = pinctrl_lookup_state(imx_data->pinctrl,
1523 ESDHC_PINCTRL_STATE_100MHZ);
1524 imx_data->pins_200mhz = pinctrl_lookup_state(imx_data->pinctrl,
1525 ESDHC_PINCTRL_STATE_200MHZ);
1526 }
1527
1528 /* call to generic mmc_of_parse to support additional capabilities */
1529 ret = mmc_of_parse(host->mmc);
1530 if (ret)
1531 return ret;
1532
1533 if (mmc_gpio_get_cd(host->mmc) >= 0)
1534 host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
1535
1536 return 0;
1537}
1538#else
1539static inline int
1540sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
1541 struct sdhci_host *host,
1542 struct pltfm_imx_data *imx_data)
1543{
1544 return -ENODEV;
1545}
1546#endif
1547
1548static int sdhci_esdhc_imx_probe_nondt(struct platform_device *pdev,
1549 struct sdhci_host *host,
1550 struct pltfm_imx_data *imx_data)
1551{
1552 struct esdhc_platform_data *boarddata = &imx_data->boarddata;
1553 int err;
1554
1555 if (!host->mmc->parent->platform_data) {
1556 dev_err(mmc_dev(host->mmc), "no board data!\n");
1557 return -EINVAL;
1558 }
1559
1560 imx_data->boarddata = *((struct esdhc_platform_data *)
1561 host->mmc->parent->platform_data);
1562 /* write_protect */
1563 if (boarddata->wp_type == ESDHC_WP_GPIO) {
Olivier Deprez0e641232021-09-23 10:07:05 +02001564 host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
1565
Olivier Deprez157378f2022-04-04 15:47:50 +02001566 err = mmc_gpiod_request_ro(host->mmc, "wp", 0, 0);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001567 if (err) {
1568 dev_err(mmc_dev(host->mmc),
1569 "failed to request write-protect gpio!\n");
1570 return err;
1571 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001572 }
1573
1574 /* card_detect */
1575 switch (boarddata->cd_type) {
1576 case ESDHC_CD_GPIO:
Olivier Deprez157378f2022-04-04 15:47:50 +02001577 err = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001578 if (err) {
1579 dev_err(mmc_dev(host->mmc),
1580 "failed to request card-detect gpio!\n");
1581 return err;
1582 }
Olivier Deprez157378f2022-04-04 15:47:50 +02001583 fallthrough;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001584
1585 case ESDHC_CD_CONTROLLER:
1586		/* we have working card detection */
1587 host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
1588 break;
1589
1590 case ESDHC_CD_PERMANENT:
1591 host->mmc->caps |= MMC_CAP_NONREMOVABLE;
1592 break;
1593
1594 case ESDHC_CD_NONE:
1595 break;
1596 }
1597
1598 switch (boarddata->max_bus_width) {
1599 case 8:
1600 host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA;
1601 break;
1602 case 4:
1603 host->mmc->caps |= MMC_CAP_4_BIT_DATA;
1604 break;
1605 case 1:
1606 default:
1607 host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA;
1608 break;
1609 }
1610
1611 return 0;
1612}
1613
1614static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
1615{
1616 const struct of_device_id *of_id =
1617 of_match_device(imx_esdhc_dt_ids, &pdev->dev);
1618 struct sdhci_pltfm_host *pltfm_host;
1619 struct sdhci_host *host;
David Brazdil0f672f62019-12-10 10:32:29 +00001620 struct cqhci_host *cq_host;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001621 int err;
1622 struct pltfm_imx_data *imx_data;
1623
1624 host = sdhci_pltfm_init(pdev, &sdhci_esdhc_imx_pdata,
1625 sizeof(*imx_data));
1626 if (IS_ERR(host))
1627 return PTR_ERR(host);
1628
1629 pltfm_host = sdhci_priv(host);
1630
1631 imx_data = sdhci_pltfm_priv(pltfm_host);
1632
1633 imx_data->socdata = of_id ? of_id->data : (struct esdhc_soc_data *)
1634 pdev->id_entry->driver_data;
1635
David Brazdil0f672f62019-12-10 10:32:29 +00001636 if (imx_data->socdata->flags & ESDHC_FLAG_PMQOS)
Olivier Deprez157378f2022-04-04 15:47:50 +02001637 cpu_latency_qos_add_request(&imx_data->pm_qos_req, 0);
David Brazdil0f672f62019-12-10 10:32:29 +00001638
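	/*
	 * Three clocks are required: "per" supplies the base clock for the
	 * SD interface, while "ipg" and "ahb" are the bus clocks needed to
	 * access the controller.
	 */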
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001639 imx_data->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
1640 if (IS_ERR(imx_data->clk_ipg)) {
1641 err = PTR_ERR(imx_data->clk_ipg);
1642 goto free_sdhci;
1643 }
1644
1645 imx_data->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
1646 if (IS_ERR(imx_data->clk_ahb)) {
1647 err = PTR_ERR(imx_data->clk_ahb);
1648 goto free_sdhci;
1649 }
1650
1651 imx_data->clk_per = devm_clk_get(&pdev->dev, "per");
1652 if (IS_ERR(imx_data->clk_per)) {
1653 err = PTR_ERR(imx_data->clk_per);
1654 goto free_sdhci;
1655 }
1656
1657 pltfm_host->clk = imx_data->clk_per;
1658 pltfm_host->clock = clk_get_rate(pltfm_host->clk);
1659 err = clk_prepare_enable(imx_data->clk_per);
1660 if (err)
1661 goto free_sdhci;
1662 err = clk_prepare_enable(imx_data->clk_ipg);
1663 if (err)
1664 goto disable_per_clk;
1665 err = clk_prepare_enable(imx_data->clk_ahb);
1666 if (err)
1667 goto disable_ipg_clk;
1668
1669 imx_data->pinctrl = devm_pinctrl_get(&pdev->dev);
Olivier Deprez157378f2022-04-04 15:47:50 +02001670 if (IS_ERR(imx_data->pinctrl))
1671 dev_warn(mmc_dev(host->mmc), "could not get pinctrl\n");
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001672
1673 if (esdhc_is_usdhc(imx_data)) {
1674 host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN;
1675 host->mmc->caps |= MMC_CAP_1_8V_DDR | MMC_CAP_3_3V_DDR;
Olivier Deprez157378f2022-04-04 15:47:50 +02001676
1677 /* GPIO CD can be set as a wakeup source */
1678 host->mmc->caps |= MMC_CAP_CD_WAKE;
1679
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001680 if (!(imx_data->socdata->flags & ESDHC_FLAG_HS200))
1681 host->quirks2 |= SDHCI_QUIRK2_BROKEN_HS200;
1682
1683		/* clear tuning bits in case the ROM has already set them */
1684 writel(0x0, host->ioaddr + ESDHC_MIX_CTRL);
David Brazdil0f672f62019-12-10 10:32:29 +00001685 writel(0x0, host->ioaddr + SDHCI_AUTO_CMD_STATUS);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001686 writel(0x0, host->ioaddr + ESDHC_TUNE_CTRL_STATUS);
David Brazdil0f672f62019-12-10 10:32:29 +00001687
1688 /*
1689		 * Hook up the uSDHC-specific mmc_host_ops execute_tuning
1690		 * callback to replace the standard one in sdhci_ops.
1691 */
1692 host->mmc_host_ops.execute_tuning = usdhc_execute_tuning;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001693 }
1694
1695 if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING)
1696 sdhci_esdhc_ops.platform_execute_tuning =
1697 esdhc_executing_tuning;
1698
1699 if (imx_data->socdata->flags & ESDHC_FLAG_ERR004536)
1700 host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
1701
1702 if (imx_data->socdata->flags & ESDHC_FLAG_HS400)
1703 host->quirks2 |= SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400;
1704
Olivier Deprez157378f2022-04-04 15:47:50 +02001705 if (imx_data->socdata->flags & ESDHC_FLAG_BROKEN_AUTO_CMD23)
1706 host->quirks2 |= SDHCI_QUIRK2_ACMD23_BROKEN;
1707
David Brazdil0f672f62019-12-10 10:32:29 +00001708 if (imx_data->socdata->flags & ESDHC_FLAG_HS400_ES) {
1709 host->mmc->caps2 |= MMC_CAP2_HS400_ES;
1710 host->mmc_host_ops.hs400_enhanced_strobe =
1711 esdhc_hs400_enhanced_strobe;
1712 }
1713
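	/*
	 * Hook up the CQHCI engine; its register block sits at a fixed
	 * offset inside the uSDHC register space.
	 */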
1714 if (imx_data->socdata->flags & ESDHC_FLAG_CQHCI) {
1715 host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
1716 cq_host = devm_kzalloc(&pdev->dev, sizeof(*cq_host), GFP_KERNEL);
1717 if (!cq_host) {
1718 err = -ENOMEM;
1719 goto disable_ahb_clk;
1720 }
1721
1722 cq_host->mmio = host->ioaddr + ESDHC_CQHCI_ADDR_OFFSET;
1723 cq_host->ops = &esdhc_cqhci_ops;
1724
1725 err = cqhci_init(cq_host, host->mmc, false);
1726 if (err)
1727 goto disable_ahb_clk;
1728 }
1729
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001730 if (of_id)
1731 err = sdhci_esdhc_imx_probe_dt(pdev, host, imx_data);
1732 else
1733 err = sdhci_esdhc_imx_probe_nondt(pdev, host, imx_data);
1734 if (err)
1735 goto disable_ahb_clk;
1736
1737 sdhci_esdhc_imx_hwinit(host);
1738
1739 err = sdhci_add_host(host);
1740 if (err)
1741 goto disable_ahb_clk;
1742
1743 pm_runtime_set_active(&pdev->dev);
1744 pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
1745 pm_runtime_use_autosuspend(&pdev->dev);
1746 pm_suspend_ignore_children(&pdev->dev, 1);
1747 pm_runtime_enable(&pdev->dev);
1748
1749 return 0;
1750
1751disable_ahb_clk:
1752 clk_disable_unprepare(imx_data->clk_ahb);
1753disable_ipg_clk:
1754 clk_disable_unprepare(imx_data->clk_ipg);
1755disable_per_clk:
1756 clk_disable_unprepare(imx_data->clk_per);
1757free_sdhci:
David Brazdil0f672f62019-12-10 10:32:29 +00001758 if (imx_data->socdata->flags & ESDHC_FLAG_PMQOS)
Olivier Deprez157378f2022-04-04 15:47:50 +02001759 cpu_latency_qos_remove_request(&imx_data->pm_qos_req);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001760 sdhci_pltfm_free(pdev);
1761 return err;
1762}
1763
1764static int sdhci_esdhc_imx_remove(struct platform_device *pdev)
1765{
1766 struct sdhci_host *host = platform_get_drvdata(pdev);
1767 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1768 struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
Olivier Deprez0e641232021-09-23 10:07:05 +02001769 int dead;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001770
1771 pm_runtime_get_sync(&pdev->dev);
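	/*
	 * A readback of all ones means the controller is no longer
	 * accessible; tell sdhci_remove_host() so it avoids touching
	 * the dead hardware.
	 */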
Olivier Deprez0e641232021-09-23 10:07:05 +02001772 dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001773 pm_runtime_disable(&pdev->dev);
1774 pm_runtime_put_noidle(&pdev->dev);
1775
1776 sdhci_remove_host(host, dead);
1777
1778 clk_disable_unprepare(imx_data->clk_per);
1779 clk_disable_unprepare(imx_data->clk_ipg);
1780 clk_disable_unprepare(imx_data->clk_ahb);
1781
David Brazdil0f672f62019-12-10 10:32:29 +00001782 if (imx_data->socdata->flags & ESDHC_FLAG_PMQOS)
Olivier Deprez157378f2022-04-04 15:47:50 +02001783 cpu_latency_qos_remove_request(&imx_data->pm_qos_req);
David Brazdil0f672f62019-12-10 10:32:29 +00001784
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001785 sdhci_pltfm_free(pdev);
1786
1787 return 0;
1788}
1789
1790#ifdef CONFIG_PM_SLEEP
1791static int sdhci_esdhc_suspend(struct device *dev)
1792{
1793 struct sdhci_host *host = dev_get_drvdata(dev);
Olivier Deprez157378f2022-04-04 15:47:50 +02001794 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1795 struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
David Brazdil0f672f62019-12-10 10:32:29 +00001796 int ret;
1797
1798 if (host->mmc->caps2 & MMC_CAP2_CQE) {
1799 ret = cqhci_suspend(host->mmc);
1800 if (ret)
1801 return ret;
1802 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001803
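	/*
	 * On SoCs that lose controller state in low power mode the tuning
	 * result is lost as well, so flag that a retune is needed after
	 * resume.
	 */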
Olivier Deprez157378f2022-04-04 15:47:50 +02001804 if ((imx_data->socdata->flags & ESDHC_FLAG_STATE_LOST_IN_LPMODE) &&
1805 (host->tuning_mode != SDHCI_TUNING_MODE_1)) {
1806 mmc_retune_timer_stop(host->mmc);
1807 mmc_retune_needed(host->mmc);
1808 }
1809
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001810 if (host->tuning_mode != SDHCI_TUNING_MODE_3)
1811 mmc_retune_needed(host->mmc);
1812
Olivier Deprez157378f2022-04-04 15:47:50 +02001813 ret = sdhci_suspend_host(host);
1814 if (ret)
1815 return ret;
1816
1817 ret = pinctrl_pm_select_sleep_state(dev);
1818 if (ret)
1819 return ret;
1820
1821 ret = mmc_gpio_set_cd_wake(host->mmc, true);
1822
1823 return ret;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001824}
1825
1826static int sdhci_esdhc_resume(struct device *dev)
1827{
1828 struct sdhci_host *host = dev_get_drvdata(dev);
David Brazdil0f672f62019-12-10 10:32:29 +00001829 int ret;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001830
Olivier Deprez157378f2022-04-04 15:47:50 +02001831 ret = pinctrl_pm_select_default_state(dev);
1832 if (ret)
1833 return ret;
1834
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001835 /* re-initialize hw state in case it's lost in low power mode */
1836 sdhci_esdhc_imx_hwinit(host);
1837
David Brazdil0f672f62019-12-10 10:32:29 +00001838 ret = sdhci_resume_host(host);
1839 if (ret)
1840 return ret;
1841
1842 if (host->mmc->caps2 & MMC_CAP2_CQE)
1843 ret = cqhci_resume(host->mmc);
1844
Olivier Deprez157378f2022-04-04 15:47:50 +02001845 if (!ret)
1846 ret = mmc_gpio_set_cd_wake(host->mmc, false);
1847
David Brazdil0f672f62019-12-10 10:32:29 +00001848 return ret;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001849}
1850#endif
1851
1852#ifdef CONFIG_PM
1853static int sdhci_esdhc_runtime_suspend(struct device *dev)
1854{
1855 struct sdhci_host *host = dev_get_drvdata(dev);
1856 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1857 struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
1858 int ret;
1859
David Brazdil0f672f62019-12-10 10:32:29 +00001860 if (host->mmc->caps2 & MMC_CAP2_CQE) {
1861 ret = cqhci_suspend(host->mmc);
1862 if (ret)
1863 return ret;
1864 }
1865
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001866 ret = sdhci_runtime_suspend_host(host);
1867 if (ret)
1868 return ret;
1869
1870 if (host->tuning_mode != SDHCI_TUNING_MODE_3)
1871 mmc_retune_needed(host->mmc);
1872
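	/*
	 * Remember the current card clock so runtime resume can restore it,
	 * then gate the card clock and the controller clocks.
	 */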
David Brazdil0f672f62019-12-10 10:32:29 +00001873 imx_data->actual_clock = host->mmc->actual_clock;
1874 esdhc_pltfm_set_clock(host, 0);
1875 clk_disable_unprepare(imx_data->clk_per);
1876 clk_disable_unprepare(imx_data->clk_ipg);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001877 clk_disable_unprepare(imx_data->clk_ahb);
1878
David Brazdil0f672f62019-12-10 10:32:29 +00001879 if (imx_data->socdata->flags & ESDHC_FLAG_PMQOS)
Olivier Deprez157378f2022-04-04 15:47:50 +02001880 cpu_latency_qos_remove_request(&imx_data->pm_qos_req);
David Brazdil0f672f62019-12-10 10:32:29 +00001881
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001882 return ret;
1883}
1884
1885static int sdhci_esdhc_runtime_resume(struct device *dev)
1886{
1887 struct sdhci_host *host = dev_get_drvdata(dev);
1888 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1889 struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
1890 int err;
1891
David Brazdil0f672f62019-12-10 10:32:29 +00001892 if (imx_data->socdata->flags & ESDHC_FLAG_PMQOS)
Olivier Deprez157378f2022-04-04 15:47:50 +02001893 cpu_latency_qos_add_request(&imx_data->pm_qos_req, 0);
1894
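	/*
	 * Some SoCs lose the "per" clock rate across runtime suspend;
	 * restore the rate recorded at probe time before turning the
	 * clocks back on.
	 */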
1895 if (imx_data->socdata->flags & ESDHC_FLAG_CLK_RATE_LOST_IN_PM_RUNTIME)
1896 clk_set_rate(imx_data->clk_per, pltfm_host->clock);
David Brazdil0f672f62019-12-10 10:32:29 +00001897
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001898 err = clk_prepare_enable(imx_data->clk_ahb);
1899 if (err)
David Brazdil0f672f62019-12-10 10:32:29 +00001900 goto remove_pm_qos_request;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001901
David Brazdil0f672f62019-12-10 10:32:29 +00001902 err = clk_prepare_enable(imx_data->clk_per);
1903 if (err)
1904 goto disable_ahb_clk;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001905
David Brazdil0f672f62019-12-10 10:32:29 +00001906 err = clk_prepare_enable(imx_data->clk_ipg);
1907 if (err)
1908 goto disable_per_clk;
1909
1910 esdhc_pltfm_set_clock(host, imx_data->actual_clock);
1911
1912 err = sdhci_runtime_resume_host(host, 0);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001913 if (err)
1914 goto disable_ipg_clk;
1915
David Brazdil0f672f62019-12-10 10:32:29 +00001916 if (host->mmc->caps2 & MMC_CAP2_CQE)
1917 err = cqhci_resume(host->mmc);
1918
1919 return err;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001920
1921disable_ipg_clk:
David Brazdil0f672f62019-12-10 10:32:29 +00001922 clk_disable_unprepare(imx_data->clk_ipg);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001923disable_per_clk:
David Brazdil0f672f62019-12-10 10:32:29 +00001924 clk_disable_unprepare(imx_data->clk_per);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001925disable_ahb_clk:
1926 clk_disable_unprepare(imx_data->clk_ahb);
David Brazdil0f672f62019-12-10 10:32:29 +00001927remove_pm_qos_request:
1928 if (imx_data->socdata->flags & ESDHC_FLAG_PMQOS)
Olivier Deprez157378f2022-04-04 15:47:50 +02001929 cpu_latency_qos_remove_request(&imx_data->pm_qos_req);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001930 return err;
1931}
1932#endif
1933
1934static const struct dev_pm_ops sdhci_esdhc_pmops = {
1935 SET_SYSTEM_SLEEP_PM_OPS(sdhci_esdhc_suspend, sdhci_esdhc_resume)
1936 SET_RUNTIME_PM_OPS(sdhci_esdhc_runtime_suspend,
1937 sdhci_esdhc_runtime_resume, NULL)
1938};
1939
1940static struct platform_driver sdhci_esdhc_imx_driver = {
1941 .driver = {
1942 .name = "sdhci-esdhc-imx",
Olivier Deprez157378f2022-04-04 15:47:50 +02001943 .probe_type = PROBE_PREFER_ASYNCHRONOUS,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001944 .of_match_table = imx_esdhc_dt_ids,
1945 .pm = &sdhci_esdhc_pmops,
1946 },
1947 .id_table = imx_esdhc_devtype,
1948 .probe = sdhci_esdhc_imx_probe,
1949 .remove = sdhci_esdhc_imx_remove,
1950};
1951
1952module_platform_driver(sdhci_esdhc_imx_driver);
1953
1954MODULE_DESCRIPTION("SDHCI driver for Freescale i.MX eSDHC");
1955MODULE_AUTHOR("Wolfram Sang <kernel@pengutronix.de>");
1956MODULE_LICENSE("GPL v2");