// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
 *
 * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
 * Copyright (C) 2010 ST-Ericsson SA
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/pm.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/of.h>
#include <linux/regulator/consumer.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/amba/mmci.h>
#include <linux/pm_runtime.h>
#include <linux/types.h>
#include <linux/pinctrl/consumer.h>
#include <linux/reset.h>

#include <asm/div64.h>
#include <asm/io.h>

#include "mmci.h"

#define DRIVER_NAME "mmci-pl18x"

static void mmci_variant_init(struct mmci_host *host);
static void ux500v2_variant_init(struct mmci_host *host);

static unsigned int fmax = 515633;

static struct variant_data variant_arm = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.cmdreg_cpsm_enable	= MCI_CPSM_ENABLE,
	.cmdreg_lrsp_crc	= MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
	.cmdreg_srsp_crc	= MCI_CPSM_RESPONSE,
	.cmdreg_srsp		= MCI_CPSM_RESPONSE,
	.datalength_bits	= 16,
	.datactrl_blocksz	= 11,
	.pwrreg_powerup		= MCI_PWR_UP,
	.f_max			= 100000000,
	.reversed_irq_handling	= true,
	.mmcimask1		= true,
	.irq_pio_mask		= MCI_IRQ_PIO_MASK,
	.start_err		= MCI_STARTBITERR,
	.opendrain		= MCI_ROD,
	.init			= mmci_variant_init,
};

static struct variant_data variant_arm_extended_fifo = {
	.fifosize		= 128 * 4,
	.fifohalfsize		= 64 * 4,
	.cmdreg_cpsm_enable	= MCI_CPSM_ENABLE,
	.cmdreg_lrsp_crc	= MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
	.cmdreg_srsp_crc	= MCI_CPSM_RESPONSE,
	.cmdreg_srsp		= MCI_CPSM_RESPONSE,
	.datalength_bits	= 16,
	.datactrl_blocksz	= 11,
	.pwrreg_powerup		= MCI_PWR_UP,
	.f_max			= 100000000,
	.mmcimask1		= true,
	.irq_pio_mask		= MCI_IRQ_PIO_MASK,
	.start_err		= MCI_STARTBITERR,
	.opendrain		= MCI_ROD,
	.init			= mmci_variant_init,
};

static struct variant_data variant_arm_extended_fifo_hwfc = {
	.fifosize		= 128 * 4,
	.fifohalfsize		= 64 * 4,
	.clkreg_enable		= MCI_ARM_HWFCEN,
	.cmdreg_cpsm_enable	= MCI_CPSM_ENABLE,
	.cmdreg_lrsp_crc	= MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
	.cmdreg_srsp_crc	= MCI_CPSM_RESPONSE,
	.cmdreg_srsp		= MCI_CPSM_RESPONSE,
	.datalength_bits	= 16,
	.datactrl_blocksz	= 11,
	.pwrreg_powerup		= MCI_PWR_UP,
	.f_max			= 100000000,
	.mmcimask1		= true,
	.irq_pio_mask		= MCI_IRQ_PIO_MASK,
	.start_err		= MCI_STARTBITERR,
	.opendrain		= MCI_ROD,
	.init			= mmci_variant_init,
};

static struct variant_data variant_u300 = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg_enable		= MCI_ST_U300_HWFCEN,
	.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
	.cmdreg_cpsm_enable	= MCI_CPSM_ENABLE,
	.cmdreg_lrsp_crc	= MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
	.cmdreg_srsp_crc	= MCI_CPSM_RESPONSE,
	.cmdreg_srsp		= MCI_CPSM_RESPONSE,
	.datalength_bits	= 16,
	.datactrl_blocksz	= 11,
	.datactrl_mask_sdio	= MCI_DPSM_ST_SDIOEN,
	.st_sdio		= true,
	.pwrreg_powerup		= MCI_PWR_ON,
	.f_max			= 100000000,
	.signal_direction	= true,
	.pwrreg_clkgate		= true,
	.pwrreg_nopower		= true,
	.mmcimask1		= true,
	.irq_pio_mask		= MCI_IRQ_PIO_MASK,
	.start_err		= MCI_STARTBITERR,
	.opendrain		= MCI_OD,
	.init			= mmci_variant_init,
};

static struct variant_data variant_nomadik = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
	.cmdreg_cpsm_enable	= MCI_CPSM_ENABLE,
	.cmdreg_lrsp_crc	= MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
	.cmdreg_srsp_crc	= MCI_CPSM_RESPONSE,
	.cmdreg_srsp		= MCI_CPSM_RESPONSE,
	.datalength_bits	= 24,
	.datactrl_blocksz	= 11,
	.datactrl_mask_sdio	= MCI_DPSM_ST_SDIOEN,
	.st_sdio		= true,
	.st_clkdiv		= true,
	.pwrreg_powerup		= MCI_PWR_ON,
	.f_max			= 100000000,
	.signal_direction	= true,
	.pwrreg_clkgate		= true,
	.pwrreg_nopower		= true,
	.mmcimask1		= true,
	.irq_pio_mask		= MCI_IRQ_PIO_MASK,
	.start_err		= MCI_STARTBITERR,
	.opendrain		= MCI_OD,
	.init			= mmci_variant_init,
};

static struct variant_data variant_ux500 = {
	.fifosize		= 30 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_enable		= MCI_ST_UX500_HWFCEN,
	.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
	.clkreg_neg_edge_enable	= MCI_ST_UX500_NEG_EDGE,
	.cmdreg_cpsm_enable	= MCI_CPSM_ENABLE,
	.cmdreg_lrsp_crc	= MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
	.cmdreg_srsp_crc	= MCI_CPSM_RESPONSE,
	.cmdreg_srsp		= MCI_CPSM_RESPONSE,
	.datalength_bits	= 24,
	.datactrl_blocksz	= 11,
	.datactrl_any_blocksz	= true,
	.dma_power_of_2		= true,
	.datactrl_mask_sdio	= MCI_DPSM_ST_SDIOEN,
	.st_sdio		= true,
	.st_clkdiv		= true,
	.pwrreg_powerup		= MCI_PWR_ON,
	.f_max			= 100000000,
	.signal_direction	= true,
	.pwrreg_clkgate		= true,
	.busy_detect		= true,
	.busy_dpsm_flag		= MCI_DPSM_ST_BUSYMODE,
	.busy_detect_flag	= MCI_ST_CARDBUSY,
	.busy_detect_mask	= MCI_ST_BUSYENDMASK,
	.pwrreg_nopower		= true,
	.mmcimask1		= true,
	.irq_pio_mask		= MCI_IRQ_PIO_MASK,
	.start_err		= MCI_STARTBITERR,
	.opendrain		= MCI_OD,
	.init			= mmci_variant_init,
};

static struct variant_data variant_ux500v2 = {
	.fifosize		= 30 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_enable		= MCI_ST_UX500_HWFCEN,
	.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
	.clkreg_neg_edge_enable	= MCI_ST_UX500_NEG_EDGE,
	.cmdreg_cpsm_enable	= MCI_CPSM_ENABLE,
	.cmdreg_lrsp_crc	= MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
	.cmdreg_srsp_crc	= MCI_CPSM_RESPONSE,
	.cmdreg_srsp		= MCI_CPSM_RESPONSE,
	.datactrl_mask_ddrmode	= MCI_DPSM_ST_DDRMODE,
	.datalength_bits	= 24,
	.datactrl_blocksz	= 11,
	.datactrl_any_blocksz	= true,
	.dma_power_of_2		= true,
	.datactrl_mask_sdio	= MCI_DPSM_ST_SDIOEN,
	.st_sdio		= true,
	.st_clkdiv		= true,
	.pwrreg_powerup		= MCI_PWR_ON,
	.f_max			= 100000000,
	.signal_direction	= true,
	.pwrreg_clkgate		= true,
	.busy_detect		= true,
	.busy_dpsm_flag		= MCI_DPSM_ST_BUSYMODE,
	.busy_detect_flag	= MCI_ST_CARDBUSY,
	.busy_detect_mask	= MCI_ST_BUSYENDMASK,
	.pwrreg_nopower		= true,
	.mmcimask1		= true,
	.irq_pio_mask		= MCI_IRQ_PIO_MASK,
	.start_err		= MCI_STARTBITERR,
	.opendrain		= MCI_OD,
	.init			= ux500v2_variant_init,
};

static struct variant_data variant_stm32 = {
	.fifosize		= 32 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_enable		= MCI_ST_UX500_HWFCEN,
	.clkreg_8bit_bus_enable = MCI_ST_8BIT_BUS,
	.clkreg_neg_edge_enable	= MCI_ST_UX500_NEG_EDGE,
	.cmdreg_cpsm_enable	= MCI_CPSM_ENABLE,
	.cmdreg_lrsp_crc	= MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
	.cmdreg_srsp_crc	= MCI_CPSM_RESPONSE,
	.cmdreg_srsp		= MCI_CPSM_RESPONSE,
	.irq_pio_mask		= MCI_IRQ_PIO_MASK,
	.datalength_bits	= 24,
	.datactrl_blocksz	= 11,
	.datactrl_mask_sdio	= MCI_DPSM_ST_SDIOEN,
	.st_sdio		= true,
	.st_clkdiv		= true,
	.pwrreg_powerup		= MCI_PWR_ON,
	.f_max			= 48000000,
	.pwrreg_clkgate		= true,
	.pwrreg_nopower		= true,
	.init			= mmci_variant_init,
};

static struct variant_data variant_stm32_sdmmc = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.f_max			= 208000000,
	.stm32_clkdiv		= true,
	.cmdreg_cpsm_enable	= MCI_CPSM_STM32_ENABLE,
	.cmdreg_lrsp_crc	= MCI_CPSM_STM32_LRSP_CRC,
	.cmdreg_srsp_crc	= MCI_CPSM_STM32_SRSP_CRC,
	.cmdreg_srsp		= MCI_CPSM_STM32_SRSP,
	.cmdreg_stop		= MCI_CPSM_STM32_CMDSTOP,
	.data_cmd_enable	= MCI_CPSM_STM32_CMDTRANS,
	.irq_pio_mask		= MCI_IRQ_PIO_STM32_MASK,
	.datactrl_first		= true,
	.datacnt_useless	= true,
	.datalength_bits	= 25,
	.datactrl_blocksz	= 14,
	.datactrl_any_blocksz	= true,
	.stm32_idmabsize_mask	= GENMASK(12, 5),
	.init			= sdmmc_variant_init,
};

static struct variant_data variant_qcom = {
	.fifosize		= 16 * 4,
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_enable		= MCI_QCOM_CLK_FLOWENA |
				  MCI_QCOM_CLK_SELECT_IN_FBCLK,
	.clkreg_8bit_bus_enable = MCI_QCOM_CLK_WIDEBUS_8,
	.datactrl_mask_ddrmode	= MCI_QCOM_CLK_SELECT_IN_DDR_MODE,
	.cmdreg_cpsm_enable	= MCI_CPSM_ENABLE,
	.cmdreg_lrsp_crc	= MCI_CPSM_RESPONSE | MCI_CPSM_LONGRSP,
	.cmdreg_srsp_crc	= MCI_CPSM_RESPONSE,
	.cmdreg_srsp		= MCI_CPSM_RESPONSE,
	.data_cmd_enable	= MCI_CPSM_QCOM_DATCMD,
	.datalength_bits	= 24,
	.datactrl_blocksz	= 11,
	.datactrl_any_blocksz	= true,
	.pwrreg_powerup		= MCI_PWR_UP,
	.f_max			= 208000000,
	.explicit_mclk_control	= true,
	.qcom_fifo		= true,
	.qcom_dml		= true,
	.mmcimask1		= true,
	.irq_pio_mask		= MCI_IRQ_PIO_MASK,
	.start_err		= MCI_STARTBITERR,
	.opendrain		= MCI_ROD,
	.init			= qcom_variant_init,
};

/* Busy detection for the ST Micro variant */
static int mmci_card_busy(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);
	unsigned long flags;
	int busy = 0;

	spin_lock_irqsave(&host->lock, flags);
	if (readl(host->base + MMCISTATUS) & host->variant->busy_detect_flag)
		busy = 1;
	spin_unlock_irqrestore(&host->lock, flags);

	return busy;
}

static void mmci_reg_delay(struct mmci_host *host)
{
	/*
	 * According to the spec, at least three feedback clock cycles
	 * of max 52 MHz must pass between two writes to the MMCICLOCK reg.
	 * Three MCLK clock cycles must pass between two MMCIPOWER reg writes.
	 * Worst delay time during card init is at 100 kHz => 30 us.
	 * Worst delay time when up and running is at 25 MHz => 120 ns.
	 */
	if (host->cclk < 25000000)
		udelay(30);
	else
		ndelay(120);
}

/*
 * This must be called with host->lock held
 */
void mmci_write_clkreg(struct mmci_host *host, u32 clk)
{
	if (host->clk_reg != clk) {
		host->clk_reg = clk;
		writel(clk, host->base + MMCICLOCK);
	}
}

/*
 * This must be called with host->lock held
 */
void mmci_write_pwrreg(struct mmci_host *host, u32 pwr)
{
	if (host->pwr_reg != pwr) {
		host->pwr_reg = pwr;
		writel(pwr, host->base + MMCIPOWER);
	}
}

/*
 * This must be called with host->lock held
 */
static void mmci_write_datactrlreg(struct mmci_host *host, u32 datactrl)
{
	/* Keep busy mode in DPSM if enabled */
	datactrl |= host->datactrl_reg & host->variant->busy_dpsm_flag;

	if (host->datactrl_reg != datactrl) {
		host->datactrl_reg = datactrl;
		writel(datactrl, host->base + MMCIDATACTRL);
	}
}

/*
 * This must be called with host->lock held
 */
static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
{
	struct variant_data *variant = host->variant;
	u32 clk = variant->clkreg;

	/* Make sure cclk reflects the current calculated clock */
	host->cclk = 0;

	if (desired) {
		if (variant->explicit_mclk_control) {
			host->cclk = host->mclk;
		} else if (desired >= host->mclk) {
			clk = MCI_CLK_BYPASS;
			if (variant->st_clkdiv)
				clk |= MCI_ST_UX500_NEG_EDGE;
			host->cclk = host->mclk;
		} else if (variant->st_clkdiv) {
			/*
			 * DB8500 TRM says f = mclk / (clkdiv + 2)
			 * => clkdiv = (mclk / f) - 2
			 * Round the divider up so we don't exceed the max
			 * frequency
			 */
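			/*
			 * Worked example (editor's illustration): with
			 * mclk = 100 MHz and a desired 400 kHz, clkdiv =
			 * DIV_ROUND_UP(100000000, 400000) - 2 = 248, so
			 * cclk = 100000000 / (248 + 2) = 400 kHz exactly.
			 */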
			clk = DIV_ROUND_UP(host->mclk, desired) - 2;
			if (clk >= 256)
				clk = 255;
			host->cclk = host->mclk / (clk + 2);
		} else {
			/*
			 * PL180 TRM says f = mclk / (2 * (clkdiv + 1))
			 * => clkdiv = mclk / (2 * f) - 1
			 */
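			/*
			 * Worked example (editor's illustration): with
			 * mclk = 100 MHz and a desired 25 MHz, clkdiv =
			 * 100000000 / (2 * 25000000) - 1 = 1, so
			 * cclk = 100000000 / (2 * (1 + 1)) = 25 MHz.
			 */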
			clk = host->mclk / (2 * desired) - 1;
			if (clk >= 256)
				clk = 255;
			host->cclk = host->mclk / (2 * (clk + 1));
		}

		clk |= variant->clkreg_enable;
		clk |= MCI_CLK_ENABLE;
		/* This hasn't proven to be worthwhile */
		/* clk |= MCI_CLK_PWRSAVE; */
	}

	/* Set actual clock for debug */
	host->mmc->actual_clock = host->cclk;

	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
		clk |= MCI_4BIT_BUS;
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
		clk |= variant->clkreg_8bit_bus_enable;

	if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50 ||
	    host->mmc->ios.timing == MMC_TIMING_MMC_DDR52)
		clk |= variant->clkreg_neg_edge_enable;

	mmci_write_clkreg(host, clk);
}

void mmci_dma_release(struct mmci_host *host)
{
	if (host->ops && host->ops->dma_release)
		host->ops->dma_release(host);

	host->use_dma = false;
}

void mmci_dma_setup(struct mmci_host *host)
{
	if (!host->ops || !host->ops->dma_setup)
		return;

	if (host->ops->dma_setup(host))
		return;

	/* initialize pre request cookie */
	host->next_cookie = 1;

	host->use_dma = true;
}

/*
 * Validate mmc prerequisites
 */
static int mmci_validate_data(struct mmci_host *host,
			      struct mmc_data *data)
{
	struct variant_data *variant = host->variant;

	if (!data)
		return 0;
	if (!is_power_of_2(data->blksz) && !variant->datactrl_any_blocksz) {
		dev_err(mmc_dev(host->mmc),
			"unsupported block size (%d bytes)\n", data->blksz);
		return -EINVAL;
	}

	if (host->ops && host->ops->validate_data)
		return host->ops->validate_data(host, data);

	return 0;
}

int mmci_prep_data(struct mmci_host *host, struct mmc_data *data, bool next)
{
	int err;

	if (!host->ops || !host->ops->prep_data)
		return 0;

	err = host->ops->prep_data(host, data, next);

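	/*
	 * Editor's note: the cookie is a positive token identifying the
	 * prepared descriptor (0 means "not prepared"); if the signed
	 * counter overflows, it wraps back to 1 to stay positive.
	 */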
	if (next && !err)
		data->host_cookie = ++host->next_cookie < 0 ?
			1 : host->next_cookie;

	return err;
}

void mmci_unprep_data(struct mmci_host *host, struct mmc_data *data,
		      int err)
{
	if (host->ops && host->ops->unprep_data)
		host->ops->unprep_data(host, data, err);

	data->host_cookie = 0;
}

void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
	WARN_ON(data->host_cookie && data->host_cookie != host->next_cookie);

	if (host->ops && host->ops->get_next_data)
		host->ops->get_next_data(host, data);
}

int mmci_dma_start(struct mmci_host *host, unsigned int datactrl)
{
	struct mmc_data *data = host->data;
	int ret;

	if (!host->use_dma)
		return -EINVAL;

	ret = mmci_prep_data(host, data, false);
	if (ret)
		return ret;

	if (!host->ops || !host->ops->dma_start)
		return -EINVAL;

	/* Okay, go for it. */
	dev_vdbg(mmc_dev(host->mmc),
		 "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
		 data->sg_len, data->blksz, data->blocks, data->flags);

	ret = host->ops->dma_start(host, &datactrl);
	if (ret)
		return ret;

	/* Trigger the DMA transfer */
	mmci_write_datactrlreg(host, datactrl);

	/*
	 * Let the MMCI say when the data is ended and it's time
	 * to fire the next DMA request. When that happens, MMCI raises
	 * the data end interrupt, which is handled by mmci_data_irq().
	 */
	writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
	       host->base + MMCIMASK0);
	return 0;
}

void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
{
	if (!host->use_dma)
		return;

	if (host->ops && host->ops->dma_finalize)
		host->ops->dma_finalize(host, data);
}

void mmci_dma_error(struct mmci_host *host)
{
	if (!host->use_dma)
		return;

	if (host->ops && host->ops->dma_error)
		host->ops->dma_error(host);
}

static void
mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
{
	writel(0, host->base + MMCICOMMAND);

	BUG_ON(host->data);

	host->mrq = NULL;
	host->cmd = NULL;

	mmc_request_done(host->mmc, mrq);
}

static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
{
	void __iomem *base = host->base;
	struct variant_data *variant = host->variant;

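	/*
	 * Editor's note: on variants wired with a single IRQ line, the
	 * PIO interrupt bits normally routed via MMCIMASK1 are folded
	 * into MMCIMASK0 so that one handler services both.
	 */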
	if (host->singleirq) {
		unsigned int mask0 = readl(base + MMCIMASK0);

		mask0 &= ~variant->irq_pio_mask;
		mask0 |= mask;

		writel(mask0, base + MMCIMASK0);
	}

	if (variant->mmcimask1)
		writel(mask, base + MMCIMASK1);

	host->mask1_reg = mask;
}

static void mmci_stop_data(struct mmci_host *host)
{
	mmci_write_datactrlreg(host, 0);
	mmci_set_mask1(host, 0);
	host->data = NULL;
}

static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
{
	unsigned int flags = SG_MITER_ATOMIC;

	if (data->flags & MMC_DATA_READ)
		flags |= SG_MITER_TO_SG;
	else
		flags |= SG_MITER_FROM_SG;

	sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
}

static u32 mmci_get_dctrl_cfg(struct mmci_host *host)
{
	return MCI_DPSM_ENABLE | mmci_dctrl_blksz(host);
}

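/*
 * Editor's note: the ux500v2 variant programs the block size literally
 * into the upper half of DCTRL (blksz << 16) rather than the power-of-two
 * exponent computed by mmci_dctrl_blksz(), which is why this variant can
 * accept any block size (datactrl_any_blocksz).
 */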
static u32 ux500v2_get_dctrl_cfg(struct mmci_host *host)
{
	return MCI_DPSM_ENABLE | (host->data->blksz << 16);
}

/*
 * All the DMA operation mode stuff goes inside this ifdef.
 * This assumes that you have a generic DMA device interface,
 * no custom DMA interfaces are supported.
 */
#ifdef CONFIG_DMA_ENGINE
struct mmci_dmae_next {
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;
};

struct mmci_dmae_priv {
	struct dma_chan *cur;
	struct dma_chan *rx_channel;
	struct dma_chan *tx_channel;
	struct dma_async_tx_descriptor *desc_current;
	struct mmci_dmae_next next_data;
};

int mmci_dmae_setup(struct mmci_host *host)
{
	const char *rxname, *txname;
	struct mmci_dmae_priv *dmae;

	dmae = devm_kzalloc(mmc_dev(host->mmc), sizeof(*dmae), GFP_KERNEL);
	if (!dmae)
		return -ENOMEM;

	host->dma_priv = dmae;

	dmae->rx_channel = dma_request_slave_channel(mmc_dev(host->mmc),
						     "rx");
	dmae->tx_channel = dma_request_slave_channel(mmc_dev(host->mmc),
						     "tx");

	/*
	 * If only an RX channel is specified, the driver will
	 * attempt to use it bidirectionally, however if it is
	 * specified but cannot be located, DMA will be disabled.
	 */
	if (dmae->rx_channel && !dmae->tx_channel)
		dmae->tx_channel = dmae->rx_channel;

	if (dmae->rx_channel)
		rxname = dma_chan_name(dmae->rx_channel);
	else
		rxname = "none";

	if (dmae->tx_channel)
		txname = dma_chan_name(dmae->tx_channel);
	else
		txname = "none";

	dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n",
		 rxname, txname);

	/*
	 * Limit the maximum segment size in any SG entry according to
	 * the parameters of the DMA engine device.
	 */
	if (dmae->tx_channel) {
		struct device *dev = dmae->tx_channel->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;
	}
	if (dmae->rx_channel) {
		struct device *dev = dmae->rx_channel->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;
	}

	if (!dmae->tx_channel || !dmae->rx_channel) {
		mmci_dmae_release(host);
		return -EINVAL;
	}

	return 0;
}

/*
 * This is called both from the dma_release host op and from the
 * error path of mmci_dmae_setup().
 */
void mmci_dmae_release(struct mmci_host *host)
{
	struct mmci_dmae_priv *dmae = host->dma_priv;

	if (dmae->rx_channel)
		dma_release_channel(dmae->rx_channel);
	if (dmae->tx_channel)
		dma_release_channel(dmae->tx_channel);
	dmae->rx_channel = dmae->tx_channel = NULL;
}

static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
	struct mmci_dmae_priv *dmae = host->dma_priv;
	struct dma_chan *chan;

	if (data->flags & MMC_DATA_READ)
		chan = dmae->rx_channel;
	else
		chan = dmae->tx_channel;

	dma_unmap_sg(chan->device->dev, data->sg, data->sg_len,
		     mmc_get_dma_dir(data));
}

void mmci_dmae_error(struct mmci_host *host)
{
	struct mmci_dmae_priv *dmae = host->dma_priv;

	if (!dma_inprogress(host))
		return;

	dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
	dmaengine_terminate_all(dmae->cur);
	host->dma_in_progress = false;
	dmae->cur = NULL;
	dmae->desc_current = NULL;
	host->data->host_cookie = 0;

	mmci_dma_unmap(host, host->data);
}

void mmci_dmae_finalize(struct mmci_host *host, struct mmc_data *data)
{
	struct mmci_dmae_priv *dmae = host->dma_priv;
	u32 status;
	int i;

	if (!dma_inprogress(host))
		return;

	/* Wait up to 1ms for the DMA to complete */
	for (i = 0; ; i++) {
		status = readl(host->base + MMCISTATUS);
		if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100)
			break;
		udelay(10);
	}

	/*
	 * Check to see whether we still have some data left in the FIFO -
	 * this catches DMA controllers which are unable to monitor the
	 * DMALBREQ and DMALSREQ signals while allowing us to DMA to non-
	 * contiguous buffers. On TX, we'll get a FIFO underrun error.
	 */
	if (status & MCI_RXDATAAVLBLMASK) {
		mmci_dma_error(host);
		if (!data->error)
			data->error = -EIO;
	} else if (!data->host_cookie) {
		mmci_dma_unmap(host, data);
	}

	/*
	 * Use of DMA with scatter-gather is impossible.
	 * Give up with DMA and switch back to PIO mode.
	 */
	if (status & MCI_RXDATAAVLBLMASK) {
		dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
		mmci_dma_release(host);
	}

	host->dma_in_progress = false;
	dmae->cur = NULL;
	dmae->desc_current = NULL;
}

/* prepares DMA channel and DMA descriptor, returns non-zero on failure */
static int _mmci_dmae_prep_data(struct mmci_host *host, struct mmc_data *data,
				struct dma_chan **dma_chan,
				struct dma_async_tx_descriptor **dma_desc)
{
	struct mmci_dmae_priv *dmae = host->dma_priv;
	struct variant_data *variant = host->variant;
	struct dma_slave_config conf = {
		.src_addr = host->phybase + MMCIFIFO,
		.dst_addr = host->phybase + MMCIFIFO,
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst = variant->fifohalfsize >> 2, /* # of words */
		.dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
		.device_fc = false,
	};
	struct dma_chan *chan;
	struct dma_device *device;
	struct dma_async_tx_descriptor *desc;
	int nr_sg;
	unsigned long flags = DMA_CTRL_ACK;

	if (data->flags & MMC_DATA_READ) {
		conf.direction = DMA_DEV_TO_MEM;
		chan = dmae->rx_channel;
	} else {
		conf.direction = DMA_MEM_TO_DEV;
		chan = dmae->tx_channel;
	}

	/* If there's no DMA channel, fall back to PIO */
	if (!chan)
		return -EINVAL;

	/* If less than or equal to the fifo size, don't bother with DMA */
	if (data->blksz * data->blocks <= variant->fifosize)
		return -EINVAL;

	/*
	 * This is necessary to get SDIO working on the Ux500. We do not yet
	 * know if this is a bug in:
	 * - The Ux500 DMA controller (DMA40)
	 * - The MMCI DMA interface on the Ux500
	 * Power-of-two block sizes (such as 64 bytes) are sent regularly
	 * during SDIO traffic and those work fine, so for those we enable
	 * DMA transfers; any other block size falls back to PIO.
	 */
	if (host->variant->dma_power_of_2 && !is_power_of_2(data->blksz))
		return -EINVAL;

	device = chan->device;
	nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len,
			   mmc_get_dma_dir(data));
	if (nr_sg == 0)
		return -EINVAL;

	if (host->variant->qcom_dml)
		flags |= DMA_PREP_INTERRUPT;

	dmaengine_slave_config(chan, &conf);
	desc = dmaengine_prep_slave_sg(chan, data->sg, nr_sg,
				       conf.direction, flags);
	if (!desc)
		goto unmap_exit;

	*dma_chan = chan;
	*dma_desc = desc;

	return 0;

 unmap_exit:
	dma_unmap_sg(device->dev, data->sg, data->sg_len,
		     mmc_get_dma_dir(data));
	return -ENOMEM;
}

int mmci_dmae_prep_data(struct mmci_host *host,
			struct mmc_data *data,
			bool next)
{
	struct mmci_dmae_priv *dmae = host->dma_priv;
	struct mmci_dmae_next *nd = &dmae->next_data;

	if (!host->use_dma)
		return -EINVAL;

	if (next)
		return _mmci_dmae_prep_data(host, data, &nd->chan, &nd->desc);
	/* Check if next job is already prepared. */
	if (dmae->cur && dmae->desc_current)
		return 0;

	/* No job was prepared, thus do it now. */
	return _mmci_dmae_prep_data(host, data, &dmae->cur,
				    &dmae->desc_current);
}

int mmci_dmae_start(struct mmci_host *host, unsigned int *datactrl)
{
	struct mmci_dmae_priv *dmae = host->dma_priv;
	int ret;

	host->dma_in_progress = true;
	ret = dma_submit_error(dmaengine_submit(dmae->desc_current));
	if (ret < 0) {
		host->dma_in_progress = false;
		return ret;
	}
	dma_async_issue_pending(dmae->cur);

	*datactrl |= MCI_DPSM_DMAENABLE;

	return 0;
}

void mmci_dmae_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
	struct mmci_dmae_priv *dmae = host->dma_priv;
	struct mmci_dmae_next *next = &dmae->next_data;

	if (!host->use_dma)
		return;

	WARN_ON(!data->host_cookie && (next->desc || next->chan));

	dmae->desc_current = next->desc;
	dmae->cur = next->chan;
	next->desc = NULL;
	next->chan = NULL;
}

void mmci_dmae_unprep_data(struct mmci_host *host,
			   struct mmc_data *data, int err)
{
	struct mmci_dmae_priv *dmae = host->dma_priv;

	if (!host->use_dma)
		return;

	mmci_dma_unmap(host, data);

	if (err) {
		struct mmci_dmae_next *next = &dmae->next_data;
		struct dma_chan *chan;

		if (data->flags & MMC_DATA_READ)
			chan = dmae->rx_channel;
		else
			chan = dmae->tx_channel;
		dmaengine_terminate_all(chan);

		if (dmae->desc_current == next->desc)
			dmae->desc_current = NULL;

		if (dmae->cur == next->chan) {
			host->dma_in_progress = false;
			dmae->cur = NULL;
		}

		next->desc = NULL;
		next->chan = NULL;
	}
}

static struct mmci_host_ops mmci_variant_ops = {
	.prep_data = mmci_dmae_prep_data,
	.unprep_data = mmci_dmae_unprep_data,
	.get_datactrl_cfg = mmci_get_dctrl_cfg,
	.get_next_data = mmci_dmae_get_next_data,
	.dma_setup = mmci_dmae_setup,
	.dma_release = mmci_dmae_release,
	.dma_start = mmci_dmae_start,
	.dma_finalize = mmci_dmae_finalize,
	.dma_error = mmci_dmae_error,
};
#else
static struct mmci_host_ops mmci_variant_ops = {
	.get_datactrl_cfg = mmci_get_dctrl_cfg,
};
#endif

void mmci_variant_init(struct mmci_host *host)
{
	host->ops = &mmci_variant_ops;
}

void ux500v2_variant_init(struct mmci_host *host)
{
	host->ops = &mmci_variant_ops;
	host->ops->get_datactrl_cfg = ux500v2_get_dctrl_cfg;
}

static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!data)
		return;

	WARN_ON(data->host_cookie);

	if (mmci_validate_data(host, data))
		return;

	mmci_prep_data(host, data, true);
}

static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
			      int err)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!data || !data->host_cookie)
		return;

	mmci_unprep_data(host, data, err);
}

static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
{
	struct variant_data *variant = host->variant;
	unsigned int datactrl, timeout, irqmask;
	unsigned long long clks;
	void __iomem *base;

	dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n",
		data->blksz, data->blocks, data->flags);

	host->data = data;
	host->size = data->blksz * data->blocks;
	data->bytes_xfered = 0;

	clks = (unsigned long long)data->timeout_ns * host->cclk;
	do_div(clks, NSEC_PER_SEC);

	timeout = data->timeout_clks + (unsigned int)clks;
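
	/*
	 * Editor's note: e.g. a 100 ms timeout_ns at cclk = 26 MHz gives
	 * 2,600,000 card clock cycles, on top of whatever timeout_clks
	 * the core requested.
	 */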

	base = host->base;
	writel(timeout, base + MMCIDATATIMER);
	writel(host->size, base + MMCIDATALENGTH);

	datactrl = host->ops->get_datactrl_cfg(host);
	datactrl |= host->data->flags & MMC_DATA_READ ? MCI_DPSM_DIRECTION : 0;

	if (host->mmc->card && mmc_card_sdio(host->mmc->card)) {
		u32 clk;

		datactrl |= variant->datactrl_mask_sdio;

		/*
		 * The ST Micro variant for SDIO small write transfers
		 * needs to have clock H/W flow control disabled,
		 * otherwise the transfer will not start. The threshold
		 * depends on the rate of MCLK.
		 */
		if (variant->st_sdio && data->flags & MMC_DATA_WRITE &&
		    (host->size < 8 ||
		     (host->size <= 8 && host->mclk > 50000000)))
			clk = host->clk_reg & ~variant->clkreg_enable;
		else
			clk = host->clk_reg | variant->clkreg_enable;

		mmci_write_clkreg(host, clk);
	}

	if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50 ||
	    host->mmc->ios.timing == MMC_TIMING_MMC_DDR52)
		datactrl |= variant->datactrl_mask_ddrmode;

	/*
	 * Attempt to use DMA operation mode, if this
	 * should fail, fall back to PIO mode
	 */
	if (!mmci_dma_start(host, datactrl))
		return;

	/* IRQ mode, map the SG list for CPU reading/writing */
	mmci_init_sg(host, data);

	if (data->flags & MMC_DATA_READ) {
		irqmask = MCI_RXFIFOHALFFULLMASK;

		/*
		 * If we have less than the fifo 'half-full' threshold to
		 * transfer, trigger a PIO interrupt as soon as any data
		 * is available.
		 */
		if (host->size < variant->fifohalfsize)
			irqmask |= MCI_RXDATAAVLBLMASK;
	} else {
		/*
		 * We don't actually need to include "FIFO empty" here
		 * since it's implicit in "FIFO half empty".
		 */
		irqmask = MCI_TXFIFOHALFEMPTYMASK;
	}

	mmci_write_datactrlreg(host, datactrl);
	writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
	mmci_set_mask1(host, irqmask);
}

static void
mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
{
	void __iomem *base = host->base;

	dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n",
		cmd->opcode, cmd->arg, cmd->flags);

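	/*
	 * Editor's note: if the CPSM is still enabled from a previous
	 * command, disable it and observe the mandatory register delay
	 * before programming the new command.
	 */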
	if (readl(base + MMCICOMMAND) & host->variant->cmdreg_cpsm_enable) {
		writel(0, base + MMCICOMMAND);
		mmci_reg_delay(host);
	}

	if (host->variant->cmdreg_stop &&
	    cmd->opcode == MMC_STOP_TRANSMISSION)
		c |= host->variant->cmdreg_stop;

	c |= cmd->opcode | host->variant->cmdreg_cpsm_enable;
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136)
			c |= host->variant->cmdreg_lrsp_crc;
		else if (cmd->flags & MMC_RSP_CRC)
			c |= host->variant->cmdreg_srsp_crc;
		else
			c |= host->variant->cmdreg_srsp;
	}
	if (/*interrupt*/0)
		c |= MCI_CPSM_INTERRUPT;

	if (mmc_cmd_type(cmd) == MMC_CMD_ADTC)
		c |= host->variant->data_cmd_enable;

	host->cmd = cmd;

	writel(cmd->arg, base + MMCIARGUMENT);
	writel(c, base + MMCICOMMAND);
}

static void mmci_stop_command(struct mmci_host *host)
{
	host->stop_abort.error = 0;
	mmci_start_command(host, &host->stop_abort, 0);
}

static void
mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
	      unsigned int status)
{
	unsigned int status_err;

	/* Make sure we have data to handle */
	if (!data)
		return;

	/* First check for errors */
	status_err = status & (host->variant->start_err |
			       MCI_DATACRCFAIL | MCI_DATATIMEOUT |
			       MCI_TXUNDERRUN | MCI_RXOVERRUN);

	if (status_err) {
		u32 remain, success;

		/* Terminate the DMA transfer */
		mmci_dma_error(host);

		/*
		 * Calculate how far we are into the transfer. Note that
		 * the data counter gives the number of bytes transferred
		 * on the MMC bus, not on the host side. On reads, this
		 * can be as much as a FIFO-worth of data ahead. This
		 * matters for FIFO overruns only.
		 */
		if (!host->variant->datacnt_useless) {
			remain = readl(host->base + MMCIDATACNT);
			success = data->blksz * data->blocks - remain;
		} else {
			success = 0;
		}

		dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n",
			status_err, success);
		if (status_err & MCI_DATACRCFAIL) {
			/* Last block was not successful */
			success -= 1;
			data->error = -EILSEQ;
		} else if (status_err & MCI_DATATIMEOUT) {
			data->error = -ETIMEDOUT;
		} else if (status_err & MCI_STARTBITERR) {
			data->error = -ECOMM;
		} else if (status_err & MCI_TXUNDERRUN) {
			data->error = -EIO;
		} else if (status_err & MCI_RXOVERRUN) {
			if (success > host->variant->fifosize)
				success -= host->variant->fifosize;
			else
				success = 0;
			data->error = -EIO;
		}
		data->bytes_xfered = round_down(success, data->blksz);
	}

	if (status & MCI_DATABLOCKEND)
		dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");

	if (status & MCI_DATAEND || data->error) {
		mmci_dma_finalize(host, data);

		mmci_stop_data(host);

		if (!data->error)
			/* The error clause is handled above, success! */
			data->bytes_xfered = data->blksz * data->blocks;

		if (!data->stop) {
			if (host->variant->cmdreg_stop && data->error)
				mmci_stop_command(host);
			else
				mmci_request_end(host, data->mrq);
		} else if (host->mrq->sbc && !data->error) {
			mmci_request_end(host, data->mrq);
		} else {
			mmci_start_command(host, data->stop, 0);
		}
	}
}

static void
mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
	     unsigned int status)
{
	void __iomem *base = host->base;
	bool sbc, busy_resp;

	if (!cmd)
		return;

	sbc = (cmd == host->mrq->sbc);
	busy_resp = !!(cmd->flags & MMC_RSP_BUSY);

	/*
	 * At least one of these interrupts must be set for the command
	 * to be considered worth handling. Note that we tag on any
	 * latent IRQs postponed due to waiting for busy status.
	 */
	if (!((status|host->busy_status) &
	    (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND)))
		return;

	/* Handle busy detection on DAT0 if the variant supports it. */
	if (busy_resp && host->variant->busy_detect) {

		/*
		 * Before unmasking for the busy end IRQ, confirm that the
		 * command was sent successfully. To keep track of having a
		 * command in-progress, waiting for busy signaling to end,
		 * store the status in host->busy_status.
		 *
		 * Note that the card may need a couple of clock cycles before
		 * it starts signaling busy on DAT0, hence re-read the
		 * MMCISTATUS register here, to allow the busy bit to be set.
		 * Potentially we may even need to poll the register for a
		 * while, to allow it to be set, but tests indicate that it
		 * isn't needed.
		 */
		if (!host->busy_status &&
		    !(status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT)) &&
		    (readl(base + MMCISTATUS) & host->variant->busy_detect_flag)) {

			writel(readl(base + MMCIMASK0) |
			       host->variant->busy_detect_mask,
			       base + MMCIMASK0);

			host->busy_status =
				status & (MCI_CMDSENT|MCI_CMDRESPEND);
			return;
		}

		/*
		 * If there is a command in-progress that has been successfully
		 * sent, then bail out if busy status is set and wait for the
		 * busy end IRQ.
		 *
		 * Note that the HW triggers an IRQ on both edges while
		 * monitoring DAT0 for busy completion, but there is only one
		 * status bit in MMCISTATUS for the busy state. Therefore
		 * both the start and the end interrupts need to be cleared,
		 * one after the other. So, clear the busy start IRQ here.
		 */
		if (host->busy_status &&
		    (status & host->variant->busy_detect_flag)) {
			writel(host->variant->busy_detect_mask,
			       host->base + MMCICLEAR);
			return;
		}

		/*
		 * If there is a command in-progress that has been successfully
		 * sent and the busy bit isn't set, it means we have received
		 * the busy end IRQ. Clear and mask the IRQ, then continue to
		 * process the command.
		 */
		if (host->busy_status) {

			writel(host->variant->busy_detect_mask,
			       host->base + MMCICLEAR);

			writel(readl(base + MMCIMASK0) &
			       ~host->variant->busy_detect_mask,
			       base + MMCIMASK0);
			host->busy_status = 0;
		}
	}

	host->cmd = NULL;

	if (status & MCI_CMDTIMEOUT) {
		cmd->error = -ETIMEDOUT;
	} else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
		cmd->error = -EILSEQ;
	} else {
		cmd->resp[0] = readl(base + MMCIRESPONSE0);
		cmd->resp[1] = readl(base + MMCIRESPONSE1);
		cmd->resp[2] = readl(base + MMCIRESPONSE2);
		cmd->resp[3] = readl(base + MMCIRESPONSE3);
	}

	if ((!sbc && !cmd->data) || cmd->error) {
		if (host->data) {
			/* Terminate the DMA transfer */
			mmci_dma_error(host);

			mmci_stop_data(host);
			if (host->variant->cmdreg_stop && cmd->error) {
				mmci_stop_command(host);
				return;
			}
		}
		mmci_request_end(host, host->mrq);
	} else if (sbc) {
		mmci_start_command(host, host->mrq->cmd, 0);
	} else if (!host->variant->datactrl_first &&
		   !(cmd->data->flags & MMC_DATA_READ)) {
		mmci_start_data(host, cmd->data);
	}
}

static int mmci_get_rx_fifocnt(struct mmci_host *host, u32 status, int remain)
{
	return remain - (readl(host->base + MMCIFIFOCNT) << 2);
}

static int mmci_qcom_get_rx_fifocnt(struct mmci_host *host, u32 status, int r)
{
	/*
	 * On Qcom SDCC4, only 8 words are used in each burst, so only 8
	 * addresses from the FIFO range should be used.
	 */
	if (status & MCI_RXFIFOHALFFULL)
		return host->variant->fifohalfsize;
	else if (status & MCI_RXDATAAVLBL)
		return 4;

	return 0;
}

static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain)
{
	void __iomem *base = host->base;
	char *ptr = buffer;
	u32 status = readl(host->base + MMCISTATUS);
	int host_remain = host->size;

	do {
		int count = host->get_rx_fifocnt(host, status, host_remain);

		if (count > remain)
			count = remain;

		if (count <= 0)
			break;

		/*
		 * SDIO especially may want to send something that is
		 * not divisible by 4 (as opposed to card sectors
		 * etc). Therefore make sure to always read the last bytes
		 * while only doing full 32-bit reads towards the FIFO.
		 */
		if (unlikely(count & 0x3)) {
			if (count < 4) {
				unsigned char buf[4];
				ioread32_rep(base + MMCIFIFO, buf, 1);
				memcpy(ptr, buf, count);
			} else {
				ioread32_rep(base + MMCIFIFO, ptr, count >> 2);
				count &= ~0x3;
			}
		} else {
			ioread32_rep(base + MMCIFIFO, ptr, count >> 2);
		}

		ptr += count;
		remain -= count;
		host_remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_RXDATAAVLBL);

	return ptr - buffer;
}

static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status)
{
	struct variant_data *variant = host->variant;
	void __iomem *base = host->base;
	char *ptr = buffer;

	do {
		unsigned int count, maxcnt;

		maxcnt = status & MCI_TXFIFOEMPTY ?
			 variant->fifosize : variant->fifohalfsize;
		count = min(remain, maxcnt);

		/*
		 * SDIO especially may want to send something that is
		 * not divisible by 4 (as opposed to card sectors
		 * etc), and the FIFO only accepts full 32-bit writes.
		 * So compensate by adding +3 on the count: a single
		 * byte becomes a 32-bit write, 7 bytes will be two
		 * 32-bit writes etc.
		 */
		iowrite32_rep(base + MMCIFIFO, ptr, (count + 3) >> 2);

		ptr += count;
		remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_TXFIFOHALFEMPTY);

	return ptr - buffer;
}

/*
 * PIO data transfer IRQ handler.
 */
static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	struct variant_data *variant = host->variant;
	void __iomem *base = host->base;
	u32 status;

	status = readl(base + MMCISTATUS);

	dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status);

	do {
		unsigned int remain, len;
		char *buffer;

		/*
		 * For write, we only need to test the half-empty flag
		 * here - if the FIFO is completely empty, then by
		 * definition it is more than half empty.
		 *
		 * For read, check for data available.
		 */
		if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL)))
			break;

		if (!sg_miter_next(sg_miter))
			break;

		buffer = sg_miter->addr;
		remain = sg_miter->length;

		len = 0;
		if (status & MCI_RXACTIVE)
			len = mmci_pio_read(host, buffer, remain);
		if (status & MCI_TXACTIVE)
			len = mmci_pio_write(host, buffer, remain, status);

		sg_miter->consumed = len;

		host->size -= len;
		remain -= len;

		if (remain)
			break;

		status = readl(base + MMCISTATUS);
	} while (1);

	sg_miter_stop(sg_miter);

	/*
	 * If we have less than the fifo 'half-full' threshold to transfer,
	 * trigger a PIO interrupt as soon as any data is available.
	 */
	if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize)
		mmci_set_mask1(host, MCI_RXDATAAVLBLMASK);

	/*
	 * If we run out of data, disable the data IRQs; this
	 * prevents a race where the FIFO becomes empty before
	 * the chip itself has disabled the data path, and
	 * stops us racing with our data end IRQ.
	 */
	if (host->size == 0) {
		mmci_set_mask1(host, 0);
		writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
	}

	return IRQ_HANDLED;
}

/*
 * Handle completion of command and data transfers.
 */
static irqreturn_t mmci_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	u32 status;
	int ret = 0;

	spin_lock(&host->lock);

	do {
		status = readl(host->base + MMCISTATUS);

		if (host->singleirq) {
			if (status & host->mask1_reg)
				mmci_pio_irq(irq, dev_id);

			status &= ~host->variant->irq_pio_mask;
		}

		/*
		 * Busy detection is managed by mmci_cmd_irq(), including to
		 * clear the corresponding IRQ.
		 */
		status &= readl(host->base + MMCIMASK0);
		if (host->variant->busy_detect)
			writel(status & ~host->variant->busy_detect_mask,
			       host->base + MMCICLEAR);
		else
			writel(status, host->base + MMCICLEAR);

		dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);

		if (host->variant->reversed_irq_handling) {
			mmci_data_irq(host, host->data, status);
			mmci_cmd_irq(host, host->cmd, status);
		} else {
			mmci_cmd_irq(host, host->cmd, status);
			mmci_data_irq(host, host->data, status);
		}

		/*
		 * Busy detection has been handled by mmci_cmd_irq() above.
		 * Clear the status bit to prevent polling in IRQ context.
		 */
		if (host->variant->busy_detect_flag)
			status &= ~host->variant->busy_detect_flag;

		ret = 1;
	} while (status);

	spin_unlock(&host->lock);

	return IRQ_RETVAL(ret);
}

static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmci_host *host = mmc_priv(mmc);
	unsigned long flags;

	WARN_ON(host->mrq != NULL);

	mrq->cmd->error = mmci_validate_data(host, mrq->data);
	if (mrq->cmd->error) {
		mmc_request_done(mmc, mrq);
		return;
	}

	spin_lock_irqsave(&host->lock, flags);

	host->mrq = mrq;

	if (mrq->data)
		mmci_get_next_data(host, mrq->data);

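	/*
	 * Editor's note: reads always program the data path before the
	 * command; writes do too on variants that require DATACTRL to
	 * be written first (datactrl_first).
	 */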
1600 if (mrq->data &&
1601 (host->variant->datactrl_first || mrq->data->flags & MMC_DATA_READ))
1602 mmci_start_data(host, mrq->data);
1603
1604 if (mrq->sbc)
1605 mmci_start_command(host, mrq->sbc, 0);
1606 else
1607 mmci_start_command(host, mrq->cmd, 0);
1608
1609 spin_unlock_irqrestore(&host->lock, flags);
1610}
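/*
 * Note on the ordering in mmci_request(): for reads (and on variants
 * flagged datactrl_first) the data path is armed before the command is
 * sent, so the DPSM is ready when the card starts returning data. For
 * writes, the data transfer is started later, once the command has
 * completed.
 */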
1611
1612static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1613{
1614 struct mmci_host *host = mmc_priv(mmc);
1615 struct variant_data *variant = host->variant;
1616 u32 pwr = 0;
1617 unsigned long flags;
1618 int ret;
1619
1620 if (host->plat->ios_handler &&
1621 host->plat->ios_handler(mmc_dev(mmc), ios))
1622 dev_err(mmc_dev(mmc), "platform ios_handler failed\n");
1623
1624 switch (ios->power_mode) {
1625 case MMC_POWER_OFF:
1626 if (!IS_ERR(mmc->supply.vmmc))
1627 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
1628
1629 if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) {
1630 regulator_disable(mmc->supply.vqmmc);
1631 host->vqmmc_enabled = false;
1632 }
1633
1634 break;
1635 case MMC_POWER_UP:
1636 if (!IS_ERR(mmc->supply.vmmc))
1637 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
1638
1639 /*
1640 * The ST Micro variant doesn't have the PL180's MCI_PWR_UP
1641 * and instead uses MCI_PWR_ON, so apply whatever value is
1642 * configured in the variant data.
1643 */
1644 pwr |= variant->pwrreg_powerup;
1645
1646 break;
1647 case MMC_POWER_ON:
1648 if (!IS_ERR(mmc->supply.vqmmc) && !host->vqmmc_enabled) {
1649 ret = regulator_enable(mmc->supply.vqmmc);
1650 if (ret < 0)
1651 dev_err(mmc_dev(mmc),
1652 "failed to enable vqmmc regulator\n");
1653 else
1654 host->vqmmc_enabled = true;
1655 }
1656
1657 pwr |= MCI_PWR_ON;
1658 break;
1659 }
1660
1661 if (variant->signal_direction && ios->power_mode != MMC_POWER_OFF) {
1662 /*
1663 * The ST Micro variant has some additional bits
1664 * indicating signal direction for the signals in
1665 * the SD/MMC bus and feedback-clock usage.
1666 */
1667 pwr |= host->pwr_reg_add;
1668
1669 if (ios->bus_width == MMC_BUS_WIDTH_4)
1670 pwr &= ~MCI_ST_DATA74DIREN;
1671 else if (ios->bus_width == MMC_BUS_WIDTH_1)
1672 pwr &= (~MCI_ST_DATA74DIREN &
1673 ~MCI_ST_DATA31DIREN &
1674 ~MCI_ST_DATA2DIREN);
1675 }
1676
1677 if (variant->opendrain) {
1678 if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
1679 pwr |= variant->opendrain;
1680 } else {
1681 /*
1682 * If the variant cannot configure the pads on its own, we
1683 * expect pinctrl to be able to do that for us.
1684 */
1685 if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
1686 pinctrl_select_state(host->pinctrl, host->pins_opendrain);
1687 else
1688 pinctrl_select_state(host->pinctrl, host->pins_default);
1689 }
1690
1691 /*
1692 * If clock = 0 and the variant requires the MMCIPOWER to be used for
1693 * gating the clock, the MCI_PWR_ON bit is cleared.
1694 */
1695 if (!ios->clock && variant->pwrreg_clkgate)
1696 pwr &= ~MCI_PWR_ON;
1697
1698 if (host->variant->explicit_mclk_control &&
1699 ios->clock != host->clock_cache) {
1700 ret = clk_set_rate(host->clk, ios->clock);
1701 if (ret < 0)
1702 dev_err(mmc_dev(host->mmc),
1703 "Error setting clock rate (%d)\n", ret);
1704 else
1705 host->mclk = clk_get_rate(host->clk);
1706 }
1707 host->clock_cache = ios->clock;
1708
1709 spin_lock_irqsave(&host->lock, flags);
1710
1711 if (host->ops && host->ops->set_clkreg)
1712 host->ops->set_clkreg(host, ios->clock);
1713 else
1714 mmci_set_clkreg(host, ios->clock);
1715
1716 if (host->ops && host->ops->set_pwrreg)
1717 host->ops->set_pwrreg(host, pwr);
1718 else
1719 mmci_write_pwrreg(host, pwr);
1720
1721 mmci_reg_delay(host);
1722
1723 spin_unlock_irqrestore(&host->lock, flags);
1724}
1725
1726static int mmci_get_cd(struct mmc_host *mmc)
1727{
1728 struct mmci_host *host = mmc_priv(mmc);
1729 struct mmci_platform_data *plat = host->plat;
1730 unsigned int status = mmc_gpio_get_cd(mmc);
1731
1732 if (status == -ENOSYS) {
1733 if (!plat->status)
1734 return 1; /* Assume always present */
1735
1736 status = plat->status(mmc_dev(host->mmc));
1737 }
1738 return status;
1739}
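/*
 * A hypothetical board file using the legacy plat->status hook queried
 * above, for platforms without a card-detect GPIO descriptor (the names
 * and polarity here are made up for illustration):
 *
 *	static unsigned int myboard_mmc_status(struct device *dev)
 *	{
 *		return gpio_get_value(MYBOARD_CD_GPIO) ? 0 : 1;
 *	}
 *
 *	static struct mmci_platform_data myboard_mmc_data = {
 *		.status	= myboard_mmc_status,
 *	};
 */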
1740
1741static int mmci_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios)
1742{
1743 int ret = 0;
1744
1745 if (!IS_ERR(mmc->supply.vqmmc)) {
1746
1747 switch (ios->signal_voltage) {
1748 case MMC_SIGNAL_VOLTAGE_330:
1749 ret = regulator_set_voltage(mmc->supply.vqmmc,
1750 2700000, 3600000);
1751 break;
1752 case MMC_SIGNAL_VOLTAGE_180:
1753 ret = regulator_set_voltage(mmc->supply.vqmmc,
1754 1700000, 1950000);
1755 break;
1756 case MMC_SIGNAL_VOLTAGE_120:
1757 ret = regulator_set_voltage(mmc->supply.vqmmc,
1758 1100000, 1300000);
1759 break;
1760 }
1761
1762 if (ret)
1763 dev_warn(mmc_dev(mmc), "Voltage switch failed\n");
1764 }
1765
1766 return ret;
1767}
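/*
 * The regulator windows requested above are wide (e.g. 2.7-3.6 V for
 * nominal 3.3 V signalling), which lets a supply with a nearby fixed
 * voltage still satisfy the switch request.
 */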
1768
1769static struct mmc_host_ops mmci_ops = {
1770 .request = mmci_request,
1771 .pre_req = mmci_pre_request,
1772 .post_req = mmci_post_request,
1773 .set_ios = mmci_set_ios,
1774 .get_ro = mmc_gpio_get_ro,
1775 .get_cd = mmci_get_cd,
1776 .start_signal_voltage_switch = mmci_sig_volt_switch,
1777};
1778
1779static int mmci_of_parse(struct device_node *np, struct mmc_host *mmc)
1780{
1781 struct mmci_host *host = mmc_priv(mmc);
1782 int ret = mmc_of_parse(mmc);
1783
1784 if (ret)
1785 return ret;
1786
1787 if (of_get_property(np, "st,sig-dir-dat0", NULL))
1788 host->pwr_reg_add |= MCI_ST_DATA0DIREN;
1789 if (of_get_property(np, "st,sig-dir-dat2", NULL))
1790 host->pwr_reg_add |= MCI_ST_DATA2DIREN;
1791 if (of_get_property(np, "st,sig-dir-dat31", NULL))
1792 host->pwr_reg_add |= MCI_ST_DATA31DIREN;
1793 if (of_get_property(np, "st,sig-dir-dat74", NULL))
1794 host->pwr_reg_add |= MCI_ST_DATA74DIREN;
1795 if (of_get_property(np, "st,sig-dir-cmd", NULL))
1796 host->pwr_reg_add |= MCI_ST_CMDDIREN;
1797 if (of_get_property(np, "st,sig-pin-fbclk", NULL))
1798 host->pwr_reg_add |= MCI_ST_FBCLKEN;
1799 if (of_get_property(np, "st,sig-dir", NULL))
1800 host->pwr_reg_add |= MCI_STM32_DIRPOL;
1801 if (of_get_property(np, "st,neg-edge", NULL))
1802 host->clk_reg_add |= MCI_STM32_CLK_NEGEDGE;
1803 if (of_get_property(np, "st,use-ckin", NULL))
1804 host->clk_reg_add |= MCI_STM32_CLK_SELCKIN;
1805
1806 if (of_get_property(np, "mmc-cap-mmc-highspeed", NULL))
1807 mmc->caps |= MMC_CAP_MMC_HIGHSPEED;
1808 if (of_get_property(np, "mmc-cap-sd-highspeed", NULL))
1809 mmc->caps |= MMC_CAP_SD_HIGHSPEED;
1810
1811 return 0;
1812}
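/*
 * An illustrative device tree fragment (not taken from any real board)
 * exercising the ST-specific properties parsed above:
 *
 *	sdi0: sdi@80126000 {
 *		compatible = "arm,pl18x", "arm,primecell";
 *		st,sig-dir-dat0;
 *		st,sig-dir-cmd;
 *		st,sig-pin-fbclk;
 *		mmc-cap-mmc-highspeed;
 *	};
 */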
1813
1814static int mmci_probe(struct amba_device *dev,
1815 const struct amba_id *id)
1816{
1817 struct mmci_platform_data *plat = dev->dev.platform_data;
1818 struct device_node *np = dev->dev.of_node;
1819 struct variant_data *variant = id->data;
1820 struct mmci_host *host;
1821 struct mmc_host *mmc;
1822 int ret;
1823
1824 /* Must have platform data or Device Tree. */
1825 if (!plat && !np) {
1826 dev_err(&dev->dev, "No plat data or DT found\n");
1827 return -EINVAL;
1828 }
1829
1830 if (!plat) {
1831 plat = devm_kzalloc(&dev->dev, sizeof(*plat), GFP_KERNEL);
1832 if (!plat)
1833 return -ENOMEM;
1834 }
1835
1836 mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
1837 if (!mmc)
1838 return -ENOMEM;
1839
1840 ret = mmci_of_parse(np, mmc);
1841 if (ret)
1842 goto host_free;
1843
1844 host = mmc_priv(mmc);
1845 host->mmc = mmc;
1846
1847 /*
1848 * Some variants (STM32) don't have an opendrain bit; nevertheless
1849 * the pins can be set accordingly using pinctrl.
1850 */
1851 if (!variant->opendrain) {
1852 host->pinctrl = devm_pinctrl_get(&dev->dev);
1853 if (IS_ERR(host->pinctrl)) {
1854 dev_err(&dev->dev, "failed to get pinctrl");
1855 ret = PTR_ERR(host->pinctrl);
1856 goto host_free;
1857 }
1858
1859 host->pins_default = pinctrl_lookup_state(host->pinctrl,
1860 PINCTRL_STATE_DEFAULT);
1861 if (IS_ERR(host->pins_default)) {
1862 dev_err(mmc_dev(mmc), "Can't select default pins\n");
1863 ret = PTR_ERR(host->pins_default);
1864 goto host_free;
1865 }
1866
1867 host->pins_opendrain = pinctrl_lookup_state(host->pinctrl,
1868 MMCI_PINCTRL_STATE_OPENDRAIN);
1869 if (IS_ERR(host->pins_opendrain)) {
1870 dev_err(mmc_dev(mmc), "Can't select opendrain pins\n");
1871 ret = PTR_ERR(host->pins_opendrain);
1872 goto host_free;
1873 }
1874 }
1875
1876 host->hw_designer = amba_manf(dev);
1877 host->hw_revision = amba_rev(dev);
1878 dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer);
1879 dev_dbg(mmc_dev(mmc), "revision = 0x%01x\n", host->hw_revision);
1880
1881 host->clk = devm_clk_get(&dev->dev, NULL);
1882 if (IS_ERR(host->clk)) {
1883 ret = PTR_ERR(host->clk);
1884 goto host_free;
1885 }
1886
1887 ret = clk_prepare_enable(host->clk);
1888 if (ret)
1889 goto host_free;
1890
1891 if (variant->qcom_fifo)
1892 host->get_rx_fifocnt = mmci_qcom_get_rx_fifocnt;
1893 else
1894 host->get_rx_fifocnt = mmci_get_rx_fifocnt;
1895
1896 host->plat = plat;
1897 host->variant = variant;
1898 host->mclk = clk_get_rate(host->clk);
1899 /*
1900 * According to the spec, mclk is max 100 MHz,
1901 * so we try to adjust the clock down to this
1902 * (if possible).
1903 */
1904 if (host->mclk > variant->f_max) {
1905 ret = clk_set_rate(host->clk, variant->f_max);
1906 if (ret < 0)
1907 goto clk_disable;
1908 host->mclk = clk_get_rate(host->clk);
1909 dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n",
1910 host->mclk);
1911 }
1912
1913 host->phybase = dev->res.start;
1914 host->base = devm_ioremap_resource(&dev->dev, &dev->res);
1915 if (IS_ERR(host->base)) {
1916 ret = PTR_ERR(host->base);
1917 goto clk_disable;
1918 }
1919
1920 if (variant->init)
1921 variant->init(host);
1922
1923 /*
1924 * The ARM and ST versions of the block have slightly different
1925 * clock divider equations, which means that the minimum divider
1926 * differs too.
1927 * On Qualcomm-like controllers, get the nearest minimum clock to 100 kHz.
1928 */
1929 if (variant->st_clkdiv)
1930 mmc->f_min = DIV_ROUND_UP(host->mclk, 257);
1931 else if (variant->stm32_clkdiv)
1932 mmc->f_min = DIV_ROUND_UP(host->mclk, 2046);
1933 else if (variant->explicit_mclk_control)
1934 mmc->f_min = clk_round_rate(host->clk, 100000);
1935 else
1936 mmc->f_min = DIV_ROUND_UP(host->mclk, 512);
1937 /*
1938 * If no maximum operating frequency is supplied, fall back to using
1939 * the module parameter, which has a (low) default value in case it
1940 * is not specified. Either value must not exceed the clock rate into
1941 * the block, of course.
1942 */
1943 if (mmc->f_max)
1944 mmc->f_max = variant->explicit_mclk_control ?
1945 min(variant->f_max, mmc->f_max) :
1946 min(host->mclk, mmc->f_max);
1947 else
1948 mmc->f_max = variant->explicit_mclk_control ?
1949 fmax : min(host->mclk, fmax);
1950
1951
1952 dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);
1953
1954 host->rst = devm_reset_control_get_optional_exclusive(&dev->dev, NULL);
1955 if (IS_ERR(host->rst)) {
1956 ret = PTR_ERR(host->rst);
1957 goto clk_disable;
1958 }
1959
1960 /* Get regulators and the supported OCR mask */
1961 ret = mmc_regulator_get_supply(mmc);
1962 if (ret)
1963 goto clk_disable;
1964
1965 if (!mmc->ocr_avail)
1966 mmc->ocr_avail = plat->ocr_mask;
1967 else if (plat->ocr_mask)
1968 dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");
1969
1970 /* We support these capabilities. */
1971 mmc->caps |= MMC_CAP_CMD23;
1972
1973 /*
1974 * Enable busy detection.
1975 */
1976 if (variant->busy_detect) {
1977 mmci_ops.card_busy = mmci_card_busy;
1978 /*
1979 * Not all variants have a flag to enable busy detection
1980 * in the DPSM, but if they do, set it here.
1981 */
1982 if (variant->busy_dpsm_flag)
1983 mmci_write_datactrlreg(host,
1984 host->variant->busy_dpsm_flag);
1985 mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
1986 mmc->max_busy_timeout = 0;
1987 }
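	/*
	 * A max_busy_timeout of 0 tells the core there is no upper bound
	 * on how long this host can wait out a card signalling busy, so
	 * R1B commands need not be downgraded to R1.
	 */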
1988
1989 /* Prepare a CMD12 - needed to clear the DPSM on some variants. */
1990 host->stop_abort.opcode = MMC_STOP_TRANSMISSION;
1991 host->stop_abort.arg = 0;
1992 host->stop_abort.flags = MMC_RSP_R1B | MMC_CMD_AC;
1993
1994 mmc->ops = &mmci_ops;
1995
1996 /* We support these PM capabilities. */
1997 mmc->pm_caps |= MMC_PM_KEEP_POWER;
1998
1999 /*
2000 * We can do SGIO
2001 */
2002 mmc->max_segs = NR_SG;
2003
2004 /*
2005 * Since only a certain number of bits are valid in the data length
2006 * register, we must ensure that we don't exceed 2^num-1 bytes in a
2007 * single request.
2008 */
2009 mmc->max_req_size = (1 << variant->datalength_bits) - 1;
2010
2011 /*
2012 * Set the maximum segment size. Since we aren't doing DMA
2013 * (yet) we are only limited by the data length register.
2014 */
2015 mmc->max_seg_size = mmc->max_req_size;
2016
2017 /*
2018 * Block size can be up to 2048 bytes, but must be a power of two.
2019 */
2020 mmc->max_blk_size = 1 << variant->datactrl_blocksz;
2021
2022 /*
2023 * Limit the number of blocks transferred so that we don't overflow
2024 * the maximum request size.
2025 */
2026 mmc->max_blk_count = mmc->max_req_size >> variant->datactrl_blocksz;
2027
2028 spin_lock_init(&host->lock);
2029
2030 writel(0, host->base + MMCIMASK0);
2031
2032 if (variant->mmcimask1)
2033 writel(0, host->base + MMCIMASK1);
2034
2035 writel(0xfff, host->base + MMCICLEAR);
2036
2037 /*
2038 * If:
2039 * - not using DT but using a descriptor table, or
2040 * - using a table of descriptors ALONGSIDE DT,
2041 * look up the descriptors named "cd" and "wp" right here and fail
2042 * silently if these do not exist
2043 */
2044 if (!np) {
2045 ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL);
2046 if (ret == -EPROBE_DEFER)
2047 goto clk_disable;
2048
2049 ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0, NULL);
2050 if (ret == -EPROBE_DEFER)
2051 goto clk_disable;
2052 }
2053
2054 ret = devm_request_irq(&dev->dev, dev->irq[0], mmci_irq, IRQF_SHARED,
2055 DRIVER_NAME " (cmd)", host);
2056 if (ret)
2057 goto clk_disable;
2058
2059 if (!dev->irq[1])
2060 host->singleirq = true;
2061 else {
2062 ret = devm_request_irq(&dev->dev, dev->irq[1], mmci_pio_irq,
2063 IRQF_SHARED, DRIVER_NAME " (pio)", host);
2064 if (ret)
2065 goto clk_disable;
2066 }
2067
2068 writel(MCI_IRQENABLE | variant->start_err, host->base + MMCIMASK0);
2069
2070 amba_set_drvdata(dev, mmc);
2071
2072 dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n",
2073 mmc_hostname(mmc), amba_part(dev), amba_manf(dev),
2074 amba_rev(dev), (unsigned long long)dev->res.start,
2075 dev->irq[0], dev->irq[1]);
2076
2077 mmci_dma_setup(host);
2078
2079 pm_runtime_set_autosuspend_delay(&dev->dev, 50);
2080 pm_runtime_use_autosuspend(&dev->dev);
2081
2082 mmc_add_host(mmc);
2083
2084 pm_runtime_put(&dev->dev);
2085 return 0;
2086
2087 clk_disable:
2088 clk_disable_unprepare(host->clk);
2089 host_free:
2090 mmc_free_host(mmc);
2091 return ret;
2092}
2093
2094static int mmci_remove(struct amba_device *dev)
2095{
2096 struct mmc_host *mmc = amba_get_drvdata(dev);
2097
2098 if (mmc) {
2099 struct mmci_host *host = mmc_priv(mmc);
2100 struct variant_data *variant = host->variant;
2101
2102 /*
2103 * Undo pm_runtime_put() in probe. We use the _sync
2104 * version here so that we can access the primecell.
2105 */
2106 pm_runtime_get_sync(&dev->dev);
2107
2108 mmc_remove_host(mmc);
2109
2110 writel(0, host->base + MMCIMASK0);
2111
2112 if (variant->mmcimask1)
2113 writel(0, host->base + MMCIMASK1);
2114
2115 writel(0, host->base + MMCICOMMAND);
2116 writel(0, host->base + MMCIDATACTRL);
2117
2118 mmci_dma_release(host);
2119 clk_disable_unprepare(host->clk);
2120 mmc_free_host(mmc);
2121 }
2122
2123 return 0;
2124}
2125
2126#ifdef CONFIG_PM
2127static void mmci_save(struct mmci_host *host)
2128{
2129 unsigned long flags;
2130
2131 spin_lock_irqsave(&host->lock, flags);
2132
2133 writel(0, host->base + MMCIMASK0);
2134 if (host->variant->pwrreg_nopower) {
2135 writel(0, host->base + MMCIDATACTRL);
2136 writel(0, host->base + MMCIPOWER);
2137 writel(0, host->base + MMCICLOCK);
2138 }
2139 mmci_reg_delay(host);
2140
2141 spin_unlock_irqrestore(&host->lock, flags);
2142}
2143
2144static void mmci_restore(struct mmci_host *host)
2145{
2146 unsigned long flags;
2147
2148 spin_lock_irqsave(&host->lock, flags);
2149
2150 if (host->variant->pwrreg_nopower) {
2151 writel(host->clk_reg, host->base + MMCICLOCK);
2152 writel(host->datactrl_reg, host->base + MMCIDATACTRL);
2153 writel(host->pwr_reg, host->base + MMCIPOWER);
2154 }
2155 writel(MCI_IRQENABLE | host->variant->start_err,
2156 host->base + MMCIMASK0);
2157 mmci_reg_delay(host);
2158
2159 spin_unlock_irqrestore(&host->lock, flags);
2160}
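/*
 * mmci_save() masks all interrupts and, on pwrreg_nopower variants,
 * zeroes the datactrl, power and clock registers before the clock is
 * gated; mmci_restore() writes back the cached host->clk_reg,
 * host->datactrl_reg and host->pwr_reg copies and re-enables the IRQ
 * mask, so register state survives a runtime power cycle.
 */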
2161
2162static int mmci_runtime_suspend(struct device *dev)
2163{
2164 struct amba_device *adev = to_amba_device(dev);
2165 struct mmc_host *mmc = amba_get_drvdata(adev);
2166
2167 if (mmc) {
2168 struct mmci_host *host = mmc_priv(mmc);
2169 pinctrl_pm_select_sleep_state(dev);
2170 mmci_save(host);
2171 clk_disable_unprepare(host->clk);
2172 }
2173
2174 return 0;
2175}
2176
2177static int mmci_runtime_resume(struct device *dev)
2178{
2179 struct amba_device *adev = to_amba_device(dev);
2180 struct mmc_host *mmc = amba_get_drvdata(adev);
2181
2182 if (mmc) {
2183 struct mmci_host *host = mmc_priv(mmc);
2184 clk_prepare_enable(host->clk);
2185 mmci_restore(host);
2186 pinctrl_pm_select_default_state(dev);
2187 }
2188
2189 return 0;
2190}
2191#endif
2192
2193static const struct dev_pm_ops mmci_dev_pm_ops = {
2194 SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
2195 pm_runtime_force_resume)
2196 SET_RUNTIME_PM_OPS(mmci_runtime_suspend, mmci_runtime_resume, NULL)
2197};
2198
2199static const struct amba_id mmci_ids[] = {
2200 {
2201 .id = 0x00041180,
2202 .mask = 0xff0fffff,
2203 .data = &variant_arm,
2204 },
2205 {
2206 .id = 0x01041180,
2207 .mask = 0xff0fffff,
2208 .data = &variant_arm_extended_fifo,
2209 },
2210 {
2211 .id = 0x02041180,
2212 .mask = 0xff0fffff,
2213 .data = &variant_arm_extended_fifo_hwfc,
2214 },
2215 {
2216 .id = 0x00041181,
2217 .mask = 0x000fffff,
2218 .data = &variant_arm,
2219 },
2220 /* ST Micro variants */
2221 {
2222 .id = 0x00180180,
2223 .mask = 0x00ffffff,
2224 .data = &variant_u300,
2225 },
2226 {
2227 .id = 0x10180180,
2228 .mask = 0xf0ffffff,
2229 .data = &variant_nomadik,
2230 },
2231 {
2232 .id = 0x00280180,
2233 .mask = 0x00ffffff,
2234 .data = &variant_nomadik,
2235 },
2236 {
2237 .id = 0x00480180,
2238 .mask = 0xf0ffffff,
2239 .data = &variant_ux500,
2240 },
2241 {
2242 .id = 0x10480180,
2243 .mask = 0xf0ffffff,
2244 .data = &variant_ux500v2,
2245 },
2246 {
2247 .id = 0x00880180,
2248 .mask = 0x00ffffff,
2249 .data = &variant_stm32,
2250 },
2251 {
2252 .id = 0x10153180,
2253 .mask = 0xf0ffffff,
2254 .data = &variant_stm32_sdmmc,
2255 },
2256 /* Qualcomm variants */
2257 {
2258 .id = 0x00051180,
2259 .mask = 0x000fffff,
2260 .data = &variant_qcom,
2261 },
2262 { 0, 0 },
2263};
2264
2265MODULE_DEVICE_TABLE(amba, mmci_ids);
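/*
 * AMBA matching: a device binds to the first entry above for which
 * (periphid & mask) == id, where periphid is read from the PrimeCell
 * ID registers. The ST Micro and Qualcomm entries rely on the designer
 * bits of the ID to tell their derivatives apart from the plain ARM
 * PL180/PL181.
 */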
2266
2267static struct amba_driver mmci_driver = {
2268 .drv = {
2269 .name = DRIVER_NAME,
2270 .pm = &mmci_dev_pm_ops,
2271 },
2272 .probe = mmci_probe,
2273 .remove = mmci_remove,
2274 .id_table = mmci_ids,
2275};
2276
2277module_amba_driver(mmci_driver);
2278
2279module_param(fmax, uint, 0444);
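/*
 * fmax can be overridden at load time, e.g. "modprobe <module> fmax=26000000",
 * or as "<module>.fmax=26000000" on the kernel command line for a built-in
 * driver; the module name depends on the Makefile, and 26000000 is an
 * arbitrary example rate.
 */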
2280
2281MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver");
2282MODULE_LICENSE("GPL");