/*
 * linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
 *
 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * Thanks to the following companies for their support:
 *
 *     - JMicron (hardware and technical support)
 */

#include <linux/delay.h>
#include <linux/ktime.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/sizes.h>
#include <linux/swiotlb.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>

#include <linux/leds.h>

#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/slot-gpio.h>

#include "sdhci.h"

#define DRIVER_NAME "sdhci"

#define DBG(f, x...) \
	pr_debug("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)

#define SDHCI_DUMP(f, x...) \
	pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)

#define MAX_TUNING_LOOP 40

static unsigned int debug_quirks = 0;
static unsigned int debug_quirks2;

static void sdhci_finish_data(struct sdhci_host *);

static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);

void sdhci_dumpregs(struct sdhci_host *host)
{
	SDHCI_DUMP("============ SDHCI REGISTER DUMP ===========\n");

	SDHCI_DUMP("Sys addr: 0x%08x | Version: 0x%08x\n",
		   sdhci_readl(host, SDHCI_DMA_ADDRESS),
		   sdhci_readw(host, SDHCI_HOST_VERSION));
	SDHCI_DUMP("Blk size: 0x%08x | Blk cnt: 0x%08x\n",
		   sdhci_readw(host, SDHCI_BLOCK_SIZE),
		   sdhci_readw(host, SDHCI_BLOCK_COUNT));
	SDHCI_DUMP("Argument: 0x%08x | Trn mode: 0x%08x\n",
		   sdhci_readl(host, SDHCI_ARGUMENT),
		   sdhci_readw(host, SDHCI_TRANSFER_MODE));
	SDHCI_DUMP("Present: 0x%08x | Host ctl: 0x%08x\n",
		   sdhci_readl(host, SDHCI_PRESENT_STATE),
		   sdhci_readb(host, SDHCI_HOST_CONTROL));
	SDHCI_DUMP("Power: 0x%08x | Blk gap: 0x%08x\n",
		   sdhci_readb(host, SDHCI_POWER_CONTROL),
		   sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
	SDHCI_DUMP("Wake-up: 0x%08x | Clock: 0x%08x\n",
		   sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
		   sdhci_readw(host, SDHCI_CLOCK_CONTROL));
	SDHCI_DUMP("Timeout: 0x%08x | Int stat: 0x%08x\n",
		   sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
		   sdhci_readl(host, SDHCI_INT_STATUS));
	SDHCI_DUMP("Int enab: 0x%08x | Sig enab: 0x%08x\n",
		   sdhci_readl(host, SDHCI_INT_ENABLE),
		   sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
	SDHCI_DUMP("AC12 err: 0x%08x | Slot int: 0x%08x\n",
		   sdhci_readw(host, SDHCI_ACMD12_ERR),
		   sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
	SDHCI_DUMP("Caps: 0x%08x | Caps_1: 0x%08x\n",
		   sdhci_readl(host, SDHCI_CAPABILITIES),
		   sdhci_readl(host, SDHCI_CAPABILITIES_1));
	SDHCI_DUMP("Cmd: 0x%08x | Max curr: 0x%08x\n",
		   sdhci_readw(host, SDHCI_COMMAND),
		   sdhci_readl(host, SDHCI_MAX_CURRENT));
	SDHCI_DUMP("Resp[0]: 0x%08x | Resp[1]: 0x%08x\n",
		   sdhci_readl(host, SDHCI_RESPONSE),
		   sdhci_readl(host, SDHCI_RESPONSE + 4));
	SDHCI_DUMP("Resp[2]: 0x%08x | Resp[3]: 0x%08x\n",
		   sdhci_readl(host, SDHCI_RESPONSE + 8),
		   sdhci_readl(host, SDHCI_RESPONSE + 12));
	SDHCI_DUMP("Host ctl2: 0x%08x\n",
		   sdhci_readw(host, SDHCI_HOST_CONTROL2));

	if (host->flags & SDHCI_USE_ADMA) {
		if (host->flags & SDHCI_USE_64_BIT_DMA) {
			SDHCI_DUMP("ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n",
				   sdhci_readl(host, SDHCI_ADMA_ERROR),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS_HI),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS));
		} else {
			SDHCI_DUMP("ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
				   sdhci_readl(host, SDHCI_ADMA_ERROR),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS));
		}
	}

	SDHCI_DUMP("============================================\n");
}
EXPORT_SYMBOL_GPL(sdhci_dumpregs);
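
/*
 * Illustrative usage sketch (not code from this file): error paths report
 * the failure and then call sdhci_dumpregs(), in the same pattern the
 * reset and clock-stabilisation timeouts below use, e.g.
 *
 *	pr_err("%s: Controller never released inhibit bit(s).\n",
 *	       mmc_hostname(host->mmc));
 *	sdhci_dumpregs(host);
 *
 * Vendor glue drivers built on this core can do the same from their own
 * failure handlers.
 */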

/*****************************************************************************\
 *                                                                           *
 * Low level functions                                                       *
 *                                                                           *
\*****************************************************************************/

static inline bool sdhci_data_line_cmd(struct mmc_command *cmd)
{
	return cmd->data || cmd->flags & MMC_RSP_BUSY;
}

static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
{
	u32 present;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
	    !mmc_card_is_removable(host->mmc))
		return;

	if (enable) {
		present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
				      SDHCI_CARD_PRESENT;

		host->ier |= present ? SDHCI_INT_CARD_REMOVE :
				       SDHCI_INT_CARD_INSERT;
	} else {
		host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
	}

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_enable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, true);
}

static void sdhci_disable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, false);
}

static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
{
	if (host->bus_on)
		return;
	host->bus_on = true;
	pm_runtime_get_noresume(host->mmc->parent);
}

static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
{
	if (!host->bus_on)
		return;
	host->bus_on = false;
	pm_runtime_put_noidle(host->mmc->parent);
}

void sdhci_reset(struct sdhci_host *host, u8 mask)
{
	ktime_t timeout;

	sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);

	if (mask & SDHCI_RESET_ALL) {
		host->clock = 0;
		/* Reset-all turns off SD Bus Power */
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
	}

	/* Wait max 100 ms */
	timeout = ktime_add_ms(ktime_get(), 100);

	/* hw clears the bit when it's done */
	while (1) {
		bool timedout = ktime_after(ktime_get(), timeout);

		if (!(sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask))
			break;
		if (timedout) {
			pr_err("%s: Reset 0x%x never completed.\n",
			       mmc_hostname(host->mmc), (int)mask);
			sdhci_dumpregs(host);
			return;
		}
		udelay(10);
	}
}
EXPORT_SYMBOL_GPL(sdhci_reset);
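
/*
 * Sketch of the expected wiring (an assumption, not shown in this file):
 * platform drivers normally plug sdhci_reset(), or a wrapper around it,
 * into their struct sdhci_ops, which is what sdhci_do_reset() below
 * dispatches to via host->ops->reset():
 *
 *	static const struct sdhci_ops my_sdhci_ops = {
 *		.reset = sdhci_reset,
 *	};
 *
 * "my_sdhci_ops" is a hypothetical name used only for illustration.
 */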

static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
{
	if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
		struct mmc_host *mmc = host->mmc;

		if (!mmc->ops->get_cd(mmc))
			return;
	}

	host->ops->reset(host, mask);

	if (mask & SDHCI_RESET_ALL) {
		if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
			if (host->ops->enable_dma)
				host->ops->enable_dma(host);
		}

		/* Resetting the controller clears many settings */
		host->preset_enabled = false;
	}
}

static void sdhci_set_default_irqs(struct sdhci_host *host)
{
	host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
		    SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
		    SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
		    SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
		    SDHCI_INT_RESPONSE;

	if (host->tuning_mode == SDHCI_TUNING_MODE_2 ||
	    host->tuning_mode == SDHCI_TUNING_MODE_3)
		host->ier |= SDHCI_INT_RETUNE;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_init(struct sdhci_host *host, int soft)
{
	struct mmc_host *mmc = host->mmc;

	if (soft)
		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
	else
		sdhci_do_reset(host, SDHCI_RESET_ALL);

	sdhci_set_default_irqs(host);

	host->cqe_on = false;

	if (soft) {
		/* force clock reconfiguration */
		host->clock = 0;
		mmc->ops->set_ios(mmc, &mmc->ios);
	}
}

static void sdhci_reinit(struct sdhci_host *host)
{
	sdhci_init(host, 0);
	sdhci_enable_card_detection(host);
}

static void __sdhci_led_activate(struct sdhci_host *host)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl |= SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

static void __sdhci_led_deactivate(struct sdhci_host *host)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl &= ~SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

#if IS_REACHABLE(CONFIG_LEDS_CLASS)
static void sdhci_led_control(struct led_classdev *led,
			      enum led_brightness brightness)
{
	struct sdhci_host *host = container_of(led, struct sdhci_host, led);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	if (host->runtime_suspended)
		goto out;

	if (brightness == LED_OFF)
		__sdhci_led_deactivate(host);
	else
		__sdhci_led_activate(host);
out:
	spin_unlock_irqrestore(&host->lock, flags);
}

static int sdhci_led_register(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;

	snprintf(host->led_name, sizeof(host->led_name),
		 "%s::", mmc_hostname(mmc));

	host->led.name = host->led_name;
	host->led.brightness = LED_OFF;
	host->led.default_trigger = mmc_hostname(mmc);
	host->led.brightness_set = sdhci_led_control;

	return led_classdev_register(mmc_dev(mmc), &host->led);
}

static void sdhci_led_unregister(struct sdhci_host *host)
{
	led_classdev_unregister(&host->led);
}

static inline void sdhci_led_activate(struct sdhci_host *host)
{
}

static inline void sdhci_led_deactivate(struct sdhci_host *host)
{
}

#else

static inline int sdhci_led_register(struct sdhci_host *host)
{
	return 0;
}

static inline void sdhci_led_unregister(struct sdhci_host *host)
{
}

static inline void sdhci_led_activate(struct sdhci_host *host)
{
	__sdhci_led_activate(host);
}

static inline void sdhci_led_deactivate(struct sdhci_host *host)
{
	__sdhci_led_deactivate(host);
}

#endif

/*****************************************************************************\
 *                                                                           *
 * Core functions                                                            *
 *                                                                           *
\*****************************************************************************/

static void sdhci_read_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 uninitialized_var(scratch);
	u8 *buf;

	DBG("PIO reading\n");

	blksize = host->data->blksz;
	chunk = 0;

	local_irq_save(flags);

	while (blksize) {
		BUG_ON(!sg_miter_next(&host->sg_miter));

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			if (chunk == 0) {
				scratch = sdhci_readl(host, SDHCI_BUFFER);
				chunk = 4;
			}

			*buf = scratch & 0xFF;

			buf++;
			scratch >>= 8;
			chunk--;
			len--;
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}
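
/*
 * Worked example of the unpacking above: a 32-bit read of SDHCI_BUFFER
 * that returns 0x44332211 is stored to the buffer one byte per loop pass,
 * least-significant byte first (0x11, 0x22, 0x33, 0x44), by masking with
 * 0xFF and shifting scratch right by 8 between bytes. The write path
 * below is the mirror image of this packing.
 */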

static void sdhci_write_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 scratch;
	u8 *buf;

	DBG("PIO writing\n");

	blksize = host->data->blksz;
	chunk = 0;
	scratch = 0;

	local_irq_save(flags);

	while (blksize) {
		BUG_ON(!sg_miter_next(&host->sg_miter));

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			scratch |= (u32)*buf << (chunk * 8);

			buf++;
			chunk++;
			len--;

			if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
				sdhci_writel(host, scratch, SDHCI_BUFFER);
				chunk = 0;
				scratch = 0;
			}
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}

static void sdhci_transfer_pio(struct sdhci_host *host)
{
	u32 mask;

	if (host->blocks == 0)
		return;

	if (host->data->flags & MMC_DATA_READ)
		mask = SDHCI_DATA_AVAILABLE;
	else
		mask = SDHCI_SPACE_AVAILABLE;

	/*
	 * Some controllers (JMicron JMB38x) mess up the buffer bits
	 * for transfers < 4 bytes. As long as it is just one block,
	 * we can ignore the bits.
	 */
	if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
	    (host->data->blocks == 1))
		mask = ~0;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
			udelay(100);

		if (host->data->flags & MMC_DATA_READ)
			sdhci_read_block_pio(host);
		else
			sdhci_write_block_pio(host);

		host->blocks--;
		if (host->blocks == 0)
			break;
	}

	DBG("PIO transfer complete.\n");
}

static int sdhci_pre_dma_transfer(struct sdhci_host *host,
				  struct mmc_data *data, int cookie)
{
	int sg_count;

	/*
	 * If the data buffers are already mapped, return the previous
	 * dma_map_sg() result.
	 */
	if (data->host_cookie == COOKIE_PRE_MAPPED)
		return data->sg_count;

	/* Bounce write requests to the bounce buffer */
	if (host->bounce_buffer) {
		unsigned int length = data->blksz * data->blocks;

		if (length > host->bounce_buffer_size) {
			pr_err("%s: asked for transfer of %u bytes exceeds bounce buffer %u bytes\n",
			       mmc_hostname(host->mmc), length,
			       host->bounce_buffer_size);
			return -EIO;
		}
		if (mmc_get_dma_dir(data) == DMA_TO_DEVICE) {
			/* Copy the data to the bounce buffer */
			sg_copy_to_buffer(data->sg, data->sg_len,
					  host->bounce_buffer,
					  length);
		}
		/* Switch ownership to the DMA */
		dma_sync_single_for_device(host->mmc->parent,
					   host->bounce_addr,
					   host->bounce_buffer_size,
					   mmc_get_dma_dir(data));
		/* Just a dummy value */
		sg_count = 1;
	} else {
		/* Just access the data directly from memory */
		sg_count = dma_map_sg(mmc_dev(host->mmc),
				      data->sg, data->sg_len,
				      mmc_get_dma_dir(data));
	}

	if (sg_count == 0)
		return -ENOSPC;

	data->sg_count = sg_count;
	data->host_cookie = cookie;

	return sg_count;
}

static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
{
	local_irq_save(*flags);
	return kmap_atomic(sg_page(sg)) + sg->offset;
}

static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
{
	kunmap_atomic(buffer);
	local_irq_restore(*flags);
}

static void sdhci_adma_write_desc(struct sdhci_host *host, void *desc,
				  dma_addr_t addr, int len, unsigned cmd)
{
	struct sdhci_adma2_64_desc *dma_desc = desc;

	/* 32-bit and 64-bit descriptors have these members in same position */
	dma_desc->cmd = cpu_to_le16(cmd);
	dma_desc->len = cpu_to_le16(len);
	dma_desc->addr_lo = cpu_to_le32((u32)addr);

	if (host->flags & SDHCI_USE_64_BIT_DMA)
		dma_desc->addr_hi = cpu_to_le32((u64)addr >> 32);
}

static void sdhci_adma_mark_end(void *desc)
{
	struct sdhci_adma2_64_desc *dma_desc = desc;

	/* 32-bit and 64-bit descriptors have 'cmd' in same position */
	dma_desc->cmd |= cpu_to_le16(ADMA2_END);
}

static void sdhci_adma_table_pre(struct sdhci_host *host,
				 struct mmc_data *data, int sg_count)
{
	struct scatterlist *sg;
	unsigned long flags;
	dma_addr_t addr, align_addr;
	void *desc, *align;
	char *buffer;
	int len, offset, i;

	/*
	 * The spec does not specify endianness of descriptor table.
	 * We currently guess that it is LE.
	 */

	host->sg_count = sg_count;

	desc = host->adma_table;
	align = host->align_buffer;

	align_addr = host->align_addr;

	for_each_sg(data->sg, sg, host->sg_count, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);

		/*
		 * The SDHCI specification states that ADMA addresses must
		 * be 32-bit aligned. If they aren't, then we use a bounce
		 * buffer for the (up to three) bytes that screw up the
		 * alignment.
		 */
		offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) &
			 SDHCI_ADMA2_MASK;
		if (offset) {
			if (data->flags & MMC_DATA_WRITE) {
				buffer = sdhci_kmap_atomic(sg, &flags);
				memcpy(align, buffer, offset);
				sdhci_kunmap_atomic(buffer, &flags);
			}

			/* tran, valid */
			sdhci_adma_write_desc(host, desc, align_addr, offset,
					      ADMA2_TRAN_VALID);

			BUG_ON(offset > 65536);

			align += SDHCI_ADMA2_ALIGN;
			align_addr += SDHCI_ADMA2_ALIGN;

			desc += host->desc_sz;

			addr += offset;
			len -= offset;
		}

		BUG_ON(len > 65536);

		if (len) {
			/* tran, valid */
			sdhci_adma_write_desc(host, desc, addr, len,
					      ADMA2_TRAN_VALID);
			desc += host->desc_sz;
		}

		/*
		 * If this triggers then we have a calculation bug
		 * somewhere. :/
		 */
		WARN_ON((desc - host->adma_table) >= host->adma_table_sz);
	}

	if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
		/* Mark the last descriptor as the terminating descriptor */
		if (desc != host->adma_table) {
			desc -= host->desc_sz;
			sdhci_adma_mark_end(desc);
		}
	} else {
		/* Add a terminating entry - nop, end, valid */
		sdhci_adma_write_desc(host, desc, 0, 0, ADMA2_NOP_END_VALID);
	}
}
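
/*
 * Illustrative example of the table built above: one 512-byte sg entry
 * whose DMA address is 2 bytes off 4-byte alignment gives
 * offset = (4 - 2) & 3 = 2, so the descriptor chain becomes
 *
 *	desc[0]: TRAN|VALID, len 2,   addr = align buffer (bounce bytes)
 *	desc[1]: TRAN|VALID, len 510, addr = sg address + 2
 *	desc[2]: NOP|END|VALID terminator (or, with the
 *		 SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC quirk, desc[1] is
 *		 marked ADMA2_END instead)
 */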

static void sdhci_adma_table_post(struct sdhci_host *host,
				  struct mmc_data *data)
{
	struct scatterlist *sg;
	int i, size;
	void *align;
	char *buffer;
	unsigned long flags;

	if (data->flags & MMC_DATA_READ) {
		bool has_unaligned = false;

		/* Do a quick scan of the SG list for any unaligned mappings */
		for_each_sg(data->sg, sg, host->sg_count, i)
			if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
				has_unaligned = true;
				break;
			}

		if (has_unaligned) {
			dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
					    data->sg_len, DMA_FROM_DEVICE);

			align = host->align_buffer;

			for_each_sg(data->sg, sg, host->sg_count, i) {
				if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
					size = SDHCI_ADMA2_ALIGN -
					       (sg_dma_address(sg) & SDHCI_ADMA2_MASK);

					buffer = sdhci_kmap_atomic(sg, &flags);
					memcpy(buffer, align, size);
					sdhci_kunmap_atomic(buffer, &flags);

					align += SDHCI_ADMA2_ALIGN;
				}
			}
		}
	}
}

static u32 sdhci_sdma_address(struct sdhci_host *host)
{
	if (host->bounce_buffer)
		return host->bounce_addr;
	else
		return sg_dma_address(host->data->sg);
}

static unsigned int sdhci_target_timeout(struct sdhci_host *host,
					 struct mmc_command *cmd,
					 struct mmc_data *data)
{
	unsigned int target_timeout;

	/* timeout in us */
	if (!data) {
		target_timeout = cmd->busy_timeout * 1000;
	} else {
		target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000);
		if (host->clock && data->timeout_clks) {
			unsigned long long val;

			/*
			 * data->timeout_clks is in units of clock cycles.
			 * host->clock is in Hz. target_timeout is in us.
			 * Hence, us = 1000000 * cycles / Hz. Round up.
			 */
			val = 1000000ULL * data->timeout_clks;
			if (do_div(val, host->clock))
				target_timeout++;
			target_timeout += val;
		}
	}

	return target_timeout;
}
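
/*
 * Worked example: data->timeout_ns = 100000000 (100 ms),
 * data->timeout_clks = 1000 and host->clock = 50000000 (50 MHz) give
 * 100000 us + (1000000 * 1000) / 50000000 = 100000 + 20 = 100020 us.
 */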

static void sdhci_calc_sw_timeout(struct sdhci_host *host,
				  struct mmc_command *cmd)
{
	struct mmc_data *data = cmd->data;
	struct mmc_host *mmc = host->mmc;
	struct mmc_ios *ios = &mmc->ios;
	unsigned char bus_width = 1 << ios->bus_width;
	unsigned int blksz;
	unsigned int freq;
	u64 target_timeout;
	u64 transfer_time;

	target_timeout = sdhci_target_timeout(host, cmd, data);
	target_timeout *= NSEC_PER_USEC;

	if (data) {
		blksz = data->blksz;
		freq = host->mmc->actual_clock ? : host->clock;
		transfer_time = (u64)blksz * NSEC_PER_SEC * (8 / bus_width);
		do_div(transfer_time, freq);
		/* multiply by '2' to account for any unknowns */
		transfer_time = transfer_time * 2;
		/* calculate timeout for the entire data */
		host->data_timeout = data->blocks * target_timeout +
				     transfer_time;
	} else {
		host->data_timeout = target_timeout;
	}

	if (host->data_timeout)
		host->data_timeout += MMC_CMD_TRANSFER_TIME;
}

static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd,
			     bool *too_big)
{
	u8 count;
	struct mmc_data *data = cmd->data;
	unsigned target_timeout, current_timeout;

	*too_big = true;

	/*
	 * If the host controller provides us with an incorrect timeout
	 * value, just skip the check and use 0xE. The hardware may take
	 * longer to time out, but that's much better than having a too-short
	 * timeout value.
	 */
	if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
		return 0xE;

	/* Unspecified timeout, assume max */
	if (!data && !cmd->busy_timeout)
		return 0xE;

	/* timeout in us */
	target_timeout = sdhci_target_timeout(host, cmd, data);

	/*
	 * Figure out needed cycles.
	 * We do this in steps in order to fit inside a 32 bit int.
	 * The first step is the minimum timeout, which will have a
	 * minimum resolution of 6 bits:
	 * (1) 2^13*1000 > 2^22,
	 * (2) host->timeout_clk < 2^16
	 * =>
	 * (1) / (2) > 2^6
	 */
	count = 0;
	current_timeout = (1 << 13) * 1000 / host->timeout_clk;
	while (current_timeout < target_timeout) {
		count++;
		current_timeout <<= 1;
		if (count >= 0xF)
			break;
	}

	if (count >= 0xF) {
		if (!(host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT))
			DBG("Too large timeout 0x%x requested for CMD%d!\n",
			    count, cmd->opcode);
		count = 0xE;
	} else {
		*too_big = false;
	}

	return count;
}
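
/*
 * Worked example of the loop above: with host->timeout_clk = 1000 (kHz)
 * the base timeout is (1 << 13) * 1000 / 1000 = 8192 us. A 100000 us
 * target doubles it four times (8192 -> 16384 -> 32768 -> 65536 ->
 * 131072), so the function returns count = 4.
 */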

static void sdhci_set_transfer_irqs(struct sdhci_host *host)
{
	u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
	u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;

	if (host->flags & SDHCI_REQ_USE_DMA)
		host->ier = (host->ier & ~pio_irqs) | dma_irqs;
	else
		host->ier = (host->ier & ~dma_irqs) | pio_irqs;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable)
{
	if (enable)
		host->ier |= SDHCI_INT_DATA_TIMEOUT;
	else
		host->ier &= ~SDHCI_INT_DATA_TIMEOUT;
	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 count;

	if (host->ops->set_timeout) {
		host->ops->set_timeout(host, cmd);
	} else {
		bool too_big = false;

		count = sdhci_calc_timeout(host, cmd, &too_big);

		if (too_big &&
		    host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) {
			sdhci_calc_sw_timeout(host, cmd);
			sdhci_set_data_timeout_irq(host, false);
		} else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) {
			sdhci_set_data_timeout_irq(host, true);
		}

		sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
	}
}

static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 ctrl;
	struct mmc_data *data = cmd->data;

	host->data_timeout = 0;

	if (sdhci_data_line_cmd(cmd))
		sdhci_set_timeout(host, cmd);

	if (!data)
		return;

	WARN_ON(host->data);

	/* Sanity checks */
	BUG_ON(data->blksz * data->blocks > 524288);
	BUG_ON(data->blksz > host->mmc->max_blk_size);
	BUG_ON(data->blocks > 65535);

	host->data = data;
	host->data_early = 0;
	host->data->bytes_xfered = 0;

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		struct scatterlist *sg;
		unsigned int length_mask, offset_mask;
		int i;

		host->flags |= SDHCI_REQ_USE_DMA;

		/*
		 * FIXME: This doesn't account for merging when mapping the
		 * scatterlist.
		 *
		 * The assumption here being that alignment and lengths are
		 * the same after DMA mapping to device address space.
		 */
		length_mask = 0;
		offset_mask = 0;
		if (host->flags & SDHCI_USE_ADMA) {
			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) {
				length_mask = 3;
				/*
				 * As we use up to 3 byte chunks to work
				 * around alignment problems, we need to
				 * check the offset as well.
				 */
				offset_mask = 3;
			}
		} else {
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
				length_mask = 3;
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
				offset_mask = 3;
		}

		if (unlikely(length_mask | offset_mask)) {
			for_each_sg(data->sg, sg, data->sg_len, i) {
				if (sg->length & length_mask) {
					DBG("Reverting to PIO because of transfer size (%d)\n",
					    sg->length);
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
				if (sg->offset & offset_mask) {
					DBG("Reverting to PIO because of bad alignment\n");
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
			}
		}
	}

	if (host->flags & SDHCI_REQ_USE_DMA) {
		int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);

		if (sg_cnt <= 0) {
			/*
			 * This only happens when someone fed
			 * us an invalid request.
			 */
			WARN_ON(1);
			host->flags &= ~SDHCI_REQ_USE_DMA;
		} else if (host->flags & SDHCI_USE_ADMA) {
			sdhci_adma_table_pre(host, data, sg_cnt);

			sdhci_writel(host, host->adma_addr, SDHCI_ADMA_ADDRESS);
			if (host->flags & SDHCI_USE_64_BIT_DMA)
				sdhci_writel(host,
					     (u64)host->adma_addr >> 32,
					     SDHCI_ADMA_ADDRESS_HI);
		} else {
			WARN_ON(sg_cnt != 1);
			sdhci_writel(host, sdhci_sdma_address(host),
				     SDHCI_DMA_ADDRESS);
		}
	}

	/*
	 * Always adjust the DMA selection as some controllers
	 * (e.g. JMicron) can't do PIO properly when the selection
	 * is ADMA.
	 */
	if (host->version >= SDHCI_SPEC_200) {
		ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
		ctrl &= ~SDHCI_CTRL_DMA_MASK;
		if ((host->flags & SDHCI_REQ_USE_DMA) &&
		    (host->flags & SDHCI_USE_ADMA)) {
			if (host->flags & SDHCI_USE_64_BIT_DMA)
				ctrl |= SDHCI_CTRL_ADMA64;
			else
				ctrl |= SDHCI_CTRL_ADMA32;
		} else {
			ctrl |= SDHCI_CTRL_SDMA;
		}
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
	}

	if (!(host->flags & SDHCI_REQ_USE_DMA)) {
		int flags;

		flags = SG_MITER_ATOMIC;
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;
		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->blocks = data->blocks;
	}

	sdhci_set_transfer_irqs(host);

	/* Set the DMA boundary value and block size */
	sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz),
		     SDHCI_BLOCK_SIZE);
	sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
}
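
/*
 * Example of the final register write above: SDHCI_MAKE_BLKSZ() packs the
 * SDMA buffer boundary into bits 14:12 and the block size into bits 11:0,
 * so a 512-byte block with the common default boundary
 * (host->sdma_boundary == 7, i.e. 512 KiB) is written as
 * (7 << 12) | 512 = 0x7200.
 */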

static inline bool sdhci_auto_cmd12(struct sdhci_host *host,
				    struct mmc_request *mrq)
{
	return !mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) &&
	       !mrq->cap_cmd_during_tfr;
}

static void sdhci_set_transfer_mode(struct sdhci_host *host,
				    struct mmc_command *cmd)
{
	u16 mode = 0;
	struct mmc_data *data = cmd->data;

	if (data == NULL) {
		if (host->quirks2 &
		    SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) {
			/* must not clear SDHCI_TRANSFER_MODE when tuning */
			if (cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200)
				sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
		} else {
			/* clear Auto CMD settings for no data CMDs */
			mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
			sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
				     SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
		}
		return;
	}

	WARN_ON(!host->data);

	if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE))
		mode = SDHCI_TRNS_BLK_CNT_EN;

	if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
		mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI;
		/*
		 * If we are sending CMD23, CMD12 never gets sent
		 * on successful completion (so no Auto-CMD12).
		 */
		if (sdhci_auto_cmd12(host, cmd->mrq) &&
		    (cmd->opcode != SD_IO_RW_EXTENDED))
			mode |= SDHCI_TRNS_AUTO_CMD12;
		else if (cmd->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
			mode |= SDHCI_TRNS_AUTO_CMD23;
			sdhci_writel(host, cmd->mrq->sbc->arg, SDHCI_ARGUMENT2);
		}
	}

	if (data->flags & MMC_DATA_READ)
		mode |= SDHCI_TRNS_READ;
	if (host->flags & SDHCI_REQ_USE_DMA)
		mode |= SDHCI_TRNS_DMA;

	sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
}

static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
{
	return (!(host->flags & SDHCI_DEVICE_DEAD) &&
		((mrq->cmd && mrq->cmd->error) ||
		 (mrq->sbc && mrq->sbc->error) ||
		 (mrq->data && ((mrq->data->error && !mrq->data->stop) ||
				(mrq->data->stop && mrq->data->stop->error))) ||
		 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)));
}

static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
{
	int i;

	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
		if (host->mrqs_done[i] == mrq) {
			WARN_ON(1);
			return;
		}
	}

	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
		if (!host->mrqs_done[i]) {
			host->mrqs_done[i] = mrq;
			break;
		}
	}

	WARN_ON(i >= SDHCI_MAX_MRQS);

	tasklet_schedule(&host->finish_tasklet);
}

static void sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
{
	if (host->cmd && host->cmd->mrq == mrq)
		host->cmd = NULL;

	if (host->data_cmd && host->data_cmd->mrq == mrq)
		host->data_cmd = NULL;

	if (host->data && host->data->mrq == mrq)
		host->data = NULL;

	if (sdhci_needs_reset(host, mrq))
		host->pending_reset = true;

	__sdhci_finish_mrq(host, mrq);
}

static void sdhci_finish_data(struct sdhci_host *host)
{
	struct mmc_command *data_cmd = host->data_cmd;
	struct mmc_data *data = host->data;

	host->data = NULL;
	host->data_cmd = NULL;

	if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) ==
	    (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA))
		sdhci_adma_table_post(host, data);

	/*
	 * The specification states that the block count register must
	 * be updated, but it does not specify at what point in the
	 * data flow. That makes the register entirely useless to read
	 * back so we have to assume that nothing made it to the card
	 * in the event of an error.
	 */
	if (data->error)
		data->bytes_xfered = 0;
	else
		data->bytes_xfered = data->blksz * data->blocks;

	/*
	 * Need to send CMD12 if -
	 * a) open-ended multiblock transfer (no CMD23)
	 * b) error in multiblock transfer
	 */
	if (data->stop &&
	    (data->error ||
	     !data->mrq->sbc)) {

		/*
		 * The controller needs a reset of internal state machines
		 * upon error conditions.
		 */
		if (data->error) {
			if (!host->cmd || host->cmd == data_cmd)
				sdhci_do_reset(host, SDHCI_RESET_CMD);
			sdhci_do_reset(host, SDHCI_RESET_DATA);
		}

		/*
		 * 'cap_cmd_during_tfr' request must not use the command line
		 * after mmc_command_done() has been called. It is upper layer's
		 * responsibility to send the stop command if required.
		 */
		if (data->mrq->cap_cmd_during_tfr) {
			sdhci_finish_mrq(host, data->mrq);
		} else {
			/* Avoid triggering warning in sdhci_send_command() */
			host->cmd = NULL;
			sdhci_send_command(host, data->stop);
		}
	} else {
		sdhci_finish_mrq(host, data->mrq);
	}
}

static void sdhci_mod_timer(struct sdhci_host *host, struct mmc_request *mrq,
			    unsigned long timeout)
{
	if (sdhci_data_line_cmd(mrq->cmd))
		mod_timer(&host->data_timer, timeout);
	else
		mod_timer(&host->timer, timeout);
}

static void sdhci_del_timer(struct sdhci_host *host, struct mmc_request *mrq)
{
	if (sdhci_data_line_cmd(mrq->cmd))
		del_timer(&host->data_timer);
	else
		del_timer(&host->timer);
}

void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
{
	int flags;
	u32 mask;
	unsigned long timeout;

	WARN_ON(host->cmd);

	/* Initially, a command has no error */
	cmd->error = 0;

	if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
	    cmd->opcode == MMC_STOP_TRANSMISSION)
		cmd->flags |= MMC_RSP_BUSY;

	/* Wait max 10 ms */
	timeout = 10;

	mask = SDHCI_CMD_INHIBIT;
	if (sdhci_data_line_cmd(cmd))
		mask |= SDHCI_DATA_INHIBIT;
	/* We shouldn't wait for data inhibit for stop commands, even
	   though they might use busy signaling */
	if (cmd->mrq->data && (cmd == cmd->mrq->data->stop))
		mask &= ~SDHCI_DATA_INHIBIT;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (timeout == 0) {
			pr_err("%s: Controller never released inhibit bit(s).\n",
			       mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			cmd->error = -EIO;
			sdhci_finish_mrq(host, cmd->mrq);
			return;
		}
		timeout--;
		mdelay(1);
	}

	host->cmd = cmd;
	if (sdhci_data_line_cmd(cmd)) {
		WARN_ON(host->data_cmd);
		host->data_cmd = cmd;
	}

	sdhci_prepare_data(host, cmd);

	sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);

	sdhci_set_transfer_mode(host, cmd);

	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
		pr_err("%s: Unsupported response type!\n",
		       mmc_hostname(host->mmc));
		cmd->error = -EINVAL;
		sdhci_finish_mrq(host, cmd->mrq);
		return;
	}

	if (!(cmd->flags & MMC_RSP_PRESENT))
		flags = SDHCI_CMD_RESP_NONE;
	else if (cmd->flags & MMC_RSP_136)
		flags = SDHCI_CMD_RESP_LONG;
	else if (cmd->flags & MMC_RSP_BUSY)
		flags = SDHCI_CMD_RESP_SHORT_BUSY;
	else
		flags = SDHCI_CMD_RESP_SHORT;

	if (cmd->flags & MMC_RSP_CRC)
		flags |= SDHCI_CMD_CRC;
	if (cmd->flags & MMC_RSP_OPCODE)
		flags |= SDHCI_CMD_INDEX;

	/* CMD19 is special in that the Data Present Select should be set */
	if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
	    cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
		flags |= SDHCI_CMD_DATA;

	timeout = jiffies;
	if (host->data_timeout)
		timeout += nsecs_to_jiffies(host->data_timeout);
	else if (!cmd->data && cmd->busy_timeout > 9000)
		timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
	else
		timeout += 10 * HZ;
	sdhci_mod_timer(host, cmd->mrq, timeout);

	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
}
EXPORT_SYMBOL_GPL(sdhci_send_command);

static void sdhci_read_rsp_136(struct sdhci_host *host, struct mmc_command *cmd)
{
	int i, reg;

	for (i = 0; i < 4; i++) {
		reg = SDHCI_RESPONSE + (3 - i) * 4;
		cmd->resp[i] = sdhci_readl(host, reg);
	}

	if (host->quirks2 & SDHCI_QUIRK2_RSP_136_HAS_CRC)
		return;

	/* CRC is stripped so we need to do some shifting */
	for (i = 0; i < 4; i++) {
		cmd->resp[i] <<= 8;
		if (i != 3)
			cmd->resp[i] |= cmd->resp[i + 1] >> 24;
	}
}
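
/*
 * Worked example of the shifting above: because the controller strips the
 * CRC byte, each word is moved up by 8 bits and the top byte of the next
 * word is pulled in. Raw register reads of resp[0] = 0x00112233 and
 * resp[1] = 0x44556677 leave resp[0] = 0x11223344 after the loop.
 */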

static void sdhci_finish_command(struct sdhci_host *host)
{
	struct mmc_command *cmd = host->cmd;

	host->cmd = NULL;

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			sdhci_read_rsp_136(host, cmd);
		} else {
			cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
		}
	}

	if (cmd->mrq->cap_cmd_during_tfr && cmd == cmd->mrq->cmd)
		mmc_command_done(host->mmc, cmd->mrq);

	/*
	 * The host can send an interrupt when the busy state has
	 * ended, allowing us to wait without wasting CPU cycles.
	 * The busy signal uses DAT0 so this is similar to waiting
	 * for data to complete.
	 *
	 * Note: The 1.0 specification is a bit ambiguous about this
	 *       feature so there might be some problems with older
	 *       controllers.
	 */
	if (cmd->flags & MMC_RSP_BUSY) {
		if (cmd->data) {
			DBG("Cannot wait for busy signal when also doing a data transfer");
		} else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) &&
			   cmd == host->data_cmd) {
			/* Command complete before busy is ended */
			return;
		}
	}

	/* Finished CMD23, now send actual command. */
	if (cmd == cmd->mrq->sbc) {
		sdhci_send_command(host, cmd->mrq->cmd);
	} else {

		/* Processed actual command. */
		if (host->data && host->data_early)
			sdhci_finish_data(host);

		if (!cmd->data)
			sdhci_finish_mrq(host, cmd->mrq);
	}
}

static u16 sdhci_get_preset_value(struct sdhci_host *host)
{
	u16 preset = 0;

	switch (host->timing) {
	case MMC_TIMING_UHS_SDR12:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
	case MMC_TIMING_UHS_SDR25:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
		break;
	case MMC_TIMING_UHS_SDR50:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
		break;
	case MMC_TIMING_UHS_SDR104:
	case MMC_TIMING_MMC_HS200:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
		break;
	case MMC_TIMING_UHS_DDR50:
	case MMC_TIMING_MMC_DDR52:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
		break;
	case MMC_TIMING_MMC_HS400:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400);
		break;
	default:
		pr_warn("%s: Invalid UHS-I mode selected\n",
			mmc_hostname(host->mmc));
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
	}
	return preset;
}

u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
		   unsigned int *actual_clock)
{
	int div = 0; /* Initialized for compiler warning */
	int real_div = div, clk_mul = 1;
	u16 clk = 0;
	bool switch_base_clk = false;

	if (host->version >= SDHCI_SPEC_300) {
		if (host->preset_enabled) {
			u16 pre_val;

			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			pre_val = sdhci_get_preset_value(host);
			div = (pre_val & SDHCI_PRESET_SDCLK_FREQ_MASK)
				>> SDHCI_PRESET_SDCLK_FREQ_SHIFT;
			if (host->clk_mul &&
				(pre_val & SDHCI_PRESET_CLKGEN_SEL_MASK)) {
				clk = SDHCI_PROG_CLOCK_MODE;
				real_div = div + 1;
				clk_mul = host->clk_mul;
			} else {
				real_div = max_t(int, 1, div << 1);
			}
			goto clock_set;
		}

		/*
		 * Check if the Host Controller supports Programmable Clock
		 * Mode.
		 */
		if (host->clk_mul) {
			for (div = 1; div <= 1024; div++) {
				if ((host->max_clk * host->clk_mul / div)
					<= clock)
					break;
			}
			if ((host->max_clk * host->clk_mul / div) <= clock) {
				/*
				 * Set Programmable Clock Mode in the Clock
				 * Control register.
				 */
				clk = SDHCI_PROG_CLOCK_MODE;
				real_div = div;
				clk_mul = host->clk_mul;
				div--;
			} else {
				/*
				 * Divisor can be too small to reach clock
				 * speed requirement. Then use the base clock.
				 */
				switch_base_clk = true;
			}
		}

		if (!host->clk_mul || switch_base_clk) {
			/* Version 3.00 divisors must be a multiple of 2. */
			if (host->max_clk <= clock)
				div = 1;
			else {
				for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
				     div += 2) {
					if ((host->max_clk / div) <= clock)
						break;
				}
			}
			real_div = div;
			div >>= 1;
			if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN)
				&& !div && host->max_clk <= 25000000)
				div = 1;
		}
	} else {
		/* Version 2.00 divisors must be a power of 2. */
		for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
			if ((host->max_clk / div) <= clock)
				break;
		}
		real_div = div;
		div >>= 1;
	}

clock_set:
	if (real_div)
		*actual_clock = (host->max_clk * clk_mul) / real_div;
	clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
	clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
		<< SDHCI_DIVIDER_HI_SHIFT;

	return clk;
}
EXPORT_SYMBOL_GPL(sdhci_calc_clk);
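
/*
 * Worked example (SDHCI v3.00 path, no programmable clock): with
 * host->max_clk = 200 MHz and clock = 50 MHz, the loop walks
 * div = 2, 4, ... until 200 MHz / 4 <= 50 MHz, so real_div = 4,
 * *actual_clock = 50 MHz, and the divider field written to the clock
 * control register is div >> 1 = 2.
 */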

void sdhci_enable_clk(struct sdhci_host *host, u16 clk)
{
	ktime_t timeout;

	clk |= SDHCI_CLOCK_INT_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

	/* Wait max 20 ms */
	timeout = ktime_add_ms(ktime_get(), 20);
	while (1) {
		bool timedout = ktime_after(ktime_get(), timeout);

		clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
		if (clk & SDHCI_CLOCK_INT_STABLE)
			break;
		if (timedout) {
			pr_err("%s: Internal clock never stabilised.\n",
			       mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			return;
		}
		udelay(10);
	}

	clk |= SDHCI_CLOCK_CARD_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_enable_clk);

void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
	u16 clk;

	host->mmc->actual_clock = 0;

	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);

	if (clock == 0)
		return;

	clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
	sdhci_enable_clk(host, clk);
}
EXPORT_SYMBOL_GPL(sdhci_set_clock);

static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
				unsigned short vdd)
{
	struct mmc_host *mmc = host->mmc;

	mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);

	if (mode != MMC_POWER_OFF)
		sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
	else
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
}

void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
			   unsigned short vdd)
{
	u8 pwr = 0;

	if (mode != MMC_POWER_OFF) {
		switch (1 << vdd) {
		case MMC_VDD_165_195:
		/*
		 * Without a regulator, SDHCI does not support 2.0v
		 * so we only get here if the driver deliberately
		 * added the 2.0v range to ocr_avail. Map it to 1.8v
		 * for the purpose of turning on the power.
		 */
		case MMC_VDD_20_21:
			pwr = SDHCI_POWER_180;
			break;
		case MMC_VDD_29_30:
		case MMC_VDD_30_31:
			pwr = SDHCI_POWER_300;
			break;
		case MMC_VDD_32_33:
		case MMC_VDD_33_34:
			pwr = SDHCI_POWER_330;
			break;
		default:
			WARN(1, "%s: Invalid vdd %#x\n",
			     mmc_hostname(host->mmc), vdd);
			break;
		}
	}

	if (host->pwr == pwr)
		return;

	host->pwr = pwr;

	if (pwr == 0) {
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
	} else {
		/*
		 * Spec says that we should clear the power reg before setting
		 * a new value. Some controllers don't seem to like this though.
		 */
		if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
			sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);

		/*
		 * At least the Marvell CaFe chip gets confused if we set the
		 * voltage and turn on the power at the same time, so set the
		 * voltage first.
		 */
		if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
			sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

		pwr |= SDHCI_POWER_ON;

		sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_on(host);

		/*
		 * Some controllers need an extra 10 ms delay before they
		 * can apply the clock after applying power
		 */
		if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
			mdelay(10);
	}
}
EXPORT_SYMBOL_GPL(sdhci_set_power_noreg);

void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
		     unsigned short vdd)
{
	if (IS_ERR(host->mmc->supply.vmmc))
		sdhci_set_power_noreg(host, mode, vdd);
	else
		sdhci_set_power_reg(host, mode, vdd);
}
EXPORT_SYMBOL_GPL(sdhci_set_power);

/*****************************************************************************\
 *                                                                           *
 * MMC callbacks                                                             *
 *                                                                           *
\*****************************************************************************/

static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sdhci_host *host;
	int present;
	unsigned long flags;

	host = mmc_priv(mmc);

	/* Firstly check card presence */
	present = mmc->ops->get_cd(mmc);

	spin_lock_irqsave(&host->lock, flags);

	sdhci_led_activate(host);

	/*
	 * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED
	 * requests if Auto-CMD12 is enabled.
	 */
	if (sdhci_auto_cmd12(host, mrq)) {
		if (mrq->stop) {
			mrq->data->stop = NULL;
			mrq->stop = NULL;
		}
	}

	if (!present || host->flags & SDHCI_DEVICE_DEAD) {
		mrq->cmd->error = -ENOMEDIUM;
		sdhci_finish_mrq(host, mrq);
	} else {
		if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
			sdhci_send_command(host, mrq->sbc);
		else
			sdhci_send_command(host, mrq->cmd);
	}

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}

void sdhci_set_bus_width(struct sdhci_host *host, int width)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	if (width == MMC_BUS_WIDTH_8) {
		ctrl &= ~SDHCI_CTRL_4BITBUS;
		ctrl |= SDHCI_CTRL_8BITBUS;
	} else {
		if (host->mmc->caps & MMC_CAP_8_BIT_DATA)
			ctrl &= ~SDHCI_CTRL_8BITBUS;
		if (width == MMC_BUS_WIDTH_4)
			ctrl |= SDHCI_CTRL_4BITBUS;
		else
			ctrl &= ~SDHCI_CTRL_4BITBUS;
	}
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_set_bus_width);

void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
{
	u16 ctrl_2;

	ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	/* Select Bus Speed Mode for host */
	ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
	if ((timing == MMC_TIMING_MMC_HS200) ||
	    (timing == MMC_TIMING_UHS_SDR104))
		ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
	else if (timing == MMC_TIMING_UHS_SDR12)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
	else if (timing == MMC_TIMING_UHS_SDR25)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
	else if (timing == MMC_TIMING_UHS_SDR50)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
	else if ((timing == MMC_TIMING_UHS_DDR50) ||
		 (timing == MMC_TIMING_MMC_DDR52))
		ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
	else if (timing == MMC_TIMING_MMC_HS400)
		ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */
	sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
}
EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);

void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u8 ctrl;

	if (ios->power_mode == MMC_POWER_UNDEFINED)
		return;

	if (host->flags & SDHCI_DEVICE_DEAD) {
		if (!IS_ERR(mmc->supply.vmmc) &&
		    ios->power_mode == MMC_POWER_OFF)
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
		return;
	}

	/*
	 * Reset the chip on each power off.
	 * Should clear out any weird states.
	 */
	if (ios->power_mode == MMC_POWER_OFF) {
		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
		sdhci_reinit(host);
	}

	if (host->version >= SDHCI_SPEC_300 &&
	    (ios->power_mode == MMC_POWER_UP) &&
	    !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
		sdhci_enable_preset_value(host, false);

	if (!ios->clock || ios->clock != host->clock) {
		host->ops->set_clock(host, ios->clock);
		host->clock = ios->clock;

		if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
		    host->clock) {
			host->timeout_clk = host->mmc->actual_clock ?
						host->mmc->actual_clock / 1000 :
						host->clock / 1000;
			host->mmc->max_busy_timeout =
				host->ops->get_max_timeout_count ?
				host->ops->get_max_timeout_count(host) :
				1 << 27;
			host->mmc->max_busy_timeout /= host->timeout_clk;
		}
	}

	if (host->ops->set_power)
		host->ops->set_power(host, ios->power_mode, ios->vdd);
	else
		sdhci_set_power(host, ios->power_mode, ios->vdd);

	if (host->ops->platform_send_init_74_clocks)
		host->ops->platform_send_init_74_clocks(host, ios->power_mode);

	host->ops->set_bus_width(host, ios->bus_width);

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);

	if (!(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT)) {
		if (ios->timing == MMC_TIMING_SD_HS ||
		    ios->timing == MMC_TIMING_MMC_HS ||
		    ios->timing == MMC_TIMING_MMC_HS400 ||
		    ios->timing == MMC_TIMING_MMC_HS200 ||
		    ios->timing == MMC_TIMING_MMC_DDR52 ||
		    ios->timing == MMC_TIMING_UHS_SDR50 ||
		    ios->timing == MMC_TIMING_UHS_SDR104 ||
		    ios->timing == MMC_TIMING_UHS_DDR50 ||
		    ios->timing == MMC_TIMING_UHS_SDR25)
			ctrl |= SDHCI_CTRL_HISPD;
		else
			ctrl &= ~SDHCI_CTRL_HISPD;
	}

	if (host->version >= SDHCI_SPEC_300) {
		u16 clk, ctrl_2;

		if (!host->preset_enabled) {
			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
			/*
			 * We only need to set Driver Strength if the
			 * preset value enable is not set.
			 */
			ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
			ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
			if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D;
			else {
				pr_warn("%s: invalid driver type, default to driver type B\n",
					mmc_hostname(mmc));
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
			}

			sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
		} else {
			/*
			 * According to SDHC Spec v3.00, if the Preset Value
			 * Enable in the Host Control 2 register is set, we
			 * need to reset SD Clock Enable before changing High
			 * Speed Enable to avoid generating clock glitches.
			 */

			/* Reset SD Clock Enable */
			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			clk &= ~SDHCI_CLOCK_CARD_EN;
			sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

			/* Re-enable SD Clock */
			host->ops->set_clock(host, host->clock);
		}

		/* Reset SD Clock Enable */
		clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
		clk &= ~SDHCI_CLOCK_CARD_EN;
		sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

		host->ops->set_uhs_signaling(host, ios->timing);
		host->timing = ios->timing;

		if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
		    ((ios->timing == MMC_TIMING_UHS_SDR12) ||
		     (ios->timing == MMC_TIMING_UHS_SDR25) ||
		     (ios->timing == MMC_TIMING_UHS_SDR50) ||
		     (ios->timing == MMC_TIMING_UHS_SDR104) ||
		     (ios->timing == MMC_TIMING_UHS_DDR50) ||
		     (ios->timing == MMC_TIMING_MMC_DDR52))) {
			u16 preset;

			sdhci_enable_preset_value(host, true);
			preset = sdhci_get_preset_value(host);
			ios->drv_type = (preset & SDHCI_PRESET_DRV_MASK)
				>> SDHCI_PRESET_DRV_SHIFT;
		}

		/* Re-enable SD Clock */
		host->ops->set_clock(host, host->clock);
	} else
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

	/*
	 * Some (ENE) controllers go apeshit on some ios operation,
	 * signalling timeout and CRC errors even on CMD0. Resetting
	 * it on each ios seems to solve the problem.
	 */
	if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);

	mmiowb();
}
EXPORT_SYMBOL_GPL(sdhci_set_ios);

static int sdhci_get_cd(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int gpio_cd = mmc_gpio_get_cd(mmc);

	if (host->flags & SDHCI_DEVICE_DEAD)
		return 0;

	/* If nonremovable, assume that the card is always present. */
	if (!mmc_card_is_removable(host->mmc))
		return 1;

	/*
	 * Try slot GPIO detect; if defined, it takes precedence
	 * over the built-in controller functionality
	 */
	if (gpio_cd >= 0)
		return !!gpio_cd;

	/* If polling, assume that the card is always present. */
	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
		return 1;

	/* Host native card detect */
	return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
}

static int sdhci_check_ro(struct sdhci_host *host)
{
	unsigned long flags;
	int is_readonly;

	spin_lock_irqsave(&host->lock, flags);

	if (host->flags & SDHCI_DEVICE_DEAD)
		is_readonly = 0;
	else if (host->ops->get_ro)
		is_readonly = host->ops->get_ro(host);
	else
		is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
				& SDHCI_WRITE_PROTECT);

	spin_unlock_irqrestore(&host->lock, flags);

	/* This quirk needs to be replaced by a callback-function later */
	return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
		!is_readonly : is_readonly;
}

#define SAMPLE_COUNT 5

static int sdhci_get_ro(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int i, ro_count;

	if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
		return sdhci_check_ro(host);

	ro_count = 0;
	for (i = 0; i < SAMPLE_COUNT; i++) {
		if (sdhci_check_ro(host)) {
			if (++ro_count > SAMPLE_COUNT / 2)
				return 1;
		}
		msleep(30);
	}
	return 0;
}

static void sdhci_hw_reset(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);

	if (host->ops && host->ops->hw_reset)
		host->ops->hw_reset(host);
}

static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
{
	if (!(host->flags & SDHCI_DEVICE_DEAD)) {
		if (enable)
			host->ier |= SDHCI_INT_CARD_INT;
		else
			host->ier &= ~SDHCI_INT_CARD_INT;

		sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
		sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
		mmiowb();
	}
}

void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;

	if (enable)
		pm_runtime_get_noresume(host->mmc->parent);

	spin_lock_irqsave(&host->lock, flags);
	if (enable)
		host->flags |= SDHCI_SDIO_IRQ_ENABLED;
	else
		host->flags &= ~SDHCI_SDIO_IRQ_ENABLED;

	sdhci_enable_sdio_irq_nolock(host, enable);
	spin_unlock_irqrestore(&host->lock, flags);

	if (!enable)
		pm_runtime_put_noidle(host->mmc->parent);
}
EXPORT_SYMBOL_GPL(sdhci_enable_sdio_irq);

int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
				      struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u16 ctrl;
	int ret;

	/*
	 * Signal Voltage Switching is only applicable for Host Controllers
	 * v3.00 and above.
	 */
	if (host->version < SDHCI_SPEC_300)
		return 0;

	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

	switch (ios->signal_voltage) {
	case MMC_SIGNAL_VOLTAGE_330:
		if (!(host->flags & SDHCI_SIGNALING_330))
			return -EINVAL;
		/* Set 1.8V Signal Enable in the Host Control2 register to 0 */
		ctrl &= ~SDHCI_CTRL_VDD_180;
		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

		if (!IS_ERR(mmc->supply.vqmmc)) {
			ret = mmc_regulator_set_vqmmc(mmc, ios);
			if (ret) {
				pr_warn("%s: Switching to 3.3V signalling voltage failed\n",
					mmc_hostname(mmc));
				return -EIO;
			}
		}
		/* Wait for 5ms */
		usleep_range(5000, 5500);

		/* 3.3V regulator output should be stable within 5 ms */
		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (!(ctrl & SDHCI_CTRL_VDD_180))
			return 0;

		pr_warn("%s: 3.3V regulator output did not become stable\n",
2039 mmc_hostname(mmc));
2040
2041 return -EAGAIN;
2042 case MMC_SIGNAL_VOLTAGE_180:
2043 if (!(host->flags & SDHCI_SIGNALING_180))
2044 return -EINVAL;
2045 if (!IS_ERR(mmc->supply.vqmmc)) {
2046 ret = mmc_regulator_set_vqmmc(mmc, ios);
2047 if (ret) {
2048 pr_warn("%s: Switching to 1.8V signalling voltage failed\n",
2049 mmc_hostname(mmc));
2050 return -EIO;
2051 }
2052 }
2053
2054 /*
2055 * Enable 1.8V Signal Enable in the Host Control2
2056 * register
2057 */
2058 ctrl |= SDHCI_CTRL_VDD_180;
2059 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2060
2061		/* Some controllers need to do more when switching */
2062 if (host->ops->voltage_switch)
2063 host->ops->voltage_switch(host);
2064
2065 /* 1.8V regulator output should be stable within 5 ms */
2066 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2067 if (ctrl & SDHCI_CTRL_VDD_180)
2068 return 0;
2069
2070		pr_warn("%s: 1.8V regulator output did not become stable\n",
2071 mmc_hostname(mmc));
2072
2073 return -EAGAIN;
2074 case MMC_SIGNAL_VOLTAGE_120:
2075 if (!(host->flags & SDHCI_SIGNALING_120))
2076 return -EINVAL;
2077 if (!IS_ERR(mmc->supply.vqmmc)) {
2078 ret = mmc_regulator_set_vqmmc(mmc, ios);
2079 if (ret) {
2080 pr_warn("%s: Switching to 1.2V signalling voltage failed\n",
2081 mmc_hostname(mmc));
2082 return -EIO;
2083 }
2084 }
2085 return 0;
2086 default:
2087 /* No signal voltage switch required */
2088 return 0;
2089 }
2090}
2091EXPORT_SYMBOL_GPL(sdhci_start_signal_voltage_switch);
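
/*
 * Illustrative sketch (hypothetical, not part of this driver): a platform
 * whose pad I/O cells must be reprogrammed after the 1.8V enable bit is
 * set can hook ->voltage_switch, which the 1.8V branch above calls between
 * writing SDHCI_CTRL_VDD_180 and re-reading SDHCI_HOST_CONTROL2 to confirm
 * the switch:
 *
 *	static void foo_voltage_switch(struct sdhci_host *host)
 *	{
 *		// foo_set_pad_voltage() stands in for platform pad control
 *		foo_set_pad_voltage(host, 1800000);
 *	}
 *
 *	static const struct sdhci_ops foo_ops = {
 *		.voltage_switch	= foo_voltage_switch,
 *	};
 */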
2092
2093static int sdhci_card_busy(struct mmc_host *mmc)
2094{
2095 struct sdhci_host *host = mmc_priv(mmc);
2096 u32 present_state;
2097
2098 /* Check whether DAT[0] is 0 */
2099 present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
2100
2101 return !(present_state & SDHCI_DATA_0_LVL_MASK);
2102}
2103
2104static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
2105{
2106 struct sdhci_host *host = mmc_priv(mmc);
2107 unsigned long flags;
2108
2109 spin_lock_irqsave(&host->lock, flags);
2110 host->flags |= SDHCI_HS400_TUNING;
2111 spin_unlock_irqrestore(&host->lock, flags);
2112
2113 return 0;
2114}
2115
2116void sdhci_start_tuning(struct sdhci_host *host)
2117{
2118 u16 ctrl;
2119
2120 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2121 ctrl |= SDHCI_CTRL_EXEC_TUNING;
2122 if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND)
2123 ctrl |= SDHCI_CTRL_TUNED_CLK;
2124 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2125
2126 /*
2127 * As per the Host Controller spec v3.00, tuning command
2128 * generates Buffer Read Ready interrupt, so enable that.
2129 *
2130	 * Note: The spec clearly says that when the tuning sequence
2131 * is being performed, the controller does not generate
2132 * interrupts other than Buffer Read Ready interrupt. But
2133 * to make sure we don't hit a controller bug, we _only_
2134 * enable Buffer Read Ready interrupt here.
2135 */
2136 sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
2137 sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);
2138}
2139EXPORT_SYMBOL_GPL(sdhci_start_tuning);
2140
2141void sdhci_end_tuning(struct sdhci_host *host)
2142{
2143 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2144 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2145}
2146EXPORT_SYMBOL_GPL(sdhci_end_tuning);
2147
2148void sdhci_reset_tuning(struct sdhci_host *host)
2149{
2150 u16 ctrl;
2151
2152 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2153 ctrl &= ~SDHCI_CTRL_TUNED_CLK;
2154 ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
2155 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2156}
2157EXPORT_SYMBOL_GPL(sdhci_reset_tuning);
2158
2159static void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode)
2160{
2161 sdhci_reset_tuning(host);
2162
2163 sdhci_do_reset(host, SDHCI_RESET_CMD);
2164 sdhci_do_reset(host, SDHCI_RESET_DATA);
2165
2166 sdhci_end_tuning(host);
2167
2168 mmc_abort_tuning(host->mmc, opcode);
2169}
2170
2171/*
2172 * We use sdhci_send_tuning() because mmc_send_tuning() is not a good fit: the
2173 * SDHCI tuning command does not have a data payload (or rather the hardware
2174 * handles it automatically), so mmc_send_tuning() would return -EIO. Also, the
2175 * tuning command's interrupt setup is different from that of other commands,
2176 * and there is no timeout interrupt, so special handling is needed.
2177 */
2178void sdhci_send_tuning(struct sdhci_host *host, u32 opcode)
2179{
2180 struct mmc_host *mmc = host->mmc;
2181 struct mmc_command cmd = {};
2182 struct mmc_request mrq = {};
2183 unsigned long flags;
2184 u32 b = host->sdma_boundary;
2185
2186 spin_lock_irqsave(&host->lock, flags);
2187
2188 cmd.opcode = opcode;
2189 cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
2190 cmd.mrq = &mrq;
2191
2192 mrq.cmd = &cmd;
2193 /*
2194	 * In response to CMD19, the card sends a 64-byte tuning block
2195	 * to the Host Controller, so the block size is set to 64 here.
2196	 * For CMD21 on an 8-bit bus the tuning block is 128 bytes.
2197 */
2198 if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200 &&
2199 mmc->ios.bus_width == MMC_BUS_WIDTH_8)
2200 sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 128), SDHCI_BLOCK_SIZE);
2201 else
2202 sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 64), SDHCI_BLOCK_SIZE);
2203
2204 /*
2205 * The tuning block is sent by the card to the host controller.
2206 * So we set the TRNS_READ bit in the Transfer Mode register.
2207 * This also takes care of setting DMA Enable and Multi Block
2208 * Select in the same register to 0.
2209 */
2210 sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);
2211
2212 sdhci_send_command(host, &cmd);
2213
2214 host->cmd = NULL;
2215
2216 sdhci_del_timer(host, &mrq);
2217
2218 host->tuning_done = 0;
2219
2220 mmiowb();
2221 spin_unlock_irqrestore(&host->lock, flags);
2222
2223 /* Wait for Buffer Read Ready interrupt */
2224 wait_event_timeout(host->buf_ready_int, (host->tuning_done == 1),
2225 msecs_to_jiffies(50));
2226
2227}
2228EXPORT_SYMBOL_GPL(sdhci_send_tuning);
2229
2230static void __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
2231{
2232 int i;
2233
2234 /*
2235	 * Issue the tuning opcode repeatedly until Execute Tuning is cleared
2236	 * to 0 or the number of loops reaches MAX_TUNING_LOOP (40).
2237 */
2238 for (i = 0; i < MAX_TUNING_LOOP; i++) {
2239 u16 ctrl;
2240
2241 sdhci_send_tuning(host, opcode);
2242
2243 if (!host->tuning_done) {
2244 pr_info("%s: Tuning timeout, falling back to fixed sampling clock\n",
2245 mmc_hostname(host->mmc));
2246 sdhci_abort_tuning(host, opcode);
2247 return;
2248 }
2249
2250 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2251 if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) {
2252 if (ctrl & SDHCI_CTRL_TUNED_CLK)
2253 return; /* Success! */
2254 break;
2255 }
2256
2257 /* Spec does not require a delay between tuning cycles */
2258 if (host->tuning_delay > 0)
2259 mdelay(host->tuning_delay);
2260 }
2261
2262 pr_info("%s: Tuning failed, falling back to fixed sampling clock\n",
2263 mmc_hostname(host->mmc));
2264 sdhci_reset_tuning(host);
2265}
2266
2267int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
2268{
2269 struct sdhci_host *host = mmc_priv(mmc);
2270 int err = 0;
2271 unsigned int tuning_count = 0;
2272 bool hs400_tuning;
2273
2274 hs400_tuning = host->flags & SDHCI_HS400_TUNING;
2275
2276 if (host->tuning_mode == SDHCI_TUNING_MODE_1)
2277 tuning_count = host->tuning_count;
2278
2279 /*
2280 * The Host Controller needs tuning in case of SDR104 and DDR50
2281 * mode, and for SDR50 mode when Use Tuning for SDR50 is set in
2282 * the Capabilities register.
2283 * If the Host Controller supports the HS200 mode then the
2284 * tuning function has to be executed.
2285 */
2286 switch (host->timing) {
2287 /* HS400 tuning is done in HS200 mode */
2288 case MMC_TIMING_MMC_HS400:
2289 err = -EINVAL;
2290 goto out;
2291
2292 case MMC_TIMING_MMC_HS200:
2293 /*
2294 * Periodic re-tuning for HS400 is not expected to be needed, so
2295 * disable it here.
2296 */
2297 if (hs400_tuning)
2298 tuning_count = 0;
2299 break;
2300
2301 case MMC_TIMING_UHS_SDR104:
2302 case MMC_TIMING_UHS_DDR50:
2303 break;
2304
2305 case MMC_TIMING_UHS_SDR50:
2306 if (host->flags & SDHCI_SDR50_NEEDS_TUNING)
2307 break;
2308 /* FALLTHROUGH */
2309
2310 default:
2311 goto out;
2312 }
2313
2314 if (host->ops->platform_execute_tuning) {
2315 err = host->ops->platform_execute_tuning(host, opcode);
2316 goto out;
2317 }
2318
2319 host->mmc->retune_period = tuning_count;
2320
2321 if (host->tuning_delay < 0)
2322 host->tuning_delay = opcode == MMC_SEND_TUNING_BLOCK;
2323
2324 sdhci_start_tuning(host);
2325
2326 __sdhci_execute_tuning(host, opcode);
2327
2328 sdhci_end_tuning(host);
2329out:
2330 host->flags &= ~SDHCI_HS400_TUNING;
2331
2332 return err;
2333}
2334EXPORT_SYMBOL_GPL(sdhci_execute_tuning);
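
/*
 * Illustrative sketch (hypothetical, not part of this driver): a controller
 * with its own tuning engine can bypass the loop above by providing
 * ->platform_execute_tuning, which sdhci_execute_tuning() calls instead of
 * the sdhci_start_tuning()/__sdhci_execute_tuning() sequence:
 *
 *	static int foo_platform_execute_tuning(struct sdhci_host *host,
 *					       u32 opcode)
 *	{
 *		// foo_hw_tune() stands in for vendor-specific tuning
 *		return foo_hw_tune(host, opcode);
 *	}
 */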
2335
2336static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
2337{
2338 /* Host Controller v3.00 defines preset value registers */
2339 if (host->version < SDHCI_SPEC_300)
2340 return;
2341
2342 /*
2343	 * Only enable or disable Preset Value if it is not already in the
2344	 * requested state. Otherwise, bail out without touching the register.
2345 */
2346 if (host->preset_enabled != enable) {
2347 u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2348
2349 if (enable)
2350 ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
2351 else
2352 ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;
2353
2354 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2355
2356 if (enable)
2357 host->flags |= SDHCI_PV_ENABLED;
2358 else
2359 host->flags &= ~SDHCI_PV_ENABLED;
2360
2361 host->preset_enabled = enable;
2362 }
2363}
2364
2365static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
2366 int err)
2367{
2368 struct sdhci_host *host = mmc_priv(mmc);
2369 struct mmc_data *data = mrq->data;
2370
2371 if (data->host_cookie != COOKIE_UNMAPPED)
2372 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
2373 mmc_get_dma_dir(data));
2374
2375 data->host_cookie = COOKIE_UNMAPPED;
2376}
2377
2378static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
2379{
2380 struct sdhci_host *host = mmc_priv(mmc);
2381
2382 mrq->data->host_cookie = COOKIE_UNMAPPED;
2383
2384 /*
2385 * No pre-mapping in the pre hook if we're using the bounce buffer,
2386 * for that we would need two bounce buffers since one buffer is
2387 * in flight when this is getting called.
2388 */
2389 if (host->flags & SDHCI_REQ_USE_DMA && !host->bounce_buffer)
2390 sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED);
2391}
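
/*
 * Summary of the host_cookie handshake used above (cookie values are from
 * sdhci.h): buffers start out COOKIE_UNMAPPED; sdhci_pre_req() may map them
 * early as COOKIE_PRE_MAPPED so that the request path can skip the mapping;
 * the request path itself maps on demand as COOKIE_MAPPED; request
 * completion unmaps COOKIE_MAPPED buffers, while sdhci_post_req() unmaps
 * anything that is not COOKIE_UNMAPPED, resetting the cookie in both cases.
 */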
2392
2393static inline bool sdhci_has_requests(struct sdhci_host *host)
2394{
2395 return host->cmd || host->data_cmd;
2396}
2397
2398static void sdhci_error_out_mrqs(struct sdhci_host *host, int err)
2399{
2400 if (host->data_cmd) {
2401 host->data_cmd->error = err;
2402 sdhci_finish_mrq(host, host->data_cmd->mrq);
2403 }
2404
2405 if (host->cmd) {
2406 host->cmd->error = err;
2407 sdhci_finish_mrq(host, host->cmd->mrq);
2408 }
2409}
2410
2411static void sdhci_card_event(struct mmc_host *mmc)
2412{
2413 struct sdhci_host *host = mmc_priv(mmc);
2414 unsigned long flags;
2415 int present;
2416
2417 /* First check if client has provided their own card event */
2418 if (host->ops->card_event)
2419 host->ops->card_event(host);
2420
2421 present = mmc->ops->get_cd(mmc);
2422
2423 spin_lock_irqsave(&host->lock, flags);
2424
2425 /* Check sdhci_has_requests() first in case we are runtime suspended */
2426 if (sdhci_has_requests(host) && !present) {
2427 pr_err("%s: Card removed during transfer!\n",
2428 mmc_hostname(host->mmc));
2429 pr_err("%s: Resetting controller.\n",
2430 mmc_hostname(host->mmc));
2431
2432 sdhci_do_reset(host, SDHCI_RESET_CMD);
2433 sdhci_do_reset(host, SDHCI_RESET_DATA);
2434
2435 sdhci_error_out_mrqs(host, -ENOMEDIUM);
2436 }
2437
2438 spin_unlock_irqrestore(&host->lock, flags);
2439}
2440
2441static const struct mmc_host_ops sdhci_ops = {
2442 .request = sdhci_request,
2443 .post_req = sdhci_post_req,
2444 .pre_req = sdhci_pre_req,
2445 .set_ios = sdhci_set_ios,
2446 .get_cd = sdhci_get_cd,
2447 .get_ro = sdhci_get_ro,
2448 .hw_reset = sdhci_hw_reset,
2449 .enable_sdio_irq = sdhci_enable_sdio_irq,
2450 .start_signal_voltage_switch = sdhci_start_signal_voltage_switch,
2451 .prepare_hs400_tuning = sdhci_prepare_hs400_tuning,
2452 .execute_tuning = sdhci_execute_tuning,
2453 .card_event = sdhci_card_event,
2454 .card_busy = sdhci_card_busy,
2455};
2456
2457/*****************************************************************************\
2458 * *
2459 * Tasklets *
2460 * *
2461\*****************************************************************************/
2462
2463static bool sdhci_request_done(struct sdhci_host *host)
2464{
2465 unsigned long flags;
2466 struct mmc_request *mrq;
2467 int i;
2468
2469 spin_lock_irqsave(&host->lock, flags);
2470
2471 for (i = 0; i < SDHCI_MAX_MRQS; i++) {
2472 mrq = host->mrqs_done[i];
2473 if (mrq)
2474 break;
2475 }
2476
2477 if (!mrq) {
2478 spin_unlock_irqrestore(&host->lock, flags);
2479 return true;
2480 }
2481
2482 sdhci_del_timer(host, mrq);
2483
2484 /*
2485 * Always unmap the data buffers if they were mapped by
2486 * sdhci_prepare_data() whenever we finish with a request.
2487 * This avoids leaking DMA mappings on error.
2488 */
2489 if (host->flags & SDHCI_REQ_USE_DMA) {
2490 struct mmc_data *data = mrq->data;
2491
2492 if (data && data->host_cookie == COOKIE_MAPPED) {
2493 if (host->bounce_buffer) {
2494 /*
2495 * On reads, copy the bounced data into the
2496 * sglist
2497 */
2498 if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE) {
2499 unsigned int length = data->bytes_xfered;
2500
2501 if (length > host->bounce_buffer_size) {
2502 pr_err("%s: bounce buffer is %u bytes but DMA claims to have transferred %u bytes\n",
2503 mmc_hostname(host->mmc),
2504 host->bounce_buffer_size,
2505 data->bytes_xfered);
2506 /* Cap it down and continue */
2507 length = host->bounce_buffer_size;
2508 }
2509 dma_sync_single_for_cpu(
2510 host->mmc->parent,
2511 host->bounce_addr,
2512 host->bounce_buffer_size,
2513 DMA_FROM_DEVICE);
2514 sg_copy_from_buffer(data->sg,
2515 data->sg_len,
2516 host->bounce_buffer,
2517 length);
2518 } else {
2519 /* No copying, just switch ownership */
2520 dma_sync_single_for_cpu(
2521 host->mmc->parent,
2522 host->bounce_addr,
2523 host->bounce_buffer_size,
2524 mmc_get_dma_dir(data));
2525 }
2526 } else {
2527 /* Unmap the raw data */
2528 dma_unmap_sg(mmc_dev(host->mmc), data->sg,
2529 data->sg_len,
2530 mmc_get_dma_dir(data));
2531 }
2532 data->host_cookie = COOKIE_UNMAPPED;
2533 }
2534 }
2535
2536 /*
2537 * The controller needs a reset of internal state machines
2538 * upon error conditions.
2539 */
2540 if (sdhci_needs_reset(host, mrq)) {
2541 /*
2542 * Do not finish until command and data lines are available for
2543 * reset. Note there can only be one other mrq, so it cannot
2544 * also be in mrqs_done, otherwise host->cmd and host->data_cmd
2545 * would both be null.
2546 */
2547 if (host->cmd || host->data_cmd) {
2548 spin_unlock_irqrestore(&host->lock, flags);
2549 return true;
2550 }
2551
2552 /* Some controllers need this kick or reset won't work here */
2553 if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
2554 /* This is to force an update */
2555 host->ops->set_clock(host, host->clock);
2556
2557 /* Spec says we should do both at the same time, but Ricoh
2558 controllers do not like that. */
2559 sdhci_do_reset(host, SDHCI_RESET_CMD);
2560 sdhci_do_reset(host, SDHCI_RESET_DATA);
2561
2562 host->pending_reset = false;
2563 }
2564
2565 if (!sdhci_has_requests(host))
2566 sdhci_led_deactivate(host);
2567
2568 host->mrqs_done[i] = NULL;
2569
2570 mmiowb();
2571 spin_unlock_irqrestore(&host->lock, flags);
2572
2573 mmc_request_done(host->mmc, mrq);
2574
2575 return false;
2576}
2577
2578static void sdhci_tasklet_finish(unsigned long param)
2579{
2580 struct sdhci_host *host = (struct sdhci_host *)param;
2581
2582 while (!sdhci_request_done(host))
2583 ;
2584}
2585
2586static void sdhci_timeout_timer(struct timer_list *t)
2587{
2588 struct sdhci_host *host;
2589 unsigned long flags;
2590
2591 host = from_timer(host, t, timer);
2592
2593 spin_lock_irqsave(&host->lock, flags);
2594
2595 if (host->cmd && !sdhci_data_line_cmd(host->cmd)) {
2596 pr_err("%s: Timeout waiting for hardware cmd interrupt.\n",
2597 mmc_hostname(host->mmc));
2598 sdhci_dumpregs(host);
2599
2600 host->cmd->error = -ETIMEDOUT;
2601 sdhci_finish_mrq(host, host->cmd->mrq);
2602 }
2603
2604 mmiowb();
2605 spin_unlock_irqrestore(&host->lock, flags);
2606}
2607
2608static void sdhci_timeout_data_timer(struct timer_list *t)
2609{
2610 struct sdhci_host *host;
2611 unsigned long flags;
2612
2613 host = from_timer(host, t, data_timer);
2614
2615 spin_lock_irqsave(&host->lock, flags);
2616
2617 if (host->data || host->data_cmd ||
2618 (host->cmd && sdhci_data_line_cmd(host->cmd))) {
2619 pr_err("%s: Timeout waiting for hardware interrupt.\n",
2620 mmc_hostname(host->mmc));
2621 sdhci_dumpregs(host);
2622
2623 if (host->data) {
2624 host->data->error = -ETIMEDOUT;
2625 sdhci_finish_data(host);
2626 } else if (host->data_cmd) {
2627 host->data_cmd->error = -ETIMEDOUT;
2628 sdhci_finish_mrq(host, host->data_cmd->mrq);
2629 } else {
2630 host->cmd->error = -ETIMEDOUT;
2631 sdhci_finish_mrq(host, host->cmd->mrq);
2632 }
2633 }
2634
2635 mmiowb();
2636 spin_unlock_irqrestore(&host->lock, flags);
2637}
2638
2639/*****************************************************************************\
2640 * *
2641 * Interrupt handling *
2642 * *
2643\*****************************************************************************/
2644
2645static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
2646{
2647 if (!host->cmd) {
2648 /*
2649 * SDHCI recovers from errors by resetting the cmd and data
2650 * circuits. Until that is done, there very well might be more
2651 * interrupts, so ignore them in that case.
2652 */
2653 if (host->pending_reset)
2654 return;
2655 pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n",
2656 mmc_hostname(host->mmc), (unsigned)intmask);
2657 sdhci_dumpregs(host);
2658 return;
2659 }
2660
2661 if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC |
2662 SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) {
2663 if (intmask & SDHCI_INT_TIMEOUT)
2664 host->cmd->error = -ETIMEDOUT;
2665 else
2666 host->cmd->error = -EILSEQ;
2667
2668 /*
2669 * If this command initiates a data phase and a response
2670 * CRC error is signalled, the card can start transferring
2671 * data - the card may have received the command without
2672 * error. We must not terminate the mmc_request early.
2673 *
2674 * If the card did not receive the command or returned an
2675 * error which prevented it sending data, the data phase
2676 * will time out.
2677 */
2678 if (host->cmd->data &&
2679 (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
2680 SDHCI_INT_CRC) {
2681 host->cmd = NULL;
2682 return;
2683 }
2684
2685 sdhci_finish_mrq(host, host->cmd->mrq);
2686 return;
2687 }
2688
2689 if (intmask & SDHCI_INT_RESPONSE)
2690 sdhci_finish_command(host);
2691}
2692
2693static void sdhci_adma_show_error(struct sdhci_host *host)
2694{
2695 void *desc = host->adma_table;
2696
2697 sdhci_dumpregs(host);
2698
2699 while (true) {
2700 struct sdhci_adma2_64_desc *dma_desc = desc;
2701
2702 if (host->flags & SDHCI_USE_64_BIT_DMA)
2703 DBG("%p: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
2704 desc, le32_to_cpu(dma_desc->addr_hi),
2705 le32_to_cpu(dma_desc->addr_lo),
2706 le16_to_cpu(dma_desc->len),
2707 le16_to_cpu(dma_desc->cmd));
2708 else
2709 DBG("%p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
2710 desc, le32_to_cpu(dma_desc->addr_lo),
2711 le16_to_cpu(dma_desc->len),
2712 le16_to_cpu(dma_desc->cmd));
2713
2714 desc += host->desc_sz;
2715
2716 if (dma_desc->cmd & cpu_to_le16(ADMA2_END))
2717 break;
2718 }
2719}
2720
2721static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
2722{
2723 u32 command;
2724
2725 /* CMD19 generates _only_ Buffer Read Ready interrupt */
2726 if (intmask & SDHCI_INT_DATA_AVAIL) {
2727 command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
2728 if (command == MMC_SEND_TUNING_BLOCK ||
2729 command == MMC_SEND_TUNING_BLOCK_HS200) {
2730 host->tuning_done = 1;
2731 wake_up(&host->buf_ready_int);
2732 return;
2733 }
2734 }
2735
2736 if (!host->data) {
2737 struct mmc_command *data_cmd = host->data_cmd;
2738
2739 /*
2740 * The "data complete" interrupt is also used to
2741 * indicate that a busy state has ended. See comment
2742 * above in sdhci_cmd_irq().
2743 */
2744 if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) {
2745 if (intmask & SDHCI_INT_DATA_TIMEOUT) {
2746 host->data_cmd = NULL;
2747 data_cmd->error = -ETIMEDOUT;
2748 sdhci_finish_mrq(host, data_cmd->mrq);
2749 return;
2750 }
2751 if (intmask & SDHCI_INT_DATA_END) {
2752 host->data_cmd = NULL;
2753 /*
2754 * Some cards handle busy-end interrupt
2755 * before the command completed, so make
2756 * sure we do things in the proper order.
2757 */
2758 if (host->cmd == data_cmd)
2759 return;
2760
2761 sdhci_finish_mrq(host, data_cmd->mrq);
2762 return;
2763 }
2764 }
2765
2766 /*
2767 * SDHCI recovers from errors by resetting the cmd and data
2768 * circuits. Until that is done, there very well might be more
2769 * interrupts, so ignore them in that case.
2770 */
2771 if (host->pending_reset)
2772 return;
2773
2774 pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n",
2775 mmc_hostname(host->mmc), (unsigned)intmask);
2776 sdhci_dumpregs(host);
2777
2778 return;
2779 }
2780
2781 if (intmask & SDHCI_INT_DATA_TIMEOUT)
2782 host->data->error = -ETIMEDOUT;
2783 else if (intmask & SDHCI_INT_DATA_END_BIT)
2784 host->data->error = -EILSEQ;
2785 else if ((intmask & SDHCI_INT_DATA_CRC) &&
2786 SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
2787 != MMC_BUS_TEST_R)
2788 host->data->error = -EILSEQ;
2789 else if (intmask & SDHCI_INT_ADMA_ERROR) {
2790 pr_err("%s: ADMA error\n", mmc_hostname(host->mmc));
2791 sdhci_adma_show_error(host);
2792 host->data->error = -EIO;
2793 if (host->ops->adma_workaround)
2794 host->ops->adma_workaround(host, intmask);
2795 }
2796
2797 if (host->data->error)
2798 sdhci_finish_data(host);
2799 else {
2800 if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
2801 sdhci_transfer_pio(host);
2802
2803 /*
2804 * We currently don't do anything fancy with DMA
2805 * boundaries, but as we can't disable the feature
2806 * we need to at least restart the transfer.
2807 *
2808 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS)
2809 * should return a valid address to continue from, but as
2810 * some controllers are faulty, don't trust them.
2811 */
2812 if (intmask & SDHCI_INT_DMA_END) {
2813 u32 dmastart, dmanow;
2814
2815 dmastart = sdhci_sdma_address(host);
2816 dmanow = dmastart + host->data->bytes_xfered;
2817 /*
2818 * Force update to the next DMA block boundary.
2819 */
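			/*
			 * Worked example (illustrative numbers): with the
			 * default 512 KiB boundary, dmastart = 0x10080000
			 * and bytes_xfered = 0x7f000 give dmanow =
			 * 0x100ff000, which rounds up to the next boundary
			 * at 0x10100000, so 0x80000 bytes are accounted for.
			 */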
2820 dmanow = (dmanow &
2821 ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
2822 SDHCI_DEFAULT_BOUNDARY_SIZE;
2823 host->data->bytes_xfered = dmanow - dmastart;
2824 DBG("DMA base 0x%08x, transferred 0x%06x bytes, next 0x%08x\n",
2825 dmastart, host->data->bytes_xfered, dmanow);
2826 sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
2827 }
2828
2829 if (intmask & SDHCI_INT_DATA_END) {
2830 if (host->cmd == host->data_cmd) {
2831 /*
2832 * Data managed to finish before the
2833 * command completed. Make sure we do
2834 * things in the proper order.
2835 */
2836 host->data_early = 1;
2837 } else {
2838 sdhci_finish_data(host);
2839 }
2840 }
2841 }
2842}
2843
2844static irqreturn_t sdhci_irq(int irq, void *dev_id)
2845{
2846 irqreturn_t result = IRQ_NONE;
2847 struct sdhci_host *host = dev_id;
2848 u32 intmask, mask, unexpected = 0;
2849 int max_loops = 16;
2850
2851 spin_lock(&host->lock);
2852
2853 if (host->runtime_suspended && !sdhci_sdio_irq_enabled(host)) {
2854 spin_unlock(&host->lock);
2855 return IRQ_NONE;
2856 }
2857
2858 intmask = sdhci_readl(host, SDHCI_INT_STATUS);
2859 if (!intmask || intmask == 0xffffffff) {
2860 result = IRQ_NONE;
2861 goto out;
2862 }
2863
2864 do {
2865 DBG("IRQ status 0x%08x\n", intmask);
2866
2867 if (host->ops->irq) {
2868 intmask = host->ops->irq(host, intmask);
2869 if (!intmask)
2870 goto cont;
2871 }
2872
2873 /* Clear selected interrupts. */
2874 mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
2875 SDHCI_INT_BUS_POWER);
2876 sdhci_writel(host, mask, SDHCI_INT_STATUS);
2877
2878 if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
2879 u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
2880 SDHCI_CARD_PRESENT;
2881
2882 /*
2883			 * There is an observation on i.MX eSDHC: the INSERT
2884			 * bit is immediately set again when it gets
2885			 * cleared, if a card is inserted. We have to mask
2886			 * the irq to prevent an interrupt storm which would
2887			 * freeze the system. The REMOVE bit behaves the
2888			 * same way.
2889			 *
2890			 * More testing is needed here to ensure this works
2891			 * for other platforms though.
2892 */
2893 host->ier &= ~(SDHCI_INT_CARD_INSERT |
2894 SDHCI_INT_CARD_REMOVE);
2895 host->ier |= present ? SDHCI_INT_CARD_REMOVE :
2896 SDHCI_INT_CARD_INSERT;
2897 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2898 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2899
2900 sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
2901 SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
2902
2903 host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT |
2904 SDHCI_INT_CARD_REMOVE);
2905 result = IRQ_WAKE_THREAD;
2906 }
2907
2908 if (intmask & SDHCI_INT_CMD_MASK)
2909 sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);
2910
2911 if (intmask & SDHCI_INT_DATA_MASK)
2912 sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
2913
2914 if (intmask & SDHCI_INT_BUS_POWER)
2915 pr_err("%s: Card is consuming too much power!\n",
2916 mmc_hostname(host->mmc));
2917
2918 if (intmask & SDHCI_INT_RETUNE)
2919 mmc_retune_needed(host->mmc);
2920
2921 if ((intmask & SDHCI_INT_CARD_INT) &&
2922 (host->ier & SDHCI_INT_CARD_INT)) {
2923 sdhci_enable_sdio_irq_nolock(host, false);
2924 host->thread_isr |= SDHCI_INT_CARD_INT;
2925 result = IRQ_WAKE_THREAD;
2926 }
2927
2928 intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
2929 SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
2930 SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER |
2931 SDHCI_INT_RETUNE | SDHCI_INT_CARD_INT);
2932
2933 if (intmask) {
2934 unexpected |= intmask;
2935 sdhci_writel(host, intmask, SDHCI_INT_STATUS);
2936 }
2937cont:
2938 if (result == IRQ_NONE)
2939 result = IRQ_HANDLED;
2940
2941 intmask = sdhci_readl(host, SDHCI_INT_STATUS);
2942 } while (intmask && --max_loops);
2943out:
2944 spin_unlock(&host->lock);
2945
2946 if (unexpected) {
2947 pr_err("%s: Unexpected interrupt 0x%08x.\n",
2948 mmc_hostname(host->mmc), unexpected);
2949 sdhci_dumpregs(host);
2950 }
2951
2952 return result;
2953}
2954
2955static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
2956{
2957 struct sdhci_host *host = dev_id;
2958 unsigned long flags;
2959 u32 isr;
2960
2961 spin_lock_irqsave(&host->lock, flags);
2962 isr = host->thread_isr;
2963 host->thread_isr = 0;
2964 spin_unlock_irqrestore(&host->lock, flags);
2965
2966 if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
2967 struct mmc_host *mmc = host->mmc;
2968
2969 mmc->ops->card_event(mmc);
2970 mmc_detect_change(mmc, msecs_to_jiffies(200));
2971 }
2972
2973 if (isr & SDHCI_INT_CARD_INT) {
2974 sdio_run_irqs(host->mmc);
2975
2976 spin_lock_irqsave(&host->lock, flags);
2977 if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
2978 sdhci_enable_sdio_irq_nolock(host, true);
2979 spin_unlock_irqrestore(&host->lock, flags);
2980 }
2981
2982 return isr ? IRQ_HANDLED : IRQ_NONE;
2983}
2984
2985/*****************************************************************************\
2986 * *
2987 * Suspend/resume *
2988 * *
2989\*****************************************************************************/
2990
2991#ifdef CONFIG_PM
2992
2993static bool sdhci_cd_irq_can_wakeup(struct sdhci_host *host)
2994{
2995 return mmc_card_is_removable(host->mmc) &&
2996 !(host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
2997 !mmc_can_gpio_cd(host->mmc);
2998}
2999
3000/*
3001 * To enable wakeup events, the corresponding events have to be enabled in
3002 * the Interrupt Status Enable register too. See 'Table 1-6: Wakeup Signal
3003 * Table' in the SD Host Controller Standard Specification.
3004 * It is useless to restore SDHCI_INT_ENABLE state in
3005 * sdhci_disable_irq_wakeups() since it will be set by
3006 * sdhci_enable_card_detection() or sdhci_init().
3007 */
3008static bool sdhci_enable_irq_wakeups(struct sdhci_host *host)
3009{
3010 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE |
3011 SDHCI_WAKE_ON_INT;
3012 u32 irq_val = 0;
3013 u8 wake_val = 0;
3014 u8 val;
3015
3016 if (sdhci_cd_irq_can_wakeup(host)) {
3017 wake_val |= SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE;
3018 irq_val |= SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE;
3019 }
3020
3021 if (mmc_card_wake_sdio_irq(host->mmc)) {
3022 wake_val |= SDHCI_WAKE_ON_INT;
3023 irq_val |= SDHCI_INT_CARD_INT;
3024 }
3025
3026 if (!irq_val)
3027 return false;
3028
3029 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
3030 val &= ~mask;
3031 val |= wake_val;
3032 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
3033
3034 sdhci_writel(host, irq_val, SDHCI_INT_ENABLE);
3035
3036 host->irq_wake_enabled = !enable_irq_wake(host->irq);
3037
3038 return host->irq_wake_enabled;
3039}
3040
3041static void sdhci_disable_irq_wakeups(struct sdhci_host *host)
3042{
3043 u8 val;
3044 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
3045 | SDHCI_WAKE_ON_INT;
3046
3047 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
3048 val &= ~mask;
3049 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
3050
3051 disable_irq_wake(host->irq);
3052
3053 host->irq_wake_enabled = false;
3054}
3055
3056int sdhci_suspend_host(struct sdhci_host *host)
3057{
3058 sdhci_disable_card_detection(host);
3059
3060 mmc_retune_timer_stop(host->mmc);
3061
3062 if (!device_may_wakeup(mmc_dev(host->mmc)) ||
3063 !sdhci_enable_irq_wakeups(host)) {
3064 host->ier = 0;
3065 sdhci_writel(host, 0, SDHCI_INT_ENABLE);
3066 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
3067 free_irq(host->irq, host);
3068 }
3069
3070 return 0;
3071}
3072
3073EXPORT_SYMBOL_GPL(sdhci_suspend_host);
3074
3075int sdhci_resume_host(struct sdhci_host *host)
3076{
3077 struct mmc_host *mmc = host->mmc;
3078 int ret = 0;
3079
3080 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
3081 if (host->ops->enable_dma)
3082 host->ops->enable_dma(host);
3083 }
3084
3085 if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) &&
3086 (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) {
3087 /* Card keeps power but host controller does not */
3088 sdhci_init(host, 0);
3089 host->pwr = 0;
3090 host->clock = 0;
3091 mmc->ops->set_ios(mmc, &mmc->ios);
3092 } else {
3093 sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER));
3094 mmiowb();
3095 }
3096
3097 if (host->irq_wake_enabled) {
3098 sdhci_disable_irq_wakeups(host);
3099 } else {
3100 ret = request_threaded_irq(host->irq, sdhci_irq,
3101 sdhci_thread_irq, IRQF_SHARED,
3102 mmc_hostname(host->mmc), host);
3103 if (ret)
3104 return ret;
3105 }
3106
3107 sdhci_enable_card_detection(host);
3108
3109 return ret;
3110}
3111
3112EXPORT_SYMBOL_GPL(sdhci_resume_host);
3113
3114int sdhci_runtime_suspend_host(struct sdhci_host *host)
3115{
3116 unsigned long flags;
3117
3118 mmc_retune_timer_stop(host->mmc);
3119
3120 spin_lock_irqsave(&host->lock, flags);
3121 host->ier &= SDHCI_INT_CARD_INT;
3122 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
3123 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
3124 spin_unlock_irqrestore(&host->lock, flags);
3125
3126 synchronize_hardirq(host->irq);
3127
3128 spin_lock_irqsave(&host->lock, flags);
3129 host->runtime_suspended = true;
3130 spin_unlock_irqrestore(&host->lock, flags);
3131
3132 return 0;
3133}
3134EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);
3135
3136int sdhci_runtime_resume_host(struct sdhci_host *host)
3137{
3138 struct mmc_host *mmc = host->mmc;
3139 unsigned long flags;
3140 int host_flags = host->flags;
3141
3142 if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
3143 if (host->ops->enable_dma)
3144 host->ops->enable_dma(host);
3145 }
3146
3147 sdhci_init(host, 0);
3148
3149 if (mmc->ios.power_mode != MMC_POWER_UNDEFINED &&
3150 mmc->ios.power_mode != MMC_POWER_OFF) {
3151 /* Force clock and power re-program */
3152 host->pwr = 0;
3153 host->clock = 0;
3154 mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios);
3155 mmc->ops->set_ios(mmc, &mmc->ios);
3156
3157 if ((host_flags & SDHCI_PV_ENABLED) &&
3158 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
3159 spin_lock_irqsave(&host->lock, flags);
3160 sdhci_enable_preset_value(host, true);
3161 spin_unlock_irqrestore(&host->lock, flags);
3162 }
3163
3164 if ((mmc->caps2 & MMC_CAP2_HS400_ES) &&
3165 mmc->ops->hs400_enhanced_strobe)
3166 mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios);
3167 }
3168
3169 spin_lock_irqsave(&host->lock, flags);
3170
3171 host->runtime_suspended = false;
3172
3173 /* Enable SDIO IRQ */
3174 if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
3175 sdhci_enable_sdio_irq_nolock(host, true);
3176
3177 /* Enable Card Detection */
3178 sdhci_enable_card_detection(host);
3179
3180 spin_unlock_irqrestore(&host->lock, flags);
3181
3182 return 0;
3183}
3184EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host);
3185
3186#endif /* CONFIG_PM */
3187
3188/*****************************************************************************\
3189 * *
3190 * Command Queue Engine (CQE) helpers *
3191 * *
3192\*****************************************************************************/
3193
3194void sdhci_cqe_enable(struct mmc_host *mmc)
3195{
3196 struct sdhci_host *host = mmc_priv(mmc);
3197 unsigned long flags;
3198 u8 ctrl;
3199
3200 spin_lock_irqsave(&host->lock, flags);
3201
3202 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
3203 ctrl &= ~SDHCI_CTRL_DMA_MASK;
3204 if (host->flags & SDHCI_USE_64_BIT_DMA)
3205 ctrl |= SDHCI_CTRL_ADMA64;
3206 else
3207 ctrl |= SDHCI_CTRL_ADMA32;
3208 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
3209
3210 sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, 512),
3211 SDHCI_BLOCK_SIZE);
3212
3213	/* Set maximum timeout: 0xE selects TMCLK * 2^27, the largest data timeout */
3214 sdhci_writeb(host, 0xE, SDHCI_TIMEOUT_CONTROL);
3215
3216 host->ier = host->cqe_ier;
3217
3218 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
3219 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
3220
3221 host->cqe_on = true;
3222
3223 pr_debug("%s: sdhci: CQE on, IRQ mask %#x, IRQ status %#x\n",
3224 mmc_hostname(mmc), host->ier,
3225 sdhci_readl(host, SDHCI_INT_STATUS));
3226
3227 mmiowb();
3228 spin_unlock_irqrestore(&host->lock, flags);
3229}
3230EXPORT_SYMBOL_GPL(sdhci_cqe_enable);
3231
3232void sdhci_cqe_disable(struct mmc_host *mmc, bool recovery)
3233{
3234 struct sdhci_host *host = mmc_priv(mmc);
3235 unsigned long flags;
3236
3237 spin_lock_irqsave(&host->lock, flags);
3238
3239 sdhci_set_default_irqs(host);
3240
3241 host->cqe_on = false;
3242
3243 if (recovery) {
3244 sdhci_do_reset(host, SDHCI_RESET_CMD);
3245 sdhci_do_reset(host, SDHCI_RESET_DATA);
3246 }
3247
3248 pr_debug("%s: sdhci: CQE off, IRQ mask %#x, IRQ status %#x\n",
3249 mmc_hostname(mmc), host->ier,
3250 sdhci_readl(host, SDHCI_INT_STATUS));
3251
3252 mmiowb();
3253 spin_unlock_irqrestore(&host->lock, flags);
3254}
3255EXPORT_SYMBOL_GPL(sdhci_cqe_disable);
3256
3257bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error,
3258 int *data_error)
3259{
3260 u32 mask;
3261
3262 if (!host->cqe_on)
3263 return false;
3264
3265 if (intmask & (SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC))
3266 *cmd_error = -EILSEQ;
3267 else if (intmask & SDHCI_INT_TIMEOUT)
3268 *cmd_error = -ETIMEDOUT;
3269 else
3270 *cmd_error = 0;
3271
3272 if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC))
3273 *data_error = -EILSEQ;
3274 else if (intmask & SDHCI_INT_DATA_TIMEOUT)
3275 *data_error = -ETIMEDOUT;
3276 else if (intmask & SDHCI_INT_ADMA_ERROR)
3277 *data_error = -EIO;
3278 else
3279 *data_error = 0;
3280
3281 /* Clear selected interrupts. */
3282 mask = intmask & host->cqe_ier;
3283 sdhci_writel(host, mask, SDHCI_INT_STATUS);
3284
3285 if (intmask & SDHCI_INT_BUS_POWER)
3286 pr_err("%s: Card is consuming too much power!\n",
3287 mmc_hostname(host->mmc));
3288
3289 intmask &= ~(host->cqe_ier | SDHCI_INT_ERROR);
3290 if (intmask) {
3291 sdhci_writel(host, intmask, SDHCI_INT_STATUS);
3292 pr_err("%s: CQE: Unexpected interrupt 0x%08x.\n",
3293 mmc_hostname(host->mmc), intmask);
3294 sdhci_dumpregs(host);
3295 }
3296
3297 return true;
3298}
3299EXPORT_SYMBOL_GPL(sdhci_cqe_irq);
3300
3301/*****************************************************************************\
3302 * *
3303 * Device allocation/registration *
3304 * *
3305\*****************************************************************************/
3306
3307struct sdhci_host *sdhci_alloc_host(struct device *dev,
3308 size_t priv_size)
3309{
3310 struct mmc_host *mmc;
3311 struct sdhci_host *host;
3312
3313 WARN_ON(dev == NULL);
3314
3315 mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
3316 if (!mmc)
3317 return ERR_PTR(-ENOMEM);
3318
3319 host = mmc_priv(mmc);
3320 host->mmc = mmc;
3321 host->mmc_host_ops = sdhci_ops;
3322 mmc->ops = &host->mmc_host_ops;
3323
3324 host->flags = SDHCI_SIGNALING_330;
3325
3326 host->cqe_ier = SDHCI_CQE_INT_MASK;
3327 host->cqe_err_ier = SDHCI_CQE_INT_ERR_MASK;
3328
3329 host->tuning_delay = -1;
3330
3331 host->sdma_boundary = SDHCI_DEFAULT_BOUNDARY_ARG;
3332
3333 return host;
3334}
3335
3336EXPORT_SYMBOL_GPL(sdhci_alloc_host);
3337
3338static int sdhci_set_dma_mask(struct sdhci_host *host)
3339{
3340 struct mmc_host *mmc = host->mmc;
3341 struct device *dev = mmc_dev(mmc);
3342 int ret = -EINVAL;
3343
3344 if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA)
3345 host->flags &= ~SDHCI_USE_64_BIT_DMA;
3346
3347 /* Try 64-bit mask if hardware is capable of it */
3348 if (host->flags & SDHCI_USE_64_BIT_DMA) {
3349 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
3350 if (ret) {
3351 pr_warn("%s: Failed to set 64-bit DMA mask.\n",
3352 mmc_hostname(mmc));
3353 host->flags &= ~SDHCI_USE_64_BIT_DMA;
3354 }
3355 }
3356
3357 /* 32-bit mask as default & fallback */
3358 if (ret) {
3359 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
3360 if (ret)
3361 pr_warn("%s: Failed to set 32-bit DMA mask.\n",
3362 mmc_hostname(mmc));
3363 }
3364
3365 return ret;
3366}
3367
3368void __sdhci_read_caps(struct sdhci_host *host, u16 *ver, u32 *caps, u32 *caps1)
3369{
3370 u16 v;
3371 u64 dt_caps_mask = 0;
3372 u64 dt_caps = 0;
3373
3374 if (host->read_caps)
3375 return;
3376
3377 host->read_caps = true;
3378
3379 if (debug_quirks)
3380 host->quirks = debug_quirks;
3381
3382 if (debug_quirks2)
3383 host->quirks2 = debug_quirks2;
3384
3385 sdhci_do_reset(host, SDHCI_RESET_ALL);
3386
3387 of_property_read_u64(mmc_dev(host->mmc)->of_node,
3388 "sdhci-caps-mask", &dt_caps_mask);
3389 of_property_read_u64(mmc_dev(host->mmc)->of_node,
3390 "sdhci-caps", &dt_caps);
3391
3392 v = ver ? *ver : sdhci_readw(host, SDHCI_HOST_VERSION);
3393 host->version = (v & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT;
3394
3395 if (host->quirks & SDHCI_QUIRK_MISSING_CAPS)
3396 return;
3397
3398 if (caps) {
3399 host->caps = *caps;
3400 } else {
3401 host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
3402 host->caps &= ~lower_32_bits(dt_caps_mask);
3403 host->caps |= lower_32_bits(dt_caps);
3404 }
3405
3406 if (host->version < SDHCI_SPEC_300)
3407 return;
3408
3409 if (caps1) {
3410 host->caps1 = *caps1;
3411 } else {
3412 host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
3413 host->caps1 &= ~upper_32_bits(dt_caps_mask);
3414 host->caps1 |= upper_32_bits(dt_caps);
3415 }
3416}
3417EXPORT_SYMBOL_GPL(__sdhci_read_caps);
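
/*
 * Illustrative device tree usage for the properties read above (bit values
 * follow sdhci.h; the node reference is an example): the upper 32 bits of
 * each u64 apply to SDHCI_CAPABILITIES_1 and the lower 32 bits to
 * SDHCI_CAPABILITIES, so masking out DDR50 support while forcing the 8-bit
 * capability could look like:
 *
 *	&some_sdhci_controller {
 *		sdhci-caps-mask = <0x00000400 0x00000000>;
 *		sdhci-caps = <0x00000000 0x00040000>;
 *	};
 */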
3418
3419static int sdhci_allocate_bounce_buffer(struct sdhci_host *host)
3420{
3421 struct mmc_host *mmc = host->mmc;
3422 unsigned int max_blocks;
3423 unsigned int bounce_size;
3424 int ret;
3425
3426 /*
3427 * Cap the bounce buffer at 64KB. Using a bigger bounce buffer
3428	 * has diminishing returns, probably because SD/MMC cards are
3429	 * usually optimized to handle requests of this size.
3430 */
3431 bounce_size = SZ_64K;
3432 /*
3433	 * Shrink the bounce buffer to the maximum request size if that
3434	 * is smaller; the maximum request size is then clamped down
3435	 * to the bounce buffer size below.
3436 */
3437 if (mmc->max_req_size < bounce_size)
3438 bounce_size = mmc->max_req_size;
3439 max_blocks = bounce_size / 512;
3440
3441 /*
3442	 * When we support just one segment, we can get significant
3443	 * speedups with the help of a bounce buffer that groups scattered
3444 * reads/writes together.
3445 */
3446 host->bounce_buffer = devm_kmalloc(mmc->parent,
3447 bounce_size,
3448 GFP_KERNEL);
3449 if (!host->bounce_buffer) {
3450 pr_err("%s: failed to allocate %u bytes for bounce buffer, falling back to single segments\n",
3451 mmc_hostname(mmc),
3452 bounce_size);
3453 /*
3454 * Exiting with zero here makes sure we proceed with
3455 * mmc->max_segs == 1.
3456 */
3457 return 0;
3458 }
3459
3460 host->bounce_addr = dma_map_single(mmc->parent,
3461 host->bounce_buffer,
3462 bounce_size,
3463 DMA_BIDIRECTIONAL);
3464 ret = dma_mapping_error(mmc->parent, host->bounce_addr);
3465 if (ret)
3466 /* Again fall back to max_segs == 1 */
3467 return 0;
3468 host->bounce_buffer_size = bounce_size;
3469
3470 /* Lie about this since we're bouncing */
3471 mmc->max_segs = max_blocks;
3472 mmc->max_seg_size = bounce_size;
3473 mmc->max_req_size = bounce_size;
3474
3475 pr_info("%s bounce up to %u segments into one, max segment size %u bytes\n",
3476 mmc_hostname(mmc), max_blocks, bounce_size);
3477
3478 return 0;
3479}
3480
3481int sdhci_setup_host(struct sdhci_host *host)
3482{
3483 struct mmc_host *mmc;
3484 u32 max_current_caps;
3485 unsigned int ocr_avail;
3486 unsigned int override_timeout_clk;
3487 u32 max_clk;
3488 int ret;
3489
3490 WARN_ON(host == NULL);
3491 if (host == NULL)
3492 return -EINVAL;
3493
3494 mmc = host->mmc;
3495
3496 /*
3497 * If there are external regulators, get them. Note this must be done
3498 * early before resetting the host and reading the capabilities so that
3499 * the host can take the appropriate action if regulators are not
3500 * available.
3501 */
3502 ret = mmc_regulator_get_supply(mmc);
3503 if (ret)
3504 return ret;
3505
3506 DBG("Version: 0x%08x | Present: 0x%08x\n",
3507 sdhci_readw(host, SDHCI_HOST_VERSION),
3508 sdhci_readl(host, SDHCI_PRESENT_STATE));
3509 DBG("Caps: 0x%08x | Caps_1: 0x%08x\n",
3510 sdhci_readl(host, SDHCI_CAPABILITIES),
3511 sdhci_readl(host, SDHCI_CAPABILITIES_1));
3512
3513 sdhci_read_caps(host);
3514
3515 override_timeout_clk = host->timeout_clk;
3516
3517 if (host->version > SDHCI_SPEC_300) {
3518 pr_err("%s: Unknown controller version (%d). You may experience problems.\n",
3519 mmc_hostname(mmc), host->version);
3520 }
3521
3522 if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
3523 host->flags |= SDHCI_USE_SDMA;
3524 else if (!(host->caps & SDHCI_CAN_DO_SDMA))
3525 DBG("Controller doesn't have SDMA capability\n");
3526 else
3527 host->flags |= SDHCI_USE_SDMA;
3528
3529 if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
3530 (host->flags & SDHCI_USE_SDMA)) {
3531 DBG("Disabling DMA as it is marked broken\n");
3532 host->flags &= ~SDHCI_USE_SDMA;
3533 }
3534
3535 if ((host->version >= SDHCI_SPEC_200) &&
3536 (host->caps & SDHCI_CAN_DO_ADMA2))
3537 host->flags |= SDHCI_USE_ADMA;
3538
3539 if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
3540 (host->flags & SDHCI_USE_ADMA)) {
3541 DBG("Disabling ADMA as it is marked broken\n");
3542 host->flags &= ~SDHCI_USE_ADMA;
3543 }
3544
3545 /*
3546 * It is assumed that a 64-bit capable device has set a 64-bit DMA mask
3547 * and *must* do 64-bit DMA. A driver has the opportunity to change
3548 * that during the first call to ->enable_dma(). Similarly
3549 * SDHCI_QUIRK2_BROKEN_64_BIT_DMA must be left to the drivers to
3550 * implement.
3551 */
3552 if (host->caps & SDHCI_CAN_64BIT)
3553 host->flags |= SDHCI_USE_64_BIT_DMA;
3554
3555 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
3556 ret = sdhci_set_dma_mask(host);
3557
3558 if (!ret && host->ops->enable_dma)
3559 ret = host->ops->enable_dma(host);
3560
3561 if (ret) {
3562 pr_warn("%s: No suitable DMA available - falling back to PIO\n",
3563 mmc_hostname(mmc));
3564 host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
3565
3566 ret = 0;
3567 }
3568 }
3569
3570 /* SDMA does not support 64-bit DMA */
3571 if (host->flags & SDHCI_USE_64_BIT_DMA)
3572 host->flags &= ~SDHCI_USE_SDMA;
3573
3574 if (host->flags & SDHCI_USE_ADMA) {
3575 dma_addr_t dma;
3576 void *buf;
3577
3578 /*
3579 * The DMA descriptor table size is calculated as the maximum
3580 * number of segments times 2, to allow for an alignment
3581 * descriptor for each segment, plus 1 for a nop end descriptor,
3582		 * all multiplied by the descriptor size.
3583 */
3584 if (host->flags & SDHCI_USE_64_BIT_DMA) {
3585 host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
3586 SDHCI_ADMA2_64_DESC_SZ;
3587 host->desc_sz = SDHCI_ADMA2_64_DESC_SZ;
3588 } else {
3589 host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
3590 SDHCI_ADMA2_32_DESC_SZ;
3591 host->desc_sz = SDHCI_ADMA2_32_DESC_SZ;
3592 }
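
		/*
		 * Worked example (assuming SDHCI_MAX_SEGS = 128 and the
		 * 12-byte/8-byte descriptor sizes from sdhci.h): the 64-bit
		 * table is (128 * 2 + 1) * 12 = 3084 bytes and the 32-bit
		 * table is (128 * 2 + 1) * 8 = 2056 bytes.
		 */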
3593
3594 host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
3595 buf = dma_alloc_coherent(mmc_dev(mmc), host->align_buffer_sz +
3596 host->adma_table_sz, &dma, GFP_KERNEL);
3597 if (!buf) {
3598 pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
3599 mmc_hostname(mmc));
3600 host->flags &= ~SDHCI_USE_ADMA;
3601 } else if ((dma + host->align_buffer_sz) &
3602 (SDHCI_ADMA2_DESC_ALIGN - 1)) {
3603 pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
3604 mmc_hostname(mmc));
3605 host->flags &= ~SDHCI_USE_ADMA;
3606 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
3607 host->adma_table_sz, buf, dma);
3608 } else {
3609 host->align_buffer = buf;
3610 host->align_addr = dma;
3611
3612 host->adma_table = buf + host->align_buffer_sz;
3613 host->adma_addr = dma + host->align_buffer_sz;
3614 }
3615 }
3616
3617 /*
3618 * If we use DMA, then it's up to the caller to set the DMA
3619 * mask, but PIO does not need the hw shim so we set a new
3620 * mask here in that case.
3621 */
3622 if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
3623 host->dma_mask = DMA_BIT_MASK(64);
3624 mmc_dev(mmc)->dma_mask = &host->dma_mask;
3625 }
3626
3627 if (host->version >= SDHCI_SPEC_300)
3628 host->max_clk = (host->caps & SDHCI_CLOCK_V3_BASE_MASK)
3629 >> SDHCI_CLOCK_BASE_SHIFT;
3630 else
3631 host->max_clk = (host->caps & SDHCI_CLOCK_BASE_MASK)
3632 >> SDHCI_CLOCK_BASE_SHIFT;
3633
3634 host->max_clk *= 1000000;
3635 if (host->max_clk == 0 || host->quirks &
3636 SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
3637 if (!host->ops->get_max_clock) {
3638 pr_err("%s: Hardware doesn't specify base clock frequency.\n",
3639 mmc_hostname(mmc));
3640 ret = -ENODEV;
3641 goto undma;
3642 }
3643 host->max_clk = host->ops->get_max_clock(host);
3644 }
3645
3646 /*
3647 * In case of Host Controller v3.00, find out whether clock
3648 * multiplier is supported.
3649 */
3650 host->clk_mul = (host->caps1 & SDHCI_CLOCK_MUL_MASK) >>
3651 SDHCI_CLOCK_MUL_SHIFT;
3652
3653 /*
3654	 * If the value in Clock Multiplier is 0, programmable clock
3655	 * mode is not supported; otherwise the actual clock
3656 * multiplier is one more than the value of Clock Multiplier
3657 * in the Capabilities Register.
3658 */
3659 if (host->clk_mul)
3660 host->clk_mul += 1;
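
	/*
	 * Worked example (illustrative numbers): a Clock Multiplier field
	 * of 9 means an effective multiplier of 10, so with a 100 MHz base
	 * clock the code below raises max_clk to 1000 MHz and sets
	 * mmc->f_min to (100 MHz * 10) / 1024, roughly 977 kHz.
	 */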
3661
3662 /*
3663 * Set host parameters.
3664 */
3665 max_clk = host->max_clk;
3666
3667 if (host->ops->get_min_clock)
3668 mmc->f_min = host->ops->get_min_clock(host);
3669 else if (host->version >= SDHCI_SPEC_300) {
3670 if (host->clk_mul) {
3671 mmc->f_min = (host->max_clk * host->clk_mul) / 1024;
3672 max_clk = host->max_clk * host->clk_mul;
3673 } else
3674 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
3675 } else
3676 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
3677
3678 if (!mmc->f_max || mmc->f_max > max_clk)
3679 mmc->f_max = max_clk;
3680
3681 if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
3682 host->timeout_clk = (host->caps & SDHCI_TIMEOUT_CLK_MASK) >>
3683 SDHCI_TIMEOUT_CLK_SHIFT;
3684
3685 if (host->caps & SDHCI_TIMEOUT_CLK_UNIT)
3686 host->timeout_clk *= 1000;
3687
3688 if (host->timeout_clk == 0) {
3689 if (!host->ops->get_timeout_clock) {
3690 pr_err("%s: Hardware doesn't specify timeout clock frequency.\n",
3691 mmc_hostname(mmc));
3692 ret = -ENODEV;
3693 goto undma;
3694 }
3695
3696 host->timeout_clk =
3697 DIV_ROUND_UP(host->ops->get_timeout_clock(host),
3698 1000);
3699 }
3700
3701 if (override_timeout_clk)
3702 host->timeout_clk = override_timeout_clk;
3703
3704 mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
3705 host->ops->get_max_timeout_count(host) : 1 << 27;
3706 mmc->max_busy_timeout /= host->timeout_clk;
3707 }
3708
3709 if (host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT &&
3710 !host->ops->get_max_timeout_count)
3711 mmc->max_busy_timeout = 0;
3712
3713 mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
3714 mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
3715
3716 if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
3717 host->flags |= SDHCI_AUTO_CMD12;
3718
3719 /* Auto-CMD23 stuff only works in ADMA or PIO. */
3720 if ((host->version >= SDHCI_SPEC_300) &&
3721 ((host->flags & SDHCI_USE_ADMA) ||
3722 !(host->flags & SDHCI_USE_SDMA)) &&
3723 !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) {
3724 host->flags |= SDHCI_AUTO_CMD23;
3725 DBG("Auto-CMD23 available\n");
3726 } else {
3727 DBG("Auto-CMD23 unavailable\n");
3728 }
3729
3730 /*
3731 * A controller may support 8-bit width, but the board itself
3732 * might not have the pins brought out. Boards that support
3733 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
3734 * their platform code before calling sdhci_add_host(), and we
3735 * won't assume 8-bit width for hosts without that CAP.
3736 */
3737 if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
3738 mmc->caps |= MMC_CAP_4_BIT_DATA;
3739
3740 if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
3741 mmc->caps &= ~MMC_CAP_CMD23;
3742
3743 if (host->caps & SDHCI_CAN_DO_HISPD)
3744 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
3745
3746 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
3747 mmc_card_is_removable(mmc) &&
3748 mmc_gpio_get_cd(host->mmc) < 0)
3749 mmc->caps |= MMC_CAP_NEEDS_POLL;
3750
3751 if (!IS_ERR(mmc->supply.vqmmc)) {
3752 ret = regulator_enable(mmc->supply.vqmmc);
3753
3754 /* If vqmmc provides no 1.8V signalling, then there's no UHS */
3755 if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000,
3756 1950000))
3757 host->caps1 &= ~(SDHCI_SUPPORT_SDR104 |
3758 SDHCI_SUPPORT_SDR50 |
3759 SDHCI_SUPPORT_DDR50);
3760
3761 /* In eMMC case vqmmc might be a fixed 1.8V regulator */
3762 if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 2700000,
3763 3600000))
3764 host->flags &= ~SDHCI_SIGNALING_330;
3765
3766 if (ret) {
3767 pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
3768 mmc_hostname(mmc), ret);
3769 mmc->supply.vqmmc = ERR_PTR(-EINVAL);
3770 }
3771 }
3772
3773 if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V) {
3774 host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
3775 SDHCI_SUPPORT_DDR50);
3776 /*
3777 * The SDHCI controller in a SoC might support HS200/HS400
3778 * (indicated using mmc-hs200-1_8v/mmc-hs400-1_8v dt property),
3779 * but if the board is modeled such that the IO lines are not
3780 * connected to 1.8v then HS200/HS400 cannot be supported.
3781 * Disable HS200/HS400 if the board does not have 1.8v connected
3782 * to the IO lines. (Applicable for other modes in 1.8v)
3783 */
3784 mmc->caps2 &= ~(MMC_CAP2_HSX00_1_8V | MMC_CAP2_HS400_ES);
3785 mmc->caps &= ~(MMC_CAP_1_8V_DDR | MMC_CAP_UHS);
3786 }
3787
3788 /* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
3789 if (host->caps1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
3790 SDHCI_SUPPORT_DDR50))
3791 mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;
3792
3793	/* SDR104 support also implies SDR50 support */
3794 if (host->caps1 & SDHCI_SUPPORT_SDR104) {
3795 mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
3796 /* SD3.0: SDR104 is supported so (for eMMC) the caps2
3797 * field can be promoted to support HS200.
3798 */
3799 if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
3800 mmc->caps2 |= MMC_CAP2_HS200;
3801 } else if (host->caps1 & SDHCI_SUPPORT_SDR50) {
3802 mmc->caps |= MMC_CAP_UHS_SDR50;
3803 }
3804
3805 if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 &&
3806 (host->caps1 & SDHCI_SUPPORT_HS400))
3807 mmc->caps2 |= MMC_CAP2_HS400;
3808
3809 if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) &&
3810 (IS_ERR(mmc->supply.vqmmc) ||
3811 !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000,
3812 1300000)))
3813 mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V;
3814
3815 if ((host->caps1 & SDHCI_SUPPORT_DDR50) &&
3816 !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
3817 mmc->caps |= MMC_CAP_UHS_DDR50;
3818
3819 /* Does the host need tuning for SDR50? */
3820 if (host->caps1 & SDHCI_USE_SDR50_TUNING)
3821 host->flags |= SDHCI_SDR50_NEEDS_TUNING;
3822
3823 /* Driver Type(s) (A, C, D) supported by the host */
3824 if (host->caps1 & SDHCI_DRIVER_TYPE_A)
3825 mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
3826 if (host->caps1 & SDHCI_DRIVER_TYPE_C)
3827 mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
3828 if (host->caps1 & SDHCI_DRIVER_TYPE_D)
3829 mmc->caps |= MMC_CAP_DRIVER_TYPE_D;
3830
3831 /* Initial value for re-tuning timer count */
3832 host->tuning_count = (host->caps1 & SDHCI_RETUNING_TIMER_COUNT_MASK) >>
3833 SDHCI_RETUNING_TIMER_COUNT_SHIFT;
3834
3835 /*
3836 * In case Re-tuning Timer is not disabled, the actual value of
3837 * re-tuning timer will be 2 ^ (n - 1).
3838 */
3839 if (host->tuning_count)
3840 host->tuning_count = 1 << (host->tuning_count - 1);
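
	/*
	 * Worked example: a raw Re-tuning Timer Count of 4 in the
	 * Capabilities register yields 2 ^ (4 - 1) = 8, i.e. a re-tuning
	 * period of 8 seconds once sdhci_execute_tuning() hands it to the
	 * core as mmc->retune_period.
	 */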
3841
3842 /* Re-tuning mode supported by the Host Controller */
3843 host->tuning_mode = (host->caps1 & SDHCI_RETUNING_MODE_MASK) >>
3844 SDHCI_RETUNING_MODE_SHIFT;
3845
3846 ocr_avail = 0;
3847
3848 /*
3849 * According to SD Host Controller spec v3.00, if the Host System
3850 * can afford more than 150mA, Host Driver should set XPC to 1. Also
3851 * the value is meaningful only if Voltage Support in the Capabilities
3852 * register is set. The actual current value is 4 times the register
3853 * value.
3854 */
3855 max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
3856 if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) {
3857 int curr = regulator_get_current_limit(mmc->supply.vmmc);
3858 if (curr > 0) {
3859
3860 /* convert to SDHCI_MAX_CURRENT format */
3861 curr = curr/1000; /* convert to mA */
3862 curr = curr/SDHCI_MAX_CURRENT_MULTIPLIER;
3863
3864 curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
3865 max_current_caps =
3866 (curr << SDHCI_MAX_CURRENT_330_SHIFT) |
3867 (curr << SDHCI_MAX_CURRENT_300_SHIFT) |
3868 (curr << SDHCI_MAX_CURRENT_180_SHIFT);
3869 }
3870 }
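
	/*
	 * Worked example for the fallback above (illustrative numbers): a
	 * vmmc limit of 800000 uA becomes 800 mA, which divided by the
	 * 4 mA per step of SDHCI_MAX_CURRENT_MULTIPLIER encodes as 200 in
	 * each 8-bit field; the decode below then reports 200 * 4 = 800 mA.
	 */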
3871
3872 if (host->caps & SDHCI_CAN_VDD_330) {
3873 ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;
3874
3875 mmc->max_current_330 = ((max_current_caps &
3876 SDHCI_MAX_CURRENT_330_MASK) >>
3877 SDHCI_MAX_CURRENT_330_SHIFT) *
3878 SDHCI_MAX_CURRENT_MULTIPLIER;
3879 }
3880 if (host->caps & SDHCI_CAN_VDD_300) {
3881 ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;
3882
3883 mmc->max_current_300 = ((max_current_caps &
3884 SDHCI_MAX_CURRENT_300_MASK) >>
3885 SDHCI_MAX_CURRENT_300_SHIFT) *
3886 SDHCI_MAX_CURRENT_MULTIPLIER;
3887 }
3888 if (host->caps & SDHCI_CAN_VDD_180) {
3889 ocr_avail |= MMC_VDD_165_195;
3890
3891 mmc->max_current_180 = ((max_current_caps &
3892 SDHCI_MAX_CURRENT_180_MASK) >>
3893 SDHCI_MAX_CURRENT_180_SHIFT) *
3894 SDHCI_MAX_CURRENT_MULTIPLIER;
3895 }
3896
3897 /* If OCR set by host, use it instead. */
3898 if (host->ocr_mask)
3899 ocr_avail = host->ocr_mask;
3900
3901 /* If OCR set by external regulators, give it highest prio. */
3902 if (mmc->ocr_avail)
3903 ocr_avail = mmc->ocr_avail;
3904
3905 mmc->ocr_avail = ocr_avail;
3906 mmc->ocr_avail_sdio = ocr_avail;
3907 if (host->ocr_avail_sdio)
3908 mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
3909 mmc->ocr_avail_sd = ocr_avail;
3910 if (host->ocr_avail_sd)
3911 mmc->ocr_avail_sd &= host->ocr_avail_sd;
3912 else /* normal SD controllers don't support 1.8V */
3913 mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
3914 mmc->ocr_avail_mmc = ocr_avail;
3915 if (host->ocr_avail_mmc)
3916 mmc->ocr_avail_mmc &= host->ocr_avail_mmc;
3917
3918 if (mmc->ocr_avail == 0) {
3919		pr_err("%s: Hardware doesn't report any supported voltages.\n",
3920 mmc_hostname(mmc));
3921 ret = -ENODEV;
3922 goto unreg;
3923 }
3924
3925 if ((mmc->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
3926 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
3927 MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR)) ||
3928 (mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V)))
3929 host->flags |= SDHCI_SIGNALING_180;
3930
3931 if (mmc->caps2 & MMC_CAP2_HSX00_1_2V)
3932 host->flags |= SDHCI_SIGNALING_120;
3933
3934 spin_lock_init(&host->lock);
3935
	/*
	 * Maximum request size in bytes, i.e. 512KiB. Limited by the SDMA
	 * boundary size. Some tuning modes impose a 4MiB limit, but 512KiB
	 * is below that anyway.
	 */
	mmc->max_req_size = 524288;
3942
	/*
	 * Maximum number of segments. Depends on whether the hardware
	 * can do scatter/gather.
	 */
3947 if (host->flags & SDHCI_USE_ADMA) {
3948 mmc->max_segs = SDHCI_MAX_SEGS;
3949 } else if (host->flags & SDHCI_USE_SDMA) {
3950 mmc->max_segs = 1;
3951 if (swiotlb_max_segment()) {
3952 unsigned int max_req_size = (1 << IO_TLB_SHIFT) *
3953 IO_TLB_SEGSIZE;
3954 mmc->max_req_size = min(mmc->max_req_size,
3955 max_req_size);
3956 }
3957 } else { /* PIO */
3958 mmc->max_segs = SDHCI_MAX_SEGS;
3959 }
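
	/*
	 * Worked example for the swiotlb clamp above, assuming the common
	 * IO_TLB_SHIFT = 11 (2 KiB slots) and IO_TLB_SEGSIZE = 128: the
	 * largest contiguous bounce allocation is 128 * 2048 = 256 KiB, so
	 * max_req_size drops from 512 KiB to 256 KiB when SDMA transfers may
	 * be bounced through swiotlb.
	 */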
3960
	/*
	 * Maximum segment size. A single segment may cover the whole request,
	 * except that with hardware scatter/gather (ADMA) each descriptor
	 * entry cannot be larger than 64 KiB.
	 */
3966 if (host->flags & SDHCI_USE_ADMA) {
3967 if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
3968 mmc->max_seg_size = 65535;
3969 else
3970 mmc->max_seg_size = 65536;
3971 } else {
3972 mmc->max_seg_size = mmc->max_req_size;
3973 }
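
	/*
	 * Example (derived from the limits above): a maximal 512 KiB ADMA
	 * request needs at least 524288 / 65536 = 8 descriptor entries,
	 * comfortably within SDHCI_MAX_SEGS (128).
	 */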
3974
3975 /*
3976 * Maximum block size. This varies from controller to controller and
3977 * is specified in the capabilities register.
3978 */
3979 if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
3980 mmc->max_blk_size = 2;
3981 } else {
3982 mmc->max_blk_size = (host->caps & SDHCI_MAX_BLOCK_MASK) >>
3983 SDHCI_MAX_BLOCK_SHIFT;
3984 if (mmc->max_blk_size >= 3) {
3985 pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n",
3986 mmc_hostname(mmc));
3987 mmc->max_blk_size = 0;
3988 }
3989 }
3990
3991 mmc->max_blk_size = 512 << mmc->max_blk_size;
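
	/*
	 * Encoding reference for the shift above: a capabilities field of 0
	 * gives 512-byte, 1 gives 1024-byte and 2 gives 2048-byte maximum
	 * blocks. SDHCI_QUIRK_FORCE_BLK_SZ_2048 forces encoding 2, and the
	 * reserved encoding 3 was demoted to 512 bytes above.
	 */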
3992
3993 /*
3994 * Maximum block count.
3995 */
3996 mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
3997
3998 if (mmc->max_segs == 1) {
3999 /* This may alter mmc->*_blk_* parameters */
4000 ret = sdhci_allocate_bounce_buffer(host);
4001 if (ret)
4002 return ret;
4003 }
4004
4005 return 0;
4006
4007unreg:
4008 if (!IS_ERR(mmc->supply.vqmmc))
4009 regulator_disable(mmc->supply.vqmmc);
4010undma:
4011 if (host->align_buffer)
4012 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
4013 host->adma_table_sz, host->align_buffer,
4014 host->align_addr);
4015 host->adma_table = NULL;
4016 host->align_buffer = NULL;
4017
4018 return ret;
4019}
4020EXPORT_SYMBOL_GPL(sdhci_setup_host);
4021
4022void sdhci_cleanup_host(struct sdhci_host *host)
4023{
4024 struct mmc_host *mmc = host->mmc;
4025
4026 if (!IS_ERR(mmc->supply.vqmmc))
4027 regulator_disable(mmc->supply.vqmmc);
4028
4029 if (host->align_buffer)
4030 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
4031 host->adma_table_sz, host->align_buffer,
4032 host->align_addr);
4033 host->adma_table = NULL;
4034 host->align_buffer = NULL;
4035}
4036EXPORT_SYMBOL_GPL(sdhci_cleanup_host);
4037
4038int __sdhci_add_host(struct sdhci_host *host)
4039{
4040 struct mmc_host *mmc = host->mmc;
4041 int ret;
4042
	/*
	 * Init the request-finish tasklet.
	 */
4046 tasklet_init(&host->finish_tasklet,
4047 sdhci_tasklet_finish, (unsigned long)host);
4048
4049 timer_setup(&host->timer, sdhci_timeout_timer, 0);
4050 timer_setup(&host->data_timer, sdhci_timeout_data_timer, 0);
4051
4052 init_waitqueue_head(&host->buf_ready_int);
4053
4054 sdhci_init(host, 0);
4055
4056 ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
4057 IRQF_SHARED, mmc_hostname(mmc), host);
4058 if (ret) {
4059 pr_err("%s: Failed to request IRQ %d: %d\n",
4060 mmc_hostname(mmc), host->irq, ret);
4061 goto untasklet;
4062 }
4063
4064 ret = sdhci_led_register(host);
4065 if (ret) {
4066 pr_err("%s: Failed to register LED device: %d\n",
4067 mmc_hostname(mmc), ret);
4068 goto unirq;
4069 }
4070
4071 mmiowb();
4072
4073 ret = mmc_add_host(mmc);
4074 if (ret)
4075 goto unled;
4076
4077 pr_info("%s: SDHCI controller on %s [%s] using %s\n",
4078 mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
4079 (host->flags & SDHCI_USE_ADMA) ?
4080 (host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" :
4081 (host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");
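
	/*
	 * The message above prints, e.g.:
	 *   mmc0: SDHCI controller on ff0f0000.mmc [ff0f0000.mmc] using ADMA
	 * (host and device names here are illustrative).
	 */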
4082
4083 sdhci_enable_card_detection(host);
4084
4085 return 0;
4086
4087unled:
4088 sdhci_led_unregister(host);
4089unirq:
4090 sdhci_do_reset(host, SDHCI_RESET_ALL);
4091 sdhci_writel(host, 0, SDHCI_INT_ENABLE);
4092 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
4093 free_irq(host->irq, host);
4094untasklet:
4095 tasklet_kill(&host->finish_tasklet);
4096
4097 return ret;
4098}
4099EXPORT_SYMBOL_GPL(__sdhci_add_host);
4100
4101int sdhci_add_host(struct sdhci_host *host)
4102{
4103 int ret;
4104
4105 ret = sdhci_setup_host(host);
4106 if (ret)
4107 return ret;
4108
4109 ret = __sdhci_add_host(host);
4110 if (ret)
4111 goto cleanup;
4112
4113 return 0;
4114
4115cleanup:
4116 sdhci_cleanup_host(host);
4117
4118 return ret;
4119}
4120EXPORT_SYMBOL_GPL(sdhci_add_host);
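
/*
 * Minimal usage sketch for glue drivers (hypothetical names; real glue
 * drivers live in sdhci-pltfm.c and the various drivers/mmc/host/sdhci-*.c
 * files):
 *
 *	host = sdhci_alloc_host(&pdev->dev, sizeof(struct my_priv));
 *	if (IS_ERR(host))
 *		return PTR_ERR(host);
 *	... set host->ioaddr, host->irq, host->hw_name, quirks ...
 *	ret = sdhci_add_host(host);
 *	if (ret)
 *		sdhci_free_host(host);
 */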
4121
4122void sdhci_remove_host(struct sdhci_host *host, int dead)
4123{
4124 struct mmc_host *mmc = host->mmc;
4125 unsigned long flags;
4126
4127 if (dead) {
4128 spin_lock_irqsave(&host->lock, flags);
4129
4130 host->flags |= SDHCI_DEVICE_DEAD;
4131
4132 if (sdhci_has_requests(host)) {
			pr_err("%s: Controller removed during transfer!\n",
			       mmc_hostname(mmc));
4135 sdhci_error_out_mrqs(host, -ENOMEDIUM);
4136 }
4137
4138 spin_unlock_irqrestore(&host->lock, flags);
4139 }
4140
4141 sdhci_disable_card_detection(host);
4142
4143 mmc_remove_host(mmc);
4144
4145 sdhci_led_unregister(host);
4146
4147 if (!dead)
4148 sdhci_do_reset(host, SDHCI_RESET_ALL);
4149
4150 sdhci_writel(host, 0, SDHCI_INT_ENABLE);
4151 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
4152 free_irq(host->irq, host);
4153
4154 del_timer_sync(&host->timer);
4155 del_timer_sync(&host->data_timer);
4156
4157 tasklet_kill(&host->finish_tasklet);
4158
4159 if (!IS_ERR(mmc->supply.vqmmc))
4160 regulator_disable(mmc->supply.vqmmc);
4161
4162 if (host->align_buffer)
4163 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
4164 host->adma_table_sz, host->align_buffer,
4165 host->align_addr);
4166
4167 host->adma_table = NULL;
4168 host->align_buffer = NULL;
4169}
4170
4171EXPORT_SYMBOL_GPL(sdhci_remove_host);
4172
4173void sdhci_free_host(struct sdhci_host *host)
4174{
4175 mmc_free_host(host->mmc);
4176}
4177
4178EXPORT_SYMBOL_GPL(sdhci_free_host);
4179
4180/*****************************************************************************\
4181 * *
4182 * Driver init/exit *
4183 * *
4184\*****************************************************************************/
4185
4186static int __init sdhci_drv_init(void)
4187{
4188 pr_info(DRIVER_NAME
4189 ": Secure Digital Host Controller Interface driver\n");
	pr_info(DRIVER_NAME ": Copyright (c) Pierre Ossman\n");
4191
4192 return 0;
4193}
4194
4195static void __exit sdhci_drv_exit(void)
4196{
4197}
4198
4199module_init(sdhci_drv_init);
4200module_exit(sdhci_drv_exit);
4201
4202module_param(debug_quirks, uint, 0444);
4203module_param(debug_quirks2, uint, 0444);
4204
4205MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
4206MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
4207MODULE_LICENSE("GPL");
4208
4209MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
4210MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks.");