// SPDX-License-Identifier: GPL-2.0-or-later
/* Driver for Realtek PCI-Express card reader
 *
 * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
 *
 * Author:
 *	Wei WANG <wei_wang@realsil.com.cn>
 */

10#include <linux/pci.h>
11#include <linux/module.h>
12#include <linux/slab.h>
13#include <linux/dma-mapping.h>
14#include <linux/highmem.h>
15#include <linux/interrupt.h>
16#include <linux/delay.h>
17#include <linux/idr.h>
18#include <linux/platform_device.h>
19#include <linux/mfd/core.h>
20#include <linux/rtsx_pci.h>
21#include <linux/mmc/card.h>
22#include <asm/unaligned.h>
23
24#include "rtsx_pcr.h"
25
26static bool msi_en = true;
27module_param(msi_en, bool, S_IRUGO | S_IWUSR);
28MODULE_PARM_DESC(msi_en, "Enable MSI");
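/*
 * msi_en can be overridden at load time; a minimal example, assuming the
 * module is built under its usual name:
 *
 *	modprobe rtsx_pci msi_en=0	(fall back to legacy shared interrupts)
 *
 * The flag is only sampled in rtsx_pci_probe(), so flipping it through sysfs
 * afterwards does not affect an already-bound device.
 */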
29
30static DEFINE_IDR(rtsx_pci_idr);
31static DEFINE_SPINLOCK(rtsx_pci_lock);
32
33static struct mfd_cell rtsx_pcr_cells[] = {
34 [RTSX_SD_CARD] = {
35 .name = DRV_NAME_RTSX_PCI_SDMMC,
36 },
37 [RTSX_MS_CARD] = {
38 .name = DRV_NAME_RTSX_PCI_MS,
39 },
40};
41
42static const struct pci_device_id rtsx_pci_ids[] = {
43 { PCI_DEVICE(0x10EC, 0x5209), PCI_CLASS_OTHERS << 16, 0xFF0000 },
44 { PCI_DEVICE(0x10EC, 0x5229), PCI_CLASS_OTHERS << 16, 0xFF0000 },
45 { PCI_DEVICE(0x10EC, 0x5289), PCI_CLASS_OTHERS << 16, 0xFF0000 },
46 { PCI_DEVICE(0x10EC, 0x5227), PCI_CLASS_OTHERS << 16, 0xFF0000 },
47 { PCI_DEVICE(0x10EC, 0x522A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
48 { PCI_DEVICE(0x10EC, 0x5249), PCI_CLASS_OTHERS << 16, 0xFF0000 },
49 { PCI_DEVICE(0x10EC, 0x5287), PCI_CLASS_OTHERS << 16, 0xFF0000 },
50 { PCI_DEVICE(0x10EC, 0x5286), PCI_CLASS_OTHERS << 16, 0xFF0000 },
51 { PCI_DEVICE(0x10EC, 0x524A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
52 { PCI_DEVICE(0x10EC, 0x525A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
53 { PCI_DEVICE(0x10EC, 0x5260), PCI_CLASS_OTHERS << 16, 0xFF0000 },
54 { 0, }
55};
56
57MODULE_DEVICE_TABLE(pci, rtsx_pci_ids);
58
59static inline void rtsx_pci_enable_aspm(struct rtsx_pcr *pcr)
60{
61 rtsx_pci_update_cfg_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL,
62 0xFC, pcr->aspm_en);
63}
64
65static inline void rtsx_pci_disable_aspm(struct rtsx_pcr *pcr)
66{
67 rtsx_pci_update_cfg_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL,
68 0xFC, 0);
69}
70
71static int rtsx_comm_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency)
72{
73 rtsx_pci_write_register(pcr, MSGTXDATA0,
74 MASK_8_BIT_DEF, (u8) (latency & 0xFF));
75 rtsx_pci_write_register(pcr, MSGTXDATA1,
76 MASK_8_BIT_DEF, (u8)((latency >> 8) & 0xFF));
77 rtsx_pci_write_register(pcr, MSGTXDATA2,
78 MASK_8_BIT_DEF, (u8)((latency >> 16) & 0xFF));
79 rtsx_pci_write_register(pcr, MSGTXDATA3,
80 MASK_8_BIT_DEF, (u8)((latency >> 24) & 0xFF));
81 rtsx_pci_write_register(pcr, LTR_CTL, LTR_TX_EN_MASK |
82 LTR_LATENCY_MODE_MASK, LTR_TX_EN_1 | LTR_LATENCY_MODE_SW);
83
84 return 0;
85}
86
87int rtsx_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency)
88{
89 if (pcr->ops->set_ltr_latency)
90 return pcr->ops->set_ltr_latency(pcr, latency);
91 else
92 return rtsx_comm_set_ltr_latency(pcr, latency);
93}
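/*
 * rtsx_comm_set_ltr_latency() splits the 32-bit latency value into four
 * byte-wide register writes in little-endian order and then switches the LTR
 * logic to software-controlled mode.  For example, latency = 0x00012345 is
 * written as MSGTXDATA0 = 0x45, MSGTXDATA1 = 0x23, MSGTXDATA2 = 0x01 and
 * MSGTXDATA3 = 0x00.
 */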
94
95static void rtsx_comm_set_aspm(struct rtsx_pcr *pcr, bool enable)
96{
97 struct rtsx_cr_option *option = &pcr->option;
98
99 if (pcr->aspm_enabled == enable)
100 return;
101
102 if (option->dev_aspm_mode == DEV_ASPM_DYNAMIC) {
103 if (enable)
104 rtsx_pci_enable_aspm(pcr);
105 else
106 rtsx_pci_disable_aspm(pcr);
107 } else if (option->dev_aspm_mode == DEV_ASPM_BACKDOOR) {
108 u8 mask = FORCE_ASPM_VAL_MASK;
109 u8 val = 0;
110
111 if (enable)
112 val = pcr->aspm_en;
113 rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, mask, val);
114 }
115
116 pcr->aspm_enabled = enable;
117}
118
119static void rtsx_disable_aspm(struct rtsx_pcr *pcr)
120{
121 if (pcr->ops->set_aspm)
122 pcr->ops->set_aspm(pcr, false);
123 else
124 rtsx_comm_set_aspm(pcr, false);
125}
126
127int rtsx_set_l1off_sub(struct rtsx_pcr *pcr, u8 val)
128{
129 rtsx_pci_write_register(pcr, L1SUB_CONFIG3, 0xFF, val);
130
131 return 0;
132}
133
134static void rtsx_set_l1off_sub_cfg_d0(struct rtsx_pcr *pcr, int active)
135{
136 if (pcr->ops->set_l1off_cfg_sub_d0)
137 pcr->ops->set_l1off_cfg_sub_d0(pcr, active);
138}
139
140static void rtsx_comm_pm_full_on(struct rtsx_pcr *pcr)
141{
142 struct rtsx_cr_option *option = &pcr->option;
143
144 rtsx_disable_aspm(pcr);
145
	/* Fixes DMA transfer timeout issue after disabling ASPM on RTS5260 */
147 msleep(1);
148
	if (option->ltr_enabled)
150 rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
151
152 if (rtsx_check_dev_flag(pcr, LTR_L1SS_PWR_GATE_EN))
153 rtsx_set_l1off_sub_cfg_d0(pcr, 1);
154}
155
156static void rtsx_pm_full_on(struct rtsx_pcr *pcr)
157{
158 if (pcr->ops->full_on)
159 pcr->ops->full_on(pcr);
160 else
161 rtsx_comm_pm_full_on(pcr);
162}
163
164void rtsx_pci_start_run(struct rtsx_pcr *pcr)
165{
166 /* If pci device removed, don't queue idle work any more */
167 if (pcr->remove_pci)
168 return;
169
170 if (pcr->state != PDEV_STAT_RUN) {
171 pcr->state = PDEV_STAT_RUN;
172 if (pcr->ops->enable_auto_blink)
173 pcr->ops->enable_auto_blink(pcr);
174 rtsx_pm_full_on(pcr);
175 }
176
177 mod_delayed_work(system_wq, &pcr->idle_work, msecs_to_jiffies(200));
178}
179EXPORT_SYMBOL_GPL(rtsx_pci_start_run);
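/*
 * rtsx_pci_start_run() and rtsx_pci_idle_work() implement a simple two-state
 * power policy: callers invoke start_run() before touching the hardware,
 * which switches the state to PDEV_STAT_RUN, restores full power through
 * rtsx_pm_full_on() and (re)arms idle_work.  If nothing pushes the timer for
 * about 200 ms, idle_work drops back to PDEV_STAT_IDLE, turns the LED off and
 * enters the power-saving path (LTR/L1SS settings and ASPM).
 */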
180
181int rtsx_pci_write_register(struct rtsx_pcr *pcr, u16 addr, u8 mask, u8 data)
182{
183 int i;
184 u32 val = HAIMR_WRITE_START;
185
186 val |= (u32)(addr & 0x3FFF) << 16;
187 val |= (u32)mask << 8;
188 val |= (u32)data;
189
190 rtsx_pci_writel(pcr, RTSX_HAIMR, val);
191
192 for (i = 0; i < MAX_RW_REG_CNT; i++) {
193 val = rtsx_pci_readl(pcr, RTSX_HAIMR);
194 if ((val & HAIMR_TRANS_END) == 0) {
195 if (data != (u8)val)
196 return -EIO;
197 return 0;
198 }
199 }
200
201 return -ETIMEDOUT;
202}
203EXPORT_SYMBOL_GPL(rtsx_pci_write_register);
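/*
 * rtsx_pci_write_register() goes through the HAIMR mailbox: the 32-bit word
 * carries a 14-bit register address in bits 29:16, the mask in bits 15:8 and
 * the data byte in bits 7:0, with the transfer-start/write flags in the top
 * bits.  The loop polls until HAIMR_TRANS_END clears and then checks that the
 * byte read back matches the requested value.
 *
 * Illustrative call, reusing names that already appear in this file:
 *
 *	err = rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, CLK_LOW_FREQ);
 */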
204
205int rtsx_pci_read_register(struct rtsx_pcr *pcr, u16 addr, u8 *data)
206{
207 u32 val = HAIMR_READ_START;
208 int i;
209
210 val |= (u32)(addr & 0x3FFF) << 16;
211 rtsx_pci_writel(pcr, RTSX_HAIMR, val);
212
213 for (i = 0; i < MAX_RW_REG_CNT; i++) {
214 val = rtsx_pci_readl(pcr, RTSX_HAIMR);
215 if ((val & HAIMR_TRANS_END) == 0)
216 break;
217 }
218
219 if (i >= MAX_RW_REG_CNT)
220 return -ETIMEDOUT;
221
222 if (data)
223 *data = (u8)(val & 0xFF);
224
225 return 0;
226}
227EXPORT_SYMBOL_GPL(rtsx_pci_read_register);
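/*
 * Illustrative read-back example ("stat" is a local placeholder and
 * handle_over_current() is hypothetical; the register and bit names are taken
 * from this file):
 *
 *	u8 stat;
 *	int err = rtsx_pci_read_register(pcr, REG_OCPSTAT, &stat);
 *
 *	if (!err && (stat & SD_OC_NOW))
 *		handle_over_current();	// over-current currently asserted
 */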
228
229int __rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val)
230{
231 int err, i, finished = 0;
232 u8 tmp;
233
234 rtsx_pci_init_cmd(pcr);
235
236 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYDATA0, 0xFF, (u8)val);
237 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYDATA1, 0xFF, (u8)(val >> 8));
238 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYADDR, 0xFF, addr);
239 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYRWCTL, 0xFF, 0x81);
240
241 err = rtsx_pci_send_cmd(pcr, 100);
242 if (err < 0)
243 return err;
244
245 for (i = 0; i < 100000; i++) {
246 err = rtsx_pci_read_register(pcr, PHYRWCTL, &tmp);
247 if (err < 0)
248 return err;
249
250 if (!(tmp & 0x80)) {
251 finished = 1;
252 break;
253 }
254 }
255
256 if (!finished)
257 return -ETIMEDOUT;
258
259 return 0;
260}
261
262int rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val)
263{
264 if (pcr->ops->write_phy)
265 return pcr->ops->write_phy(pcr, addr, val);
266
267 return __rtsx_pci_write_phy_register(pcr, addr, val);
268}
269EXPORT_SYMBOL_GPL(rtsx_pci_write_phy_register);
270
271int __rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val)
272{
273 int err, i, finished = 0;
274 u16 data;
275 u8 *ptr, tmp;
276
277 rtsx_pci_init_cmd(pcr);
278
279 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYADDR, 0xFF, addr);
280 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYRWCTL, 0xFF, 0x80);
281
282 err = rtsx_pci_send_cmd(pcr, 100);
283 if (err < 0)
284 return err;
285
286 for (i = 0; i < 100000; i++) {
287 err = rtsx_pci_read_register(pcr, PHYRWCTL, &tmp);
288 if (err < 0)
289 return err;
290
291 if (!(tmp & 0x80)) {
292 finished = 1;
293 break;
294 }
295 }
296
297 if (!finished)
298 return -ETIMEDOUT;
299
300 rtsx_pci_init_cmd(pcr);
301
302 rtsx_pci_add_cmd(pcr, READ_REG_CMD, PHYDATA0, 0, 0);
303 rtsx_pci_add_cmd(pcr, READ_REG_CMD, PHYDATA1, 0, 0);
304
305 err = rtsx_pci_send_cmd(pcr, 100);
306 if (err < 0)
307 return err;
308
309 ptr = rtsx_pci_get_cmd_data(pcr);
310 data = ((u16)ptr[1] << 8) | ptr[0];
311
312 if (val)
313 *val = data;
314
315 return 0;
316}
317
318int rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val)
319{
320 if (pcr->ops->read_phy)
321 return pcr->ops->read_phy(pcr, addr, val);
322
323 return __rtsx_pci_read_phy_register(pcr, addr, val);
324}
325EXPORT_SYMBOL_GPL(rtsx_pci_read_phy_register);
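/*
 * PHY registers are not memory mapped; both helpers above tunnel the access
 * through the internal command queue.  A write loads PHYDATA0/1 and PHYADDR
 * and kicks PHYRWCTL with 0x81, a read kicks it with 0x80, and bit 7 of
 * PHYRWCTL is polled until the PHY transaction finishes.
 *
 * Illustrative read (the PHY address 0x08 here is only an example):
 *
 *	u16 phy_val;
 *	int err = rtsx_pci_read_phy_register(pcr, 0x08, &phy_val);
 */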
326
327void rtsx_pci_stop_cmd(struct rtsx_pcr *pcr)
328{
329 if (pcr->ops->stop_cmd)
330 return pcr->ops->stop_cmd(pcr);
331
332 rtsx_pci_writel(pcr, RTSX_HCBCTLR, STOP_CMD);
333 rtsx_pci_writel(pcr, RTSX_HDBCTLR, STOP_DMA);
334
335 rtsx_pci_write_register(pcr, DMACTL, 0x80, 0x80);
336 rtsx_pci_write_register(pcr, RBCTL, 0x80, 0x80);
337}
338EXPORT_SYMBOL_GPL(rtsx_pci_stop_cmd);
339
340void rtsx_pci_add_cmd(struct rtsx_pcr *pcr,
341 u8 cmd_type, u16 reg_addr, u8 mask, u8 data)
342{
343 unsigned long flags;
344 u32 val = 0;
345 u32 *ptr = (u32 *)(pcr->host_cmds_ptr);
346
347 val |= (u32)(cmd_type & 0x03) << 30;
348 val |= (u32)(reg_addr & 0x3FFF) << 16;
349 val |= (u32)mask << 8;
350 val |= (u32)data;
351
352 spin_lock_irqsave(&pcr->lock, flags);
353 ptr += pcr->ci;
354 if (pcr->ci < (HOST_CMDS_BUF_LEN / 4)) {
355 put_unaligned_le32(val, ptr);
356 ptr++;
357 pcr->ci++;
358 }
359 spin_unlock_irqrestore(&pcr->lock, flags);
360}
361EXPORT_SYMBOL_GPL(rtsx_pci_add_cmd);
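/*
 * rtsx_pci_add_cmd() queues one 32-bit little-endian command word into the
 * host command buffer: bits 31:30 = command type, bits 29:16 = register
 * address, bits 15:8 = mask, bits 7:0 = data.  A typical sequence, reusing
 * names that already appear in this file:
 *
 *	u8 *resp;
 *	int err;
 *
 *	rtsx_pci_init_cmd(pcr);
 *	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_EN, SD_CLK_EN, SD_CLK_EN);
 *	rtsx_pci_add_cmd(pcr, READ_REG_CMD, CARD_CLK_EN, 0, 0);
 *	err = rtsx_pci_send_cmd(pcr, 100);
 *	if (!err)
 *		resp = rtsx_pci_get_cmd_data(pcr);	// read results land here
 */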
362
363void rtsx_pci_send_cmd_no_wait(struct rtsx_pcr *pcr)
364{
365 u32 val = 1 << 31;
366
367 rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);
368
369 val |= (u32)(pcr->ci * 4) & 0x00FFFFFF;
370 /* Hardware Auto Response */
371 val |= 0x40000000;
372 rtsx_pci_writel(pcr, RTSX_HCBCTLR, val);
373}
374EXPORT_SYMBOL_GPL(rtsx_pci_send_cmd_no_wait);
375
376int rtsx_pci_send_cmd(struct rtsx_pcr *pcr, int timeout)
377{
378 struct completion trans_done;
379 u32 val = 1 << 31;
380 long timeleft;
381 unsigned long flags;
382 int err = 0;
383
384 spin_lock_irqsave(&pcr->lock, flags);
385
386 /* set up data structures for the wakeup system */
387 pcr->done = &trans_done;
388 pcr->trans_result = TRANS_NOT_READY;
389 init_completion(&trans_done);
390
391 rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);
392
393 val |= (u32)(pcr->ci * 4) & 0x00FFFFFF;
394 /* Hardware Auto Response */
395 val |= 0x40000000;
396 rtsx_pci_writel(pcr, RTSX_HCBCTLR, val);
397
398 spin_unlock_irqrestore(&pcr->lock, flags);
399
400 /* Wait for TRANS_OK_INT */
401 timeleft = wait_for_completion_interruptible_timeout(
402 &trans_done, msecs_to_jiffies(timeout));
403 if (timeleft <= 0) {
404 pcr_dbg(pcr, "Timeout (%s %d)\n", __func__, __LINE__);
405 err = -ETIMEDOUT;
406 goto finish_send_cmd;
407 }
408
409 spin_lock_irqsave(&pcr->lock, flags);
410 if (pcr->trans_result == TRANS_RESULT_FAIL)
411 err = -EINVAL;
412 else if (pcr->trans_result == TRANS_RESULT_OK)
413 err = 0;
414 else if (pcr->trans_result == TRANS_NO_DEVICE)
415 err = -ENODEV;
416 spin_unlock_irqrestore(&pcr->lock, flags);
417
418finish_send_cmd:
419 spin_lock_irqsave(&pcr->lock, flags);
420 pcr->done = NULL;
421 spin_unlock_irqrestore(&pcr->lock, flags);
422
423 if ((err < 0) && (err != -ENODEV))
424 rtsx_pci_stop_cmd(pcr);
425
426 if (pcr->finish_me)
427 complete(pcr->finish_me);
428
429 return err;
430}
431EXPORT_SYMBOL_GPL(rtsx_pci_send_cmd);
432
433static void rtsx_pci_add_sg_tbl(struct rtsx_pcr *pcr,
434 dma_addr_t addr, unsigned int len, int end)
435{
436 u64 *ptr = (u64 *)(pcr->host_sg_tbl_ptr) + pcr->sgi;
437 u64 val;
438 u8 option = RTSX_SG_VALID | RTSX_SG_TRANS_DATA;
439
440 pcr_dbg(pcr, "DMA addr: 0x%x, Len: 0x%x\n", (unsigned int)addr, len);
441
442 if (end)
443 option |= RTSX_SG_END;
444 val = ((u64)addr << 32) | ((u64)len << 12) | option;
445
446 put_unaligned_le64(val, ptr);
447 pcr->sgi++;
448}
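/*
 * Each scatter-gather descriptor built above is a 64-bit little-endian word:
 * the 32-bit DMA address in the upper half, the transfer length shifted left
 * by 12 bits, and the option flags (RTSX_SG_VALID | RTSX_SG_TRANS_DATA, plus
 * RTSX_SG_END on the last entry) in the low bits.
 */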
449
450int rtsx_pci_transfer_data(struct rtsx_pcr *pcr, struct scatterlist *sglist,
451 int num_sg, bool read, int timeout)
452{
453 int err = 0, count;
454
455 pcr_dbg(pcr, "--> %s: num_sg = %d\n", __func__, num_sg);
456 count = rtsx_pci_dma_map_sg(pcr, sglist, num_sg, read);
457 if (count < 1)
458 return -EINVAL;
459 pcr_dbg(pcr, "DMA mapping count: %d\n", count);
460
461 err = rtsx_pci_dma_transfer(pcr, sglist, count, read, timeout);
462
463 rtsx_pci_dma_unmap_sg(pcr, sglist, num_sg, read);
464
465 return err;
466}
467EXPORT_SYMBOL_GPL(rtsx_pci_transfer_data);
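/*
 * Illustrative usage: transfer one kernel buffer from the card into memory.
 * "buf" and "len" are placeholders and 10000 ms is just an example timeout;
 * mapping and unmapping of the scatterlist is handled internally.
 *
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, buf, len);
 *	err = rtsx_pci_transfer_data(pcr, &sg, 1, true, 10000);
 */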
468
469int rtsx_pci_dma_map_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
470 int num_sg, bool read)
471{
472 enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
473
474 if (pcr->remove_pci)
475 return -EINVAL;
476
477 if ((sglist == NULL) || (num_sg <= 0))
478 return -EINVAL;
479
480 return dma_map_sg(&(pcr->pci->dev), sglist, num_sg, dir);
481}
482EXPORT_SYMBOL_GPL(rtsx_pci_dma_map_sg);
483
484void rtsx_pci_dma_unmap_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
485 int num_sg, bool read)
486{
487 enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
488
489 dma_unmap_sg(&(pcr->pci->dev), sglist, num_sg, dir);
490}
491EXPORT_SYMBOL_GPL(rtsx_pci_dma_unmap_sg);
492
493int rtsx_pci_dma_transfer(struct rtsx_pcr *pcr, struct scatterlist *sglist,
494 int count, bool read, int timeout)
495{
496 struct completion trans_done;
497 struct scatterlist *sg;
498 dma_addr_t addr;
499 long timeleft;
500 unsigned long flags;
501 unsigned int len;
502 int i, err = 0;
503 u32 val;
504 u8 dir = read ? DEVICE_TO_HOST : HOST_TO_DEVICE;
505
506 if (pcr->remove_pci)
507 return -ENODEV;
508
509 if ((sglist == NULL) || (count < 1))
510 return -EINVAL;
511
512 val = ((u32)(dir & 0x01) << 29) | TRIG_DMA | ADMA_MODE;
513 pcr->sgi = 0;
514 for_each_sg(sglist, sg, count, i) {
515 addr = sg_dma_address(sg);
516 len = sg_dma_len(sg);
517 rtsx_pci_add_sg_tbl(pcr, addr, len, i == count - 1);
518 }
519
520 spin_lock_irqsave(&pcr->lock, flags);
521
522 pcr->done = &trans_done;
523 pcr->trans_result = TRANS_NOT_READY;
524 init_completion(&trans_done);
525 rtsx_pci_writel(pcr, RTSX_HDBAR, pcr->host_sg_tbl_addr);
526 rtsx_pci_writel(pcr, RTSX_HDBCTLR, val);
527
528 spin_unlock_irqrestore(&pcr->lock, flags);
529
530 timeleft = wait_for_completion_interruptible_timeout(
531 &trans_done, msecs_to_jiffies(timeout));
532 if (timeleft <= 0) {
533 pcr_dbg(pcr, "Timeout (%s %d)\n", __func__, __LINE__);
534 err = -ETIMEDOUT;
535 goto out;
536 }
537
538 spin_lock_irqsave(&pcr->lock, flags);
539 if (pcr->trans_result == TRANS_RESULT_FAIL) {
540 err = -EILSEQ;
541 if (pcr->dma_error_count < RTS_MAX_TIMES_FREQ_REDUCTION)
542 pcr->dma_error_count++;
543 }
544
545 else if (pcr->trans_result == TRANS_NO_DEVICE)
546 err = -ENODEV;
547 spin_unlock_irqrestore(&pcr->lock, flags);
548
549out:
550 spin_lock_irqsave(&pcr->lock, flags);
551 pcr->done = NULL;
552 spin_unlock_irqrestore(&pcr->lock, flags);
553
554 if ((err < 0) && (err != -ENODEV))
555 rtsx_pci_stop_cmd(pcr);
556
557 if (pcr->finish_me)
558 complete(pcr->finish_me);
559
560 return err;
561}
562EXPORT_SYMBOL_GPL(rtsx_pci_dma_transfer);
563
564int rtsx_pci_read_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len)
565{
566 int err;
567 int i, j;
568 u16 reg;
569 u8 *ptr;
570
571 if (buf_len > 512)
572 buf_len = 512;
573
574 ptr = buf;
575 reg = PPBUF_BASE2;
576 for (i = 0; i < buf_len / 256; i++) {
577 rtsx_pci_init_cmd(pcr);
578
579 for (j = 0; j < 256; j++)
580 rtsx_pci_add_cmd(pcr, READ_REG_CMD, reg++, 0, 0);
581
582 err = rtsx_pci_send_cmd(pcr, 250);
583 if (err < 0)
584 return err;
585
586 memcpy(ptr, rtsx_pci_get_cmd_data(pcr), 256);
587 ptr += 256;
588 }
589
590 if (buf_len % 256) {
591 rtsx_pci_init_cmd(pcr);
592
593 for (j = 0; j < buf_len % 256; j++)
594 rtsx_pci_add_cmd(pcr, READ_REG_CMD, reg++, 0, 0);
595
596 err = rtsx_pci_send_cmd(pcr, 250);
597 if (err < 0)
598 return err;
599 }
600
601 memcpy(ptr, rtsx_pci_get_cmd_data(pcr), buf_len % 256);
602
603 return 0;
604}
605EXPORT_SYMBOL_GPL(rtsx_pci_read_ppbuf);
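/*
 * The ping-pong buffer window starting at PPBUF_BASE2 is read through the
 * command queue in batches of at most 256 single-register reads, and the
 * total length is capped at 512 bytes.
 */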
606
607int rtsx_pci_write_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len)
608{
609 int err;
610 int i, j;
611 u16 reg;
612 u8 *ptr;
613
614 if (buf_len > 512)
615 buf_len = 512;
616
617 ptr = buf;
618 reg = PPBUF_BASE2;
619 for (i = 0; i < buf_len / 256; i++) {
620 rtsx_pci_init_cmd(pcr);
621
622 for (j = 0; j < 256; j++) {
623 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
624 reg++, 0xFF, *ptr);
625 ptr++;
626 }
627
628 err = rtsx_pci_send_cmd(pcr, 250);
629 if (err < 0)
630 return err;
631 }
632
633 if (buf_len % 256) {
634 rtsx_pci_init_cmd(pcr);
635
636 for (j = 0; j < buf_len % 256; j++) {
637 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
638 reg++, 0xFF, *ptr);
639 ptr++;
640 }
641
642 err = rtsx_pci_send_cmd(pcr, 250);
643 if (err < 0)
644 return err;
645 }
646
647 return 0;
648}
649EXPORT_SYMBOL_GPL(rtsx_pci_write_ppbuf);
650
651static int rtsx_pci_set_pull_ctl(struct rtsx_pcr *pcr, const u32 *tbl)
652{
653 rtsx_pci_init_cmd(pcr);
654
655 while (*tbl & 0xFFFF0000) {
656 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
657 (u16)(*tbl >> 16), 0xFF, (u8)(*tbl));
658 tbl++;
659 }
660
661 return rtsx_pci_send_cmd(pcr, 100);
662}
663
664int rtsx_pci_card_pull_ctl_enable(struct rtsx_pcr *pcr, int card)
665{
666 const u32 *tbl;
667
668 if (card == RTSX_SD_CARD)
669 tbl = pcr->sd_pull_ctl_enable_tbl;
670 else if (card == RTSX_MS_CARD)
671 tbl = pcr->ms_pull_ctl_enable_tbl;
672 else
673 return -EINVAL;
674
675 return rtsx_pci_set_pull_ctl(pcr, tbl);
676}
677EXPORT_SYMBOL_GPL(rtsx_pci_card_pull_ctl_enable);
678
679int rtsx_pci_card_pull_ctl_disable(struct rtsx_pcr *pcr, int card)
680{
681 const u32 *tbl;
682
683 if (card == RTSX_SD_CARD)
684 tbl = pcr->sd_pull_ctl_disable_tbl;
685 else if (card == RTSX_MS_CARD)
686 tbl = pcr->ms_pull_ctl_disable_tbl;
687 else
688 return -EINVAL;
689
690
691 return rtsx_pci_set_pull_ctl(pcr, tbl);
692}
693EXPORT_SYMBOL_GPL(rtsx_pci_card_pull_ctl_disable);
694
695static void rtsx_pci_enable_bus_int(struct rtsx_pcr *pcr)
696{
	struct rtsx_hw_param *hw_param = &pcr->hw_param;

	pcr->bier = TRANS_OK_INT_EN | TRANS_FAIL_INT_EN | SD_INT_EN
		| hw_param->interrupt_en;

702 if (pcr->num_slots > 1)
703 pcr->bier |= MS_INT_EN;
704
705 /* Enable Bus Interrupt */
706 rtsx_pci_writel(pcr, RTSX_BIER, pcr->bier);
707
708 pcr_dbg(pcr, "RTSX_BIER: 0x%08x\n", pcr->bier);
709}
710
711static inline u8 double_ssc_depth(u8 depth)
712{
713 return ((depth > 1) ? (depth - 1) : depth);
714}
715
716static u8 revise_ssc_depth(u8 ssc_depth, u8 div)
717{
718 if (div > CLK_DIV_1) {
719 if (ssc_depth > (div - 1))
720 ssc_depth -= (div - 1);
721 else
722 ssc_depth = SSC_DEPTH_4M;
723 }
724
725 return ssc_depth;
726}
727
728int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
729 u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk)
730{
731 int err, clk;
732 u8 n, clk_divider, mcu_cnt, div;
733 static const u8 depth[] = {
734 [RTSX_SSC_DEPTH_4M] = SSC_DEPTH_4M,
735 [RTSX_SSC_DEPTH_2M] = SSC_DEPTH_2M,
736 [RTSX_SSC_DEPTH_1M] = SSC_DEPTH_1M,
737 [RTSX_SSC_DEPTH_500K] = SSC_DEPTH_500K,
738 [RTSX_SSC_DEPTH_250K] = SSC_DEPTH_250K,
739 };
740
741 if (initial_mode) {
		/* Use a clock of around 250 kHz in the initial stage */
743 clk_divider = SD_CLK_DIVIDE_128;
744 card_clock = 30000000;
745 } else {
746 clk_divider = SD_CLK_DIVIDE_0;
747 }
748 err = rtsx_pci_write_register(pcr, SD_CFG1,
749 SD_CLK_DIVIDE_MASK, clk_divider);
750 if (err < 0)
751 return err;
752
753 /* Reduce card clock by 20MHz each time a DMA transfer error occurs */
754 if (card_clock == UHS_SDR104_MAX_DTR &&
755 pcr->dma_error_count &&
756 PCI_PID(pcr) == RTS5227_DEVICE_ID)
757 card_clock = UHS_SDR104_MAX_DTR -
758 (pcr->dma_error_count * 20000000);
759
760 card_clock /= 1000000;
761 pcr_dbg(pcr, "Switch card clock to %dMHz\n", card_clock);
762
763 clk = card_clock;
764 if (!initial_mode && double_clk)
765 clk = card_clock * 2;
766 pcr_dbg(pcr, "Internal SSC clock: %dMHz (cur_clock = %d)\n",
767 clk, pcr->cur_clock);
768
769 if (clk == pcr->cur_clock)
770 return 0;
771
772 if (pcr->ops->conv_clk_and_div_n)
773 n = (u8)pcr->ops->conv_clk_and_div_n(clk, CLK_TO_DIV_N);
774 else
775 n = (u8)(clk - 2);
776 if ((clk <= 2) || (n > MAX_DIV_N_PCR))
777 return -EINVAL;
778
779 mcu_cnt = (u8)(125/clk + 3);
780 if (mcu_cnt > 15)
781 mcu_cnt = 15;
782
783 /* Make sure that the SSC clock div_n is not less than MIN_DIV_N_PCR */
784 div = CLK_DIV_1;
785 while ((n < MIN_DIV_N_PCR) && (div < CLK_DIV_8)) {
786 if (pcr->ops->conv_clk_and_div_n) {
787 int dbl_clk = pcr->ops->conv_clk_and_div_n(n,
788 DIV_N_TO_CLK) * 2;
789 n = (u8)pcr->ops->conv_clk_and_div_n(dbl_clk,
790 CLK_TO_DIV_N);
791 } else {
792 n = (n + 2) * 2 - 2;
793 }
794 div++;
795 }
796 pcr_dbg(pcr, "n = %d, div = %d\n", n, div);
797
798 ssc_depth = depth[ssc_depth];
799 if (double_clk)
800 ssc_depth = double_ssc_depth(ssc_depth);
801
802 ssc_depth = revise_ssc_depth(ssc_depth, div);
803 pcr_dbg(pcr, "ssc_depth = %d\n", ssc_depth);
804
805 rtsx_pci_init_cmd(pcr);
806 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL,
807 CLK_LOW_FREQ, CLK_LOW_FREQ);
808 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV,
809 0xFF, (div << 4) | mcu_cnt);
810 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0);
811 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2,
812 SSC_DEPTH_MASK, ssc_depth);
813 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_DIV_N_0, 0xFF, n);
814 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, SSC_RSTB);
815 if (vpclk) {
816 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
817 PHASE_NOT_RESET, 0);
818 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
819 PHASE_NOT_RESET, PHASE_NOT_RESET);
820 }
821
822 err = rtsx_pci_send_cmd(pcr, 2000);
823 if (err < 0)
824 return err;
825
826 /* Wait SSC clock stable */
827 udelay(SSC_CLOCK_STABLE_WAIT);
828 err = rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0);
829 if (err < 0)
830 return err;
831
832 pcr->cur_clock = clk;
833 return 0;
834}
835EXPORT_SYMBOL_GPL(rtsx_pci_switch_clock);
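/*
 * A worked example of the divider math above, assuming the default
 * conv_clk_and_div_n behaviour (n = clk - 2) and that the resulting n already
 * satisfies MIN_DIV_N_PCR:
 *
 *	card_clock = 50 MHz, double_clk = true  ->  clk = 100
 *	n = clk - 2 = 98
 *	mcu_cnt = 125 / clk + 3 = 4	(integer division)
 *	div stays at CLK_DIV_1, so SSC_DIV_N_0 is programmed with 98 and
 *	CLK_DIV with (CLK_DIV_1 << 4) | 4.
 */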
836
837int rtsx_pci_card_power_on(struct rtsx_pcr *pcr, int card)
838{
839 if (pcr->ops->card_power_on)
840 return pcr->ops->card_power_on(pcr, card);
841
842 return 0;
843}
844EXPORT_SYMBOL_GPL(rtsx_pci_card_power_on);
845
846int rtsx_pci_card_power_off(struct rtsx_pcr *pcr, int card)
847{
848 if (pcr->ops->card_power_off)
849 return pcr->ops->card_power_off(pcr, card);
850
851 return 0;
852}
853EXPORT_SYMBOL_GPL(rtsx_pci_card_power_off);
854
855int rtsx_pci_card_exclusive_check(struct rtsx_pcr *pcr, int card)
856{
857 static const unsigned int cd_mask[] = {
858 [RTSX_SD_CARD] = SD_EXIST,
859 [RTSX_MS_CARD] = MS_EXIST
860 };
861
862 if (!(pcr->flags & PCR_MS_PMOS)) {
863 /* When using single PMOS, accessing card is not permitted
864 * if the existing card is not the designated one.
865 */
866 if (pcr->card_exist & (~cd_mask[card]))
867 return -EIO;
868 }
869
870 return 0;
871}
872EXPORT_SYMBOL_GPL(rtsx_pci_card_exclusive_check);
873
874int rtsx_pci_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
875{
876 if (pcr->ops->switch_output_voltage)
877 return pcr->ops->switch_output_voltage(pcr, voltage);
878
879 return 0;
880}
881EXPORT_SYMBOL_GPL(rtsx_pci_switch_output_voltage);
882
883unsigned int rtsx_pci_card_exist(struct rtsx_pcr *pcr)
884{
885 unsigned int val;
886
887 val = rtsx_pci_readl(pcr, RTSX_BIPR);
888 if (pcr->ops->cd_deglitch)
889 val = pcr->ops->cd_deglitch(pcr);
890
891 return val;
892}
893EXPORT_SYMBOL_GPL(rtsx_pci_card_exist);
894
895void rtsx_pci_complete_unfinished_transfer(struct rtsx_pcr *pcr)
896{
897 struct completion finish;
898
899 pcr->finish_me = &finish;
900 init_completion(&finish);
901
902 if (pcr->done)
903 complete(pcr->done);
904
905 if (!pcr->remove_pci)
906 rtsx_pci_stop_cmd(pcr);
907
908 wait_for_completion_interruptible_timeout(&finish,
909 msecs_to_jiffies(2));
910 pcr->finish_me = NULL;
911}
912EXPORT_SYMBOL_GPL(rtsx_pci_complete_unfinished_transfer);
913
914static void rtsx_pci_card_detect(struct work_struct *work)
915{
916 struct delayed_work *dwork;
917 struct rtsx_pcr *pcr;
918 unsigned long flags;
919 unsigned int card_detect = 0, card_inserted, card_removed;
920 u32 irq_status;
921
922 dwork = to_delayed_work(work);
923 pcr = container_of(dwork, struct rtsx_pcr, carddet_work);
924
925 pcr_dbg(pcr, "--> %s\n", __func__);
926
927 mutex_lock(&pcr->pcr_mutex);
928 spin_lock_irqsave(&pcr->lock, flags);
929
930 irq_status = rtsx_pci_readl(pcr, RTSX_BIPR);
931 pcr_dbg(pcr, "irq_status: 0x%08x\n", irq_status);
932
933 irq_status &= CARD_EXIST;
934 card_inserted = pcr->card_inserted & irq_status;
935 card_removed = pcr->card_removed;
936 pcr->card_inserted = 0;
937 pcr->card_removed = 0;
938
939 spin_unlock_irqrestore(&pcr->lock, flags);
940
941 if (card_inserted || card_removed) {
942 pcr_dbg(pcr, "card_inserted: 0x%x, card_removed: 0x%x\n",
943 card_inserted, card_removed);
944
945 if (pcr->ops->cd_deglitch)
946 card_inserted = pcr->ops->cd_deglitch(pcr);
947
948 card_detect = card_inserted | card_removed;
949
950 pcr->card_exist |= card_inserted;
951 pcr->card_exist &= ~card_removed;
952 }
953
954 mutex_unlock(&pcr->pcr_mutex);
955
956 if ((card_detect & SD_EXIST) && pcr->slots[RTSX_SD_CARD].card_event)
957 pcr->slots[RTSX_SD_CARD].card_event(
958 pcr->slots[RTSX_SD_CARD].p_dev);
959 if ((card_detect & MS_EXIST) && pcr->slots[RTSX_MS_CARD].card_event)
960 pcr->slots[RTSX_MS_CARD].card_event(
961 pcr->slots[RTSX_MS_CARD].p_dev);
962}
963
static void rtsx_pci_process_ocp(struct rtsx_pcr *pcr)
{
	if (pcr->ops->process_ocp) {
		pcr->ops->process_ocp(pcr);
	} else {
		if (!pcr->option.ocp_en)
			return;
		rtsx_pci_get_ocpstat(pcr, &pcr->ocp_stat);
		if (pcr->ocp_stat & (SD_OC_NOW | SD_OC_EVER)) {
			rtsx_pci_card_power_off(pcr, RTSX_SD_CARD);
			rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
			rtsx_pci_clear_ocpstat(pcr);
			pcr->ocp_stat = 0;
		}
	}
}
980
981static int rtsx_pci_process_ocp_interrupt(struct rtsx_pcr *pcr)
982{
983 if (pcr->option.ocp_en)
984 rtsx_pci_process_ocp(pcr);
985
986 return 0;
987}
988
989static irqreturn_t rtsx_pci_isr(int irq, void *dev_id)
990{
991 struct rtsx_pcr *pcr = dev_id;
992 u32 int_reg;
993
994 if (!pcr)
995 return IRQ_NONE;
996
997 spin_lock(&pcr->lock);
998
999 int_reg = rtsx_pci_readl(pcr, RTSX_BIPR);
1000 /* Clear interrupt flag */
1001 rtsx_pci_writel(pcr, RTSX_BIPR, int_reg);
1002 if ((int_reg & pcr->bier) == 0) {
1003 spin_unlock(&pcr->lock);
1004 return IRQ_NONE;
1005 }
1006 if (int_reg == 0xFFFFFFFF) {
1007 spin_unlock(&pcr->lock);
1008 return IRQ_HANDLED;
1009 }
1010
1011 int_reg &= (pcr->bier | 0x7FFFFF);
1012
1013 if (int_reg & SD_OC_INT)
1014 rtsx_pci_process_ocp_interrupt(pcr);
1015
1016 if (int_reg & SD_INT) {
1017 if (int_reg & SD_EXIST) {
1018 pcr->card_inserted |= SD_EXIST;
1019 } else {
1020 pcr->card_removed |= SD_EXIST;
1021 pcr->card_inserted &= ~SD_EXIST;
1022 }
1023 pcr->dma_error_count = 0;
1024 }
1025
1026 if (int_reg & MS_INT) {
1027 if (int_reg & MS_EXIST) {
1028 pcr->card_inserted |= MS_EXIST;
1029 } else {
1030 pcr->card_removed |= MS_EXIST;
1031 pcr->card_inserted &= ~MS_EXIST;
1032 }
1033 }
1034
1035 if (int_reg & (NEED_COMPLETE_INT | DELINK_INT)) {
1036 if (int_reg & (TRANS_FAIL_INT | DELINK_INT)) {
1037 pcr->trans_result = TRANS_RESULT_FAIL;
1038 if (pcr->done)
1039 complete(pcr->done);
1040 } else if (int_reg & TRANS_OK_INT) {
1041 pcr->trans_result = TRANS_RESULT_OK;
1042 if (pcr->done)
1043 complete(pcr->done);
1044 }
1045 }
1046
	if ((pcr->card_inserted || pcr->card_removed) && !(int_reg & SD_OC_INT))
		schedule_delayed_work(&pcr->carddet_work,
1049 msecs_to_jiffies(200));
1050
1051 spin_unlock(&pcr->lock);
1052 return IRQ_HANDLED;
1053}
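/*
 * Interrupt path overview: the handler reads RTSX_BIPR and writes the value
 * back to clear the pending bits, then bails out early when none of the
 * enabled bits in pcr->bier are set.  SD/MS insertion and removal events are
 * accumulated in pcr->card_inserted/card_removed under pcr->lock, command and
 * DMA completions wake the waiter through pcr->done, and card detection is
 * debounced by scheduling carddet_work about 200 ms later.
 */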
1054
1055static int rtsx_pci_acquire_irq(struct rtsx_pcr *pcr)
1056{
1057 pcr_dbg(pcr, "%s: pcr->msi_en = %d, pci->irq = %d\n",
1058 __func__, pcr->msi_en, pcr->pci->irq);
1059
1060 if (request_irq(pcr->pci->irq, rtsx_pci_isr,
1061 pcr->msi_en ? 0 : IRQF_SHARED,
1062 DRV_NAME_RTSX_PCI, pcr)) {
1063 dev_err(&(pcr->pci->dev),
1064 "rtsx_sdmmc: unable to grab IRQ %d, disabling device\n",
1065 pcr->pci->irq);
1066 return -1;
1067 }
1068
1069 pcr->irq = pcr->pci->irq;
1070 pci_intx(pcr->pci, !pcr->msi_en);
1071
1072 return 0;
1073}
1074
1075static void rtsx_enable_aspm(struct rtsx_pcr *pcr)
1076{
1077 if (pcr->ops->set_aspm)
1078 pcr->ops->set_aspm(pcr, true);
1079 else
1080 rtsx_comm_set_aspm(pcr, true);
1081}
1082
1083static void rtsx_comm_pm_power_saving(struct rtsx_pcr *pcr)
1084{
1085 struct rtsx_cr_option *option = &pcr->option;
1086
1087 if (option->ltr_enabled) {
1088 u32 latency = option->ltr_l1off_latency;
1089
1090 if (rtsx_check_dev_flag(pcr, L1_SNOOZE_TEST_EN))
1091 mdelay(option->l1_snooze_delay);
1092
1093 rtsx_set_ltr_latency(pcr, latency);
1094 }
1095
1096 if (rtsx_check_dev_flag(pcr, LTR_L1SS_PWR_GATE_EN))
1097 rtsx_set_l1off_sub_cfg_d0(pcr, 0);
1098
1099 rtsx_enable_aspm(pcr);
1100}
1101
1102static void rtsx_pm_power_saving(struct rtsx_pcr *pcr)
1103{
1104 if (pcr->ops->power_saving)
1105 pcr->ops->power_saving(pcr);
1106 else
1107 rtsx_comm_pm_power_saving(pcr);
1108}
1109
1110static void rtsx_pci_idle_work(struct work_struct *work)
1111{
1112 struct delayed_work *dwork = to_delayed_work(work);
1113 struct rtsx_pcr *pcr = container_of(dwork, struct rtsx_pcr, idle_work);
1114
1115 pcr_dbg(pcr, "--> %s\n", __func__);
1116
1117 mutex_lock(&pcr->pcr_mutex);
1118
1119 pcr->state = PDEV_STAT_IDLE;
1120
1121 if (pcr->ops->disable_auto_blink)
1122 pcr->ops->disable_auto_blink(pcr);
1123 if (pcr->ops->turn_off_led)
1124 pcr->ops->turn_off_led(pcr);
1125
1126 rtsx_pm_power_saving(pcr);
1127
1128 mutex_unlock(&pcr->pcr_mutex);
1129}
1130
1131#ifdef CONFIG_PM
1132static void rtsx_pci_power_off(struct rtsx_pcr *pcr, u8 pm_state)
1133{
1134 if (pcr->ops->turn_off_led)
1135 pcr->ops->turn_off_led(pcr);
1136
1137 rtsx_pci_writel(pcr, RTSX_BIER, 0);
1138 pcr->bier = 0;
1139
1140 rtsx_pci_write_register(pcr, PETXCFG, 0x08, 0x08);
1141 rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, pm_state);
1142
1143 if (pcr->ops->force_power_down)
1144 pcr->ops->force_power_down(pcr, pm_state);
1145}
1146#endif
1147
void rtsx_pci_enable_ocp(struct rtsx_pcr *pcr)
{
	u8 val = SD_OCP_INT_EN | SD_DETECT_EN;

	if (pcr->ops->enable_ocp) {
		pcr->ops->enable_ocp(pcr);
	} else {
		rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, 0);
		rtsx_pci_write_register(pcr, REG_OCPCTL, 0xFF, val);
	}
}
1160
void rtsx_pci_disable_ocp(struct rtsx_pcr *pcr)
{
	u8 mask = SD_OCP_INT_EN | SD_DETECT_EN;

	if (pcr->ops->disable_ocp) {
		pcr->ops->disable_ocp(pcr);
	} else {
		rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
		rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN,
				OC_POWER_DOWN);
	}
}
1173
void rtsx_pci_init_ocp(struct rtsx_pcr *pcr)
{
	if (pcr->ops->init_ocp) {
		pcr->ops->init_ocp(pcr);
	} else {
		struct rtsx_cr_option *option = &(pcr->option);

		if (option->ocp_en) {
			u8 val = option->sd_800mA_ocp_thd;

			rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, 0);
			rtsx_pci_write_register(pcr, REG_OCPPARA1,
				SD_OCP_TIME_MASK, SD_OCP_TIME_800);
			rtsx_pci_write_register(pcr, REG_OCPPARA2,
				SD_OCP_THD_MASK, val);
			rtsx_pci_write_register(pcr, REG_OCPGLITCH,
				SD_OCP_GLITCH_MASK, pcr->hw_param.ocp_glitch);
			rtsx_pci_enable_ocp(pcr);
		}
	}
}
1195
1196int rtsx_pci_get_ocpstat(struct rtsx_pcr *pcr, u8 *val)
1197{
1198 if (pcr->ops->get_ocpstat)
1199 return pcr->ops->get_ocpstat(pcr, val);
1200 else
1201 return rtsx_pci_read_register(pcr, REG_OCPSTAT, val);
1202}
1203
1204void rtsx_pci_clear_ocpstat(struct rtsx_pcr *pcr)
1205{
1206 if (pcr->ops->clear_ocpstat) {
1207 pcr->ops->clear_ocpstat(pcr);
1208 } else {
1209 u8 mask = SD_OCP_INT_CLR | SD_OC_CLR;
1210 u8 val = SD_OCP_INT_CLR | SD_OC_CLR;
1211
1212 rtsx_pci_write_register(pcr, REG_OCPCTL, mask, val);
		udelay(100);
		rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
1215 }
1216}
1217
1218int rtsx_sd_power_off_card3v3(struct rtsx_pcr *pcr)
1219{
1220 rtsx_pci_write_register(pcr, CARD_CLK_EN, SD_CLK_EN |
1221 MS_CLK_EN | SD40_CLK_EN, 0);
1222 rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
	rtsx_pci_card_power_off(pcr, RTSX_SD_CARD);
1224
1225 msleep(50);
1226
1227 rtsx_pci_card_pull_ctl_disable(pcr, RTSX_SD_CARD);
1228
1229 return 0;
1230}
1231
1232int rtsx_ms_power_off_card3v3(struct rtsx_pcr *pcr)
1233{
1234 rtsx_pci_write_register(pcr, CARD_CLK_EN, SD_CLK_EN |
1235 MS_CLK_EN | SD40_CLK_EN, 0);
1236
1237 rtsx_pci_card_pull_ctl_disable(pcr, RTSX_MS_CARD);
1238
1239 rtsx_pci_write_register(pcr, CARD_OE, MS_OUTPUT_EN, 0);
1240 rtsx_pci_card_power_off(pcr, RTSX_MS_CARD);
1241
1242 return 0;
1243}
1244
1245static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
1246{
1247 int err;
1248
1249 pcr->pcie_cap = pci_find_capability(pcr->pci, PCI_CAP_ID_EXP);
1250 rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);
1251
1252 rtsx_pci_enable_bus_int(pcr);
1253
1254 /* Power on SSC */
1255 err = rtsx_pci_write_register(pcr, FPDCTL, SSC_POWER_DOWN, 0);
1256 if (err < 0)
1257 return err;
1258
1259 /* Wait SSC power stable */
1260 udelay(200);
1261
1262 rtsx_pci_disable_aspm(pcr);
1263 if (pcr->ops->optimize_phy) {
1264 err = pcr->ops->optimize_phy(pcr);
1265 if (err < 0)
1266 return err;
1267 }
1268
1269 rtsx_pci_init_cmd(pcr);
1270
1271 /* Set mcu_cnt to 7 to ensure data can be sampled properly */
1272 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV, 0x07, 0x07);
1273
1274 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, HOST_SLEEP_STATE, 0x03, 0x00);
1275 /* Disable card clock */
1276 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_EN, 0x1E, 0);
1277 /* Reset delink mode */
1278 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CHANGE_LINK_STATE, 0x0A, 0);
1279 /* Card driving select */
1280 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_DRIVE_SEL,
1281 0xFF, pcr->card_drive_sel);
1282 /* Enable SSC Clock */
1283 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1,
1284 0xFF, SSC_8X_EN | SSC_SEL_4M);
1285 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF, 0x12);
1286 /* Disable cd_pwr_save */
1287 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CHANGE_LINK_STATE, 0x16, 0x10);
1288 /* Clear Link Ready Interrupt */
1289 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, IRQSTAT0,
1290 LINK_RDY_INT, LINK_RDY_INT);
1291 /* Enlarge the estimation window of PERST# glitch
1292 * to reduce the chance of invalid card interrupt
1293 */
1294 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PERST_GLITCH_WIDTH, 0xFF, 0x80);
1295 /* Update RC oscillator to 400k
1296 * bit[0] F_HIGH: for RC oscillator, Rst_value is 1'b1
1297 * 1: 2M 0: 400k
1298 */
1299 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, RCCTL, 0x01, 0x00);
1300 /* Set interrupt write clear
1301 * bit 1: U_elbi_if_rd_clr_en
1302 * 1: Enable ELBI interrupt[31:22] & [7:0] flag read clear
1303 * 0: ELBI interrupt flag[31:22] & [7:0] only can be write clear
1304 */
1305 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, NFTS_TX_CTRL, 0x02, 0);
1306
1307 err = rtsx_pci_send_cmd(pcr, 100);
1308 if (err < 0)
1309 return err;
1310
1311 switch (PCI_PID(pcr)) {
1312 case PID_5250:
1313 case PID_524A:
1314 case PID_525A:
1315 case PID_5260:
1316 rtsx_pci_write_register(pcr, PM_CLK_FORCE_CTL, 1, 1);
1317 break;
1318 default:
1319 break;
1320 }
1321
	/* Initialize over-current protection (OCP) */
1323 rtsx_pci_init_ocp(pcr);
1324
	/* Enable clk_request_n to enable clock power management */
1326 rtsx_pci_write_config_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL + 1, 1);
1327 /* Enter L1 when host tx idle */
1328 rtsx_pci_write_config_byte(pcr, 0x70F, 0x5B);
1329
1330 if (pcr->ops->extra_init_hw) {
1331 err = pcr->ops->extra_init_hw(pcr);
1332 if (err < 0)
1333 return err;
1334 }
1335
1336 /* No CD interrupt if probing driver with card inserted.
1337 * So we need to initialize pcr->card_exist here.
1338 */
1339 if (pcr->ops->cd_deglitch)
1340 pcr->card_exist = pcr->ops->cd_deglitch(pcr);
1341 else
1342 pcr->card_exist = rtsx_pci_readl(pcr, RTSX_BIPR) & CARD_EXIST;
1343
1344 return 0;
1345}
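/*
 * rtsx_pci_init_hw() summary: power up the SSC clock, batch the common
 * register setup (host sleep state, card clocks off, delink reset, drive
 * strength, SSC enable, PERST# glitch window, interrupt write-clear) into a
 * single command transfer, apply chip-specific quirks via extra_init_hw(),
 * and finally seed pcr->card_exist so that a card already inserted at probe
 * time is noticed even though no card-detect interrupt will fire for it.
 */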
1346
1347static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
1348{
1349 int err;
1350
1351 spin_lock_init(&pcr->lock);
1352 mutex_init(&pcr->pcr_mutex);
1353
1354 switch (PCI_PID(pcr)) {
1355 default:
1356 case 0x5209:
1357 rts5209_init_params(pcr);
1358 break;
1359
1360 case 0x5229:
1361 rts5229_init_params(pcr);
1362 break;
1363
1364 case 0x5289:
1365 rtl8411_init_params(pcr);
1366 break;
1367
1368 case 0x5227:
1369 rts5227_init_params(pcr);
1370 break;
1371
1372 case 0x522A:
1373 rts522a_init_params(pcr);
1374 break;
1375
1376 case 0x5249:
1377 rts5249_init_params(pcr);
1378 break;
1379
1380 case 0x524A:
1381 rts524a_init_params(pcr);
1382 break;
1383
1384 case 0x525A:
1385 rts525a_init_params(pcr);
1386 break;
1387
1388 case 0x5287:
1389 rtl8411b_init_params(pcr);
1390 break;
1391
1392 case 0x5286:
1393 rtl8402_init_params(pcr);
1394 break;
1395 case 0x5260:
1396 rts5260_init_params(pcr);
1397 break;
1398 }
1399
1400 pcr_dbg(pcr, "PID: 0x%04x, IC version: 0x%02x\n",
1401 PCI_PID(pcr), pcr->ic_version);
1402
1403 pcr->slots = kcalloc(pcr->num_slots, sizeof(struct rtsx_slot),
1404 GFP_KERNEL);
1405 if (!pcr->slots)
1406 return -ENOMEM;
1407
1408 if (pcr->ops->fetch_vendor_settings)
1409 pcr->ops->fetch_vendor_settings(pcr);
1410
1411 pcr_dbg(pcr, "pcr->aspm_en = 0x%x\n", pcr->aspm_en);
1412 pcr_dbg(pcr, "pcr->sd30_drive_sel_1v8 = 0x%x\n",
1413 pcr->sd30_drive_sel_1v8);
1414 pcr_dbg(pcr, "pcr->sd30_drive_sel_3v3 = 0x%x\n",
1415 pcr->sd30_drive_sel_3v3);
1416 pcr_dbg(pcr, "pcr->card_drive_sel = 0x%x\n",
1417 pcr->card_drive_sel);
1418 pcr_dbg(pcr, "pcr->flags = 0x%x\n", pcr->flags);
1419
1420 pcr->state = PDEV_STAT_IDLE;
1421 err = rtsx_pci_init_hw(pcr);
1422 if (err < 0) {
1423 kfree(pcr->slots);
1424 return err;
1425 }
1426
1427 return 0;
1428}
1429
1430static int rtsx_pci_probe(struct pci_dev *pcidev,
1431 const struct pci_device_id *id)
1432{
1433 struct rtsx_pcr *pcr;
1434 struct pcr_handle *handle;
1435 u32 base, len;
1436 int ret, i, bar = 0;
1437
1438 dev_dbg(&(pcidev->dev),
1439 ": Realtek PCI-E Card Reader found at %s [%04x:%04x] (rev %x)\n",
1440 pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device,
1441 (int)pcidev->revision);
1442
1443 ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32));
1444 if (ret < 0)
1445 return ret;
1446
1447 ret = pci_enable_device(pcidev);
1448 if (ret)
1449 return ret;
1450
1451 ret = pci_request_regions(pcidev, DRV_NAME_RTSX_PCI);
1452 if (ret)
1453 goto disable;
1454
1455 pcr = kzalloc(sizeof(*pcr), GFP_KERNEL);
1456 if (!pcr) {
1457 ret = -ENOMEM;
1458 goto release_pci;
1459 }
1460
1461 handle = kzalloc(sizeof(*handle), GFP_KERNEL);
1462 if (!handle) {
1463 ret = -ENOMEM;
1464 goto free_pcr;
1465 }
1466 handle->pcr = pcr;
1467
1468 idr_preload(GFP_KERNEL);
1469 spin_lock(&rtsx_pci_lock);
1470 ret = idr_alloc(&rtsx_pci_idr, pcr, 0, 0, GFP_NOWAIT);
1471 if (ret >= 0)
1472 pcr->id = ret;
1473 spin_unlock(&rtsx_pci_lock);
1474 idr_preload_end();
1475 if (ret < 0)
1476 goto free_handle;
1477
1478 pcr->pci = pcidev;
1479 dev_set_drvdata(&pcidev->dev, handle);
1480
1481 if (CHK_PCI_PID(pcr, 0x525A))
1482 bar = 1;
1483 len = pci_resource_len(pcidev, bar);
1484 base = pci_resource_start(pcidev, bar);
1485 pcr->remap_addr = ioremap_nocache(base, len);
1486 if (!pcr->remap_addr) {
1487 ret = -ENOMEM;
1488 goto free_handle;
1489 }
1490
1491 pcr->rtsx_resv_buf = dma_alloc_coherent(&(pcidev->dev),
1492 RTSX_RESV_BUF_LEN, &(pcr->rtsx_resv_buf_addr),
1493 GFP_KERNEL);
1494 if (pcr->rtsx_resv_buf == NULL) {
1495 ret = -ENXIO;
1496 goto unmap;
1497 }
1498 pcr->host_cmds_ptr = pcr->rtsx_resv_buf;
1499 pcr->host_cmds_addr = pcr->rtsx_resv_buf_addr;
1500 pcr->host_sg_tbl_ptr = pcr->rtsx_resv_buf + HOST_CMDS_BUF_LEN;
1501 pcr->host_sg_tbl_addr = pcr->rtsx_resv_buf_addr + HOST_CMDS_BUF_LEN;
1502
1503 pcr->card_inserted = 0;
1504 pcr->card_removed = 0;
1505 INIT_DELAYED_WORK(&pcr->carddet_work, rtsx_pci_card_detect);
1506 INIT_DELAYED_WORK(&pcr->idle_work, rtsx_pci_idle_work);
1507
1508 pcr->msi_en = msi_en;
1509 if (pcr->msi_en) {
1510 ret = pci_enable_msi(pcidev);
1511 if (ret)
1512 pcr->msi_en = false;
1513 }
1514
1515 ret = rtsx_pci_acquire_irq(pcr);
1516 if (ret < 0)
1517 goto disable_msi;
1518
1519 pci_set_master(pcidev);
1520 synchronize_irq(pcr->irq);
1521
1522 ret = rtsx_pci_init_chip(pcr);
1523 if (ret < 0)
1524 goto disable_irq;
1525
1526 for (i = 0; i < ARRAY_SIZE(rtsx_pcr_cells); i++) {
1527 rtsx_pcr_cells[i].platform_data = handle;
1528 rtsx_pcr_cells[i].pdata_size = sizeof(*handle);
1529 }
1530 ret = mfd_add_devices(&pcidev->dev, pcr->id, rtsx_pcr_cells,
1531 ARRAY_SIZE(rtsx_pcr_cells), NULL, 0, NULL);
1532 if (ret < 0)
		goto free_slots;

	schedule_delayed_work(&pcr->idle_work, msecs_to_jiffies(200));
1536
1537 return 0;
1538
free_slots:
	kfree(pcr->slots);
disable_irq:
1542 free_irq(pcr->irq, (void *)pcr);
1543disable_msi:
1544 if (pcr->msi_en)
1545 pci_disable_msi(pcr->pci);
1546 dma_free_coherent(&(pcr->pci->dev), RTSX_RESV_BUF_LEN,
1547 pcr->rtsx_resv_buf, pcr->rtsx_resv_buf_addr);
1548unmap:
1549 iounmap(pcr->remap_addr);
1550free_handle:
1551 kfree(handle);
1552free_pcr:
1553 kfree(pcr);
1554release_pci:
1555 pci_release_regions(pcidev);
1556disable:
1557 pci_disable_device(pcidev);
1558
1559 return ret;
1560}
1561
1562static void rtsx_pci_remove(struct pci_dev *pcidev)
1563{
1564 struct pcr_handle *handle = pci_get_drvdata(pcidev);
1565 struct rtsx_pcr *pcr = handle->pcr;
1566
1567 pcr->remove_pci = true;
1568
1569 /* Disable interrupts at the pcr level */
1570 spin_lock_irq(&pcr->lock);
1571 rtsx_pci_writel(pcr, RTSX_BIER, 0);
1572 pcr->bier = 0;
1573 spin_unlock_irq(&pcr->lock);
1574
1575 cancel_delayed_work_sync(&pcr->carddet_work);
1576 cancel_delayed_work_sync(&pcr->idle_work);
1577
1578 mfd_remove_devices(&pcidev->dev);
1579
1580 dma_free_coherent(&(pcr->pci->dev), RTSX_RESV_BUF_LEN,
1581 pcr->rtsx_resv_buf, pcr->rtsx_resv_buf_addr);
1582 free_irq(pcr->irq, (void *)pcr);
1583 if (pcr->msi_en)
1584 pci_disable_msi(pcr->pci);
1585 iounmap(pcr->remap_addr);
1586
1587 pci_release_regions(pcidev);
1588 pci_disable_device(pcidev);
1589
1590 spin_lock(&rtsx_pci_lock);
1591 idr_remove(&rtsx_pci_idr, pcr->id);
1592 spin_unlock(&rtsx_pci_lock);
1593
1594 kfree(pcr->slots);
1595 kfree(pcr);
1596 kfree(handle);
1597
1598 dev_dbg(&(pcidev->dev),
1599 ": Realtek PCI-E Card Reader at %s [%04x:%04x] has been removed\n",
1600 pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device);
1601}
1602
1603#ifdef CONFIG_PM
1604
1605static int rtsx_pci_suspend(struct pci_dev *pcidev, pm_message_t state)
1606{
1607 struct pcr_handle *handle;
1608 struct rtsx_pcr *pcr;
1609
1610 dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
1611
1612 handle = pci_get_drvdata(pcidev);
1613 pcr = handle->pcr;
1614
1615 cancel_delayed_work(&pcr->carddet_work);
1616 cancel_delayed_work(&pcr->idle_work);
1617
1618 mutex_lock(&pcr->pcr_mutex);
1619
1620 rtsx_pci_power_off(pcr, HOST_ENTER_S3);
1621
1622 pci_save_state(pcidev);
1623 pci_enable_wake(pcidev, pci_choose_state(pcidev, state), 0);
1624 pci_disable_device(pcidev);
1625 pci_set_power_state(pcidev, pci_choose_state(pcidev, state));
1626
1627 mutex_unlock(&pcr->pcr_mutex);
1628 return 0;
1629}
1630
1631static int rtsx_pci_resume(struct pci_dev *pcidev)
1632{
1633 struct pcr_handle *handle;
1634 struct rtsx_pcr *pcr;
1635 int ret = 0;
1636
1637 dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
1638
1639 handle = pci_get_drvdata(pcidev);
1640 pcr = handle->pcr;
1641
1642 mutex_lock(&pcr->pcr_mutex);
1643
1644 pci_set_power_state(pcidev, PCI_D0);
1645 pci_restore_state(pcidev);
1646 ret = pci_enable_device(pcidev);
1647 if (ret)
1648 goto out;
1649 pci_set_master(pcidev);
1650
1651 ret = rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, 0x00);
1652 if (ret)
1653 goto out;
1654
1655 ret = rtsx_pci_init_hw(pcr);
1656 if (ret)
1657 goto out;
1658
1659 schedule_delayed_work(&pcr->idle_work, msecs_to_jiffies(200));
1660
1661out:
1662 mutex_unlock(&pcr->pcr_mutex);
1663 return ret;
1664}
1665
1666static void rtsx_pci_shutdown(struct pci_dev *pcidev)
1667{
1668 struct pcr_handle *handle;
1669 struct rtsx_pcr *pcr;
1670
1671 dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
1672
1673 handle = pci_get_drvdata(pcidev);
1674 pcr = handle->pcr;
1675 rtsx_pci_power_off(pcr, HOST_ENTER_S1);
1676
1677 pci_disable_device(pcidev);
1678 free_irq(pcr->irq, (void *)pcr);
1679 if (pcr->msi_en)
1680 pci_disable_msi(pcr->pci);
1681}
1682
1683#else /* CONFIG_PM */
1684
1685#define rtsx_pci_suspend NULL
1686#define rtsx_pci_resume NULL
1687#define rtsx_pci_shutdown NULL
1688
1689#endif /* CONFIG_PM */
1690
1691static struct pci_driver rtsx_pci_driver = {
1692 .name = DRV_NAME_RTSX_PCI,
1693 .id_table = rtsx_pci_ids,
1694 .probe = rtsx_pci_probe,
1695 .remove = rtsx_pci_remove,
1696 .suspend = rtsx_pci_suspend,
1697 .resume = rtsx_pci_resume,
1698 .shutdown = rtsx_pci_shutdown,
1699};
1700module_pci_driver(rtsx_pci_driver);
1701
1702MODULE_LICENSE("GPL");
1703MODULE_AUTHOR("Wei WANG <wei_wang@realsil.com.cn>");
1704MODULE_DESCRIPTION("Realtek PCI-E Card Reader Driver");