// SPDX-License-Identifier: GPL-2.0
/*
 * MediaTek UART APDMA driver.
 *
 * Copyright (c) 2019 MediaTek Inc.
 * Author: Long Cheng <long.cheng@mediatek.com>
 */

#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "../virt-dma.h"

/* The default number of virtual channels */
#define MTK_UART_APDMA_NR_VCHANS	8

#define VFF_EN_B		BIT(0)
#define VFF_STOP_B		BIT(0)
#define VFF_FLUSH_B		BIT(0)
#define VFF_4G_EN_B		BIT(0)
/* rx valid size >= vff thre */
#define VFF_RX_INT_EN_B		(BIT(0) | BIT(1))
/* tx left size >= vff thre */
#define VFF_TX_INT_EN_B		BIT(0)
#define VFF_WARM_RST_B		BIT(0)
#define VFF_RX_INT_CLR_B	(BIT(0) | BIT(1))
#define VFF_TX_INT_CLR_B	0
#define VFF_STOP_CLR_B		0
#define VFF_EN_CLR_B		0
#define VFF_INT_EN_CLR_B	0
#define VFF_4G_SUPPORT_CLR_B	0

/*
 * Interrupt trigger level for TX.
 * If the threshold equals the full window size (n), no polling of
 * VFF_FLUSH is required to start TX; otherwise VFF_FLUSH must be polled.
 */
#define VFF_TX_THRE(n)		(n)
/* Interrupt trigger level for RX: 3/4 of the window size */
#define VFF_RX_THRE(n)		((n) * 3 / 4)

#define VFF_RING_SIZE	0xffff
/* this bit is inverted each time the ring pointer wraps around */
#define VFF_RING_WRAP	0x10000
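/*
 * Worked example (illustrative, assuming a 0x1000-byte window): the
 * read/write pointers keep the byte offset in bits [15:0] and the wrap
 * flag in bit 16. Advancing WPT from offset 0xfff by one byte makes the
 * offset equal the window size, which mtk_uart_apdma_start_tx() folds
 * back to offset 0 with the wrap flag toggled:
 *
 *	wpt = 0xfff + 1;				-> 0x01000
 *	(wpt & VFF_RING_SIZE) == 0x1000			-> fold
 *	wpt = (wpt & VFF_RING_WRAP) ^ VFF_RING_WRAP;	-> 0x10000
 */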

#define VFF_INT_FLAG		0x00
#define VFF_INT_EN		0x04
#define VFF_EN			0x08
#define VFF_RST			0x0c
#define VFF_STOP		0x10
#define VFF_FLUSH		0x14
#define VFF_ADDR		0x1c
#define VFF_LEN			0x24
#define VFF_THRE		0x28
#define VFF_WPT			0x2c
#define VFF_RPT			0x30
/* TX: the buffer size HW can read. RX: the buffer size SW can read. */
#define VFF_VALID_SIZE		0x3c
/* TX: the buffer size SW can write. RX: the buffer size HW can write. */
#define VFF_LEFT_SIZE		0x40
#define VFF_DEBUG_STATUS	0x50
#define VFF_4G_SUPPORT		0x54

struct mtk_uart_apdmadev {
	struct dma_device ddev;
	struct clk *clk;
	bool support_33bits;
	unsigned int dma_requests;
};

struct mtk_uart_apdma_desc {
	struct virt_dma_desc vd;

	dma_addr_t addr;
	unsigned int avail_len;
};

struct mtk_chan {
	struct virt_dma_chan vc;
	struct dma_slave_config cfg;
	struct mtk_uart_apdma_desc *desc;
	enum dma_transfer_direction dir;

	void __iomem *base;
	unsigned int irq;

	/* residue of the latest RX transfer, reported via device_tx_status */
	unsigned int rx_status;
};

static inline struct mtk_uart_apdmadev *
to_mtk_uart_apdma_dev(struct dma_device *d)
{
	return container_of(d, struct mtk_uart_apdmadev, ddev);
}

static inline struct mtk_chan *to_mtk_uart_apdma_chan(struct dma_chan *c)
{
	return container_of(c, struct mtk_chan, vc.chan);
}

static inline struct mtk_uart_apdma_desc *to_mtk_uart_apdma_desc
	(struct dma_async_tx_descriptor *t)
{
	return container_of(t, struct mtk_uart_apdma_desc, vd.tx);
}

static void mtk_uart_apdma_write(struct mtk_chan *c,
				 unsigned int reg, unsigned int val)
{
	writel(val, c->base + reg);
}

static unsigned int mtk_uart_apdma_read(struct mtk_chan *c, unsigned int reg)
{
	return readl(c->base + reg);
}

static void mtk_uart_apdma_desc_free(struct virt_dma_desc *vd)
{
	kfree(container_of(vd, struct mtk_uart_apdma_desc, vd));
}

static void mtk_uart_apdma_start_tx(struct mtk_chan *c)
{
	struct mtk_uart_apdmadev *mtkd =
				to_mtk_uart_apdma_dev(c->vc.chan.device);
	struct mtk_uart_apdma_desc *d = c->desc;
	unsigned int wpt, vff_sz;

	vff_sz = c->cfg.dst_port_window_size;
	if (!mtk_uart_apdma_read(c, VFF_LEN)) {
		mtk_uart_apdma_write(c, VFF_ADDR, d->addr);
		mtk_uart_apdma_write(c, VFF_LEN, vff_sz);
		mtk_uart_apdma_write(c, VFF_THRE, VFF_TX_THRE(vff_sz));
		mtk_uart_apdma_write(c, VFF_WPT, 0);
		mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_TX_INT_CLR_B);

		if (mtkd->support_33bits)
			mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_EN_B);
	}

	mtk_uart_apdma_write(c, VFF_EN, VFF_EN_B);
	if (mtk_uart_apdma_read(c, VFF_EN) != VFF_EN_B)
		dev_err(c->vc.chan.device->dev, "Enable TX fail\n");

	if (!mtk_uart_apdma_read(c, VFF_LEFT_SIZE)) {
		mtk_uart_apdma_write(c, VFF_INT_EN, VFF_TX_INT_EN_B);
		return;
	}

	wpt = mtk_uart_apdma_read(c, VFF_WPT);

	wpt += c->desc->avail_len;
	if ((wpt & VFF_RING_SIZE) == vff_sz)
		wpt = (wpt & VFF_RING_WRAP) ^ VFF_RING_WRAP;

	/* Let DMA start moving data */
	mtk_uart_apdma_write(c, VFF_WPT, wpt);

	/* HW auto set to 0 when left size >= threshold */
	mtk_uart_apdma_write(c, VFF_INT_EN, VFF_TX_INT_EN_B);
	if (!mtk_uart_apdma_read(c, VFF_FLUSH))
		mtk_uart_apdma_write(c, VFF_FLUSH, VFF_FLUSH_B);
}

static void mtk_uart_apdma_start_rx(struct mtk_chan *c)
{
	struct mtk_uart_apdmadev *mtkd =
				to_mtk_uart_apdma_dev(c->vc.chan.device);
	struct mtk_uart_apdma_desc *d = c->desc;
	unsigned int vff_sz;

	vff_sz = c->cfg.src_port_window_size;
	if (!mtk_uart_apdma_read(c, VFF_LEN)) {
		mtk_uart_apdma_write(c, VFF_ADDR, d->addr);
		mtk_uart_apdma_write(c, VFF_LEN, vff_sz);
		mtk_uart_apdma_write(c, VFF_THRE, VFF_RX_THRE(vff_sz));
		mtk_uart_apdma_write(c, VFF_RPT, 0);
		mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_RX_INT_CLR_B);

		if (mtkd->support_33bits)
			mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_EN_B);
	}

	mtk_uart_apdma_write(c, VFF_INT_EN, VFF_RX_INT_EN_B);
	mtk_uart_apdma_write(c, VFF_EN, VFF_EN_B);
	if (mtk_uart_apdma_read(c, VFF_EN) != VFF_EN_B)
		dev_err(c->vc.chan.device->dev, "Enable RX fail\n");
}

static void mtk_uart_apdma_tx_handler(struct mtk_chan *c)
{
	mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_TX_INT_CLR_B);
	mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B);
	mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B);
}

static void mtk_uart_apdma_rx_handler(struct mtk_chan *c)
{
	struct mtk_uart_apdma_desc *d = c->desc;
	unsigned int len, wg, rg;
	int cnt;

	mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_RX_INT_CLR_B);

	if (!mtk_uart_apdma_read(c, VFF_VALID_SIZE))
		return;

	mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B);
	mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B);

	len = c->cfg.src_port_window_size;
	rg = mtk_uart_apdma_read(c, VFF_RPT);
	wg = mtk_uart_apdma_read(c, VFF_WPT);
	cnt = (wg & VFF_RING_SIZE) - (rg & VFF_RING_SIZE);

	/*
	 * The buffer is a ring buffer. If the wrap bits of RPT and WPT
	 * differ, WPT has started the next cycle, so add one full window.
	 */
	if ((rg ^ wg) & VFF_RING_WRAP)
		cnt += len;

	c->rx_status = d->avail_len - cnt;
	mtk_uart_apdma_write(c, VFF_RPT, wg);
}

static void mtk_uart_apdma_chan_complete_handler(struct mtk_chan *c)
{
	struct mtk_uart_apdma_desc *d = c->desc;

	if (d) {
		list_del(&d->vd.node);
		vchan_cookie_complete(&d->vd);
		c->desc = NULL;
	}
}

static irqreturn_t mtk_uart_apdma_irq_handler(int irq, void *dev_id)
{
	struct dma_chan *chan = (struct dma_chan *)dev_id;
	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	if (c->dir == DMA_DEV_TO_MEM)
		mtk_uart_apdma_rx_handler(c);
	else if (c->dir == DMA_MEM_TO_DEV)
		mtk_uart_apdma_tx_handler(c);
	mtk_uart_apdma_chan_complete_handler(c);
	spin_unlock_irqrestore(&c->vc.lock, flags);

	return IRQ_HANDLED;
}

static int mtk_uart_apdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct mtk_uart_apdmadev *mtkd = to_mtk_uart_apdma_dev(chan->device);
	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
	unsigned int status;
	int ret;

	ret = pm_runtime_get_sync(mtkd->ddev.dev);
	if (ret < 0) {
		pm_runtime_put_noidle(chan->device->dev);
		return ret;
	}

	mtk_uart_apdma_write(c, VFF_ADDR, 0);
	mtk_uart_apdma_write(c, VFF_THRE, 0);
	mtk_uart_apdma_write(c, VFF_LEN, 0);
	mtk_uart_apdma_write(c, VFF_RST, VFF_WARM_RST_B);

	ret = readx_poll_timeout(readl, c->base + VFF_EN,
				 status, !status, 10, 100);
	if (ret)
		return ret;

	ret = request_irq(c->irq, mtk_uart_apdma_irq_handler,
			  IRQF_TRIGGER_NONE, KBUILD_MODNAME, chan);
	if (ret < 0) {
		dev_err(chan->device->dev, "Can't request dma IRQ\n");
		return -EINVAL;
	}

	if (mtkd->support_33bits)
		mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_SUPPORT_CLR_B);

	return ret;
}

static void mtk_uart_apdma_free_chan_resources(struct dma_chan *chan)
{
	struct mtk_uart_apdmadev *mtkd = to_mtk_uart_apdma_dev(chan->device);
	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);

	free_irq(c->irq, chan);

	tasklet_kill(&c->vc.task);

	vchan_free_chan_resources(&c->vc);

	pm_runtime_put_sync(mtkd->ddev.dev);
}

static enum dma_status mtk_uart_apdma_tx_status(struct dma_chan *chan,
						dma_cookie_t cookie,
						struct dma_tx_state *txstate)
{
	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (!txstate)
		return ret;

	dma_set_residue(txstate, c->rx_status);

	return ret;
}

/*
 * dmaengine_prep_slave_single() calls this function with sglen == 1.
 * The 8250 UART driver uses a single ring buffer and hands over one sg
 * entry at a time; see the usage sketch after this function.
 */
static struct dma_async_tx_descriptor *mtk_uart_apdma_prep_slave_sg
	(struct dma_chan *chan, struct scatterlist *sgl,
	 unsigned int sglen, enum dma_transfer_direction dir,
	 unsigned long tx_flags, void *context)
{
	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
	struct mtk_uart_apdma_desc *d;

	if (!is_slave_direction(dir) || sglen != 1)
		return NULL;

	/* Now allocate and setup the descriptor */
	d = kzalloc(sizeof(*d), GFP_NOWAIT);
	if (!d)
		return NULL;

	d->avail_len = sg_dma_len(sgl);
	d->addr = sg_dma_address(sgl);
	c->dir = dir;

	return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
}

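/*
 * Usage sketch (illustrative, not part of this driver): a client such as
 * the 8250 UART driver hands over one mapped buffer as a single segment
 * and then kicks the engine. The buffer name and length below are
 * assumptions made for the example.
 *
 *	struct dma_async_tx_descriptor *desc;
 *
 *	desc = dmaengine_prep_slave_single(chan, dma_buf, xfer_len,
 *					   DMA_MEM_TO_DEV,
 *					   DMA_PREP_INTERRUPT);
 *	if (desc) {
 *		dmaengine_submit(desc);
 *		dma_async_issue_pending(chan);
 *	}
 */
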
static void mtk_uart_apdma_issue_pending(struct dma_chan *chan)
{
	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
	struct virt_dma_desc *vd;
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	if (vchan_issue_pending(&c->vc) && !c->desc) {
		vd = vchan_next_desc(&c->vc);
		c->desc = to_mtk_uart_apdma_desc(&vd->tx);

		if (c->dir == DMA_DEV_TO_MEM)
			mtk_uart_apdma_start_rx(c);
		else if (c->dir == DMA_MEM_TO_DEV)
			mtk_uart_apdma_start_tx(c);
	}

	spin_unlock_irqrestore(&c->vc.lock, flags);
}

static int mtk_uart_apdma_slave_config(struct dma_chan *chan,
				       struct dma_slave_config *config)
{
	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);

	memcpy(&c->cfg, config, sizeof(*config));

	return 0;
}

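/*
 * Configuration sketch (illustrative): of the copied dma_slave_config,
 * this driver only reads src_port_window_size (RX) and
 * dst_port_window_size (TX), which must match the VFF buffer size the
 * client set up. The window size below is an assumption for the example.
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_port_window_size = 0x1000,
 *	};
 *
 *	dmaengine_slave_config(chan, &cfg);
 */
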
static int mtk_uart_apdma_terminate_all(struct dma_chan *chan)
{
	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
	unsigned long flags;
	unsigned int status;
	LIST_HEAD(head);
	int ret;

	mtk_uart_apdma_write(c, VFF_FLUSH, VFF_FLUSH_B);

	ret = readx_poll_timeout(readl, c->base + VFF_FLUSH,
				 status, status != VFF_FLUSH_B, 10, 100);
	if (ret)
		dev_err(c->vc.chan.device->dev, "flush: fail, status=0x%x\n",
			mtk_uart_apdma_read(c, VFF_DEBUG_STATUS));

	/*
	 * Stopping takes three steps:
	 * 1. set VFF_STOP to 1
	 * 2. wait for VFF_EN to go to 0
	 * 3. clear VFF_STOP back to 0
	 */
	mtk_uart_apdma_write(c, VFF_STOP, VFF_STOP_B);
	ret = readx_poll_timeout(readl, c->base + VFF_EN,
				 status, !status, 10, 100);
	if (ret)
		dev_err(c->vc.chan.device->dev, "stop: fail, status=0x%x\n",
			mtk_uart_apdma_read(c, VFF_DEBUG_STATUS));

	mtk_uart_apdma_write(c, VFF_STOP, VFF_STOP_CLR_B);
	mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B);

	if (c->dir == DMA_DEV_TO_MEM)
		mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_RX_INT_CLR_B);
	else if (c->dir == DMA_MEM_TO_DEV)
		mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_TX_INT_CLR_B);

	synchronize_irq(c->irq);

	spin_lock_irqsave(&c->vc.lock, flags);
	vchan_get_all_descriptors(&c->vc, &head);
	vchan_dma_desc_free_list(&c->vc, &head);
	spin_unlock_irqrestore(&c->vc.lock, flags);

	return 0;
}

static int mtk_uart_apdma_device_pause(struct dma_chan *chan)
{
	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);

	mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B);
	mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B);

	synchronize_irq(c->irq);

	spin_unlock_irqrestore(&c->vc.lock, flags);

	return 0;
}

static void mtk_uart_apdma_free(struct mtk_uart_apdmadev *mtkd)
{
	while (!list_empty(&mtkd->ddev.channels)) {
		struct mtk_chan *c = list_first_entry(&mtkd->ddev.channels,
			struct mtk_chan, vc.chan.device_node);

		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
	}
}

static const struct of_device_id mtk_uart_apdma_match[] = {
	{ .compatible = "mediatek,mt6577-uart-dma", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, mtk_uart_apdma_match);

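/*
 * Illustrative device-tree fragment (addresses, interrupts and clocks are
 * assumptions; see the mtk-uart-apdma binding document for the
 * authoritative example). One reg window and one interrupt are required
 * per channel:
 *
 *	apdma: dma-controller@11000400 {
 *		compatible = "mediatek,mt6577-uart-dma";
 *		reg = <0 0x11000400 0 0x80>,
 *		      <0 0x11000480 0 0x80>;
 *		interrupts = <GIC_SPI 63 IRQ_TYPE_LEVEL_LOW>,
 *			     <GIC_SPI 64 IRQ_TYPE_LEVEL_LOW>;
 *		dma-requests = <2>;
 *		clocks = <&pericfg CLK_PERI_AP_DMA>;
 *		clock-names = "apdma";
 *		#dma-cells = <1>;
 *	};
 */
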
static int mtk_uart_apdma_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct mtk_uart_apdmadev *mtkd;
	int bit_mask = 32, rc;
	struct resource *res;
	struct mtk_chan *c;
	unsigned int i;

	mtkd = devm_kzalloc(&pdev->dev, sizeof(*mtkd), GFP_KERNEL);
	if (!mtkd)
		return -ENOMEM;

	mtkd->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(mtkd->clk)) {
		dev_err(&pdev->dev, "No clock specified\n");
		rc = PTR_ERR(mtkd->clk);
		return rc;
	}

	if (of_property_read_bool(np, "mediatek,dma-33bits"))
		mtkd->support_33bits = true;

	if (mtkd->support_33bits)
		bit_mask = 33;

	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(bit_mask));
	if (rc)
		return rc;

	dma_cap_set(DMA_SLAVE, mtkd->ddev.cap_mask);
	mtkd->ddev.device_alloc_chan_resources =
				mtk_uart_apdma_alloc_chan_resources;
	mtkd->ddev.device_free_chan_resources =
				mtk_uart_apdma_free_chan_resources;
	mtkd->ddev.device_tx_status = mtk_uart_apdma_tx_status;
	mtkd->ddev.device_issue_pending = mtk_uart_apdma_issue_pending;
	mtkd->ddev.device_prep_slave_sg = mtk_uart_apdma_prep_slave_sg;
	mtkd->ddev.device_config = mtk_uart_apdma_slave_config;
	mtkd->ddev.device_pause = mtk_uart_apdma_device_pause;
	mtkd->ddev.device_terminate_all = mtk_uart_apdma_terminate_all;
	mtkd->ddev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE);
	mtkd->ddev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE);
	mtkd->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	mtkd->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
	mtkd->ddev.dev = &pdev->dev;
	INIT_LIST_HEAD(&mtkd->ddev.channels);

	mtkd->dma_requests = MTK_UART_APDMA_NR_VCHANS;
	if (of_property_read_u32(np, "dma-requests", &mtkd->dma_requests)) {
		dev_info(&pdev->dev,
			 "dma-requests property missing, using %u\n",
			 MTK_UART_APDMA_NR_VCHANS);
	}

	for (i = 0; i < mtkd->dma_requests; i++) {
		c = devm_kzalloc(mtkd->ddev.dev, sizeof(*c), GFP_KERNEL);
		if (!c) {
			rc = -ENODEV;
			goto err_no_dma;
		}

		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
		if (!res) {
			rc = -ENODEV;
			goto err_no_dma;
		}

		c->base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(c->base)) {
			rc = PTR_ERR(c->base);
			goto err_no_dma;
		}
		c->vc.desc_free = mtk_uart_apdma_desc_free;
		vchan_init(&c->vc, &mtkd->ddev);

		rc = platform_get_irq(pdev, i);
		if (rc < 0)
			goto err_no_dma;
		c->irq = rc;
	}

	pm_runtime_enable(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);

	rc = dma_async_device_register(&mtkd->ddev);
	if (rc)
		goto rpm_disable;

	platform_set_drvdata(pdev, mtkd);

	/* Device-tree DMA controller registration */
	rc = of_dma_controller_register(np, of_dma_xlate_by_chan_id, mtkd);
	if (rc)
		goto dma_remove;

	return rc;

dma_remove:
	dma_async_device_unregister(&mtkd->ddev);
rpm_disable:
	pm_runtime_disable(&pdev->dev);
err_no_dma:
	mtk_uart_apdma_free(mtkd);
	return rc;
}

static int mtk_uart_apdma_remove(struct platform_device *pdev)
{
	struct mtk_uart_apdmadev *mtkd = platform_get_drvdata(pdev);

	of_dma_controller_free(pdev->dev.of_node);

	mtk_uart_apdma_free(mtkd);

	dma_async_device_unregister(&mtkd->ddev);

	pm_runtime_disable(&pdev->dev);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int mtk_uart_apdma_suspend(struct device *dev)
{
	struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev);

	if (!pm_runtime_suspended(dev))
		clk_disable_unprepare(mtkd->clk);

	return 0;
}

static int mtk_uart_apdma_resume(struct device *dev)
{
	int ret;
	struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev);

	if (!pm_runtime_suspended(dev)) {
		ret = clk_prepare_enable(mtkd->clk);
		if (ret)
			return ret;
	}

	return 0;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_PM
static int mtk_uart_apdma_runtime_suspend(struct device *dev)
{
	struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev);

	clk_disable_unprepare(mtkd->clk);

	return 0;
}

static int mtk_uart_apdma_runtime_resume(struct device *dev)
{
	int ret;
	struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev);

	ret = clk_prepare_enable(mtkd->clk);
	if (ret)
		return ret;

	return 0;
}
#endif /* CONFIG_PM */

static const struct dev_pm_ops mtk_uart_apdma_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(mtk_uart_apdma_suspend, mtk_uart_apdma_resume)
	SET_RUNTIME_PM_OPS(mtk_uart_apdma_runtime_suspend,
			   mtk_uart_apdma_runtime_resume, NULL)
};

static struct platform_driver mtk_uart_apdma_driver = {
	.probe	= mtk_uart_apdma_probe,
	.remove	= mtk_uart_apdma_remove,
	.driver = {
		.name		= KBUILD_MODNAME,
		.pm		= &mtk_uart_apdma_pm_ops,
		.of_match_table = of_match_ptr(mtk_uart_apdma_match),
	},
};

module_platform_driver(mtk_uart_apdma_driver);

MODULE_DESCRIPTION("MediaTek UART APDMA Controller Driver");
MODULE_AUTHOR("Long Cheng <long.cheng@mediatek.com>");
MODULE_LICENSE("GPL v2");