// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2016 MediaTek Inc.
 * Author: Yong Wu <yong.wu@mediatek.com>
 */
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/of_address.h>
#include <linux/of_iommu.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/barrier.h>
#include <soc/mediatek/smi.h>

#include "mtk_iommu.h"

#define REG_MMU_PT_BASE_ADDR			0x000
#define MMU_PT_ADDR_MASK			GENMASK(31, 7)

#define REG_MMU_INVALIDATE			0x020
#define F_ALL_INVLD				0x2
#define F_MMU_INV_RANGE				0x1

#define REG_MMU_INVLD_START_A			0x024
#define REG_MMU_INVLD_END_A			0x028

#define REG_MMU_INV_SEL				0x038
#define F_INVLD_EN0				BIT(0)
#define F_INVLD_EN1				BIT(1)

#define REG_MMU_STANDARD_AXI_MODE		0x048
#define REG_MMU_DCM_DIS				0x050

#define REG_MMU_CTRL_REG			0x110
#define F_MMU_TF_PROT_TO_PROGRAM_ADDR		(2 << 4)
#define F_MMU_PREFETCH_RT_REPLACE_MOD		BIT(4)
#define F_MMU_TF_PROT_TO_PROGRAM_ADDR_MT8173	(2 << 5)

#define REG_MMU_IVRP_PADDR			0x114

#define REG_MMU_VLD_PA_RNG			0x118
#define F_MMU_VLD_PA_RNG(EA, SA)		(((EA) << 8) | (SA))

#define REG_MMU_INT_CONTROL0			0x120
#define F_L2_MULIT_HIT_EN			BIT(0)
#define F_TABLE_WALK_FAULT_INT_EN		BIT(1)
#define F_PREETCH_FIFO_OVERFLOW_INT_EN		BIT(2)
#define F_MISS_FIFO_OVERFLOW_INT_EN		BIT(3)
#define F_PREFETCH_FIFO_ERR_INT_EN		BIT(5)
#define F_MISS_FIFO_ERR_INT_EN			BIT(6)
#define F_INT_CLR_BIT				BIT(12)

#define REG_MMU_INT_MAIN_CONTROL		0x124
						/* mmu0 | mmu1 */
#define F_INT_TRANSLATION_FAULT			(BIT(0) | BIT(7))
#define F_INT_MAIN_MULTI_HIT_FAULT		(BIT(1) | BIT(8))
#define F_INT_INVALID_PA_FAULT			(BIT(2) | BIT(9))
#define F_INT_ENTRY_REPLACEMENT_FAULT		(BIT(3) | BIT(10))
#define F_INT_TLB_MISS_FAULT			(BIT(4) | BIT(11))
#define F_INT_MISS_TRANSACTION_FIFO_FAULT	(BIT(5) | BIT(12))
#define F_INT_PRETETCH_TRANSATION_FIFO_FAULT	(BIT(6) | BIT(13))

#define REG_MMU_CPE_DONE			0x12C

#define REG_MMU_FAULT_ST1			0x134
#define F_REG_MMU0_FAULT_MASK			GENMASK(6, 0)
#define F_REG_MMU1_FAULT_MASK			GENMASK(13, 7)

#define REG_MMU0_FAULT_VA			0x13c
#define F_MMU_FAULT_VA_WRITE_BIT		BIT(1)
#define F_MMU_FAULT_VA_LAYER_BIT		BIT(0)

#define REG_MMU0_INVLD_PA			0x140
#define REG_MMU1_FAULT_VA			0x144
#define REG_MMU1_INVLD_PA			0x148
#define REG_MMU0_INT_ID				0x150
#define REG_MMU1_INT_ID				0x154
#define F_MMU_INT_ID_LARB_ID(a)			(((a) >> 7) & 0x7)
#define F_MMU_INT_ID_PORT_ID(a)			(((a) >> 2) & 0x1f)
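/*
 * Worked example (illustrative only): a raw INT_ID value of 0x1a4 decodes
 * as larb = (0x1a4 >> 7) & 0x7 = 3 and port = (0x1a4 >> 2) & 0x1f = 9.
 */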

#define MTK_PROTECT_PA_ALIGN			128

/*
 * Get the local arbiter ID and the portid within the larb arbiter
 * from mtk_m4u_id which is defined by MTK_M4U_ID.
 */
#define MTK_M4U_TO_LARB(id)		(((id) >> 5) & 0xf)
#define MTK_M4U_TO_PORT(id)		((id) & 0x1f)
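/*
 * Worked example (illustrative only): an id packed as (larb << 5) | port,
 * e.g. 0x47, decodes as larb = (0x47 >> 5) & 0xf = 2 and
 * port = 0x47 & 0x1f = 7.
 */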

struct mtk_iommu_domain {
	spinlock_t		pgtlock; /* lock for page table */

	struct io_pgtable_cfg	cfg;
	struct io_pgtable_ops	*iop;

	struct iommu_domain	domain;
};

static const struct iommu_ops mtk_iommu_ops;

/*
 * In M4U 4GB mode, the physical address is remapped as below:
 *
 * CPU Physical address:
 * =====================
 *
 * 0       1G      2G      3G      4G      5G
 * |---A---|---B---|---C---|---D---|---E---|
 * +--I/O--+------------Memory-------------+
 *
 * IOMMU output physical address:
 * ==============================
 *
 *                                 4G      5G      6G      7G      8G
 *                                 |---E---|---B---|---C---|---D---|
 *                                 +------------Memory-------------+
 *
 * Region 'A' (I/O) can NOT be mapped by the M4U. For regions 'B'/'C'/'D',
 * bit 32 of the CPU physical address always has to be set, while for region
 * 'E' the CPU physical address is kept as is.
 * Additionally, the IOMMU consumers always use the CPU physical address.
 */
#define MTK_IOMMU_4GB_MODE_REMAP_BASE	 0x140000000UL
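/*
 * Worked example of the remap (illustrative only): with 4GB mode enabled,
 * a CPU PA of 0x4000_0000 (region 'B') is programmed into the page table
 * as 0x1_4000_0000, while a CPU PA of 0x1_0000_0000 (region 'E') already
 * has bit 32 set and is left unchanged. Any IOMMU output PA at or above
 * MTK_IOMMU_4GB_MODE_REMAP_BASE therefore maps back to its CPU PA by
 * simply clearing bit 32 (see mtk_iommu_iova_to_phys()).
 */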

static LIST_HEAD(m4ulist);	/* List all the M4U HWs */

#define for_each_m4u(data)	list_for_each_entry(data, &m4ulist, list)

/*
 * There may be 1 or 2 M4U HWs, but we always expect them to be in the same
 * domain for performance.
 *
 * Here we always return the mtk_iommu_data of the first probed M4U, where
 * the iommu domain information is recorded.
 */
static struct mtk_iommu_data *mtk_iommu_get_m4u_data(void)
{
	struct mtk_iommu_data *data;

	for_each_m4u(data)
		return data;

	return NULL;
}

static struct mtk_iommu_domain *to_mtk_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct mtk_iommu_domain, domain);
}

static void mtk_iommu_tlb_flush_all(void *cookie)
{
	struct mtk_iommu_data *data = cookie;

	for_each_m4u(data) {
		writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
			       data->base + REG_MMU_INV_SEL);
		writel_relaxed(F_ALL_INVLD, data->base + REG_MMU_INVALIDATE);
		wmb(); /* Make sure the tlb flush all done */
	}
}

static void mtk_iommu_tlb_add_flush_nosync(unsigned long iova, size_t size,
					   size_t granule, bool leaf,
					   void *cookie)
{
	struct mtk_iommu_data *data = cookie;

	for_each_m4u(data) {
		writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
			       data->base + REG_MMU_INV_SEL);

		writel_relaxed(iova, data->base + REG_MMU_INVLD_START_A);
		writel_relaxed(iova + size - 1,
			       data->base + REG_MMU_INVLD_END_A);
		writel_relaxed(F_MMU_INV_RANGE,
			       data->base + REG_MMU_INVALIDATE);
		data->tlb_flush_active = true;
	}
}

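/*
 * A range invalidation is a two-step sequence: _add_flush_nosync() above
 * programs the range and kicks off the flush, and this sync step polls
 * REG_MMU_CPE_DONE until the HW reports completion, falling back to a
 * full flush on timeout.
 */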
static void mtk_iommu_tlb_sync(void *cookie)
{
	struct mtk_iommu_data *data = cookie;
	int ret;
	u32 tmp;

	for_each_m4u(data) {
		/* Avoid timing out if there's nothing to wait for */
		if (!data->tlb_flush_active)
			return;

		ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE,
						tmp, tmp != 0, 10, 100000);
		if (ret) {
			dev_warn(data->dev,
				 "Partial TLB flush timed out, falling back to full flush\n");
			mtk_iommu_tlb_flush_all(cookie);
		}
		/* Clear the CPE status */
		writel_relaxed(0, data->base + REG_MMU_CPE_DONE);
		data->tlb_flush_active = false;
	}
}

static void mtk_iommu_tlb_flush_walk(unsigned long iova, size_t size,
				     size_t granule, void *cookie)
{
	mtk_iommu_tlb_add_flush_nosync(iova, size, granule, false, cookie);
	mtk_iommu_tlb_sync(cookie);
}

static void mtk_iommu_tlb_flush_leaf(unsigned long iova, size_t size,
				     size_t granule, void *cookie)
{
	mtk_iommu_tlb_add_flush_nosync(iova, size, granule, true, cookie);
	mtk_iommu_tlb_sync(cookie);
}

static void mtk_iommu_tlb_flush_page_nosync(struct iommu_iotlb_gather *gather,
					    unsigned long iova, size_t granule,
					    void *cookie)
{
	mtk_iommu_tlb_add_flush_nosync(iova, granule, granule, true, cookie);
}

static const struct iommu_flush_ops mtk_iommu_flush_ops = {
	.tlb_flush_all = mtk_iommu_tlb_flush_all,
	.tlb_flush_walk = mtk_iommu_tlb_flush_walk,
	.tlb_flush_leaf = mtk_iommu_tlb_flush_leaf,
	.tlb_add_page = mtk_iommu_tlb_flush_page_nosync,
};

static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
{
	struct mtk_iommu_data *data = dev_id;
	struct mtk_iommu_domain *dom = data->m4u_dom;
	u32 int_state, regval, fault_iova, fault_pa;
	unsigned int fault_larb, fault_port;
	bool layer, write;

	/* Read error info from registers */
	int_state = readl_relaxed(data->base + REG_MMU_FAULT_ST1);
	if (int_state & F_REG_MMU0_FAULT_MASK) {
		regval = readl_relaxed(data->base + REG_MMU0_INT_ID);
		fault_iova = readl_relaxed(data->base + REG_MMU0_FAULT_VA);
		fault_pa = readl_relaxed(data->base + REG_MMU0_INVLD_PA);
	} else {
		regval = readl_relaxed(data->base + REG_MMU1_INT_ID);
		fault_iova = readl_relaxed(data->base + REG_MMU1_FAULT_VA);
		fault_pa = readl_relaxed(data->base + REG_MMU1_INVLD_PA);
	}
	layer = fault_iova & F_MMU_FAULT_VA_LAYER_BIT;
	write = fault_iova & F_MMU_FAULT_VA_WRITE_BIT;
	fault_larb = F_MMU_INT_ID_LARB_ID(regval);
	fault_port = F_MMU_INT_ID_PORT_ID(regval);

	fault_larb = data->plat_data->larbid_remap[fault_larb];
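	/*
	 * larbid_remap above translates the HW-reported larb index into the
	 * SW larb id; e.g. on mt8183 (per mt8183_data below) a raw index of
	 * 1 maps to larb 4.
	 */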

	if (report_iommu_fault(&dom->domain, data->dev, fault_iova,
			       write ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ)) {
		dev_err_ratelimited(
			data->dev,
			"fault type=0x%x iova=0x%x pa=0x%x larb=%d port=%d layer=%d %s\n",
			int_state, fault_iova, fault_pa, fault_larb, fault_port,
			layer, write ? "write" : "read");
	}

	/* Interrupt clear */
	regval = readl_relaxed(data->base + REG_MMU_INT_CONTROL0);
	regval |= F_INT_CLR_BIT;
	writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL0);

	mtk_iommu_tlb_flush_all(data);

	return IRQ_HANDLED;
}

static void mtk_iommu_config(struct mtk_iommu_data *data,
			     struct device *dev, bool enable)
{
	struct mtk_smi_larb_iommu *larb_mmu;
	unsigned int larbid, portid;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	int i;

	for (i = 0; i < fwspec->num_ids; ++i) {
		larbid = MTK_M4U_TO_LARB(fwspec->ids[i]);
		portid = MTK_M4U_TO_PORT(fwspec->ids[i]);
		larb_mmu = &data->larb_imu[larbid];

		dev_dbg(dev, "%s iommu port: %d\n",
			enable ? "enable" : "disable", portid);

		if (enable)
			larb_mmu->mmu |= MTK_SMI_MMU_EN(portid);
		else
			larb_mmu->mmu &= ~MTK_SMI_MMU_EN(portid);
	}
}

static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom)
{
	struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();

	spin_lock_init(&dom->pgtlock);

	dom->cfg = (struct io_pgtable_cfg) {
		.quirks = IO_PGTABLE_QUIRK_ARM_NS |
			IO_PGTABLE_QUIRK_NO_PERMS |
			IO_PGTABLE_QUIRK_TLBI_ON_MAP |
			IO_PGTABLE_QUIRK_ARM_MTK_EXT,
		.pgsize_bitmap = mtk_iommu_ops.pgsize_bitmap,
		.ias = 32,
		.oas = 34,
		.tlb = &mtk_iommu_flush_ops,
		.iommu_dev = data->dev,
	};

	dom->iop = alloc_io_pgtable_ops(ARM_V7S, &dom->cfg, data);
	if (!dom->iop) {
		dev_err(data->dev, "Failed to alloc io pgtable\n");
		return -EINVAL;
	}

	/* Update our supported page sizes bitmap */
	dom->domain.pgsize_bitmap = dom->cfg.pgsize_bitmap;
	return 0;
}

static struct iommu_domain *mtk_iommu_domain_alloc(unsigned type)
{
	struct mtk_iommu_domain *dom;

	if (type != IOMMU_DOMAIN_DMA)
		return NULL;

	dom = kzalloc(sizeof(*dom), GFP_KERNEL);
	if (!dom)
		return NULL;

	if (iommu_get_dma_cookie(&dom->domain))
		goto free_dom;

	if (mtk_iommu_domain_finalise(dom))
		goto put_dma_cookie;

	dom->domain.geometry.aperture_start = 0;
	dom->domain.geometry.aperture_end = DMA_BIT_MASK(32);
	dom->domain.geometry.force_aperture = true;

	return &dom->domain;

put_dma_cookie:
	iommu_put_dma_cookie(&dom->domain);
free_dom:
	kfree(dom);
	return NULL;
}

static void mtk_iommu_domain_free(struct iommu_domain *domain)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);

	free_io_pgtable_ops(dom->iop);
	iommu_put_dma_cookie(domain);
	kfree(to_mtk_domain(domain));
}

static int mtk_iommu_attach_device(struct iommu_domain *domain,
				   struct device *dev)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	struct mtk_iommu_data *data = dev_iommu_fwspec_get(dev)->iommu_priv;

	if (!data)
		return -ENODEV;

	/* Update the pgtable base address register of the M4U HW */
	if (!data->m4u_dom) {
		data->m4u_dom = dom;
		writel(dom->cfg.arm_v7s_cfg.ttbr[0] & MMU_PT_ADDR_MASK,
		       data->base + REG_MMU_PT_BASE_ADDR);
	}

	mtk_iommu_config(data, dev, true);
	return 0;
}

static void mtk_iommu_detach_device(struct iommu_domain *domain,
				    struct device *dev)
{
	struct mtk_iommu_data *data = dev_iommu_fwspec_get(dev)->iommu_priv;

	if (!data)
		return;

	mtk_iommu_config(data, dev, false);
}

static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
			 phys_addr_t paddr, size_t size, int prot)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
	unsigned long flags;
	int ret;

	/* The "4GB mode" M4U physically cannot use the lower remap of DRAM. */
	if (data->enable_4GB)
		paddr |= BIT_ULL(32);
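	/*
	 * e.g. (illustrative only) a CPU PA of 0x4000_0000 is programmed as
	 * 0x1_4000_0000; for region 'E' PAs bit 32 is already set, so the
	 * OR is a no-op and the PA is kept as is.
	 */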

	spin_lock_irqsave(&dom->pgtlock, flags);
	ret = dom->iop->map(dom->iop, iova, paddr, size, prot);
	spin_unlock_irqrestore(&dom->pgtlock, flags);

	return ret;
}

static size_t mtk_iommu_unmap(struct iommu_domain *domain,
			      unsigned long iova, size_t size,
			      struct iommu_iotlb_gather *gather)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	unsigned long flags;
	size_t unmapsz;

	spin_lock_irqsave(&dom->pgtlock, flags);
	unmapsz = dom->iop->unmap(dom->iop, iova, size, gather);
	spin_unlock_irqrestore(&dom->pgtlock, flags);

	return unmapsz;
}

static void mtk_iommu_flush_iotlb_all(struct iommu_domain *domain)
{
	mtk_iommu_tlb_sync(mtk_iommu_get_m4u_data());
}

static void mtk_iommu_iotlb_sync(struct iommu_domain *domain,
				 struct iommu_iotlb_gather *gather)
{
	mtk_iommu_tlb_sync(mtk_iommu_get_m4u_data());
}

static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
					  dma_addr_t iova)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
	unsigned long flags;
	phys_addr_t pa;

	spin_lock_irqsave(&dom->pgtlock, flags);
	pa = dom->iop->iova_to_phys(dom->iop, iova);
	spin_unlock_irqrestore(&dom->pgtlock, flags);

	if (data->enable_4GB && pa >= MTK_IOMMU_4GB_MODE_REMAP_BASE)
		pa &= ~BIT_ULL(32);
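	/*
	 * This reverses the bit-32 remap done in mtk_iommu_map(): e.g.
	 * (illustrative only) a page table PA of 0x1_4000_0000 is returned
	 * as CPU PA 0x4000_0000, while a region 'E' PA such as
	 * 0x1_0000_0000 sits below the remap base and is left untouched.
	 */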
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000473
474 return pa;
475}
476
477static int mtk_iommu_add_device(struct device *dev)
478{
David Brazdil0f672f62019-12-10 10:32:29 +0000479 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000480 struct mtk_iommu_data *data;
481 struct iommu_group *group;
482
David Brazdil0f672f62019-12-10 10:32:29 +0000483 if (!fwspec || fwspec->ops != &mtk_iommu_ops)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000484 return -ENODEV; /* Not a iommu client device */
485
David Brazdil0f672f62019-12-10 10:32:29 +0000486 data = fwspec->iommu_priv;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000487 iommu_device_link(&data->iommu, dev);
488
489 group = iommu_group_get_for_dev(dev);
490 if (IS_ERR(group))
491 return PTR_ERR(group);
492
493 iommu_group_put(group);
494 return 0;
495}
496
497static void mtk_iommu_remove_device(struct device *dev)
498{
David Brazdil0f672f62019-12-10 10:32:29 +0000499 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000500 struct mtk_iommu_data *data;
501
David Brazdil0f672f62019-12-10 10:32:29 +0000502 if (!fwspec || fwspec->ops != &mtk_iommu_ops)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000503 return;
504
David Brazdil0f672f62019-12-10 10:32:29 +0000505 data = fwspec->iommu_priv;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000506 iommu_device_unlink(&data->iommu, dev);
507
508 iommu_group_remove_device(dev);
509 iommu_fwspec_free(dev);
510}
511
512static struct iommu_group *mtk_iommu_device_group(struct device *dev)
513{
514 struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
515
516 if (!data)
517 return ERR_PTR(-ENODEV);
518
519 /* All the client devices are in the same m4u iommu-group */
520 if (!data->m4u_group) {
521 data->m4u_group = iommu_group_alloc();
522 if (IS_ERR(data->m4u_group))
523 dev_err(dev, "Failed to allocate M4U IOMMU group\n");
524 } else {
525 iommu_group_ref_get(data->m4u_group);
526 }
527 return data->m4u_group;
528}
529
530static int mtk_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
531{
David Brazdil0f672f62019-12-10 10:32:29 +0000532 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000533 struct platform_device *m4updev;
534
535 if (args->args_count != 1) {
536 dev_err(dev, "invalid #iommu-cells(%d) property for IOMMU\n",
537 args->args_count);
538 return -EINVAL;
539 }
540
David Brazdil0f672f62019-12-10 10:32:29 +0000541 if (!fwspec->iommu_priv) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000542 /* Get the m4u device */
543 m4updev = of_find_device_by_node(args->np);
544 if (WARN_ON(!m4updev))
545 return -EINVAL;
546
David Brazdil0f672f62019-12-10 10:32:29 +0000547 fwspec->iommu_priv = platform_get_drvdata(m4updev);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000548 }
549
550 return iommu_fwspec_add_ids(dev, args->args, 1);
551}
552
static const struct iommu_ops mtk_iommu_ops = {
	.domain_alloc	= mtk_iommu_domain_alloc,
	.domain_free	= mtk_iommu_domain_free,
	.attach_dev	= mtk_iommu_attach_device,
	.detach_dev	= mtk_iommu_detach_device,
	.map		= mtk_iommu_map,
	.unmap		= mtk_iommu_unmap,
	.flush_iotlb_all = mtk_iommu_flush_iotlb_all,
	.iotlb_sync	= mtk_iommu_iotlb_sync,
	.iova_to_phys	= mtk_iommu_iova_to_phys,
	.add_device	= mtk_iommu_add_device,
	.remove_device	= mtk_iommu_remove_device,
	.device_group	= mtk_iommu_device_group,
	.of_xlate	= mtk_iommu_of_xlate,
	.pgsize_bitmap	= SZ_4K | SZ_64K | SZ_1M | SZ_16M,
};

static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
{
	u32 regval;
	int ret;

	ret = clk_prepare_enable(data->bclk);
	if (ret) {
		dev_err(data->dev, "Failed to enable iommu bclk(%d)\n", ret);
		return ret;
	}

	if (data->plat_data->m4u_plat == M4U_MT8173)
		regval = F_MMU_PREFETCH_RT_REPLACE_MOD |
			 F_MMU_TF_PROT_TO_PROGRAM_ADDR_MT8173;
	else
		regval = F_MMU_TF_PROT_TO_PROGRAM_ADDR;
	writel_relaxed(regval, data->base + REG_MMU_CTRL_REG);

	regval = F_L2_MULIT_HIT_EN |
		F_TABLE_WALK_FAULT_INT_EN |
		F_PREETCH_FIFO_OVERFLOW_INT_EN |
		F_MISS_FIFO_OVERFLOW_INT_EN |
		F_PREFETCH_FIFO_ERR_INT_EN |
		F_MISS_FIFO_ERR_INT_EN;
	writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL0);

	regval = F_INT_TRANSLATION_FAULT |
		F_INT_MAIN_MULTI_HIT_FAULT |
		F_INT_INVALID_PA_FAULT |
		F_INT_ENTRY_REPLACEMENT_FAULT |
		F_INT_TLB_MISS_FAULT |
		F_INT_MISS_TRANSACTION_FIFO_FAULT |
		F_INT_PRETETCH_TRANSATION_FIFO_FAULT;
	writel_relaxed(regval, data->base + REG_MMU_INT_MAIN_CONTROL);

	if (data->plat_data->m4u_plat == M4U_MT8173)
		regval = (data->protect_base >> 1) | (data->enable_4GB << 31);
	else
		regval = lower_32_bits(data->protect_base) |
			 upper_32_bits(data->protect_base);
	writel_relaxed(regval, data->base + REG_MMU_IVRP_PADDR);

	if (data->enable_4GB && data->plat_data->has_vld_pa_rng) {
		/*
		 * If 4GB mode is enabled, the valid PA range is from
		 * 0x1_0000_0000 to 0x1_ffff_ffff; record bits [32:30] here.
		 */
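		/*
		 * F_MMU_VLD_PA_RNG(7, 4) thus encodes end = 0b111 (bits
		 * [32:30] of 0x1_ffff_ffff) and start = 0b100 (bits [32:30]
		 * of 0x1_0000_0000), i.e. regval = (7 << 8) | 4 = 0x704.
		 */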
		regval = F_MMU_VLD_PA_RNG(7, 4);
		writel_relaxed(regval, data->base + REG_MMU_VLD_PA_RNG);
	}
	writel_relaxed(0, data->base + REG_MMU_DCM_DIS);

	if (data->plat_data->reset_axi)
		writel_relaxed(0, data->base + REG_MMU_STANDARD_AXI_MODE);

	if (devm_request_irq(data->dev, data->irq, mtk_iommu_isr, 0,
			     dev_name(data->dev), (void *)data)) {
		writel_relaxed(0, data->base + REG_MMU_PT_BASE_ADDR);
		clk_disable_unprepare(data->bclk);
		dev_err(data->dev, "Failed @ IRQ-%d Request\n", data->irq);
		return -ENODEV;
	}

	return 0;
}

static const struct component_master_ops mtk_iommu_com_ops = {
	.bind		= mtk_iommu_bind,
	.unbind		= mtk_iommu_unbind,
};

static int mtk_iommu_probe(struct platform_device *pdev)
{
	struct mtk_iommu_data *data;
	struct device *dev = &pdev->dev;
	struct resource *res;
	resource_size_t ioaddr;
	struct component_match *match = NULL;
	void *protect;
	int i, larb_nr, ret;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	data->dev = dev;
	data->plat_data = of_device_get_match_data(dev);

	/* Protect memory. HW will access here on a translation fault. */
	protect = devm_kzalloc(dev, MTK_PROTECT_PA_ALIGN * 2, GFP_KERNEL);
	if (!protect)
		return -ENOMEM;
	data->protect_base = ALIGN(virt_to_phys(protect), MTK_PROTECT_PA_ALIGN);
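	/*
	 * Allocating MTK_PROTECT_PA_ALIGN * 2 bytes guarantees that a
	 * properly aligned 128-byte window exists inside the buffer no
	 * matter where the allocation itself lands.
	 */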

	/* Whether the current DRAM is over 4GB */
	data->enable_4GB = !!(max_pfn > (BIT_ULL(32) >> PAGE_SHIFT));
	if (!data->plat_data->has_4gb_mode)
		data->enable_4GB = false;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	data->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(data->base))
		return PTR_ERR(data->base);
	ioaddr = res->start;

	data->irq = platform_get_irq(pdev, 0);
	if (data->irq < 0)
		return data->irq;

	if (data->plat_data->has_bclk) {
		data->bclk = devm_clk_get(dev, "bclk");
		if (IS_ERR(data->bclk))
			return PTR_ERR(data->bclk);
	}

	larb_nr = of_count_phandle_with_args(dev->of_node,
					     "mediatek,larbs", NULL);
	if (larb_nr < 0)
		return larb_nr;

	for (i = 0; i < larb_nr; i++) {
		struct device_node *larbnode;
		struct platform_device *plarbdev;
		u32 id;

		larbnode = of_parse_phandle(dev->of_node, "mediatek,larbs", i);
		if (!larbnode)
			return -EINVAL;

		if (!of_device_is_available(larbnode)) {
			of_node_put(larbnode);
			continue;
		}

		ret = of_property_read_u32(larbnode, "mediatek,larb-id", &id);
		if (ret) /* The id is consecutive if this property is absent */
			id = i;

		plarbdev = of_find_device_by_node(larbnode);
		if (!plarbdev) {
			of_node_put(larbnode);
			return -EPROBE_DEFER;
		}
		data->larb_imu[id].dev = &plarbdev->dev;

		component_match_add_release(dev, &match, release_of,
					    compare_of, larbnode);
	}

	platform_set_drvdata(pdev, data);

	ret = mtk_iommu_hw_init(data);
	if (ret)
		return ret;

	ret = iommu_device_sysfs_add(&data->iommu, dev, NULL,
				     "mtk-iommu.%pa", &ioaddr);
	if (ret)
		return ret;

	iommu_device_set_ops(&data->iommu, &mtk_iommu_ops);
	iommu_device_set_fwnode(&data->iommu, &pdev->dev.of_node->fwnode);

	ret = iommu_device_register(&data->iommu);
	if (ret)
		return ret;

	list_add_tail(&data->list, &m4ulist);

	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &mtk_iommu_ops);

	return component_master_add_with_match(dev, &mtk_iommu_com_ops, match);
}

static int mtk_iommu_remove(struct platform_device *pdev)
{
	struct mtk_iommu_data *data = platform_get_drvdata(pdev);

	iommu_device_sysfs_remove(&data->iommu);
	iommu_device_unregister(&data->iommu);

	if (iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, NULL);

	clk_disable_unprepare(data->bclk);
	devm_free_irq(&pdev->dev, data->irq, data);
	component_master_del(&pdev->dev, &mtk_iommu_com_ops);
	return 0;
}

static int __maybe_unused mtk_iommu_suspend(struct device *dev)
{
	struct mtk_iommu_data *data = dev_get_drvdata(dev);
	struct mtk_iommu_suspend_reg *reg = &data->reg;
	void __iomem *base = data->base;

	reg->standard_axi_mode = readl_relaxed(base +
					       REG_MMU_STANDARD_AXI_MODE);
	reg->dcm_dis = readl_relaxed(base + REG_MMU_DCM_DIS);
	reg->ctrl_reg = readl_relaxed(base + REG_MMU_CTRL_REG);
	reg->int_control0 = readl_relaxed(base + REG_MMU_INT_CONTROL0);
	reg->int_main_control = readl_relaxed(base + REG_MMU_INT_MAIN_CONTROL);
	reg->ivrp_paddr = readl_relaxed(base + REG_MMU_IVRP_PADDR);
	reg->vld_pa_rng = readl_relaxed(base + REG_MMU_VLD_PA_RNG);
	clk_disable_unprepare(data->bclk);
	return 0;
}

static int __maybe_unused mtk_iommu_resume(struct device *dev)
{
	struct mtk_iommu_data *data = dev_get_drvdata(dev);
	struct mtk_iommu_suspend_reg *reg = &data->reg;
	struct mtk_iommu_domain *m4u_dom = data->m4u_dom;
	void __iomem *base = data->base;
	int ret;

	ret = clk_prepare_enable(data->bclk);
	if (ret) {
		dev_err(data->dev, "Failed to enable clk(%d) in resume\n", ret);
		return ret;
	}
	writel_relaxed(reg->standard_axi_mode,
		       base + REG_MMU_STANDARD_AXI_MODE);
	writel_relaxed(reg->dcm_dis, base + REG_MMU_DCM_DIS);
	writel_relaxed(reg->ctrl_reg, base + REG_MMU_CTRL_REG);
	writel_relaxed(reg->int_control0, base + REG_MMU_INT_CONTROL0);
	writel_relaxed(reg->int_main_control, base + REG_MMU_INT_MAIN_CONTROL);
	writel_relaxed(reg->ivrp_paddr, base + REG_MMU_IVRP_PADDR);
	writel_relaxed(reg->vld_pa_rng, base + REG_MMU_VLD_PA_RNG);
	if (m4u_dom)
		writel(m4u_dom->cfg.arm_v7s_cfg.ttbr[0] & MMU_PT_ADDR_MASK,
		       base + REG_MMU_PT_BASE_ADDR);
	return 0;
}

static const struct dev_pm_ops mtk_iommu_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_iommu_suspend, mtk_iommu_resume)
};

static const struct mtk_iommu_plat_data mt2712_data = {
	.m4u_plat       = M4U_MT2712,
	.has_4gb_mode   = true,
	.has_bclk       = true,
	.has_vld_pa_rng = true,
	.larbid_remap   = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
};

static const struct mtk_iommu_plat_data mt8173_data = {
	.m4u_plat     = M4U_MT8173,
	.has_4gb_mode = true,
	.has_bclk     = true,
	.reset_axi    = true,
	.larbid_remap = {0, 1, 2, 3, 4, 5}, /* Linear mapping. */
};

static const struct mtk_iommu_plat_data mt8183_data = {
	.m4u_plat     = M4U_MT8183,
	.reset_axi    = true,
	.larbid_remap = {0, 4, 5, 6, 7, 2, 3, 1},
};

static const struct of_device_id mtk_iommu_of_ids[] = {
	{ .compatible = "mediatek,mt2712-m4u", .data = &mt2712_data},
	{ .compatible = "mediatek,mt8173-m4u", .data = &mt8173_data},
	{ .compatible = "mediatek,mt8183-m4u", .data = &mt8183_data},
	{}
};

static struct platform_driver mtk_iommu_driver = {
	.probe	= mtk_iommu_probe,
	.remove	= mtk_iommu_remove,
	.driver	= {
		.name = "mtk-iommu",
		.of_match_table = of_match_ptr(mtk_iommu_of_ids),
		.pm = &mtk_iommu_pm_ops,
	}
};

static int __init mtk_iommu_init(void)
{
	int ret;

	ret = platform_driver_register(&mtk_iommu_driver);
	if (ret != 0)
		pr_err("Failed to register MTK IOMMU driver\n");

	return ret;
}

subsys_initcall(mtk_iommu_init)