// SPDX-License-Identifier: GPL-2.0-only
/*
 * omap iommu: tlb and pagetable primitives
 *
 * Copyright (C) 2008-2010 Nokia Corporation
 * Copyright (C) 2013-2017 Texas Instruments Incorporated - https://www.ti.com/
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
 *		Paul Mundt and Toshihiro Kobayashi
 */

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/iommu.h>
#include <linux/omap-iommu.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_iommu.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>

#include <linux/platform_data/iommu-omap.h>

#include "omap-iopgtable.h"
#include "omap-iommu.h"

static const struct iommu_ops omap_iommu_ops;

#define to_iommu(dev)	((struct omap_iommu *)dev_get_drvdata(dev))

/* bitmap of the page sizes currently supported */
#define OMAP_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)

#define MMU_LOCK_BASE_SHIFT	10
#define MMU_LOCK_BASE_MASK	(0x1f << MMU_LOCK_BASE_SHIFT)
#define MMU_LOCK_BASE(x)	\
	((x & MMU_LOCK_BASE_MASK) >> MMU_LOCK_BASE_SHIFT)

#define MMU_LOCK_VICT_SHIFT	4
#define MMU_LOCK_VICT_MASK	(0x1f << MMU_LOCK_VICT_SHIFT)
#define MMU_LOCK_VICT(x)	\
	((x & MMU_LOCK_VICT_MASK) >> MMU_LOCK_VICT_SHIFT)

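/*
 * A worked example of the MMU_LOCK field packing above (illustrative
 * values only, not driver code): with base = 3 and victim = 5, the
 * MMU_LOCK register value is (3 << 10) | (5 << 4) = 0xc50, and the
 * extractors recover the fields: MMU_LOCK_BASE(0xc50) == 3,
 * MMU_LOCK_VICT(0xc50) == 5.
 */
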
static struct platform_driver omap_iommu_driver;
static struct kmem_cache *iopte_cachep;

/**
 * to_omap_domain - Get struct omap_iommu_domain from generic iommu_domain
 * @dom: generic iommu domain handle
 **/
static struct omap_iommu_domain *to_omap_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct omap_iommu_domain, domain);
}

/**
 * omap_iommu_save_ctx - Save registers for pm off-mode support
 * @dev: client device
 *
 * This should be treated as a deprecated API. It is preserved only
 * to maintain existing functionality for the OMAP3 ISP driver.
 **/
void omap_iommu_save_ctx(struct device *dev)
{
	struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
	struct omap_iommu *obj;
	u32 *p;
	int i;

	if (!arch_data)
		return;

	while (arch_data->iommu_dev) {
		obj = arch_data->iommu_dev;
		p = obj->ctx;
		for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
			p[i] = iommu_read_reg(obj, i * sizeof(u32));
			dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i,
				p[i]);
		}
		arch_data++;
	}
}
EXPORT_SYMBOL_GPL(omap_iommu_save_ctx);

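/*
 * A minimal usage sketch (hypothetical client code, not part of this
 * driver): a client such as the OMAP3 ISP driver pairs the two calls
 * around an off-mode transition of the MMUs it controls:
 *
 *	omap_iommu_save_ctx(isp->dev);
 *	... power domain transitions through off-mode ...
 *	omap_iommu_restore_ctx(isp->dev);
 *
 * 'isp' here is an assumed client structure; only the save/restore
 * ordering is significant.
 */
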
/**
 * omap_iommu_restore_ctx - Restore registers for pm off-mode support
 * @dev: client device
 *
 * This should be treated as a deprecated API. It is preserved only
 * to maintain existing functionality for the OMAP3 ISP driver.
 **/
void omap_iommu_restore_ctx(struct device *dev)
{
	struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
	struct omap_iommu *obj;
	u32 *p;
	int i;

	if (!arch_data)
		return;

	while (arch_data->iommu_dev) {
		obj = arch_data->iommu_dev;
		p = obj->ctx;
		for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
			iommu_write_reg(obj, p[i], i * sizeof(u32));
			dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i,
				p[i]);
		}
		arch_data++;
	}
}
EXPORT_SYMBOL_GPL(omap_iommu_restore_ctx);

static void dra7_cfg_dspsys_mmu(struct omap_iommu *obj, bool enable)
{
	u32 val, mask;

	if (!obj->syscfg)
		return;

	mask = (1 << (obj->id * DSP_SYS_MMU_CONFIG_EN_SHIFT));
	val = enable ? mask : 0;
	regmap_update_bits(obj->syscfg, DSP_SYS_MMU_CONFIG, mask, val);
}

static void __iommu_set_twl(struct omap_iommu *obj, bool on)
{
	u32 l = iommu_read_reg(obj, MMU_CNTL);

	if (on)
		iommu_write_reg(obj, MMU_IRQ_TWL_MASK, MMU_IRQENABLE);
	else
		iommu_write_reg(obj, MMU_IRQ_TLB_MISS_MASK, MMU_IRQENABLE);

	l &= ~MMU_CNTL_MASK;
	if (on)
		l |= (MMU_CNTL_MMU_EN | MMU_CNTL_TWL_EN);
	else
		l |= (MMU_CNTL_MMU_EN);

	iommu_write_reg(obj, l, MMU_CNTL);
}

static int omap2_iommu_enable(struct omap_iommu *obj)
{
	u32 l, pa;

	if (!obj->iopgd || !IS_ALIGNED((unsigned long)obj->iopgd, SZ_16K))
		return -EINVAL;

	pa = virt_to_phys(obj->iopgd);
	if (!IS_ALIGNED(pa, SZ_16K))
		return -EINVAL;

	l = iommu_read_reg(obj, MMU_REVISION);
	dev_info(obj->dev, "%s: version %d.%d\n", obj->name,
		 (l >> 4) & 0xf, l & 0xf);

	iommu_write_reg(obj, pa, MMU_TTB);

	dra7_cfg_dspsys_mmu(obj, true);

	if (obj->has_bus_err_back)
		iommu_write_reg(obj, MMU_GP_REG_BUS_ERR_BACK_EN, MMU_GP_REG);

	__iommu_set_twl(obj, true);

	return 0;
}

static void omap2_iommu_disable(struct omap_iommu *obj)
{
	u32 l = iommu_read_reg(obj, MMU_CNTL);

	l &= ~MMU_CNTL_MASK;
	iommu_write_reg(obj, l, MMU_CNTL);
	dra7_cfg_dspsys_mmu(obj, false);

	dev_dbg(obj->dev, "%s is shutting down\n", obj->name);
}

static int iommu_enable(struct omap_iommu *obj)
{
	int ret;

	ret = pm_runtime_get_sync(obj->dev);
	if (ret < 0)
		pm_runtime_put_noidle(obj->dev);

	return ret < 0 ? ret : 0;
}

static void iommu_disable(struct omap_iommu *obj)
{
	pm_runtime_put_sync(obj->dev);
}

/*
 * TLB operations
 */
static u32 iotlb_cr_to_virt(struct cr_regs *cr)
{
	u32 page_size = cr->cam & MMU_CAM_PGSZ_MASK;
	u32 mask = get_cam_va_mask(cr->cam & page_size);

	return cr->cam & mask;
}

static u32 get_iopte_attr(struct iotlb_entry *e)
{
	u32 attr;

	attr = e->mixed << 5;
	attr |= e->endian;
	attr |= e->elsz >> 3;
	attr <<= (((e->pgsz == MMU_CAM_PGSZ_4K) ||
			(e->pgsz == MMU_CAM_PGSZ_64K)) ? 0 : 6);
	return attr;
}
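
/*
 * An illustrative walk through get_iopte_attr() above (example values,
 * not driver code): for a non-mixed entry with
 * endian = MMU_RAM_ENDIAN_LITTLE and elsz = MMU_RAM_ELSZ_8, the three
 * OR steps assemble the endianness/element-size bits, and the final
 * shift places them: left unshifted for 4K/64K entries, whose
 * attribute bits live in the low PTE positions, or shifted up by 6
 * for 1M/16M entries, whose attribute bits sit higher in the
 * section/supersection descriptor. The exact bit values come from the
 * MMU_RAM_* definitions in omap-iommu.h.
 */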

static u32 iommu_report_fault(struct omap_iommu *obj, u32 *da)
{
	u32 status, fault_addr;

	status = iommu_read_reg(obj, MMU_IRQSTATUS);
	status &= MMU_IRQ_MASK;
	if (!status) {
		*da = 0;
		return 0;
	}

	fault_addr = iommu_read_reg(obj, MMU_FAULT_AD);
	*da = fault_addr;

	iommu_write_reg(obj, status, MMU_IRQSTATUS);

	return status;
}

void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l)
{
	u32 val;

	val = iommu_read_reg(obj, MMU_LOCK);

	l->base = MMU_LOCK_BASE(val);
	l->vict = MMU_LOCK_VICT(val);
}

void iotlb_lock_set(struct omap_iommu *obj, struct iotlb_lock *l)
{
	u32 val;

	val = (l->base << MMU_LOCK_BASE_SHIFT);
	val |= (l->vict << MMU_LOCK_VICT_SHIFT);

	iommu_write_reg(obj, val, MMU_LOCK);
}

static void iotlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
	cr->cam = iommu_read_reg(obj, MMU_READ_CAM);
	cr->ram = iommu_read_reg(obj, MMU_READ_RAM);
}

static void iotlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
	iommu_write_reg(obj, cr->cam | MMU_CAM_V, MMU_CAM);
	iommu_write_reg(obj, cr->ram, MMU_RAM);

	iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
	iommu_write_reg(obj, 1, MMU_LD_TLB);
}

/* only used in iotlb iteration for-loop */
struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n)
{
	struct cr_regs cr;
	struct iotlb_lock l;

	iotlb_lock_get(obj, &l);
	l.vict = n;
	iotlb_lock_set(obj, &l);
	iotlb_read_cr(obj, &cr);

	return cr;
}

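/*
 * A minimal iteration sketch (illustrative, mirroring what the
 * for_each_iotlb_cr() helper in omap-iommu.h does with
 * __iotlb_read_cr() above): each pass points the victim register at
 * entry 'i' and reads back that entry's CAM/RAM pair.
 *
 *	struct cr_regs cr;
 *	int i;
 *
 *	for (i = 0; i < obj->nr_tlb_entries; i++) {
 *		cr = __iotlb_read_cr(obj, i);
 *		if (!iotlb_cr_valid(&cr))
 *			continue;
 *		... inspect cr.cam / cr.ram ...
 *	}
 */
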
#ifdef PREFETCH_IOTLB
static struct cr_regs *iotlb_alloc_cr(struct omap_iommu *obj,
				      struct iotlb_entry *e)
{
	struct cr_regs *cr;

	if (!e)
		return NULL;

	if (e->da & ~(get_cam_va_mask(e->pgsz))) {
		dev_err(obj->dev, "%s:\twrong alignment: %08x\n", __func__,
			e->da);
		return ERR_PTR(-EINVAL);
	}

	cr = kmalloc(sizeof(*cr), GFP_KERNEL);
	if (!cr)
		return ERR_PTR(-ENOMEM);

	cr->cam = (e->da & MMU_CAM_VATAG_MASK) | e->prsvd | e->pgsz | e->valid;
	cr->ram = e->pa | e->endian | e->elsz | e->mixed;

	return cr;
}

/**
 * load_iotlb_entry - Set an iommu tlb entry
 * @obj: target iommu
 * @e: an iommu tlb entry info
 **/
static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int err = 0;
	struct iotlb_lock l;
	struct cr_regs *cr;

	if (!obj || !obj->nr_tlb_entries || !e)
		return -EINVAL;

	pm_runtime_get_sync(obj->dev);

	iotlb_lock_get(obj, &l);
	if (l.base == obj->nr_tlb_entries) {
		dev_warn(obj->dev, "%s: preserve entries full\n", __func__);
		err = -EBUSY;
		goto out;
	}
	if (!e->prsvd) {
		int i;
		struct cr_regs tmp;

		for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, tmp)
			if (!iotlb_cr_valid(&tmp))
				break;

		if (i == obj->nr_tlb_entries) {
			dev_dbg(obj->dev, "%s: full: no entry\n", __func__);
			err = -EBUSY;
			goto out;
		}

		iotlb_lock_get(obj, &l);
	} else {
		l.vict = l.base;
		iotlb_lock_set(obj, &l);
	}

	cr = iotlb_alloc_cr(obj, e);
	if (IS_ERR(cr)) {
		pm_runtime_put_sync(obj->dev);
		return PTR_ERR(cr);
	}

	iotlb_load_cr(obj, cr);
	kfree(cr);

	if (e->prsvd)
		l.base++;
	/* increment victim for next tlb load */
	if (++l.vict == obj->nr_tlb_entries)
		l.vict = l.base;
	iotlb_lock_set(obj, &l);
out:
	pm_runtime_put_sync(obj->dev);
	return err;
}

#else /* !PREFETCH_IOTLB */

static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	return 0;
}

#endif /* !PREFETCH_IOTLB */

static int prefetch_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	return load_iotlb_entry(obj, e);
}

/**
 * flush_iotlb_page - Clear an iommu tlb entry
 * @obj: target iommu
 * @da: iommu device virtual address
 *
 * Clear an iommu tlb entry which includes 'da' address.
 **/
static void flush_iotlb_page(struct omap_iommu *obj, u32 da)
{
	int i;
	struct cr_regs cr;

	pm_runtime_get_sync(obj->dev);

	for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) {
		u32 start;
		size_t bytes;

		if (!iotlb_cr_valid(&cr))
			continue;

		start = iotlb_cr_to_virt(&cr);
		bytes = iopgsz_to_bytes(cr.cam & 3);

		if ((start <= da) && (da < start + bytes)) {
			dev_dbg(obj->dev, "%s: %08x<=%08x(%zx)\n",
				__func__, start, da, bytes);
			iotlb_load_cr(obj, &cr);
			iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
			break;
		}
	}
	pm_runtime_put_sync(obj->dev);

	if (i == obj->nr_tlb_entries)
		dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da);
}

/**
 * flush_iotlb_all - Clear all iommu tlb entries
 * @obj: target iommu
 **/
static void flush_iotlb_all(struct omap_iommu *obj)
{
	struct iotlb_lock l;

	pm_runtime_get_sync(obj->dev);

	l.base = 0;
	l.vict = 0;
	iotlb_lock_set(obj, &l);

	iommu_write_reg(obj, 1, MMU_GFLUSH);

	pm_runtime_put_sync(obj->dev);
}

/*
 * H/W pagetable operations
 */
static void flush_iopte_range(struct device *dev, dma_addr_t dma,
			      unsigned long offset, int num_entries)
{
	size_t size = num_entries * sizeof(u32);

	dma_sync_single_range_for_device(dev, dma, offset, size, DMA_TO_DEVICE);
}

static void iopte_free(struct omap_iommu *obj, u32 *iopte, bool dma_valid)
{
	dma_addr_t pt_dma;

	/* Note: freed ioptes must be clean, ready for re-use */
	if (iopte) {
		if (dma_valid) {
			pt_dma = virt_to_phys(iopte);
			dma_unmap_single(obj->dev, pt_dma, IOPTE_TABLE_SIZE,
					 DMA_TO_DEVICE);
		}

		kmem_cache_free(iopte_cachep, iopte);
	}
}

static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd,
			dma_addr_t *pt_dma, u32 da)
{
	u32 *iopte;
	unsigned long offset = iopgd_index(da) * sizeof(da);

	/* a page table for this pgd entry already exists */
	if (*iopgd)
		goto pte_ready;

	/*
	 * do the allocation outside the page table lock
	 */
	spin_unlock(&obj->page_table_lock);
	iopte = kmem_cache_zalloc(iopte_cachep, GFP_KERNEL);
	spin_lock(&obj->page_table_lock);

	if (!*iopgd) {
		if (!iopte)
			return ERR_PTR(-ENOMEM);

		*pt_dma = dma_map_single(obj->dev, iopte, IOPTE_TABLE_SIZE,
					 DMA_TO_DEVICE);
		if (dma_mapping_error(obj->dev, *pt_dma)) {
			dev_err(obj->dev, "DMA map error for L2 table\n");
			iopte_free(obj, iopte, false);
			return ERR_PTR(-ENOMEM);
		}

		/*
		 * we rely on dma address and the physical address to be
		 * the same for mapping the L2 table
		 */
		if (WARN_ON(*pt_dma != virt_to_phys(iopte))) {
			dev_err(obj->dev, "DMA translation error for L2 table\n");
			dma_unmap_single(obj->dev, *pt_dma, IOPTE_TABLE_SIZE,
					 DMA_TO_DEVICE);
			iopte_free(obj, iopte, false);
			return ERR_PTR(-ENOMEM);
		}

		*iopgd = virt_to_phys(iopte) | IOPGD_TABLE;

		flush_iopte_range(obj->dev, obj->pd_dma, offset, 1);
		dev_vdbg(obj->dev, "%s: a new pte:%p\n", __func__, iopte);
	} else {
		/* We raced; free the redundant table */
		iopte_free(obj, iopte, false);
	}

pte_ready:
	iopte = iopte_offset(iopgd, da);
	*pt_dma = iopgd_page_paddr(iopgd);
	dev_vdbg(obj->dev,
		 "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
		 __func__, da, iopgd, *iopgd, iopte, *iopte);

	return iopte;
}

static int iopgd_alloc_section(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	unsigned long offset = iopgd_index(da) * sizeof(da);

	if ((da | pa) & ~IOSECTION_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOSECTION_SIZE);
		return -EINVAL;
	}

	*iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION;
	flush_iopte_range(obj->dev, obj->pd_dma, offset, 1);
	return 0;
}

static int iopgd_alloc_super(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	unsigned long offset = iopgd_index(da) * sizeof(da);
	int i;

	if ((da | pa) & ~IOSUPER_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOSUPER_SIZE);
		return -EINVAL;
	}

	for (i = 0; i < 16; i++)
		*(iopgd + i) = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER;
	flush_iopte_range(obj->dev, obj->pd_dma, offset, 16);
	return 0;
}

static int iopte_alloc_page(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	dma_addr_t pt_dma;
	u32 *iopte = iopte_alloc(obj, iopgd, &pt_dma, da);
	unsigned long offset = iopte_index(da) * sizeof(da);

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	*iopte = (pa & IOPAGE_MASK) | prot | IOPTE_SMALL;
	flush_iopte_range(obj->dev, pt_dma, offset, 1);

	dev_vdbg(obj->dev, "%s: da:%08x pa:%08x pte:%p *pte:%08x\n",
		 __func__, da, pa, iopte, *iopte);

	return 0;
}

static int iopte_alloc_large(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	dma_addr_t pt_dma;
	u32 *iopte = iopte_alloc(obj, iopgd, &pt_dma, da);
	unsigned long offset = iopte_index(da) * sizeof(da);
	int i;

	if ((da | pa) & ~IOLARGE_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOLARGE_SIZE);
		return -EINVAL;
	}

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	for (i = 0; i < 16; i++)
		*(iopte + i) = (pa & IOLARGE_MASK) | prot | IOPTE_LARGE;
	flush_iopte_range(obj->dev, pt_dma, offset, 16);
	return 0;
}

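/*
 * A worked example of the 16-entry replication above (illustrative
 * values only): ARM-style 16MB supersections and 64KB large pages are
 * described by 16 identical consecutive descriptors, because the four
 * index bits below the supersection/large-page boundary still vary
 * across the mapped range. For a 16MB mapping at da = 0x41000000,
 * iopgd_alloc_super() fills pgd slots 0x410 through 0x41f with the
 * same IOPGD_SUPER descriptor, so any da in [0x41000000, 0x41ffffff]
 * indexes a valid entry.
 */
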
static int
iopgtable_store_entry_core(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int (*fn)(struct omap_iommu *, u32, u32, u32);
	u32 prot;
	int err;

	if (!obj || !e)
		return -EINVAL;

	switch (e->pgsz) {
	case MMU_CAM_PGSZ_16M:
		fn = iopgd_alloc_super;
		break;
	case MMU_CAM_PGSZ_1M:
		fn = iopgd_alloc_section;
		break;
	case MMU_CAM_PGSZ_64K:
		fn = iopte_alloc_large;
		break;
	case MMU_CAM_PGSZ_4K:
		fn = iopte_alloc_page;
		break;
	default:
		fn = NULL;
		break;
	}

	if (WARN_ON(!fn))
		return -EINVAL;

	prot = get_iopte_attr(e);

	spin_lock(&obj->page_table_lock);
	err = fn(obj, e->da, e->pa, prot);
	spin_unlock(&obj->page_table_lock);

	return err;
}

/**
 * omap_iopgtable_store_entry - Make an iommu pte entry
 * @obj: target iommu
 * @e: an iommu tlb entry info
 **/
static int
omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int err;

	flush_iotlb_page(obj, e->da);
	err = iopgtable_store_entry_core(obj, e);
	if (!err)
		prefetch_iotlb_entry(obj, e);
	return err;
}

/**
 * iopgtable_lookup_entry - Lookup an iommu pte entry
 * @obj: target iommu
 * @da: iommu device virtual address
 * @ppgd: iommu pgd entry pointer to be returned
 * @ppte: iommu pte entry pointer to be returned
 **/
static void
iopgtable_lookup_entry(struct omap_iommu *obj, u32 da, u32 **ppgd, u32 **ppte)
{
	u32 *iopgd, *iopte = NULL;

	iopgd = iopgd_offset(obj, da);
	if (!*iopgd)
		goto out;

	if (iopgd_is_table(*iopgd))
		iopte = iopte_offset(iopgd, da);
out:
	*ppgd = iopgd;
	*ppte = iopte;
}

static size_t iopgtable_clear_entry_core(struct omap_iommu *obj, u32 da)
{
	size_t bytes;
	u32 *iopgd = iopgd_offset(obj, da);
	int nent = 1;
	dma_addr_t pt_dma;
	unsigned long pd_offset = iopgd_index(da) * sizeof(da);
	unsigned long pt_offset = iopte_index(da) * sizeof(da);

	if (!*iopgd)
		return 0;

	if (iopgd_is_table(*iopgd)) {
		int i;
		u32 *iopte = iopte_offset(iopgd, da);

		bytes = IOPTE_SIZE;
		if (*iopte & IOPTE_LARGE) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopte = iopte_offset(iopgd, (da & IOLARGE_MASK));
		}
		bytes *= nent;
		memset(iopte, 0, nent * sizeof(*iopte));
		pt_dma = iopgd_page_paddr(iopgd);
		flush_iopte_range(obj->dev, pt_dma, pt_offset, nent);

		/*
		 * do table walk to check if this table is necessary or not
		 */
		iopte = iopte_offset(iopgd, 0);
		for (i = 0; i < PTRS_PER_IOPTE; i++)
			if (iopte[i])
				goto out;

		iopte_free(obj, iopte, true);
		nent = 1; /* for the next L1 entry */
	} else {
		bytes = IOPGD_SIZE;
		if ((*iopgd & IOPGD_SUPER) == IOPGD_SUPER) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopgd = iopgd_offset(obj, (da & IOSUPER_MASK));
		}
		bytes *= nent;
	}
	memset(iopgd, 0, nent * sizeof(*iopgd));
	flush_iopte_range(obj->dev, obj->pd_dma, pd_offset, nent);
out:
	return bytes;
}

/**
 * iopgtable_clear_entry - Remove an iommu pte entry
 * @obj: target iommu
 * @da: iommu device virtual address
 **/
static size_t iopgtable_clear_entry(struct omap_iommu *obj, u32 da)
{
	size_t bytes;

	spin_lock(&obj->page_table_lock);

	bytes = iopgtable_clear_entry_core(obj, da);
	flush_iotlb_page(obj, da);

	spin_unlock(&obj->page_table_lock);

	return bytes;
}

static void iopgtable_clear_entry_all(struct omap_iommu *obj)
{
	unsigned long offset;
	int i;

	spin_lock(&obj->page_table_lock);

	for (i = 0; i < PTRS_PER_IOPGD; i++) {
		u32 da;
		u32 *iopgd;

		da = i << IOPGD_SHIFT;
		iopgd = iopgd_offset(obj, da);
		offset = iopgd_index(da) * sizeof(da);

		if (!*iopgd)
			continue;

		if (iopgd_is_table(*iopgd))
			iopte_free(obj, iopte_offset(iopgd, 0), true);

		*iopgd = 0;
		flush_iopte_range(obj->dev, obj->pd_dma, offset, 1);
	}

	flush_iotlb_all(obj);

	spin_unlock(&obj->page_table_lock);
}

/*
 * Device IOMMU generic operations
 */
static irqreturn_t iommu_fault_handler(int irq, void *data)
{
	u32 da, errs;
	u32 *iopgd, *iopte;
	struct omap_iommu *obj = data;
	struct iommu_domain *domain = obj->domain;
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);

	if (!omap_domain->dev)
		return IRQ_NONE;

	errs = iommu_report_fault(obj, &da);
	if (errs == 0)
		return IRQ_HANDLED;

	/* Fault callback or TLB/PTE Dynamic loading */
	if (!report_iommu_fault(domain, obj->dev, da, 0))
		return IRQ_HANDLED;

	iommu_write_reg(obj, 0, MMU_IRQENABLE);

	iopgd = iopgd_offset(obj, da);

	if (!iopgd_is_table(*iopgd)) {
		dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x\n",
			obj->name, errs, da, iopgd, *iopgd);
		return IRQ_NONE;
	}

	iopte = iopte_offset(iopgd, da);

	dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x pte:0x%p *pte:0x%08x\n",
		obj->name, errs, da, iopgd, *iopgd, iopte, *iopte);

	return IRQ_NONE;
}
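
/*
 * A minimal client-side sketch of the fault hook used above
 * (hypothetical code, not part of this file). report_iommu_fault()
 * invokes a handler installed with iommu_set_fault_handler(); a zero
 * return tells iommu_fault_handler() the fault was dealt with (e.g.
 * by dynamically loading a TLB/PTE entry) and the IRQ is acknowledged:
 *
 *	static int my_fault(struct iommu_domain *dom, struct device *dev,
 *			    unsigned long iova, int flags, void *token)
 *	{
 *		return 0;	(fault handled, suppress the error path)
 *	}
 *
 *	iommu_set_fault_handler(domain, my_fault, token);
 */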

/**
 * omap_iommu_attach() - attach iommu device to an iommu domain
 * @obj: target omap iommu device
 * @iopgd: page table
 **/
static int omap_iommu_attach(struct omap_iommu *obj, u32 *iopgd)
{
	int err;

	spin_lock(&obj->iommu_lock);

	obj->pd_dma = dma_map_single(obj->dev, iopgd, IOPGD_TABLE_SIZE,
				     DMA_TO_DEVICE);
	if (dma_mapping_error(obj->dev, obj->pd_dma)) {
		dev_err(obj->dev, "DMA map error for L1 table\n");
		err = -ENOMEM;
		goto out_err;
	}

	obj->iopgd = iopgd;
	err = iommu_enable(obj);
	if (err)
		goto out_err;
	flush_iotlb_all(obj);

	spin_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);

	return 0;

out_err:
	spin_unlock(&obj->iommu_lock);

	return err;
}

/**
 * omap_iommu_detach - release iommu device
 * @obj: target iommu
 **/
static void omap_iommu_detach(struct omap_iommu *obj)
{
	if (!obj || IS_ERR(obj))
		return;

	spin_lock(&obj->iommu_lock);

	dma_unmap_single(obj->dev, obj->pd_dma, IOPGD_TABLE_SIZE,
			 DMA_TO_DEVICE);
	obj->pd_dma = 0;
	obj->iopgd = NULL;
	iommu_disable(obj);

	spin_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
}

static void omap_iommu_save_tlb_entries(struct omap_iommu *obj)
{
	struct iotlb_lock lock;
	struct cr_regs cr;
	struct cr_regs *tmp;
	int i;

	/* check if there are any locked tlbs to save */
	iotlb_lock_get(obj, &lock);
	obj->num_cr_ctx = lock.base;
	if (!obj->num_cr_ctx)
		return;

	tmp = obj->cr_ctx;
	for_each_iotlb_cr(obj, obj->num_cr_ctx, i, cr)
		*tmp++ = cr;
}

static void omap_iommu_restore_tlb_entries(struct omap_iommu *obj)
{
	struct iotlb_lock l;
	struct cr_regs *tmp;
	int i;

	/* no locked tlbs to restore */
	if (!obj->num_cr_ctx)
		return;

	l.base = 0;
	tmp = obj->cr_ctx;
	for (i = 0; i < obj->num_cr_ctx; i++, tmp++) {
		l.vict = i;
		iotlb_lock_set(obj, &l);
		iotlb_load_cr(obj, tmp);
	}
	l.base = obj->num_cr_ctx;
	l.vict = i;
	iotlb_lock_set(obj, &l);
}

/**
 * omap_iommu_domain_deactivate - deactivate attached iommu devices
 * @domain: iommu domain attached to the target iommu device
 *
 * This API allows client devices of IOMMU devices to suspend the
 * IOMMUs they control at runtime, after they have idled and suspended
 * all activity. System suspend is handled separately through the PM
 * driver's late callbacks.
 **/
int omap_iommu_domain_deactivate(struct iommu_domain *domain)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct omap_iommu_device *iommu;
	struct omap_iommu *oiommu;
	int i;

	if (!omap_domain->dev)
		return 0;

	iommu = omap_domain->iommus;
	iommu += (omap_domain->num_iommus - 1);
	for (i = 0; i < omap_domain->num_iommus; i++, iommu--) {
		oiommu = iommu->iommu_dev;
		pm_runtime_put_sync(oiommu->dev);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(omap_iommu_domain_deactivate);

/**
 * omap_iommu_domain_activate - activate attached iommu devices
 * @domain: iommu domain attached to the target iommu device
 *
 * This API allows the client devices of IOMMU devices to resume the
 * IOMMUs they control at runtime, before they can resume operations.
 * System resume is handled separately through the PM driver's late
 * callbacks.
 **/
int omap_iommu_domain_activate(struct iommu_domain *domain)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct omap_iommu_device *iommu;
	struct omap_iommu *oiommu;
	int i;

	if (!omap_domain->dev)
		return 0;

	iommu = omap_domain->iommus;
	for (i = 0; i < omap_domain->num_iommus; i++, iommu++) {
		oiommu = iommu->iommu_dev;
		pm_runtime_get_sync(oiommu->dev);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(omap_iommu_domain_activate);

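/*
 * A minimal usage sketch (hypothetical client code, e.g. a remoteproc
 * driver managing a DSP behind these MMUs): the domain is deactivated
 * once the processor is fully idled, and activated again before it
 * resumes, bracketing the window in which the MMUs may power down.
 *
 *	omap_iommu_domain_deactivate(rproc_domain);
 *	... remote processor is suspended ...
 *	omap_iommu_domain_activate(rproc_domain);
 *
 * 'rproc_domain' is an assumed iommu_domain pointer held by the client.
 */
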
/**
 * omap_iommu_runtime_suspend - disable an iommu device
 * @dev: iommu device
 *
 * This function performs all that is necessary to disable an
 * IOMMU device, either during final detachment from a client
 * device, or during system/runtime suspend of the device. This
 * includes programming all the appropriate IOMMU registers, and
 * managing the associated omap_hwmod's state and the device's
 * reset line. This function also saves the context of any
 * locked TLBs if suspending.
 **/
static __maybe_unused int omap_iommu_runtime_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct iommu_platform_data *pdata = dev_get_platdata(dev);
	struct omap_iommu *obj = to_iommu(dev);
	int ret;

	/* save the TLBs only during suspend, and not for power down */
	if (obj->domain && obj->iopgd)
		omap_iommu_save_tlb_entries(obj);

	omap2_iommu_disable(obj);

	if (pdata && pdata->device_idle)
		pdata->device_idle(pdev);

	if (pdata && pdata->assert_reset)
		pdata->assert_reset(pdev, pdata->reset_name);

	if (pdata && pdata->set_pwrdm_constraint) {
		ret = pdata->set_pwrdm_constraint(pdev, false, &obj->pwrst);
		if (ret) {
			dev_warn(obj->dev, "pwrdm_constraint failed to be reset, status = %d\n",
				 ret);
		}
	}

	return 0;
}

/**
 * omap_iommu_runtime_resume - enable an iommu device
 * @dev: iommu device
 *
 * This function performs all that is necessary to enable an
 * IOMMU device, either during initial attachment to a client
 * device, or during system/runtime resume of the device. This
 * includes programming all the appropriate IOMMU registers, and
 * managing the associated omap_hwmod's state and the device's
 * reset line. The function also restores any locked TLBs if
 * resuming after a suspend.
 **/
static __maybe_unused int omap_iommu_runtime_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct iommu_platform_data *pdata = dev_get_platdata(dev);
	struct omap_iommu *obj = to_iommu(dev);
	int ret = 0;

	if (pdata && pdata->set_pwrdm_constraint) {
		ret = pdata->set_pwrdm_constraint(pdev, true, &obj->pwrst);
		if (ret) {
			dev_warn(obj->dev, "pwrdm_constraint failed to be set, status = %d\n",
				 ret);
		}
	}

	if (pdata && pdata->deassert_reset) {
		ret = pdata->deassert_reset(pdev, pdata->reset_name);
		if (ret) {
			dev_err(dev, "deassert_reset failed: %d\n", ret);
			return ret;
		}
	}

	if (pdata && pdata->device_enable)
		pdata->device_enable(pdev);

	/* restore the TLBs only during resume, and not for power up */
	if (obj->domain)
		omap_iommu_restore_tlb_entries(obj);

	ret = omap2_iommu_enable(obj);

	return ret;
}

/**
 * omap_iommu_prepare - prepare() dev_pm_ops implementation
 * @dev: iommu device
 *
 * This function performs the necessary checks to determine if the IOMMU
 * device needs suspending or not. The function checks if the runtime_pm
 * status of the device is suspended, and returns 1 in that case. This
 * causes the PM core to skip invoking any of the sleep PM callbacks
 * (suspend, suspend_late, resume, resume_early, etc.).
 */
static int omap_iommu_prepare(struct device *dev)
{
	if (pm_runtime_status_suspended(dev))
		return 1;
	return 0;
}

static bool omap_iommu_can_register(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;

	if (!of_device_is_compatible(np, "ti,dra7-dsp-iommu"))
		return true;

	/*
	 * restrict IOMMU core registration only for processor-port MDMA MMUs
	 * on DRA7 DSPs
	 */
	if ((!strcmp(dev_name(&pdev->dev), "40d01000.mmu")) ||
	    (!strcmp(dev_name(&pdev->dev), "41501000.mmu")))
		return true;

	return false;
}

static int omap_iommu_dra7_get_dsp_system_cfg(struct platform_device *pdev,
					      struct omap_iommu *obj)
{
	struct device_node *np = pdev->dev.of_node;
	int ret;

	if (!of_device_is_compatible(np, "ti,dra7-dsp-iommu"))
		return 0;

	if (!of_property_read_bool(np, "ti,syscon-mmuconfig")) {
		dev_err(&pdev->dev, "ti,syscon-mmuconfig property is missing\n");
		return -EINVAL;
	}

	obj->syscfg =
		syscon_regmap_lookup_by_phandle(np, "ti,syscon-mmuconfig");
	if (IS_ERR(obj->syscfg)) {
		/* can fail with -EPROBE_DEFER */
		ret = PTR_ERR(obj->syscfg);
		return ret;
	}

	if (of_property_read_u32_index(np, "ti,syscon-mmuconfig", 1,
				       &obj->id)) {
		dev_err(&pdev->dev, "couldn't get the IOMMU instance id within subsystem\n");
		return -EINVAL;
	}

	if (obj->id != 0 && obj->id != 1) {
		dev_err(&pdev->dev, "invalid IOMMU instance id\n");
		return -EINVAL;
	}

	return 0;
}

/*
 * OMAP Device MMU(IOMMU) detection
 */
static int omap_iommu_probe(struct platform_device *pdev)
{
	int err = -ENODEV;
	int irq;
	struct omap_iommu *obj;
	struct resource *res;
	struct device_node *of = pdev->dev.of_node;

	if (!of) {
		pr_err("%s: only DT-based devices are supported\n", __func__);
		return -ENODEV;
	}

	obj = devm_kzalloc(&pdev->dev, sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	/*
	 * self-manage the ordering dependencies between omap_device_enable/idle
	 * and omap_device_assert/deassert_hardreset API
	 */
	if (pdev->dev.pm_domain) {
		dev_dbg(&pdev->dev, "device pm_domain is being reset\n");
		pdev->dev.pm_domain = NULL;
	}

	obj->name = dev_name(&pdev->dev);
	obj->nr_tlb_entries = 32;
	err = of_property_read_u32(of, "ti,#tlb-entries", &obj->nr_tlb_entries);
	if (err && err != -EINVAL)
		return err;
	if (obj->nr_tlb_entries != 32 && obj->nr_tlb_entries != 8)
		return -EINVAL;
	if (of_find_property(of, "ti,iommu-bus-err-back", NULL))
		obj->has_bus_err_back = MMU_GP_REG_BUS_ERR_BACK_EN;

	obj->dev = &pdev->dev;
	obj->ctx = (void *)obj + sizeof(*obj);
	obj->cr_ctx = devm_kzalloc(&pdev->dev,
				   sizeof(*obj->cr_ctx) * obj->nr_tlb_entries,
				   GFP_KERNEL);
	if (!obj->cr_ctx)
		return -ENOMEM;

	spin_lock_init(&obj->iommu_lock);
	spin_lock_init(&obj->page_table_lock);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	obj->regbase = devm_ioremap_resource(obj->dev, res);
	if (IS_ERR(obj->regbase))
		return PTR_ERR(obj->regbase);

	err = omap_iommu_dra7_get_dsp_system_cfg(pdev, obj);
	if (err)
		return err;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return -ENODEV;

	err = devm_request_irq(obj->dev, irq, iommu_fault_handler, IRQF_SHARED,
			       dev_name(obj->dev), obj);
	if (err < 0)
		return err;
	platform_set_drvdata(pdev, obj);

	if (omap_iommu_can_register(pdev)) {
		obj->group = iommu_group_alloc();
		if (IS_ERR(obj->group))
			return PTR_ERR(obj->group);

		err = iommu_device_sysfs_add(&obj->iommu, obj->dev, NULL,
					     obj->name);
		if (err)
			goto out_group;

		iommu_device_set_ops(&obj->iommu, &omap_iommu_ops);
		iommu_device_set_fwnode(&obj->iommu, &of->fwnode);

		err = iommu_device_register(&obj->iommu);
		if (err)
			goto out_sysfs;
	}

	pm_runtime_enable(obj->dev);

	omap_iommu_debugfs_add(obj);

	dev_info(&pdev->dev, "%s registered\n", obj->name);

	/* Re-probe the bus to probe devices attached to this IOMMU */
	bus_iommu_probe(&platform_bus_type);

	return 0;

out_sysfs:
	iommu_device_sysfs_remove(&obj->iommu);
out_group:
	iommu_group_put(obj->group);
	return err;
}

static int omap_iommu_remove(struct platform_device *pdev)
{
	struct omap_iommu *obj = platform_get_drvdata(pdev);

	if (obj->group) {
		iommu_group_put(obj->group);
		obj->group = NULL;

		iommu_device_sysfs_remove(&obj->iommu);
		iommu_device_unregister(&obj->iommu);
	}

	omap_iommu_debugfs_remove(obj);

	pm_runtime_disable(obj->dev);

	dev_info(&pdev->dev, "%s removed\n", obj->name);
	return 0;
}

static const struct dev_pm_ops omap_iommu_pm_ops = {
	.prepare = omap_iommu_prepare,
	SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				     pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(omap_iommu_runtime_suspend,
			   omap_iommu_runtime_resume, NULL)
};

static const struct of_device_id omap_iommu_of_match[] = {
	{ .compatible = "ti,omap2-iommu" },
	{ .compatible = "ti,omap4-iommu" },
	{ .compatible = "ti,dra7-iommu" },
	{ .compatible = "ti,dra7-dsp-iommu" },
	{},
};

static struct platform_driver omap_iommu_driver = {
	.probe	= omap_iommu_probe,
	.remove	= omap_iommu_remove,
	.driver	= {
		.name	= "omap-iommu",
		.pm	= &omap_iommu_pm_ops,
		.of_match_table = of_match_ptr(omap_iommu_of_match),
	},
};

static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, int pgsz)
{
	memset(e, 0, sizeof(*e));

	e->da = da;
	e->pa = pa;
	e->valid = MMU_CAM_V;
	e->pgsz = pgsz;
	e->endian = MMU_RAM_ENDIAN_LITTLE;
	e->elsz = MMU_RAM_ELSZ_8;
	e->mixed = 0;

	return iopgsz_to_bytes(e->pgsz);
}

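/*
 * An illustrative composition (example values, not driver code) of the
 * helpers above, mirroring what omap_iommu_map() below does for one
 * 4KB mapping at device address 0x40000000 -> physical 0x9c000000:
 *
 *	struct iotlb_entry e;
 *
 *	iotlb_init_entry(&e, 0x40000000, 0x9c000000, MMU_CAM_PGSZ_4K);
 *	omap_iopgtable_store_entry(oiommu, &e);
 *
 * iotlb_init_entry() returns the mapped size in bytes (SZ_4K here),
 * and omap_iopgtable_store_entry() writes the PTE after flushing any
 * stale TLB entry covering that address.
 */
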
static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
			  phys_addr_t pa, size_t bytes, int prot, gfp_t gfp)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct device *dev = omap_domain->dev;
	struct omap_iommu_device *iommu;
	struct omap_iommu *oiommu;
	struct iotlb_entry e;
	int omap_pgsz;
	u32 ret = -EINVAL;
	int i;

	omap_pgsz = bytes_to_iopgsz(bytes);
	if (omap_pgsz < 0) {
		dev_err(dev, "invalid size to map: %zu\n", bytes);
		return -EINVAL;
	}

	dev_dbg(dev, "mapping da 0x%lx to pa %pa size 0x%zx\n", da, &pa, bytes);

	iotlb_init_entry(&e, da, pa, omap_pgsz);

	iommu = omap_domain->iommus;
	for (i = 0; i < omap_domain->num_iommus; i++, iommu++) {
		oiommu = iommu->iommu_dev;
		ret = omap_iopgtable_store_entry(oiommu, &e);
		if (ret) {
			dev_err(dev, "omap_iopgtable_store_entry failed: %d\n",
				ret);
			break;
		}
	}

	if (ret) {
		while (i--) {
			iommu--;
			oiommu = iommu->iommu_dev;
			iopgtable_clear_entry(oiommu, da);
		}
	}

	return ret;
}

static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
			       size_t size, struct iommu_iotlb_gather *gather)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct device *dev = omap_domain->dev;
	struct omap_iommu_device *iommu;
	struct omap_iommu *oiommu;
	bool error = false;
	size_t bytes = 0;
	int i;

	dev_dbg(dev, "unmapping da 0x%lx size %zu\n", da, size);

	iommu = omap_domain->iommus;
	for (i = 0; i < omap_domain->num_iommus; i++, iommu++) {
		oiommu = iommu->iommu_dev;
		bytes = iopgtable_clear_entry(oiommu, da);
		if (!bytes)
			error = true;
	}

	/*
	 * simplify return - we are only checking if any of the iommus
	 * reported an error, but not if all of them are unmapping the
	 * same number of entries. This should not occur due to the
	 * mirror programming.
	 */
	return error ? 0 : bytes;
}

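/*
 * A minimal caller-side sketch (hypothetical, via the generic IOMMU
 * API rather than this file): the IOMMU core splits requests into the
 * sizes advertised in OMAP_IOMMU_PGSIZES, so each ->map()/->unmap()
 * call above sees exactly one 4K/64K/1M/16M block:
 *
 *	iommu_map(domain, 0x40000000, phys, SZ_1M,
 *		  IOMMU_READ | IOMMU_WRITE);
 *	...
 *	iommu_unmap(domain, 0x40000000, SZ_1M);
 *
 * 'domain' and 'phys' are assumed to come from the client driver.
 */
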
static int omap_iommu_count(struct device *dev)
{
	struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
	int count = 0;

	while (arch_data->iommu_dev) {
		count++;
		arch_data++;
	}

	return count;
}

/* caller should call cleanup if this function fails */
static int omap_iommu_attach_init(struct device *dev,
				  struct omap_iommu_domain *odomain)
{
	struct omap_iommu_device *iommu;
	int i;

	odomain->num_iommus = omap_iommu_count(dev);
	if (!odomain->num_iommus)
		return -EINVAL;

	odomain->iommus = kcalloc(odomain->num_iommus, sizeof(*iommu),
				  GFP_ATOMIC);
	if (!odomain->iommus)
		return -ENOMEM;

	iommu = odomain->iommus;
	for (i = 0; i < odomain->num_iommus; i++, iommu++) {
		iommu->pgtable = kzalloc(IOPGD_TABLE_SIZE, GFP_ATOMIC);
		if (!iommu->pgtable)
			return -ENOMEM;

		/*
		 * should never fail, but please keep this around to ensure
		 * we keep the hardware happy
		 */
		if (WARN_ON(!IS_ALIGNED((long)iommu->pgtable,
					IOPGD_TABLE_SIZE)))
			return -EINVAL;
	}

	return 0;
}

static void omap_iommu_detach_fini(struct omap_iommu_domain *odomain)
{
	int i;
	struct omap_iommu_device *iommu = odomain->iommus;

	for (i = 0; iommu && i < odomain->num_iommus; i++, iommu++)
		kfree(iommu->pgtable);

	kfree(odomain->iommus);
	odomain->num_iommus = 0;
	odomain->iommus = NULL;
}

static int
omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct omap_iommu_device *iommu;
	struct omap_iommu *oiommu;
	int ret = 0;
	int i;

	if (!arch_data || !arch_data->iommu_dev) {
		dev_err(dev, "device doesn't have an associated iommu\n");
		return -EINVAL;
	}

	spin_lock(&omap_domain->lock);

	/* only a single client device can be attached to a domain */
	if (omap_domain->dev) {
		dev_err(dev, "iommu domain is already attached\n");
		ret = -EBUSY;
		goto out;
	}

	ret = omap_iommu_attach_init(dev, omap_domain);
	if (ret) {
		dev_err(dev, "failed to allocate required iommu data %d\n",
			ret);
		goto init_fail;
	}

	iommu = omap_domain->iommus;
	for (i = 0; i < omap_domain->num_iommus; i++, iommu++, arch_data++) {
		/* configure and enable the omap iommu */
		oiommu = arch_data->iommu_dev;
		ret = omap_iommu_attach(oiommu, iommu->pgtable);
		if (ret) {
			dev_err(dev, "can't get omap iommu: %d\n", ret);
			goto attach_fail;
		}

		oiommu->domain = domain;
		iommu->iommu_dev = oiommu;
	}

	omap_domain->dev = dev;

	goto out;

attach_fail:
	while (i--) {
		iommu--;
		arch_data--;
		oiommu = iommu->iommu_dev;
		omap_iommu_detach(oiommu);
		iommu->iommu_dev = NULL;
		oiommu->domain = NULL;
	}
init_fail:
	omap_iommu_detach_fini(omap_domain);
out:
	spin_unlock(&omap_domain->lock);
	return ret;
}

static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
				   struct device *dev)
{
	struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
	struct omap_iommu_device *iommu = omap_domain->iommus;
	struct omap_iommu *oiommu;
	int i;

	if (!omap_domain->dev) {
		dev_err(dev, "domain has no attached device\n");
		return;
	}

	/* only a single device is supported per domain for now */
	if (omap_domain->dev != dev) {
		dev_err(dev, "invalid attached device\n");
		return;
	}

	/*
	 * cleanup in the reverse order of attachment - this addresses
	 * any h/w dependencies between multiple instances, if any
	 */
	iommu += (omap_domain->num_iommus - 1);
	arch_data += (omap_domain->num_iommus - 1);
	for (i = 0; i < omap_domain->num_iommus; i++, iommu--, arch_data--) {
		oiommu = iommu->iommu_dev;
		iopgtable_clear_entry_all(oiommu);

		omap_iommu_detach(oiommu);
		iommu->iommu_dev = NULL;
		oiommu->domain = NULL;
	}

	omap_iommu_detach_fini(omap_domain);

	omap_domain->dev = NULL;
}

static void omap_iommu_detach_dev(struct iommu_domain *domain,
				  struct device *dev)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);

	spin_lock(&omap_domain->lock);
	_omap_iommu_detach_dev(omap_domain, dev);
	spin_unlock(&omap_domain->lock);
}

static struct iommu_domain *omap_iommu_domain_alloc(unsigned type)
{
	struct omap_iommu_domain *omap_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL);
	if (!omap_domain)
		return NULL;

	spin_lock_init(&omap_domain->lock);

	omap_domain->domain.geometry.aperture_start = 0;
	omap_domain->domain.geometry.aperture_end = (1ULL << 32) - 1;
	omap_domain->domain.geometry.force_aperture = true;

	return &omap_domain->domain;
}

static void omap_iommu_domain_free(struct iommu_domain *domain)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);

	/*
	 * Check whether an iommu device is still attached
	 * (currently, only one device can be attached).
	 */
| 1601 | if (omap_domain->dev) |
| 1602 | _omap_iommu_detach_dev(omap_domain, omap_domain->dev); |
| 1603 | |
| 1604 | kfree(omap_domain); |
| 1605 | } |
| 1606 | |
| 1607 | static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain, |
| 1608 | dma_addr_t da) |
| 1609 | { |
| 1610 | struct omap_iommu_domain *omap_domain = to_omap_domain(domain); |
| 1611 | struct omap_iommu_device *iommu = omap_domain->iommus; |
| 1612 | struct omap_iommu *oiommu = iommu->iommu_dev; |
| 1613 | struct device *dev = oiommu->dev; |
| 1614 | u32 *pgd, *pte; |
| 1615 | phys_addr_t ret = 0; |
| 1616 | |
| 1617 | /* |
| 1618 | * all the iommus within the domain will have identical programming, |
| 1619 | * so perform the lookup using just the first iommu |
| 1620 | */ |
| 1621 | iopgtable_lookup_entry(oiommu, da, &pgd, &pte); |
| 1622 | |
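| | /* |
| | * a non-NULL pte means the walk ended at a second-level entry |
| | * (4K small or 64K large page); otherwise the first-level pgd |
| | * entry itself maps the da (1M section or 16M supersection) |
| | */ |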
| 1623 | if (pte) { |
| 1624 | if (iopte_is_small(*pte)) |
| 1625 | ret = omap_iommu_translate(*pte, da, IOPTE_MASK); |
| 1626 | else if (iopte_is_large(*pte)) |
| 1627 | ret = omap_iommu_translate(*pte, da, IOLARGE_MASK); |
| 1628 | else |
| 1629 | dev_err(dev, "bogus pte 0x%x, da 0x%llx\n", *pte, |
| 1630 | (unsigned long long)da); |
| 1631 | } else { |
| 1632 | if (iopgd_is_section(*pgd)) |
| 1633 | ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK); |
| 1634 | else if (iopgd_is_super(*pgd)) |
| 1635 | ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK); |
| 1636 | else |
| 1637 | dev_err(dev, "bogus pgd 0x%x, da 0x%llx\n", *pgd, |
| 1638 | (unsigned long long)da); |
| 1639 | } |
| 1640 | |
| 1641 | return ret; |
| 1642 | } |
| 1643 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame] | 1644 | static struct iommu_device *omap_iommu_probe_device(struct device *dev) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1645 | { |
| 1646 | struct omap_iommu_arch_data *arch_data, *tmp; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1647 | struct platform_device *pdev; |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame] | 1648 | struct omap_iommu *oiommu; |
| 1649 | struct device_node *np; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1650 | int num_iommus, i; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1651 | |
| 1652 | /* |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame] | 1653 | * Allocate the per-device iommu structure for DT-based devices. |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1654 | * |
| 1655 | * TODO: Simplify this when removing non-DT support completely from the |
| 1656 | * IOMMU users. |
| 1657 | */ |
| 1658 | if (!dev->of_node) |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame] | 1659 | return ERR_PTR(-ENODEV); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1660 | |
| 1661 | /* |
| 1662 | * retrieve the count of IOMMU nodes using phandle size as element size |
| 1663 | * since #iommu-cells = 0 for OMAP |
| 1664 | */ |
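| | /* |
| | * e.g. a node with "iommus = <&mmu0>, <&mmu1>;" (illustrative |
| | * names) yields a count of 2 here |
| | */ |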
| 1665 | num_iommus = of_property_count_elems_of_size(dev->of_node, "iommus", |
| 1666 | sizeof(phandle)); |
| 1667 | if (num_iommus < 0) |
Olivier Deprez | 92d4c21 | 2022-12-06 15:05:30 +0100 | [diff] [blame] | 1668 | return ERR_PTR(-ENODEV); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1669 | |
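| | /* |
| | * allocate one extra zeroed entry as a sentinel terminating |
| | * the arch_data array |
| | */ |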
| 1670 | arch_data = kcalloc(num_iommus + 1, sizeof(*arch_data), GFP_KERNEL); |
| 1671 | if (!arch_data) |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame] | 1672 | return ERR_PTR(-ENOMEM); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1673 | |
| 1674 | for (i = 0, tmp = arch_data; i < num_iommus; i++, tmp++) { |
| 1675 | np = of_parse_phandle(dev->of_node, "iommus", i); |
| 1676 | if (!np) { |
| 1677 | kfree(arch_data); |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame] | 1678 | return ERR_PTR(-EINVAL); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1679 | } |
| 1680 | |
| 1681 | pdev = of_find_device_by_node(np); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1682 | if (!pdev) { |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1683 | of_node_put(np); |
| 1684 | kfree(arch_data); |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame] | 1685 | return ERR_PTR(-ENODEV); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1686 | } |
| 1687 | |
| 1688 | oiommu = platform_get_drvdata(pdev); |
| 1689 | if (!oiommu) { |
| 1690 | of_node_put(np); |
| 1691 | kfree(arch_data); |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame] | 1692 | return ERR_PTR(-EINVAL); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1693 | } |
| 1694 | |
| 1695 | tmp->iommu_dev = oiommu; |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1696 | tmp->dev = &pdev->dev; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1697 | |
| 1698 | of_node_put(np); |
| 1699 | } |
| 1700 | |
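| | /* stash the array as this device's per-device iommu private data */ |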
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame] | 1701 | dev_iommu_priv_set(dev, arch_data); |
| 1702 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1703 | /* |
| 1704 | * use the first IOMMU alone for the sysfs device linking. |
| 1705 | * TODO: Evaluate if a single iommu_group needs to be |
| 1706 | * maintained for both IOMMUs |
| 1707 | */ |
| 1708 | oiommu = arch_data->iommu_dev; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1709 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame] | 1710 | return &oiommu->iommu; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1711 | } |
| 1712 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame] | 1713 | static void omap_iommu_release_device(struct device *dev) |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1714 | { |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame] | 1715 | struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1716 | |
| 1717 | if (!dev->of_node || !arch_data) |
| 1718 | return; |
| 1719 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame] | 1720 | dev_iommu_priv_set(dev, NULL); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1721 | kfree(arch_data); |
| 1723 | } |
| 1724 | |
| 1725 | static struct iommu_group *omap_iommu_device_group(struct device *dev) |
| 1726 | { |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame] | 1727 | struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1728 | struct iommu_group *group = ERR_PTR(-EINVAL); |
| 1729 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame] | 1730 | if (!arch_data) |
| 1731 | return ERR_PTR(-ENODEV); |
| 1732 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1733 | if (arch_data->iommu_dev) |
| 1734 | group = iommu_group_ref_get(arch_data->iommu_dev->group); |
| 1735 | |
| 1736 | return group; |
| 1737 | } |
| 1738 | |
| 1739 | static const struct iommu_ops omap_iommu_ops = { |
| 1740 | .domain_alloc = omap_iommu_domain_alloc, |
| 1741 | .domain_free = omap_iommu_domain_free, |
| 1742 | .attach_dev = omap_iommu_attach_dev, |
| 1743 | .detach_dev = omap_iommu_detach_dev, |
| 1744 | .map = omap_iommu_map, |
| 1745 | .unmap = omap_iommu_unmap, |
| 1746 | .iova_to_phys = omap_iommu_iova_to_phys, |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame] | 1747 | .probe_device = omap_iommu_probe_device, |
| 1748 | .release_device = omap_iommu_release_device, |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1749 | .device_group = omap_iommu_device_group, |
| 1750 | .pgsize_bitmap = OMAP_IOMMU_PGSIZES, |
| 1751 | }; |
| 1752 | |
| 1753 | static int __init omap_iommu_init(void) |
| 1754 | { |
| 1755 | struct kmem_cache *p; |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1756 | const slab_flags_t flags = SLAB_HWCACHE_ALIGN; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1757 | size_t align = 1 << 10; /* L2 pagetable alignment */ |
| 1758 | struct device_node *np; |
| 1759 | int ret; |
| 1760 | |
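| | /* bail out early on platforms without any OMAP IOMMU nodes */ |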
| 1761 | np = of_find_matching_node(NULL, omap_iommu_of_match); |
| 1762 | if (!np) |
| 1763 | return 0; |
| 1764 | |
| 1765 | of_node_put(np); |
| 1766 | |
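| | /* each L2 pagetable holds 256 u32 entries (IOPTE_TABLE_SIZE = 1KB) */ |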
| 1767 | p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags, |
| 1768 | NULL); |
| 1769 | if (!p) |
| 1770 | return -ENOMEM; |
| 1771 | iopte_cachep = p; |
| 1772 | |
| 1773 | omap_iommu_debugfs_init(); |
| 1774 | |
| 1775 | ret = platform_driver_register(&omap_iommu_driver); |
| 1776 | if (ret) { |
| 1777 | pr_err("%s: failed to register driver\n", __func__); |
| 1778 | goto fail_driver; |
| 1779 | } |
| 1780 | |
| 1781 | ret = bus_set_iommu(&platform_bus_type, &omap_iommu_ops); |
| 1782 | if (ret) |
| 1783 | goto fail_bus; |
| 1784 | |
| 1785 | return 0; |
| 1786 | |
| 1787 | fail_bus: |
| 1788 | platform_driver_unregister(&omap_iommu_driver); |
| 1789 | fail_driver: |
| 1790 | kmem_cache_destroy(iopte_cachep); |
| 1791 | return ret; |
| 1792 | } |
| 1793 | subsys_initcall(omap_iommu_init); |
| 1794 | /* must be ready before omap3isp is probed */ |