// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * OpenRISC Linux
 *
 * Linux architectural port borrowing liberally from similar works of
 * others. All original copyrights apply as per the original source
 * declaration.
 *
 * Modifications for the OpenRISC architecture:
 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 *
 * DMA mapping callbacks...
 */

#include <linux/dma-map-ops.h>
#include <linux/pagewalk.h>

#include <asm/cpuinfo.h>
#include <asm/spr_defs.h>
#include <asm/tlbflush.h>

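/*
 * pte_entry callback for walk_page_range(): mark a single kernel page as
 * cache-inhibited and make sure neither the TLB nor the data cache still
 * holds a cached view of it.
 */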
static int
page_set_nocache(pte_t *pte, unsigned long addr,
		 unsigned long next, struct mm_walk *walk)
{
	unsigned long cl;
	struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];

	pte_val(*pte) |= _PAGE_CI;

	/*
	 * Flush the page out of the TLB so that the new page flags get
	 * picked up next time there's an access
	 */
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);

	/* Flush page out of dcache */
	for (cl = __pa(addr); cl < __pa(next); cl += cpuinfo->dcache_block_size)
		mtspr(SPR_DCBFR, cl);

	return 0;
}

static const struct mm_walk_ops set_nocache_walk_ops = {
	.pte_entry = page_set_nocache,
};

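/*
 * pte_entry callback for walk_page_range(): clear the cache-inhibit bit
 * again so the page is mapped cacheable, and flush the stale TLB entry.
 */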
static int
page_clear_nocache(pte_t *pte, unsigned long addr,
		   unsigned long next, struct mm_walk *walk)
{
	pte_val(*pte) &= ~_PAGE_CI;

	/*
	 * Flush the page out of the TLB so that the new page flags get
	 * picked up next time there's an access
	 */
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);

	return 0;
}

static const struct mm_walk_ops clear_nocache_walk_ops = {
	.pte_entry = page_clear_nocache,
};

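/*
 * Used by the generic dma-direct allocator to hand back an uncached view
 * of a freshly allocated buffer. OpenRISC has no separate uncached alias,
 * so the kernel's own linear mapping is re-flagged in place: every page
 * in the range gets _PAGE_CI set and is flushed from the data cache.
 */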
void *arch_dma_set_uncached(void *cpu_addr, size_t size)
{
	unsigned long va = (unsigned long)cpu_addr;
	int error;

	/*
	 * We need to iterate through the pages, clearing the dcache for
	 * them and setting the cache-inhibit bit.
	 */
	mmap_read_lock(&init_mm);
	error = walk_page_range(&init_mm, va, va + size, &set_nocache_walk_ops,
			NULL);
	mmap_read_unlock(&init_mm);

	if (error)
		return ERR_PTR(error);
	return cpu_addr;
}

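/*
 * Undo arch_dma_set_uncached() before the buffer is returned to the page
 * allocator: clear _PAGE_CI on every page in the range so the linear
 * mapping is cacheable again.
 */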
void arch_dma_clear_uncached(void *cpu_addr, size_t size)
{
	unsigned long va = (unsigned long)cpu_addr;

	mmap_read_lock(&init_mm);
	/* walk_page_range shouldn't be able to fail here */
	WARN_ON(walk_page_range(&init_mm, va, va + size,
			&clear_nocache_walk_ops, NULL));
	mmap_read_unlock(&init_mm);
}

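/*
 * Streaming-DMA sync before the device touches the buffer: write back
 * dirty cache lines for DMA_TO_DEVICE (SPR_DCBFR, flush) and drop stale
 * lines for DMA_FROM_DEVICE (SPR_DCBIR, invalidate).
 */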
void arch_sync_dma_for_device(phys_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	unsigned long cl;
	struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];

	switch (dir) {
	case DMA_TO_DEVICE:
		/* Flush the dcache for the requested range */
		for (cl = addr; cl < addr + size;
		     cl += cpuinfo->dcache_block_size)
			mtspr(SPR_DCBFR, cl);
		break;
	case DMA_FROM_DEVICE:
		/* Invalidate the dcache for the requested range */
		for (cl = addr; cl < addr + size;
		     cl += cpuinfo->dcache_block_size)
			mtspr(SPR_DCBIR, cl);
		break;
	default:
		/*
		 * NOTE: If dir == DMA_BIDIRECTIONAL then there's no need to
		 * flush nor invalidate the cache here as the area will need
		 * to be manually synced anyway.
		 */
		break;
	}
}