// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * OpenRISC Linux
 *
 * Linux architectural port borrowing liberally from similar works of
 * others.  All original copyrights apply as per the original source
 * declaration.
 *
 * Modifications for the OpenRISC architecture:
 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 *
 * DMA mapping callbacks...
 * As alloc_coherent is the only DMA callback being used currently, that's
 * the only thing implemented properly.  The rest need looking into...
 */

#include <linux/dma-noncoherent.h>
#include <linux/pagewalk.h>

#include <asm/cpuinfo.h>
#include <asm/spr_defs.h>
#include <asm/tlbflush.h>

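/*
 * walk_page_range() callback: mark one PTE cache-inhibited, drop the
 * stale TLB entry, and write any cached lines for the page back to
 * memory so the CPU and a DMA master see the same data.
 */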
static int
page_set_nocache(pte_t *pte, unsigned long addr,
		 unsigned long next, struct mm_walk *walk)
{
	unsigned long cl;
	struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];

	pte_val(*pte) |= _PAGE_CI;

	/*
	 * Flush the page out of the TLB so that the new page flags get
	 * picked up next time there's an access
	 */
	flush_tlb_page(NULL, addr);

	/* Flush page out of dcache */
	for (cl = __pa(addr); cl < __pa(next); cl += cpuinfo->dcache_block_size)
		mtspr(SPR_DCBFR, cl);

	return 0;
}

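/* Page-table walker ops for marking a range cache-inhibited. */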
static const struct mm_walk_ops set_nocache_walk_ops = {
	.pte_entry = page_set_nocache,
};

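/*
 * walk_page_range() callback: make one PTE cacheable again.  No dcache
 * maintenance is done here; the mapping was cache-inhibited while in
 * use, so no dirty lines should exist for it.
 */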
static int
page_clear_nocache(pte_t *pte, unsigned long addr,
		   unsigned long next, struct mm_walk *walk)
{
	pte_val(*pte) &= ~_PAGE_CI;

	/*
	 * Flush the page out of the TLB so that the new page flags get
	 * picked up next time there's an access
	 */
	flush_tlb_page(NULL, addr);

	return 0;
}

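/* Page-table walker ops for restoring caching on a range. */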
static const struct mm_walk_ops clear_nocache_walk_ops = {
	.pte_entry = page_clear_nocache,
};

/*
 * Alloc "coherent" memory, which for OpenRISC means simply uncached.
 *
 * This function effectively just calls alloc_pages_exact, sets the
 * cache-inhibit bit on those pages, and makes sure that the pages are
 * flushed out of the cache before they are used.
 *
 * If the NON_CONSISTENT attribute is set, then this function just
 * returns "normal", cacheable memory.
 *
 * There are additional flags WEAK_ORDERING and WRITE_COMBINE to take
 * into consideration here, too.  All currently known implementations of
 * the OR1K support only strongly ordered memory accesses, so that flag
 * is being ignored for now; uncached but write-combined memory is a
 * missing feature of the OR1K.
 */
void *
arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	unsigned long va;
	void *page;

	page = alloc_pages_exact(size, gfp | __GFP_ZERO);
	if (!page)
		return NULL;

	/* This gives us the real physical address of the first page. */
	*dma_handle = __pa(page);

	va = (unsigned long)page;

	/*
	 * We need to iterate through the pages, clearing the dcache for
	 * them and setting the cache-inhibit bit.
	 */
	if (walk_page_range(&init_mm, va, va + size, &set_nocache_walk_ops,
			NULL)) {
		free_pages_exact(page, size);
		return NULL;
	}

	return (void *)va;
}

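/*
 * Undo arch_dma_alloc: make the pages cacheable again, then return
 * them to the page allocator.
 */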
void
arch_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	unsigned long va = (unsigned long)vaddr;

	/* walk_page_range shouldn't be able to fail here */
	WARN_ON(walk_page_range(&init_mm, va, va + size,
			&clear_nocache_walk_ops, NULL));

	free_pages_exact(vaddr, size);
}

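/*
 * Sync a streaming mapping for the device: write the dcache back
 * before the device reads the buffer (DMA_TO_DEVICE), or invalidate
 * it so the CPU sees fresh data after the device writes the buffer
 * (DMA_FROM_DEVICE).
 */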
void arch_sync_dma_for_device(struct device *dev, phys_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	unsigned long cl;
	struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];

	switch (dir) {
	case DMA_TO_DEVICE:
		/* Flush the dcache for the requested range */
		for (cl = addr; cl < addr + size;
		     cl += cpuinfo->dcache_block_size)
			mtspr(SPR_DCBFR, cl);
		break;
	case DMA_FROM_DEVICE:
		/* Invalidate the dcache for the requested range */
		for (cl = addr; cl < addr + size;
		     cl += cpuinfo->dcache_block_size)
			mtspr(SPR_DCBIR, cl);
		break;
	default:
		/*
		 * NOTE: If dir == DMA_BIDIRECTIONAL then there's no need to
		 * flush nor invalidate the cache here as the area will need
		 * to be manually synced anyway.
		 */
		break;
	}
}