// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * DMA coherent memory allocation.
 *
 * Copyright (C) 2002 - 2005 Tensilica Inc.
 * Copyright (C) 2015 Cadence Design Systems Inc.
 *
 * Based on version for i386.
 *
 * Chris Zankel <chris@zankel.net>
 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
 */

#include <linux/dma-map-ops.h>
#include <linux/dma-direct.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <asm/platform.h>

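/*
 * Apply the cache maintenance callback @fn to the physical range
 * [@paddr, @paddr + @size). Lowmem is reached through its permanent
 * linear mapping; highmem pages are temporarily mapped one page at a
 * time with kmap_atomic().
 */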
static void do_cache_op(phys_addr_t paddr, size_t size,
			void (*fn)(unsigned long, unsigned long))
{
	unsigned long off = paddr & (PAGE_SIZE - 1);
	unsigned long pfn = PFN_DOWN(paddr);
	struct page *page = pfn_to_page(pfn);

	if (!PageHighMem(page))
		fn((unsigned long)phys_to_virt(paddr), size);
	else
		while (size > 0) {
			size_t sz = min_t(size_t, size, PAGE_SIZE - off);
			void *vaddr = kmap_atomic(page);

			fn((unsigned long)vaddr + off, sz);
			kunmap_atomic(vaddr);
			off = 0;
			++page;
			size -= sz;
		}
}

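/*
 * Make a DMA buffer visible to the CPU after the device has written to it:
 * invalidate any stale cache lines covering the range so subsequent reads
 * see the data the device placed in memory.
 */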
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_BIDIRECTIONAL:
	case DMA_FROM_DEVICE:
		do_cache_op(paddr, size, __invalidate_dcache_range);
		break;

	case DMA_NONE:
		BUG();
		break;

	default:
		break;
	}
}

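/*
 * Make CPU writes visible to the device before DMA starts: on writeback
 * dcache configurations, flush dirty lines covering the range out to memory.
 */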
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_BIDIRECTIONAL:
	case DMA_TO_DEVICE:
		if (XCHAL_DCACHE_IS_WRITEBACK)
			do_cache_op(paddr, size, __flush_dcache_range);
		break;

	case DMA_NONE:
		BUG();
		break;

	default:
		break;
	}
}

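/*
 * Prepare a freshly allocated coherent buffer: invalidate any cache lines
 * that alias it so the uncached mapping observes current memory contents.
 */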
void arch_dma_prep_coherent(struct page *page, size_t size)
{
	__invalidate_dcache_range((unsigned long)page_address(page), size);
}

/*
 * Memory caching is platform-dependent in noMMU xtensa configurations.
 * arch_dma_set_uncached() should be implemented in platform code in order
 * to enable coherent DMA memory operations when CONFIG_MMU is not enabled.
 */
#ifdef CONFIG_MMU
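/*
 * With an MMU, KSEG provides both a cached and an uncached (bypass) alias of
 * the same physical memory at fixed virtual addresses, so an uncached view of
 * a coherent buffer is obtained by pointer arithmetic between the two aliases.
 */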
void *arch_dma_set_uncached(void *p, size_t size)
{
	return p + XCHAL_KSEG_BYPASS_VADDR - XCHAL_KSEG_CACHED_VADDR;
}
#endif /* CONFIG_MMU */