/*
 * include/asm-xtensa/io.h
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 */

#ifndef _XTENSA_IO_H
#define _XTENSA_IO_H

#include <asm/byteorder.h>
#include <asm/page.h>
#include <asm/vectors.h>
#include <linux/bug.h>
#include <linux/kernel.h>

#include <linux/types.h>

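/*
 * I/O (including the PCI port space used by the generic inb()/outb()
 * helpers) goes through the uncached KIO bypass mapping.
 */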
#define IOADDR(x)	(XCHAL_KIO_BYPASS_VADDR + (x))
#define IO_SPACE_LIMIT	~0
#define PCI_IOBASE	((void __iomem *)XCHAL_KIO_BYPASS_VADDR)

#ifdef CONFIG_MMU

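/* Fallbacks used when an address is not covered by the fixed KIO window. */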
void __iomem *xtensa_ioremap_nocache(unsigned long addr, unsigned long size);
void __iomem *xtensa_ioremap_cache(unsigned long addr, unsigned long size);
void xtensa_iounmap(volatile void __iomem *addr);

/*
 * Return the virtual address for the specified bus memory.
 * Addresses inside the fixed KIO window are translated directly to the
 * uncached (bypass) mapping; everything else is mapped on demand via
 * xtensa_ioremap_nocache().
 */
static inline void __iomem *ioremap_nocache(unsigned long offset,
		unsigned long size)
{
	if (offset >= XCHAL_KIO_PADDR
	    && offset - XCHAL_KIO_PADDR < XCHAL_KIO_SIZE)
		return (void __iomem *)(offset - XCHAL_KIO_PADDR
					+ XCHAL_KIO_BYPASS_VADDR);
	else
		return xtensa_ioremap_nocache(offset, size);
}

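/*
 * Same as above, but return a cached mapping; addresses inside the KIO
 * window use the cached KIO virtual address range.
 */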
static inline void __iomem *ioremap_cache(unsigned long offset,
		unsigned long size)
{
	if (offset >= XCHAL_KIO_PADDR
	    && offset - XCHAL_KIO_PADDR < XCHAL_KIO_SIZE)
		return (void __iomem *)(offset - XCHAL_KIO_PADDR
					+ XCHAL_KIO_CACHED_VADDR);
	else
		return xtensa_ioremap_cache(offset, size);
}
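
/*
 * Advertise the local ioremap_cache()/ioremap_nocache() so that
 * <asm-generic/io.h> does not install its default versions.
 */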
#define ioremap_cache ioremap_cache
#define ioremap_nocache ioremap_nocache

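/* No separate write-combining or write-through attributes; use uncached. */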
#define ioremap_wc ioremap_nocache
#define ioremap_wt ioremap_nocache

static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
{
	return ioremap_nocache(offset, size);
}

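/*
 * Only mappings created by the xtensa_ioremap_*() helpers are torn down;
 * addresses inside the static KIO windows are left untouched.
 */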
static inline void iounmap(volatile void __iomem *addr)
{
	unsigned long va = (unsigned long) addr;

	if (!(va >= XCHAL_KIO_CACHED_VADDR &&
	      va - XCHAL_KIO_CACHED_VADDR < XCHAL_KIO_SIZE) &&
	    !(va >= XCHAL_KIO_BYPASS_VADDR &&
	      va - XCHAL_KIO_BYPASS_VADDR < XCHAL_KIO_SIZE))
		xtensa_iounmap(addr);
}

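/*
 * Illustrative usage sketch (DEV_PADDR and DEV_SIZE are hypothetical
 * placeholders, not real symbols):
 *
 *	void __iomem *regs = ioremap(DEV_PADDR, DEV_SIZE);
 *
 *	if (regs) {
 *		u32 val = readl(regs);
 *		iounmap(regs);
 *	}
 */

/* Bus addresses are identical to physical addresses on this platform. */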
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt

#endif /* CONFIG_MMU */

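/* Generic MMIO accessors: readb()/writeb(), readl()/writel(), and friends. */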
#include <asm-generic/io.h>

#endif /* _XTENSA_IO_H */