Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame^] | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
| 2 | /* $Id: cache.h,v 1.6 2004/03/11 18:08:05 lethal Exp $ |
| 3 | * |
| 4 | * include/asm-sh/cache.h |
| 5 | * |
| 6 | * Copyright 1999 (C) Niibe Yutaka |
| 7 | * Copyright 2002, 2003 (C) Paul Mundt |
| 8 | */ |
| 9 | #ifndef __ASM_SH_CACHE_H |
| 10 | #define __ASM_SH_CACHE_H |
| 11 | #ifdef __KERNEL__ |
| 12 | |
| 13 | #include <linux/init.h> |
| 14 | #include <cpu/cache.h> |
| 15 | |
/*
 * L1 cache line size in bytes, derived from the CPU-specific
 * L1_CACHE_SHIFT provided by <cpu/cache.h>.
 */
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)

/*
 * Place a variable in the ".data..read_mostly" section.  By convention
 * this groups rarely-written data together so it does not share cache
 * lines with frequently-written data.
 */
#define __read_mostly __attribute__((__section__(".data..read_mostly")))
| 19 | |
| 20 | #ifndef __ASSEMBLY__ |
/*
 * Describes the geometry of one cache (sizes, ways, line width) plus
 * derived values used when operating on the memory-mapped cache arrays.
 * NOTE(review): fields are presumably filled in by CPU probe/setup code
 * elsewhere in the arch tree — confirm against the cache init path.
 */
struct cache_info {
	unsigned int ways;		/* Number of cache ways */
	unsigned int sets;		/* Number of cache sets */
	unsigned int linesz;		/* Cache line size (bytes) */

	unsigned int way_size;		/* sets * line size */

	/*
	 * way_incr is the address offset for accessing the next way
	 * in memory mapped cache array ops.
	 */
	unsigned int way_incr;
	unsigned int entry_shift;	/* presumably log2 of the per-entry stride -- TODO confirm */
	unsigned int entry_mask;	/* presumably mask selecting the entry index bits -- TODO confirm */

	/*
	 * Compute a mask which selects the address bits which overlap between
	 * 1. those used to select the cache set during indexing
	 * 2. those in the physical page number.
	 */
	unsigned int alias_mask;
	unsigned int n_aliases;		/* Number of aliases */

	unsigned long flags;		/* NOTE(review): meaning of flag bits not visible here -- see CPU setup code */
};
| 46 | #endif /* __ASSEMBLY__ */ |
| 47 | #endif /* __KERNEL__ */ |
| 48 | #endif /* __ASM_SH_CACHE_H */ |