// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 SiFive
 */

#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <asm/cacheinfo.h>

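/*
 * Optional SoC-specific hooks. A platform driver may install these to
 * expose extra per-leaf sysfs attributes beyond the generic cacheinfo
 * ones.
 */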
static struct riscv_cacheinfo_ops *rv_cache_ops;

void riscv_set_cacheinfo_ops(struct riscv_cacheinfo_ops *ops)
{
	rv_cache_ops = ops;
}
EXPORT_SYMBOL_GPL(riscv_set_cacheinfo_ops);
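
/*
 * A minimal sketch of how a platform driver might hook in; the ops
 * struct and my_soc_get_priv_group() are hypothetical:
 *
 *	static struct riscv_cacheinfo_ops my_soc_cache_ops = {
 *		.get_priv_group = my_soc_get_priv_group,
 *	};
 *
 *	riscv_set_cacheinfo_ops(&my_soc_cache_ops);
 */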

const struct attribute_group *
cache_get_priv_group(struct cacheinfo *this_leaf)
{
	if (rv_cache_ops && rv_cache_ops->get_priv_group)
		return rv_cache_ops->get_priv_group(this_leaf);
	return NULL;
}

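/* Find this CPU's cache leaf matching @level and @type, or NULL. */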
static struct cacheinfo *get_cacheinfo(u32 level, enum cache_type type)
{
	/*
	 * Using raw_smp_processor_id() elides a preemptibility check, but this
	 * is really indicative of a larger problem: the cacheinfo UABI assumes
	 * that cores have a homogeneous view of the cache hierarchy. That
	 * happens to be the case for the current set of RISC-V systems, but
	 * likely won't be true in general. Since there's no way to provide
	 * correct information for these systems via the current UABI we're
	 * just eliding the check for now.
	 */
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(raw_smp_processor_id());
	struct cacheinfo *this_leaf;
	int index;

	for (index = 0; index < this_cpu_ci->num_leaves; index++) {
		this_leaf = this_cpu_ci->info_list + index;
		if (this_leaf->level == level && this_leaf->type == type)
			return this_leaf;
	}

	return NULL;
}

uintptr_t get_cache_size(u32 level, enum cache_type type)
{
	struct cacheinfo *this_leaf = get_cacheinfo(level, type);

	return this_leaf ? this_leaf->size : 0;
}

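/*
 * Geometry is packed as (ways_of_associativity << 16) | coherency_line_size;
 * e.g. an 8-way cache with 64-byte lines yields 0x80040. Returns 0 when no
 * matching leaf exists.
 */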
uintptr_t get_cache_geometry(u32 level, enum cache_type type)
{
	struct cacheinfo *this_leaf = get_cacheinfo(level, type);

	return this_leaf ? (this_leaf->ways_of_associativity << 16 |
			    this_leaf->coherency_line_size) :
			   0;
}

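/*
 * For a set-associative cache, size = sets * ways * line_size, so the
 * number of ways can be recovered as size / (sets * line_size).
 */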
static void ci_leaf_init(struct cacheinfo *this_leaf, enum cache_type type,
			 unsigned int level, unsigned int size,
			 unsigned int sets, unsigned int line_size)
{
	this_leaf->level = level;
	this_leaf->type = type;
	this_leaf->size = size;
	this_leaf->number_of_sets = sets;
	this_leaf->coherency_line_size = line_size;

	/*
	 * If the cache is fully associative, there is no need to
	 * check the other properties.
	 */
	if (sets == 1)
		return;

	/*
	 * Set the number of ways for an n-way set-associative cache,
	 * making sure all properties are greater than zero.
	 */
	if (sets > 0 && size > 0 && line_size > 0)
		this_leaf->ways_of_associativity = (size / sets) / line_size;
}

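/*
 * Parse the standard devicetree cache properties for @node. A unified
 * leaf is created only when all three "cache-*" properties are present;
 * likewise for the "i-cache-*" and "d-cache-*" variants. Each parsed
 * leaf advances *this_leaf to the next free slot.
 */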
static void fill_cacheinfo(struct cacheinfo **this_leaf,
			   struct device_node *node, unsigned int level)
{
	unsigned int size, sets, line_size;

	if (!of_property_read_u32(node, "cache-size", &size) &&
	    !of_property_read_u32(node, "cache-block-size", &line_size) &&
	    !of_property_read_u32(node, "cache-sets", &sets)) {
		ci_leaf_init((*this_leaf)++, CACHE_TYPE_UNIFIED, level, size, sets, line_size);
	}

	if (!of_property_read_u32(node, "i-cache-size", &size) &&
	    !of_property_read_u32(node, "i-cache-sets", &sets) &&
	    !of_property_read_u32(node, "i-cache-block-size", &line_size)) {
		ci_leaf_init((*this_leaf)++, CACHE_TYPE_INST, level, size, sets, line_size);
	}

	if (!of_property_read_u32(node, "d-cache-size", &size) &&
	    !of_property_read_u32(node, "d-cache-sets", &sets) &&
	    !of_property_read_u32(node, "d-cache-block-size", &line_size)) {
		ci_leaf_init((*this_leaf)++, CACHE_TYPE_DATA, level, size, sets, line_size);
	}
}

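/*
 * Count cache levels and leaves for @cpu by walking the cpu node's own
 * properties and then each next-level cache node in turn.
 */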
int init_cache_level(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct device_node *np = of_cpu_device_node_get(cpu);
	struct device_node *prev = NULL;
	int levels = 0, leaves = 0, level;

	if (of_property_read_bool(np, "cache-size"))
		++leaves;
	if (of_property_read_bool(np, "i-cache-size"))
		++leaves;
	if (of_property_read_bool(np, "d-cache-size"))
		++leaves;
	if (leaves > 0)
		levels = 1;

	prev = np;
	while ((np = of_find_next_cache_node(np))) {
		of_node_put(prev);
		prev = np;
		if (!of_device_is_compatible(np, "cache"))
			break;
		if (of_property_read_u32(np, "cache-level", &level))
			break;
		if (level <= levels)
			break;
		if (of_property_read_bool(np, "cache-size"))
			++leaves;
		if (of_property_read_bool(np, "i-cache-size"))
			++leaves;
		if (of_property_read_bool(np, "d-cache-size"))
			++leaves;
		levels = level;
	}

	of_node_put(np);
	this_cpu_ci->num_levels = levels;
	this_cpu_ci->num_leaves = leaves;

	return 0;
}

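/*
 * Populate this CPU's cacheinfo leaves from the devicetree: level 1
 * from the cpu node itself, higher levels from the chain of cache
 * nodes behind it.
 */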
int populate_cache_leaves(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf = this_cpu_ci->info_list;
	struct device_node *np = of_cpu_device_node_get(cpu);
	struct device_node *prev = NULL;
	int levels = 1, level = 1;

	/* Level 1 caches in cpu node */
	fill_cacheinfo(&this_leaf, np, level);

	/* Next level caches in cache nodes */
	prev = np;
	while ((np = of_find_next_cache_node(np))) {
		of_node_put(prev);
		prev = np;

		if (!of_device_is_compatible(np, "cache"))
			break;
		if (of_property_read_u32(np, "cache-level", &level))
			break;
		if (level <= levels)
			break;

		fill_cacheinfo(&this_leaf, np, level);

		levels = level;
	}
	of_node_put(np);

	return 0;
}