David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0-only |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2 | /* |
| 3 | * MIPS cacheinfo support |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 4 | */ |
| 5 | #include <linux/cacheinfo.h> |
| 6 | |
/*
 * Fill one struct cacheinfo leaf from the cpuinfo_mips descriptor named
 * @cache (dcache/icache/scache/tcache), then advance @leaf to the next
 * slot in the info_list.
 *
 * NOTE: this macro relies on a local 'struct cpuinfo_mips *c' being in
 * scope at the expansion site, and it evaluates 'leaf' multiple times —
 * only pass a plain lvalue, never an expression with side effects.
 */
#define populate_cache(cache, leaf, c_level, c_type)		\
do {								\
	leaf->type = c_type;					\
	leaf->level = c_level;					\
	leaf->coherency_line_size = c->cache.linesz;		\
	leaf->number_of_sets = c->cache.sets;			\
	leaf->ways_of_associativity = c->cache.ways;		\
	leaf->size = c->cache.linesz * c->cache.sets *		\
		c->cache.ways;					\
	leaf++;							\
} while (0)
| 19 | |
| 20 | static int __init_cache_level(unsigned int cpu) |
| 21 | { |
| 22 | struct cpuinfo_mips *c = ¤t_cpu_data; |
| 23 | struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); |
| 24 | int levels = 0, leaves = 0; |
| 25 | |
| 26 | /* |
| 27 | * If Dcache is not set, we assume the cache structures |
| 28 | * are not properly initialized. |
| 29 | */ |
| 30 | if (c->dcache.waysize) |
| 31 | levels += 1; |
| 32 | else |
| 33 | return -ENOENT; |
| 34 | |
| 35 | |
| 36 | leaves += (c->icache.waysize) ? 2 : 1; |
| 37 | |
| 38 | if (c->scache.waysize) { |
| 39 | levels++; |
| 40 | leaves++; |
| 41 | } |
| 42 | |
| 43 | if (c->tcache.waysize) { |
| 44 | levels++; |
| 45 | leaves++; |
| 46 | } |
| 47 | |
| 48 | this_cpu_ci->num_levels = levels; |
| 49 | this_cpu_ci->num_leaves = leaves; |
| 50 | return 0; |
| 51 | } |
| 52 | |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame^] | 53 | static void fill_cpumask_siblings(int cpu, cpumask_t *cpu_map) |
| 54 | { |
| 55 | int cpu1; |
| 56 | |
| 57 | for_each_possible_cpu(cpu1) |
| 58 | if (cpus_are_siblings(cpu, cpu1)) |
| 59 | cpumask_set_cpu(cpu1, cpu_map); |
| 60 | } |
| 61 | |
| 62 | static void fill_cpumask_cluster(int cpu, cpumask_t *cpu_map) |
| 63 | { |
| 64 | int cpu1; |
| 65 | int cluster = cpu_cluster(&cpu_data[cpu]); |
| 66 | |
| 67 | for_each_possible_cpu(cpu1) |
| 68 | if (cpu_cluster(&cpu_data[cpu1]) == cluster) |
| 69 | cpumask_set_cpu(cpu1, cpu_map); |
| 70 | } |
| 71 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 72 | static int __populate_cache_leaves(unsigned int cpu) |
| 73 | { |
| 74 | struct cpuinfo_mips *c = ¤t_cpu_data; |
| 75 | struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); |
| 76 | struct cacheinfo *this_leaf = this_cpu_ci->info_list; |
| 77 | |
| 78 | if (c->icache.waysize) { |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame^] | 79 | /* L1 caches are per core */ |
| 80 | fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 81 | populate_cache(dcache, this_leaf, 1, CACHE_TYPE_DATA); |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame^] | 82 | fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 83 | populate_cache(icache, this_leaf, 1, CACHE_TYPE_INST); |
| 84 | } else { |
| 85 | populate_cache(dcache, this_leaf, 1, CACHE_TYPE_UNIFIED); |
| 86 | } |
| 87 | |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame^] | 88 | if (c->scache.waysize) { |
| 89 | /* L2 cache is per cluster */ |
| 90 | fill_cpumask_cluster(cpu, &this_leaf->shared_cpu_map); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 91 | populate_cache(scache, this_leaf, 2, CACHE_TYPE_UNIFIED); |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame^] | 92 | } |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 93 | |
| 94 | if (c->tcache.waysize) |
| 95 | populate_cache(tcache, this_leaf, 3, CACHE_TYPE_UNIFIED); |
| 96 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 97 | this_cpu_ci->cpu_map_populated = true; |
| 98 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 99 | return 0; |
| 100 | } |
| 101 | |
/*
 * Generate the init_cache_level()/populate_cache_leaves() entry points
 * the generic cacheinfo core expects, wrapping the __-prefixed helpers
 * above in an SMP cross-call to the target CPU.
 */
DEFINE_SMP_CALL_CACHE_FUNCTION(init_cache_level)
DEFINE_SMP_CALL_CACHE_FUNCTION(populate_cache_leaves)