// SPDX-License-Identifier: GPL-2.0+
/*
 * Hygon Processor Support for Linux
 *
 * Copyright (C) 2018 Chengdu Haiguang IC Design Co., Ltd.
 *
 * Author: Pu Wen <puwen@hygon.cn>
 */
9#include <linux/io.h>
10
11#include <asm/cpu.h>
12#include <asm/smp.h>
13#include <asm/cacheinfo.h>
14#include <asm/spec-ctrl.h>
15#include <asm/delay.h>
16#ifdef CONFIG_X86_64
17# include <asm/set_memory.h>
18#endif
19
20#include "cpu.h"
21
22#define APICID_SOCKET_ID_BIT 6
23
24/*
25 * nodes_per_socket: Stores the number of nodes per socket.
26 * Refer to CPUID Fn8000_001E_ECX Node Identifiers[10:8]
27 */
28static u32 nodes_per_socket = 1;
29
#ifdef CONFIG_NUMA
/*
 * To workaround broken NUMA config. Read the comment in
 * srat_detect_node().
 *
 * Returns the online NUMA node of the nearest APIC ID (searching
 * downwards first, then upwards), falling back to the first online
 * node if none is found.
 */
static int nearby_node(int apicid)
{
	int i, node;

	/* Search below the given APIC ID for an online node. */
	for (i = apicid - 1; i >= 0; i--) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	/* Then search above it. */
	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	return first_node(node_online_map); /* Shouldn't happen */
}
#endif
52
53static void hygon_get_topology_early(struct cpuinfo_x86 *c)
54{
55 if (cpu_has(c, X86_FEATURE_TOPOEXT))
56 smp_num_siblings = ((cpuid_ebx(0x8000001e) >> 8) & 0xff) + 1;
57}
58
59/*
60 * Fixup core topology information for
61 * (1) Hygon multi-node processors
62 * Assumption: Number of cores in each internal node is the same.
63 * (2) Hygon processors supporting compute units
64 */
65static void hygon_get_topology(struct cpuinfo_x86 *c)
66{
David Brazdil0f672f62019-12-10 10:32:29 +000067 int cpu = smp_processor_id();
68
69 /* get information required for multi-node processors */
70 if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
71 int err;
72 u32 eax, ebx, ecx, edx;
73
74 cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
75
Olivier Deprez0e641232021-09-23 10:07:05 +020076 c->cpu_die_id = ecx & 0xff;
David Brazdil0f672f62019-12-10 10:32:29 +000077
78 c->cpu_core_id = ebx & 0xff;
79
80 if (smp_num_siblings > 1)
81 c->x86_max_cores /= smp_num_siblings;
82
83 /*
84 * In case leaf B is available, use it to derive
85 * topology information.
86 */
87 err = detect_extended_topology(c);
88 if (!err)
89 c->x86_coreid_bits = get_count_order(c->x86_max_cores);
90
91 /* Socket ID is ApicId[6] for these processors. */
92 c->phys_proc_id = c->apicid >> APICID_SOCKET_ID_BIT;
93
Olivier Deprez0e641232021-09-23 10:07:05 +020094 cacheinfo_hygon_init_llc_id(c, cpu);
David Brazdil0f672f62019-12-10 10:32:29 +000095 } else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
96 u64 value;
97
98 rdmsrl(MSR_FAM10H_NODE_ID, value);
Olivier Deprez0e641232021-09-23 10:07:05 +020099 c->cpu_die_id = value & 7;
David Brazdil0f672f62019-12-10 10:32:29 +0000100
Olivier Deprez0e641232021-09-23 10:07:05 +0200101 per_cpu(cpu_llc_id, cpu) = c->cpu_die_id;
David Brazdil0f672f62019-12-10 10:32:29 +0000102 } else
103 return;
104
105 if (nodes_per_socket > 1)
106 set_cpu_cap(c, X86_FEATURE_AMD_DCM);
107}
108
109/*
110 * On Hygon setup the lower bits of the APIC id distinguish the cores.
111 * Assumes number of cores is a power of two.
112 */
113static void hygon_detect_cmp(struct cpuinfo_x86 *c)
114{
115 unsigned int bits;
116 int cpu = smp_processor_id();
117
118 bits = c->x86_coreid_bits;
119 /* Low order bits define the core id (index of core in socket) */
120 c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
121 /* Convert the initial APIC ID into the socket ID */
122 c->phys_proc_id = c->initial_apicid >> bits;
123 /* use socket ID also for last level cache */
Olivier Deprez0e641232021-09-23 10:07:05 +0200124 per_cpu(cpu_llc_id, cpu) = c->cpu_die_id = c->phys_proc_id;
David Brazdil0f672f62019-12-10 10:32:29 +0000125}
126
/*
 * Bind the current CPU to a NUMA node, repairing broken firmware
 * APIC-ID-to-node mappings where necessary (see the comment below).
 * No-op unless CONFIG_NUMA is enabled.
 */
static void srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
	int cpu = smp_processor_id();
	int node;
	unsigned int apicid = c->apicid;

	node = numa_cpu_node(cpu);
	if (node == NUMA_NO_NODE)
		node = per_cpu(cpu_llc_id, cpu);

	/*
	 * On multi-fabric platform (e.g. Numascale NumaChip) a
	 * platform-specific handler needs to be called to fixup some
	 * IDs of the CPU.
	 */
	if (x86_cpuinit.fixup_cpu_id)
		x86_cpuinit.fixup_cpu_id(c, node);

	if (!node_online(node)) {
		/*
		 * Two possibilities here:
		 *
		 * - The CPU is missing memory and no node was created. In
		 *   that case try picking one from a nearby CPU.
		 *
		 * - The APIC IDs differ from the HyperTransport node IDs.
		 *   Assume they are all increased by a constant offset, but
		 *   in the same order as the HT nodeids. If that doesn't
		 *   result in a usable node fall back to the path for the
		 *   previous case.
		 *
		 * This workaround operates directly on the mapping between
		 * APIC ID and NUMA node, assuming certain relationship
		 * between APIC ID, HT node ID and NUMA topology. As going
		 * through CPU mapping may alter the outcome, directly
		 * access __apicid_to_node[].
		 */
		int ht_nodeid = c->initial_apicid;

		if (__apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
			node = __apicid_to_node[ht_nodeid];
		/* Pick a nearby node */
		if (!node_online(node))
			node = nearby_node(apicid);
	}
	numa_set_node(cpu, node);
#endif
}
176
/*
 * Determine the number of cores per socket (x86_max_cores) and the
 * width of the core-ID field (x86_coreid_bits) from CPUID Fn8000_0008
 * ECX. Only meaningful on SMP kernels.
 */
static void early_init_hygon_mc(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	unsigned int bits, ecx;

	/* Multi core CPU? */
	if (c->extended_cpuid_level < 0x80000008)
		return;

	ecx = cpuid_ecx(0x80000008);

	/* ECX[7:0] is the core count minus one. */
	c->x86_max_cores = (ecx & 0xff) + 1;

	/* CPU telling us the core id bits shift? */
	bits = (ecx >> 12) & 0xF;

	/* Otherwise recompute: smallest power of two covering the cores. */
	if (bits == 0) {
		while ((1 << bits) < c->x86_max_cores)
			bits++;
	}

	c->x86_coreid_bits = bits;
#endif
}
202
203static void bsp_init_hygon(struct cpuinfo_x86 *c)
204{
205#ifdef CONFIG_X86_64
206 unsigned long long tseg;
207
208 /*
209 * Split up direct mapping around the TSEG SMM area.
210 * Don't do it for gbpages because there seems very little
211 * benefit in doing so.
212 */
213 if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
214 unsigned long pfn = tseg >> PAGE_SHIFT;
215
216 pr_debug("tseg: %010llx\n", tseg);
217 if (pfn_range_is_mapped(pfn, pfn + 1))
218 set_memory_4k((unsigned long)__va(tseg), 1);
219 }
220#endif
221
222 if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
223 u64 val;
224
225 rdmsrl(MSR_K7_HWCR, val);
226 if (!(val & BIT(24)))
227 pr_warn(FW_BUG "TSC doesn't count with P0 frequency!\n");
228 }
229
230 if (cpu_has(c, X86_FEATURE_MWAITX))
231 use_mwaitx_delay();
232
233 if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
234 u32 ecx;
235
236 ecx = cpuid_ecx(0x8000001e);
237 nodes_per_socket = ((ecx >> 8) & 7) + 1;
238 } else if (boot_cpu_has(X86_FEATURE_NODEID_MSR)) {
239 u64 value;
240
241 rdmsrl(MSR_FAM10H_NODE_ID, value);
242 nodes_per_socket = ((value >> 3) & 7) + 1;
243 }
244
245 if (!boot_cpu_has(X86_FEATURE_AMD_SSBD) &&
246 !boot_cpu_has(X86_FEATURE_VIRT_SSBD)) {
247 /*
248 * Try to cache the base value so further operations can
249 * avoid RMW. If that faults, do not enable SSBD.
250 */
251 if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
252 setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
253 setup_force_cpu_cap(X86_FEATURE_SSBD);
254 x86_amd_ls_cfg_ssbd_mask = 1ULL << 10;
255 }
256 }
257}
258
259static void early_init_hygon(struct cpuinfo_x86 *c)
260{
261 u32 dummy;
262
263 early_init_hygon_mc(c);
264
265 set_cpu_cap(c, X86_FEATURE_K8);
266
267 rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
268
269 /*
270 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
271 * with P/T states and does not stop in deep C-states
272 */
273 if (c->x86_power & (1 << 8)) {
274 set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
275 set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
276 }
277
278 /* Bit 12 of 8000_0007 edx is accumulated power mechanism. */
279 if (c->x86_power & BIT(12))
280 set_cpu_cap(c, X86_FEATURE_ACC_POWER);
281
282#ifdef CONFIG_X86_64
283 set_cpu_cap(c, X86_FEATURE_SYSCALL32);
284#endif
285
286#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
287 /*
288 * ApicID can always be treated as an 8-bit value for Hygon APIC So, we
289 * can safely set X86_FEATURE_EXTD_APICID unconditionally.
290 */
291 if (boot_cpu_has(X86_FEATURE_APIC))
292 set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
293#endif
294
295 /*
296 * This is only needed to tell the kernel whether to use VMCALL
297 * and VMMCALL. VMMCALL is never executed except under virt, so
298 * we can set it unconditionally.
299 */
300 set_cpu_cap(c, X86_FEATURE_VMMCALL);
301
302 hygon_get_topology_early(c);
303}
304
305static void init_hygon(struct cpuinfo_x86 *c)
306{
307 early_init_hygon(c);
308
309 /*
310 * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
311 * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
312 */
313 clear_cpu_cap(c, 0*32+31);
314
315 set_cpu_cap(c, X86_FEATURE_REP_GOOD);
316
317 /* get apicid instead of initial apic id from cpuid */
318 c->apicid = hard_smp_processor_id();
319
320 set_cpu_cap(c, X86_FEATURE_ZEN);
321 set_cpu_cap(c, X86_FEATURE_CPB);
322
323 cpu_detect_cache_sizes(c);
324
325 hygon_detect_cmp(c);
326 hygon_get_topology(c);
327 srat_detect_node(c);
328
329 init_hygon_cacheinfo(c);
330
331 if (cpu_has(c, X86_FEATURE_XMM2)) {
332 /*
333 * Use LFENCE for execution serialization. On families which
334 * don't have that MSR, LFENCE is already serializing.
335 * msr_set_bit() uses the safe accessors, too, even if the MSR
336 * is not present.
337 */
338 msr_set_bit(MSR_F10H_DECFG,
339 MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT);
340
341 /* A serializing LFENCE stops RDTSC speculation */
342 set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
343 }
344
345 /*
346 * Hygon processors have APIC timer running in deep C states.
347 */
348 set_cpu_cap(c, X86_FEATURE_ARAT);
349
350 /* Hygon CPUs don't reset SS attributes on SYSRET, Xen does. */
351 if (!cpu_has(c, X86_FEATURE_XENPV))
352 set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
353}
354
355static void cpu_detect_tlb_hygon(struct cpuinfo_x86 *c)
356{
357 u32 ebx, eax, ecx, edx;
358 u16 mask = 0xfff;
359
360 if (c->extended_cpuid_level < 0x80000006)
361 return;
362
363 cpuid(0x80000006, &eax, &ebx, &ecx, &edx);
364
365 tlb_lld_4k[ENTRIES] = (ebx >> 16) & mask;
366 tlb_lli_4k[ENTRIES] = ebx & mask;
367
368 /* Handle DTLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
369 if (!((eax >> 16) & mask))
370 tlb_lld_2m[ENTRIES] = (cpuid_eax(0x80000005) >> 16) & 0xff;
371 else
372 tlb_lld_2m[ENTRIES] = (eax >> 16) & mask;
373
374 /* a 4M entry uses two 2M entries */
375 tlb_lld_4m[ENTRIES] = tlb_lld_2m[ENTRIES] >> 1;
376
377 /* Handle ITLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
378 if (!(eax & mask)) {
379 cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
380 tlb_lli_2m[ENTRIES] = eax & 0xff;
381 } else
382 tlb_lli_2m[ENTRIES] = eax & mask;
383
384 tlb_lli_4m[ENTRIES] = tlb_lli_2m[ENTRIES] >> 1;
385}
386
387static const struct cpu_dev hygon_cpu_dev = {
388 .c_vendor = "Hygon",
389 .c_ident = { "HygonGenuine" },
390 .c_early_init = early_init_hygon,
391 .c_detect_tlb = cpu_detect_tlb_hygon,
392 .c_bsp_init = bsp_init_hygon,
393 .c_init = init_hygon,
394 .c_x86_vendor = X86_VENDOR_HYGON,
395};
396
397cpu_dev_register(hygon_cpu_dev);