// SPDX-License-Identifier: GPL-2.0
/*
 * TSC frequency enumeration via MSR
 *
 * Copyright (C) 2013, 2018 Intel Corporation
 * Author: Bin Gao <bin.gao@intel.com>
 */

#include <linux/kernel.h>

#include <asm/apic.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/msr.h>
#include <asm/param.h>
#include <asm/tsc.h>

#define MAX_NUM_FREQS	9

/*
 * If MSR_PERF_STAT[31] is set, the maximum resolved bus ratio can be
 * read in MSR_PLATFORM_ID[12:8], otherwise in MSR_PERF_STAT[44:40].
 * Unfortunately some Intel Atom SoCs aren't quite compliant to this,
 * so we need to differentiate the SoC families manually. This is what
 * the msr_plat field does.
 */
struct freq_desc {
	u8 msr_plat;	/* 1: use MSR_PLATFORM_INFO, 0: MSR_IA32_PERF_STATUS */
	u32 freqs[MAX_NUM_FREQS];
};

/*
 * Penwell and Clovertrail use spread spectrum clocking, so the
 * frequencies listed here are not exactly the nominal values the
 * SDM documents for these MSR encodings.
 */
static const struct freq_desc freq_desc_pnw = {
	0, { 0, 0, 0, 0, 0, 99840, 0, 83200 }
};

static const struct freq_desc freq_desc_clv = {
	0, { 0, 133200, 0, 0, 0, 99840, 0, 83200 }
};

static const struct freq_desc freq_desc_byt = {
	1, { 83300, 100000, 133300, 116700, 80000, 0, 0, 0 }
};

static const struct freq_desc freq_desc_cht = {
	1, { 83300, 100000, 133300, 116700, 80000, 93300, 90000, 88900, 87500 }
};

static const struct freq_desc freq_desc_tng = {
	1, { 0, 100000, 133300, 0, 0, 0, 0, 0 }
};

static const struct freq_desc freq_desc_ann = {
	1, { 83300, 100000, 133300, 100000, 0, 0, 0, 0 }
};

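/*
 * Match table tying each supported Atom model to its freq_desc above;
 * x86_match_cpu() returns the matching entry, and driver_data carries
 * the freq_desc pointer back out in cpu_khz_from_msr() below.
 */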
static const struct x86_cpu_id tsc_msr_cpu_ids[] = {
	INTEL_CPU_FAM6(ATOM_PENWELL, freq_desc_pnw),
	INTEL_CPU_FAM6(ATOM_CLOVERVIEW, freq_desc_clv),
	INTEL_CPU_FAM6(ATOM_SILVERMONT1, freq_desc_byt),
	INTEL_CPU_FAM6(ATOM_AIRMONT, freq_desc_cht),
	INTEL_CPU_FAM6(ATOM_MERRIFIELD, freq_desc_tng),
	INTEL_CPU_FAM6(ATOM_MOOREFIELD, freq_desc_ann),
	{}
};

/*
 * MSR-based CPU/TSC frequency discovery for certain CPUs.
 *
 * Set global "lapic_timer_frequency" to bus_clock_cycles/jiffy.
 * Return the processor base frequency in KHz, or 0 on failure.
 */
unsigned long cpu_khz_from_msr(void)
{
	u32 lo, hi, ratio, freq;
	const struct freq_desc *freq_desc;
	const struct x86_cpu_id *id;
	unsigned long res;

	id = x86_match_cpu(tsc_msr_cpu_ids);
	if (!id)
		return 0;

	freq_desc = (struct freq_desc *)id->driver_data;
	if (freq_desc->msr_plat) {
		rdmsr(MSR_PLATFORM_INFO, lo, hi);
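		/* Maximum non-turbo bus ratio: MSR_PLATFORM_INFO[15:8] */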
		ratio = (lo >> 8) & 0xff;
	} else {
		rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
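		/* Maximum resolved bus ratio: MSR_PERF_STAT[44:40], i.e. bits 12:8 of the high dword */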
		ratio = (hi >> 8) & 0x1f;
	}

	/* Get FSB FREQ ID */
	rdmsr(MSR_FSB_FREQ, lo, hi);

	/* Map CPU reference clock freq ID (0-7) to CPU reference clock freq (KHz) */
	freq = freq_desc->freqs[lo & 0x7];

	/* TSC frequency = maximum resolved freq * maximum resolved bus ratio */
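	/*
	 * E.g. (illustrative numbers only): a Bay Trail part reporting FSB
	 * FREQ ID 1 (100000 KHz) with a maximum bus ratio of 16 resolves to
	 * 100000 * 16 = 1600000 KHz, i.e. a 1.6 GHz TSC.
	 */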
	res = freq * ratio;

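	/*
	 * freq is the reference clock in KHz, so freq * 1000 is the bus
	 * clock in Hz; dividing by HZ gives bus clock cycles per jiffy
	 * (e.g. a 100000 KHz reference clock with HZ=250 gives 400000
	 * cycles per tick).
	 */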
#ifdef CONFIG_X86_LOCAL_APIC
	lapic_timer_frequency = (freq * 1000) / HZ;
#endif

	/*
	 * TSC frequency determined by MSR is always considered "known"
	 * because it is reported by HW.
	 * Moreover, on MSR capable platforms PIT/HPET is generally not
	 * available, so calibration won't work at all.
	 */
	setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);

	/*
	 * Unfortunately there is no way for hardware to tell whether the
	 * TSC is reliable. We were told by the silicon design team that
	 * the TSC on Atom SoCs is always "reliable". The TSC is also the
	 * only reliable clocksource on these SoCs (HPET is either not
	 * present or not functional), so mark the TSC reliable, which
	 * removes the requirement for a watchdog clocksource.
	 */
	setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE);

	return res;
}