// SPDX-License-Identifier: GPL-2.0
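/*
 * Xen HVM guest SMP support: installs Xen-specific smp_ops hooks so that,
 * when a vector callback is available, IPIs and PV spinlocks are routed
 * through Xen event channels instead of the native APIC paths.
 */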
#include <linux/thread_info.h>
#include <asm/smp.h>

#include <xen/events.h>

#include "xen-ops.h"
#include "smp.h"

static void __init xen_hvm_smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != 0);
	native_smp_prepare_boot_cpu();

	/*
	 * Set up vcpu_info for the boot CPU. Secondary CPUs get their
	 * vcpu_info in xen_cpu_up_prepare_hvm().
	 */
	xen_vcpu_setup(0);

	/*
	 * The alternative logic (which patches the unlock/lock paths) runs
	 * before the SMP bootup code is activated, so we need to set this up
	 * before the core kernel is patched. Otherwise only modules would be
	 * patched, not the core code.
	 */
	xen_init_spinlocks();
}

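/*
 * Prepare for SMP bringup: after the native setup, wire up the boot CPU's
 * Xen IPIs and spinlock support, and invalidate the vcpu_id of every
 * secondary CPU until it is actually brought up.
 */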
static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
{
	int cpu;

	native_smp_prepare_cpus(max_cpus);

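	/*
	 * Event-channel based IPIs and PV spinlocks only work when the
	 * hypervisor can inject a vector callback; otherwise the native
	 * APIC paths installed above remain in use.
	 */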
	if (xen_have_vector_callback) {
		WARN_ON(xen_smp_intr_init(0));
		xen_init_lock_cpu(0);
	}

	for_each_possible_cpu(cpu) {
		if (cpu == 0)
			continue;

		/*
		 * Set a default vcpu_id so that a secondary CPU never
		 * accidentally uses CPU 0's; the real id is assigned when
		 * the CPU is brought up in xen_cpu_up_prepare_hvm().
		 */
		per_cpu(xen_vcpu_id, cpu) = XEN_VCPU_ID_INVALID;
	}
}

#ifdef CONFIG_HOTPLUG_CPU
static void xen_hvm_cpu_die(unsigned int cpu)
{
	if (common_cpu_die(cpu) == 0) {
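		/*
		 * common_cpu_die() returns 0 once the CPU is fully offline;
		 * only then is it safe to free its Xen IPIs, spinlock state
		 * and timer.
		 */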
		if (xen_have_vector_callback) {
			xen_smp_intr_free(cpu);
			xen_uninit_lock_cpu(cpu);
			xen_teardown_timer(cpu);
		}
	}
}
#else
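/* Without CPU hotplug support, offlining a CPU must never happen. */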
static void xen_hvm_cpu_die(unsigned int cpu)
{
	BUG();
}
#endif

void __init xen_hvm_smp_init(void)
{
	smp_ops.smp_prepare_boot_cpu = xen_hvm_smp_prepare_boot_cpu;
	smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
	smp_ops.smp_cpus_done = xen_smp_cpus_done;
	smp_ops.cpu_die = xen_hvm_cpu_die;

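	/*
	 * Without a vector callback the Xen IPI and PV spinlock paths
	 * cannot be used: keep the native IPI hooks and opt out of
	 * paravirt spinlocks.
	 */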
	if (!xen_have_vector_callback) {
#ifdef CONFIG_PARAVIRT_SPINLOCKS
		nopvspin = true;
#endif
		return;
	}

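	/* A vector callback is available: deliver IPIs via Xen event channels. */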
	smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
	smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
	smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
}