// SPDX-License-Identifier: GPL-2.0

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/percpu.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/mm.h>
#include <linux/sched/hotplug.h>
#include <asm/irq.h>
#include <asm/traps.h>
#include <asm/sections.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>

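/*
 * Per-cpu bitmask of pending IPI requests. Senders set bits with set_bit();
 * handle_ipi() drains the whole mask atomically with xchg().
 */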
struct ipi_data_struct {
	unsigned long bits ____cacheline_aligned;
};
static DEFINE_PER_CPU(struct ipi_data_struct, ipi_data);

enum ipi_message_type {
	IPI_EMPTY,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_MAX
};

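/*
 * IPI interrupt handler: repeatedly drain this CPU's pending bitmask and
 * dispatch each request until no more work is posted.
 */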
static irqreturn_t handle_ipi(int irq, void *dev)
{
	while (true) {
		unsigned long ops;

		ops = xchg(&this_cpu_ptr(&ipi_data)->bits, 0);
		if (ops == 0)
			return IRQ_HANDLED;

		if (ops & (1 << IPI_RESCHEDULE))
			scheduler_ipi();

		if (ops & (1 << IPI_CALL_FUNC))
			generic_smp_call_function_interrupt();

		BUG_ON((ops >> IPI_MAX) != 0);
	}

	return IRQ_HANDLED;
}

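/*
 * The machine-specific IPI trigger is supplied at runtime by the interrupt
 * controller driver through set_send_ipi(); only the first registration is
 * honoured.
 */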
static void (*send_arch_ipi)(const struct cpumask *mask);

static int ipi_irq;
void __init set_send_ipi(void (*func)(const struct cpumask *mask), int irq)
{
	if (send_arch_ipi)
		return;

	send_arch_ipi = func;
	ipi_irq = irq;
}

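/*
 * Post @operation into each target CPU's pending bitmask, then kick the
 * targets through the registered arch IPI hook. The smp_mb() orders the
 * set_bit() stores before the hardware IPI is raised.
 */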
static void
send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation)
{
	int i;

	for_each_cpu(i, to_whom)
		set_bit(operation, &per_cpu_ptr(&ipi_data, i)->bits);

	smp_mb();
	send_arch_ipi(to_whom);
}

void arch_send_call_function_ipi_mask(struct cpumask *mask)
{
	send_ipi_message(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
}

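/*
 * smp_send_stop() parks every CPU in the tight loop in ipi_stop(); it is
 * used on the shutdown and panic paths.
 */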
static void ipi_stop(void *unused)
{
	while (1);
}

void smp_send_stop(void)
{
	on_each_cpu(ipi_stop, NULL, 1);
}

void smp_send_reschedule(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
}

void __init smp_prepare_boot_cpu(void)
{
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
}

static int ipi_dummy_dev;

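/*
 * Request the per-cpu IPI interrupt registered via set_send_ipi() and enable
 * it on the boot CPU; secondaries enable their copy in csky_start_secondary().
 */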
void __init setup_smp_ipi(void)
{
	int rc;

	if (ipi_irq == 0)
		panic("%s IRQ mapping failed\n", __func__);

	rc = request_percpu_irq(ipi_irq, handle_ipi, "IPI Interrupt",
				&ipi_dummy_dev);
	if (rc)
		panic("%s IRQ request failed\n", __func__);

	enable_percpu_irq(ipi_irq, 0);
}

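/*
 * Walk the devicetree cpu nodes and mark every available CPU as possible
 * and present so the core SMP code can bring it up later.
 */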
void __init setup_smp(void)
{
	struct device_node *node = NULL;
	unsigned int cpu;

	for_each_of_cpu_node(node) {
		if (!of_device_is_available(node))
			continue;

		if (of_property_read_u32(node, "reg", &cpu))
			continue;

		if (cpu >= NR_CPUS)
			continue;

		set_cpu_possible(cpu, true);
		set_cpu_present(cpu, true);
	}
}

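/*
 * Boot parameters handed to a secondary CPU: its idle-thread stack pointer
 * and copies of the boot CPU's cr31 (hint) and cr18 (cache control)
 * registers. They are consumed by the secondary's low-level entry code
 * (_start_smp_secondary) and by csky_start_secondary().
 */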
extern void _start_smp_secondary(void);

volatile unsigned int secondary_hint;
volatile unsigned int secondary_ccr;
volatile unsigned int secondary_stack;

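/*
 * Bring up one secondary CPU: publish its boot parameters, flush them out of
 * the data cache, release the CPU from reset (or re-kick it with an IPI if it
 * is already out of reset), then wait for it to mark itself online.
 */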
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	unsigned long mask = 1 << cpu;

	secondary_stack =
		(unsigned int) task_stack_page(tidle) + THREAD_SIZE - 8;
	secondary_hint = mfcr("cr31");
	secondary_ccr = mfcr("cr18");

	/*
	 * The secondary CPU is still held in reset, so flush the boot
	 * parameters out of the data cache; the secondary reads them from
	 * memory in csky_start_secondary().
	 */
	mtcr("cr17", 0x22);

	if (mask & mfcr("cr<29, 0>")) {
		send_arch_ipi(cpumask_of(cpu));
	} else {
		/* Enable the CPU in the SMP reset control register */
		mask |= mfcr("cr<29, 0>");
		mtcr("cr<29, 0>", mask);
	}

	/* Wait for the CPU to come online */
	while (!cpu_online(cpu));

	secondary_stack = 0;

	return 0;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

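/*
 * Entry point for a secondary CPU once it is running C code: restore the
 * saved control registers, set up the exception vector and MMU, take a
 * reference on init_mm, mark the CPU online and enter the idle loop.
 */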
void csky_start_secondary(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	mtcr("cr31", secondary_hint);
	mtcr("cr18", secondary_ccr);

	mtcr("vbr", vec_base);

	flush_tlb_all();
	write_mmu_pagemask(0);
	TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir);
	TLBMISS_HANDLER_SETUP_PGD_KERNEL(swapper_pg_dir);

#ifdef CONFIG_CPU_HAS_FPU
	init_fpu();
#endif

	enable_percpu_irq(ipi_irq, 0);

	mmget(mm);
	mmgrab(mm);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	notify_cpu_starting(cpu);
	set_cpu_online(cpu, true);

	pr_info("CPU%u Online: %s...\n", cpu, __func__);

	local_irq_enable();
	preempt_disable();
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

#ifdef CONFIG_HOTPLUG_CPU
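/*
 * Take the current CPU offline: clear its online bit, migrate its interrupts
 * away and drop it from every task's mm_cpumask. Runs on the CPU that is
 * going down.
 */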
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	set_cpu_online(cpu, false);

	irq_migrate_all_off_this_cpu();

	clear_tasks_mm_cpumask(cpu);

	return 0;
}

void __cpu_die(unsigned int cpu)
{
	if (!cpu_wait_death(cpu, 5)) {
		pr_crit("CPU%u: shutdown failed\n", cpu);
		return;
	}
	pr_notice("CPU%u: shutdown\n", cpu);
}

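/*
 * Idle-loop exit for a dead CPU: report death, then spin in arch_cpu_idle()
 * until __cpu_up() publishes a new secondary_stack, at which point the CPU
 * restarts itself through csky_start_secondary().
 */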
void arch_cpu_idle_dead(void)
{
	idle_task_exit();

	cpu_report_death();

	while (!secondary_stack)
		arch_cpu_idle();

	local_irq_disable();

	asm volatile(
		"mov	sp, %0\n"
		"mov	r8, %0\n"
		"jmpi	csky_start_secondary"
		:
		: "r" (secondary_stack));
}
#endif