/*
 * Copyright (C) 2014 Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
 * Copyright (C) 2017 Stafford Horne <shorne@gmail.com>
 *
 * Based on arm64 and arc implementations
 * Copyright (C) 2013 ARM Ltd.
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/irq.h>
#include <asm/cpuinfo.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/time.h>

static void (*smp_cross_call)(const struct cpumask *, unsigned int);

unsigned long secondary_release = -1;
struct thread_info *secondary_thread_info;

enum ipi_msg_type {
	IPI_WAKEUP,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CALL_FUNC_SINGLE,
};

static DEFINE_SPINLOCK(boot_lock);

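/*
 * Wake one secondary CPU: record which CPU is being released in
 * secondary_release and send it an IPI_WAKEUP.
 */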
static void boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	/*
	 * set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	secondary_release = cpu;
	smp_cross_call(cpumask_of(cpu), IPI_WAKEUP);

	/*
	 * now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
	spin_unlock(&boot_lock);
}

void __init smp_prepare_boot_cpu(void)
{
}

void __init smp_init_cpus(void)
{
	int i;

	for (i = 0; i < NR_CPUS; i++)
		set_cpu_possible(i, true);
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int i;

	/*
	 * Initialise the present map, which describes the set of CPUs
	 * actually populated at the present time.
	 */
	for (i = 0; i < max_cpus; i++)
		set_cpu_present(i, true);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

static DECLARE_COMPLETION(cpu_running);

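/*
 * Bring CPU 'cpu' online with the given idle task.  Waits up to one
 * second for the secondary to signal that it has started.
 */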
int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	if (smp_cross_call == NULL) {
		pr_warn("CPU%u: failed to start, IPI controller missing\n",
			cpu);
		return -EIO;
	}

	secondary_thread_info = task_thread_info(idle);
	current_pgd[cpu] = init_mm.pgd;

	boot_secondary(cpu, idle);
	if (!wait_for_completion_timeout(&cpu_running,
					msecs_to_jiffies(1000))) {
		pr_crit("CPU%u: failed to start\n", cpu);
		return -EIO;
	}
	synchronise_count_master(cpu);

	return 0;
}

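/*
 * C entry point for a secondary CPU once it has been woken by
 * boot_secondary().
 */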
asmlinkage __init void secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();
	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	mmgrab(mm);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	pr_info("CPU%u: Booted secondary processor\n", cpu);

	setup_cpuinfo();
	openrisc_clockevent_init();

	notify_cpu_starting(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue
	 */
	complete(&cpu_running);

	synchronise_count_slave(cpu);
	set_cpu_online(cpu, true);

	local_irq_enable();
	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

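/* Dispatch an incoming inter-processor interrupt to the matching handler. */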
void handle_IPI(unsigned int ipi_msg)
{
	unsigned int cpu = smp_processor_id();

	switch (ipi_msg) {
	case IPI_WAKEUP:
		break;

	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		generic_smp_call_function_interrupt();
		break;

	case IPI_CALL_FUNC_SINGLE:
		generic_smp_call_function_single_interrupt();
		break;

	default:
		WARN(1, "CPU%u: Unknown IPI message 0x%x\n", cpu, ipi_msg);
		break;
	}
}

void smp_send_reschedule(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

static void stop_this_cpu(void *dummy)
{
	/* Remove this CPU */
	set_cpu_online(smp_processor_id(), false);

	local_irq_disable();
	/* CPU Doze */
	if (mfspr(SPR_UPR) & SPR_UPR_PMP)
		mtspr(SPR_PMR, mfspr(SPR_PMR) | SPR_PMR_DME);
	/* If that didn't work, infinite loop */
	while (1)
		;
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}

/* not supported, yet */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

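/* Register the platform's IPI delivery function. */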
void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
	smp_cross_call = fn;
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_CALL_FUNC);
}

/* TLB flush operations - Performed on each CPU */
static inline void ipi_flush_tlb_all(void *ignored)
{
	local_flush_tlb_all();
}

static inline void ipi_flush_tlb_mm(void *info)
{
	struct mm_struct *mm = (struct mm_struct *)info;

	local_flush_tlb_mm(mm);
}

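/*
 * Flush the TLB entries for 'mm' on every CPU in 'cmask', using a purely
 * local flush when the current CPU is the only one in the mask.
 */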
static void smp_flush_tlb_mm(struct cpumask *cmask, struct mm_struct *mm)
{
	unsigned int cpuid;

	if (cpumask_empty(cmask))
		return;

	cpuid = get_cpu();

	if (cpumask_any_but(cmask, cpuid) >= nr_cpu_ids) {
		/* local cpu is the only cpu present in cpumask */
		local_flush_tlb_mm(mm);
	} else {
		on_each_cpu_mask(cmask, ipi_flush_tlb_mm, mm, 1);
	}
	put_cpu();
}

struct flush_tlb_data {
	unsigned long addr1;
	unsigned long addr2;
};

static inline void ipi_flush_tlb_page(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_page(NULL, fd->addr1);
}

static inline void ipi_flush_tlb_range(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_range(NULL, fd->addr1, fd->addr2);
}

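/*
 * Flush a virtual address range on every CPU in 'cmask'; ranges that fit
 * in a single page are handled as page flushes.
 */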
static void smp_flush_tlb_range(const struct cpumask *cmask, unsigned long start,
				unsigned long end)
{
	unsigned int cpuid;

	if (cpumask_empty(cmask))
		return;

	cpuid = get_cpu();

	if (cpumask_any_but(cmask, cpuid) >= nr_cpu_ids) {
		/* local cpu is the only cpu present in cpumask */
		if ((end - start) <= PAGE_SIZE)
			local_flush_tlb_page(NULL, start);
		else
			local_flush_tlb_range(NULL, start, end);
	} else {
		struct flush_tlb_data fd;

		fd.addr1 = start;
		fd.addr2 = end;

		if ((end - start) <= PAGE_SIZE)
			on_each_cpu_mask(cmask, ipi_flush_tlb_page, &fd, 1);
		else
			on_each_cpu_mask(cmask, ipi_flush_tlb_range, &fd, 1);
	}
	put_cpu();
}

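/* Generic TLB flush entry points called by the core memory management code. */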
void flush_tlb_all(void)
{
	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	smp_flush_tlb_mm(mm_cpumask(mm), mm);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	smp_flush_tlb_range(mm_cpumask(vma->vm_mm), uaddr, uaddr + PAGE_SIZE);
}

void flush_tlb_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
{
	const struct cpumask *cmask = vma ? mm_cpumask(vma->vm_mm)
					  : cpu_online_mask;
	smp_flush_tlb_range(cmask, start, end);
}

/* Instruction cache invalidate - performed on each cpu */
static void ipi_icache_page_inv(void *arg)
{
	struct page *page = arg;

	local_icache_page_inv(page);
}

void smp_icache_page_inv(struct page *page)
{
	on_each_cpu(ipi_icache_page_inv, page, 1);
}
EXPORT_SYMBOL(smp_icache_page_inv);