// SPDX-License-Identifier: GPL-2.0
/*
 * arch/sh/kernel/smp.c
 *
 * SMP support for the SuperH processors.
 *
 * Copyright (C) 2002 - 2010 Paul Mundt
 * Copyright (C) 2006 - 2007 Akio Idehara
 */
#include <linux/err.h>
#include <linux/cache.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/sched/mm.h>
#include <linux/sched/hotplug.h>
#include <linux/atomic.h>
#include <linux/clockchips.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/setup.h>

int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */

struct plat_smp_ops *mp_ops = NULL;

/* State of each CPU */
DEFINE_PER_CPU(int, cpu_state) = { 0 };

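/*
 * Platform code registers its SMP operations here; a warning is printed
 * if a previously registered set is being overridden.
 */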
void register_smp_ops(struct plat_smp_ops *ops)
{
	if (mp_ops)
		printk(KERN_WARNING "Overriding previously set SMP ops\n");

	mp_ops = ops;
}

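/*
 * Copy the boot CPU's cpuinfo into this CPU's slot and record the
 * loops_per_jiffy value this CPU calibrated for itself.
 */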
static inline void smp_store_cpu_info(unsigned int cpu)
{
	struct sh_cpuinfo *c = cpu_data + cpu;

	memcpy(c, &boot_cpu_data, sizeof(struct sh_cpuinfo));

	c->loops_per_jiffy = loops_per_jiffy;
}

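/*
 * Runs on the boot CPU before any secondaries are started: set up the
 * init_mm context, record our CPU number, and let the platform code do
 * its own preparation.
 */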
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu = smp_processor_id();

	init_new_context(current, &init_mm);
	current_thread_info()->cpu = cpu;
	mp_ops->prepare_cpus(max_cpus);

#ifndef CONFIG_HOTPLUG_CPU
	init_cpu_present(cpu_possible_mask);
#endif
}

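/*
 * Record the boot CPU in the physical<->logical maps and mark it
 * online/possible before the rest of SMP bringup runs.
 */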
void __init smp_prepare_boot_cpu(void)
{
	unsigned int cpu = smp_processor_id();

	__cpu_number_map[0] = cpu;
	__cpu_logical_map[0] = cpu;

	set_cpu_online(cpu, true);
	set_cpu_possible(cpu, true);

	per_cpu(cpu_state, cpu) = CPU_ONLINE;
}

#ifdef CONFIG_HOTPLUG_CPU
void native_cpu_die(unsigned int cpu)
{
	unsigned int i;

	for (i = 0; i < 10; i++) {
		smp_rmb();
		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
			if (system_state == SYSTEM_RUNNING)
				pr_info("CPU %u is now offline\n", cpu);

			return;
		}

		msleep(100);
	}

	pr_err("CPU %u didn't die...\n", cpu);
}

int native_cpu_disable(unsigned int cpu)
{
	return cpu == 0 ? -EPERM : 0;
}

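/*
 * Common teardown for a CPU going offline: drop the idle task and the
 * per-CPU IRQ context, then advertise CPU_DEAD (see native_cpu_die())
 * with interrupts left disabled.
 */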
void play_dead_common(void)
{
	idle_task_exit();
	irq_ctx_exit(raw_smp_processor_id());
	mb();

	__this_cpu_write(cpu_state, CPU_DEAD);
	local_irq_disable();
}

void native_play_dead(void)
{
	play_dead_common();
}

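/*
 * Take the calling CPU out of service: ask the platform to disable it,
 * mark it offline, migrate its IRQs away and flush caches/TLBs so no
 * stale user mappings survive.
 */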
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	int ret;

	ret = mp_ops->cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline. Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	migrate_irqs();

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 */
	flush_cache_all();
#ifdef CONFIG_MMU
	local_flush_tlb_all();
#endif

	clear_tasks_mm_cpumask(cpu);

	return 0;
}
#else /* ... !CONFIG_HOTPLUG_CPU */
int native_cpu_disable(unsigned int cpu)
{
	return -ENOSYS;
}

void native_cpu_die(unsigned int cpu)
{
	/* We said "no" in __cpu_disable */
	BUG();
}

void native_play_dead(void)
{
	BUG();
}
#endif

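/*
 * C entry point for secondary CPUs, reached via the start_kernel_fn
 * pointer that __cpu_up() plants in stack_start below.
 */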
asmlinkage void start_secondary(void)
{
	unsigned int cpu = smp_processor_id();
	struct mm_struct *mm = &init_mm;

	enable_mmu();
	mmgrab(mm);
	mmget(mm);
	current->active_mm = mm;
#ifdef CONFIG_MMU
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();
#endif

	per_cpu_trap_init();

	notify_cpu_starting(cpu);

	local_irq_enable();

	calibrate_delay();

	smp_store_cpu_info(cpu);

	set_cpu_online(cpu, true);
	per_cpu(cpu_state, cpu) = CPU_ONLINE;

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

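/*
 * Boot block consumed by the secondary CPU startup code in head.S;
 * __cpu_up() fills it in before kicking the new CPU.
 */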
extern struct {
	unsigned long sp;
	unsigned long bss_start;
	unsigned long bss_end;
	void *start_kernel_fn;
	void *cpu_init_fn;
	void *thread_info;
} stack_start;

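/*
 * Bring one secondary CPU online: hand it a stack and entry point via
 * stack_start, start it through the platform hook, then wait up to a
 * second for it to mark itself online.
 */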
int __cpu_up(unsigned int cpu, struct task_struct *tsk)
{
	unsigned long timeout;

	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

	/* Fill in data in head.S for secondary cpus */
	stack_start.sp = tsk->thread.sp;
	stack_start.thread_info = tsk->stack;
	stack_start.bss_start = 0; /* don't clear bss for secondary cpus */
	stack_start.start_kernel_fn = start_secondary;

	flush_icache_range((unsigned long)&stack_start,
			   (unsigned long)&stack_start + sizeof(stack_start));
	wmb();

	mp_ops->start_cpu(cpu, (unsigned long)_stext);

	timeout = jiffies + HZ;
	while (time_before(jiffies, timeout)) {
		if (cpu_online(cpu))
			break;

		udelay(10);
		barrier();
	}

	if (cpu_online(cpu))
		return 0;

	return -ENOENT;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	unsigned long bogosum = 0;
	int cpu;

	for_each_online_cpu(cpu)
		bogosum += cpu_data[cpu].loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n", num_online_cpus(),
	       bogosum / (500000/HZ),
	       (bogosum / (5000/HZ)) % 100);
}

void smp_send_reschedule(int cpu)
{
	mp_ops->send_ipi(cpu, SMP_MSG_RESCHEDULE);
}

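/*
 * Stop all other CPUs by cross-calling stop_this_cpu() on them.
 */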
void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, 0, 0);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION);
}

void arch_send_call_function_single_ipi(int cpu)
{
	mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE);
}

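/*
 * Timer tick broadcast support: forward the tick to each CPU in @mask as
 * an SMP_MSG_TIMER IPI, which ipi_timer() turns back into a
 * tick_receive_broadcast() call on the target CPU.
 */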
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		mp_ops->send_ipi(cpu, SMP_MSG_TIMER);
}

static void ipi_timer(void)
{
	irq_enter();
	tick_receive_broadcast();
	irq_exit();
}
#endif

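/*
 * IPI demultiplexer, expected to be called from the platform's IPI
 * handler with the decoded message type.
 */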
void smp_message_recv(unsigned int msg)
{
	switch (msg) {
	case SMP_MSG_FUNCTION:
		generic_smp_call_function_interrupt();
		break;
	case SMP_MSG_RESCHEDULE:
		scheduler_ipi();
		break;
	case SMP_MSG_FUNCTION_SINGLE:
		generic_smp_call_function_single_interrupt();
		break;
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	case SMP_MSG_TIMER:
		ipi_timer();
		break;
#endif
	default:
		printk(KERN_WARNING "SMP %d: %s(): unknown IPI %d\n",
		       smp_processor_id(), __func__, msg);
		break;
	}
}

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

#ifdef CONFIG_MMU

static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(flush_tlb_all_ipi, 0, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
	local_flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * context on other cpus are invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus. For
 * multithreaded address spaces, intercpu interrupts have to be sent.
 * Another case where intercpu interrupts are required is when the target
 * mm might be active on another cpu (eg debuggers doing the flushes on
 * behalf of debugees, kswapd stealing pages from another process etc).
 * Kanoj 07/00.
 */
void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1);
	} else {
		int i;
		for_each_online_cpu(i)
			if (smp_processor_id() != i)
				cpu_context(i, mm) = 0;
	}
	local_flush_tlb_mm(mm);

	preempt_enable();
}

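/* Arguments marshalled for the cross-CPU TLB flush helpers below. */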
struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();
	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = start;
		fd.addr2 = end;
		smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1);
	} else {
		int i;
		for_each_online_cpu(i)
			if (smp_processor_id() != i)
				cpu_context(i, mm) = 0;
	}
	local_flush_tlb_range(vma, start, end);
	preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd;

	fd.addr1 = start;
	fd.addr2 = end;
	on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1);
}

static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	preempt_disable();
	if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
	    (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = page;
		smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1);
	} else {
		int i;
		for_each_online_cpu(i)
			if (smp_processor_id() != i)
				cpu_context(i, vma->vm_mm) = 0;
	}
	local_flush_tlb_page(vma, page);
	preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
	local_flush_tlb_one(fd->addr1, fd->addr2);
}

void flush_tlb_one(unsigned long asid, unsigned long vaddr)
{
	struct flush_tlb_data fd;

	fd.addr1 = asid;
	fd.addr2 = vaddr;

	smp_call_function(flush_tlb_one_ipi, (void *)&fd, 1);
	local_flush_tlb_one(asid, vaddr);
}

#endif