1 /*
2 * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230)
3 *
4 * SPDX-License-Identifier: GPL-2.0-only
5 */
6
7 #include <config.h>
8 #include <mode/smp/ipi.h>
9 #include <smp/lock.h>
10 #include <util.h>
11
12 #ifdef ENABLE_SMP_SUPPORT
13
14 /* the remote call being requested */
15 static volatile IpiRemoteCall_t remoteCall;
16 static volatile irq_t ipiIrq[CONFIG_MAX_NUM_NODES];
17
/*
 * Record the arguments of a pending remote call so the target cores can
 * read them once they take the IPI.
 *
 * Must be called while holding the big kernel lock, before the IPI is
 * actually sent to the cores in mask.
 */
static inline void init_ipi_args(IpiRemoteCall_t func,
                                 word_t data1, word_t data2, word_t data3,
                                 word_t mask)
{
    /* Number of cores participating in this IPI; used as the barrier
     * count when waiting for all targets to finish the call. */
    totalCoreBarrier = popcountl(mask);

    remoteCall = func;
    ipi_args[0] = data1;
    ipi_args[1] = data2;
    ipi_args[2] = data3;
}
30
/*
 * Execute the remote call that another core has requested of this core.
 *
 * @param call    the remote operation to perform
 * @param arg0    first argument of the remote call
 * @param arg1    second argument (unused by the current call set)
 * @param arg2    third argument (unused by the current call set)
 * @param irqPath true when invoked from the IRQ path rather than while
 *                spinning on the kernel lock
 */
static void handleRemoteCall(IpiRemoteCall_t call, word_t arg0,
                             word_t arg1, word_t arg2, bool_t irqPath)
{
    /* We can get spurious irq_remote_call_ipi calls, e.g. when handling an
     * IPI in the lock while a hardware IPI is pending. Guard against
     * spurious IPIs! */
    if (!clh_is_ipi_pending(getCurrentCPUIndex())) {
        return;
    }

    switch (call) {
    case IpiRemoteCall_Stall:
        ipiStallCoreCallback(irqPath);
        break;

#ifdef CONFIG_HAVE_FPU
    case IpiRemoteCall_switchFpuOwner:
        switchLocalFpuOwner((user_fpu_state_t *)arg0);
        break;
#endif /* CONFIG_HAVE_FPU */

    default:
        fail("Invalid remote call");
        break;
    }

    /* Acknowledge the IPI for this core before joining the completion
     * barrier, so the sender can observe that we are done. */
    big_kernel_lock.node_owners[getCurrentCPUIndex()].ipi = 0;
    ipiIrq[getCurrentCPUIndex()] = irqInvalid;
    ipi_wait(totalCoreBarrier);
}
58
/* Send the given IPI to every core set in mask, via the architecture-generic
 * IPI broadcast path. */
void ipi_send_mask(irq_t ipi, word_t mask, bool_t isBlocking)
{
    generic_ipi_send_mask(ipi, mask, isBlocking);
}
64
ipi_get_irq(void)65 irq_t ipi_get_irq(void)
66 {
67 assert(!(ipiIrq[getCurrentCPUIndex()] == irqInvalid && big_kernel_lock.node_owners[getCurrentCPUIndex()].ipi == 1));
68 return ipiIrq[getCurrentCPUIndex()];
69 }
70
/* Clear the pending IPI IRQ record for the current core.
 *
 * @param irq unused; present only to match the generic IPI interface
 */
void ipi_clear_irq(irq_t irq)
{
    ipiIrq[getCurrentCPUIndex()] = irqInvalid;
}
76
77 /* this function is called with a single hart id. */
ipi_send_target(irq_t irq,word_t hart_id)78 void ipi_send_target(irq_t irq, word_t hart_id)
79 {
80 unsigned long hart_mask;
81 word_t core_id = hartIDToCoreID(hart_id);
82 assert(core_id < CONFIG_MAX_NUM_NODES);
83 hart_mask = BIT(hart_id);
84
85 assert((ipiIrq[core_id] == irqInvalid) || (ipiIrq[core_id] == irq_reschedule_ipi) ||
86 (ipiIrq[core_id] == irq_remote_call_ipi && big_kernel_lock.node_owners[core_id].ipi == 0));
87
88 ipiIrq[core_id] = irq;
89 asm volatile("fence rw,rw");
90 sbi_send_ipi(&hart_mask);
91 }
92
93 #endif
94