/*
 * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230)
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */

#include <config.h>
#include <mode/smp/ipi.h>
#include <smp/ipi.h>
#include <smp/lock.h>

#ifdef ENABLE_SMP_SUPPORT
/* This function switches the core it is called on to the idle thread,
 * in order to avoid IPI storms. If the core is waiting on the lock, the actual
 * switch will not occur until the core attempts to obtain the lock, at which
 * point the core will capture the pending IPI, which is discarded.
 *
 * The core that triggered the stall is responsible for triggering a reschedule,
 * or this call will idle forever. */
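/* Illustrative flow (a sketch, assuming the architecture's handleRemoteCall() dispatches
 * IpiRemoteCall_Stall to this callback): core A wants exclusive access, so it invokes
 * doRemoteMaskOp() with IpiRemoteCall_Stall and a mask containing core B. Core B takes the
 * remote-call IPI, lands here, parks itself on the idle thread and keeps spinning on its
 * CLH node until core A later sends a reschedule IPI. */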
void ipiStallCoreCallback(bool_t irqPath)
{
    if (clh_is_self_in_queue() && !irqPath) {
        /* The current thread is running, and we are about to replace it with the idle thread.
         *
         * If we are in the kernel to handle a syscall, the instruction should be re-executed,
         * so set the thread to 'ThreadState_Restart'. A thread in 'ThreadState_RunningVM'
         * should remain in that state. Note that 'ThreadState_Restart' does not always result
         * in the exception being regenerated when we are in the kernel to handle one,
         * e.g. a hardware single-step exception. */
        if (thread_state_ptr_get_tsType(&NODE_STATE(ksCurThread)->tcbState) == ThreadState_Running) {
            setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        }

        SCHED_ENQUEUE_CURRENT_TCB;
        switchToIdleThread();
        NODE_STATE(ksSchedulerAction) = SchedulerAction_ResumeCurrentThread;

        /* Let the CPU requesting this IPI continue while we wait on the lock */
        big_kernel_lock.node_owners[getCurrentCPUIndex()].ipi = 0;
#ifdef CONFIG_ARCH_RISCV
        ipi_clear_irq(irq_remote_call_ipi);
#endif
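        /* Join the IPI barrier; the initiating core blocks on the same barrier in
         * doRemoteMaskOp() below, so this lets it continue. */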
        ipi_wait(totalCoreBarrier);

        /* Continue waiting on the lock, servicing any further remote calls that arrive
         * while we spin */
        while (big_kernel_lock.node_owners[getCurrentCPUIndex()].next->value != CLHState_Granted) {
            if (clh_is_ipi_pending(getCurrentCPUIndex())) {

                /* Multiple nested calls for the same reason (stall) could result in a stack overflow */
                assert((IpiRemoteCall_t)remoteCall != IpiRemoteCall_Stall);
                handleIPI(CORE_IRQ_TO_IRQT(getCurrentCPUIndex(), irq_remote_call_ipi), irqPath);
            }
            arch_pause();
        }

        /* make sure no resource access passes from this point */
        asm volatile("" ::: "memory");

        /* Start idle thread to capture the pending IPI */
        activateThread();
        restore_user_context();
    } else {
        /* We get here either from the normal interrupt path, without having grabbed the lock,
         * or from inside the lock acquisition while waiting to grab the lock to handle a
         * pending interrupt. In the latter case, we return to 'clh_lock_acquire' to grab the
         * lock and handle the pending interrupt. This is valid as interrupts are
         * asynchronous events! */
        SCHED_ENQUEUE_CURRENT_TCB;
        switchToIdleThread();

        NODE_STATE(ksSchedulerAction) = SchedulerAction_ResumeCurrentThread;
    }
}

void handleIPI(irq_t irq, bool_t irqPath)
{
    if (IRQT_TO_IRQ(irq) == irq_remote_call_ipi) {
        handleRemoteCall(remoteCall, get_ipi_arg(0), get_ipi_arg(1), get_ipi_arg(2), irqPath);
    } else if (IRQT_TO_IRQ(irq) == irq_reschedule_ipi) {
        rescheduleRequired();
#ifdef CONFIG_ARCH_RISCV
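        /* A local instruction fence is also executed on RISC-V; this appears to let the
         * reschedule IPI double as the point where remote instruction-memory updates become
         * visible on this hart (an assumption based on this code path). */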
        ifence_local();
#endif
    } else {
        fail("Invalid IPI");
    }
}

void doRemoteMaskOp(IpiRemoteCall_t func, word_t data1, word_t data2, word_t data3, word_t mask)
{
    /* make sure the current core is not set in the mask */
    mask &= ~BIT(getCurrentCPUIndex());

    /* An empty mask can happen, e.g. when the caller tries to map a page table in a newly
     * created PD which has not been run yet. Guard against it! */
    if (mask != 0) {
        init_ipi_args(func, data1, data2, data3, mask);

        /* make sure no resource access passes from this point */
        asm volatile("" ::: "memory");
        ipi_send_mask(CORE_IRQ_TO_IRQT(0, irq_remote_call_ipi), mask, true);
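        /* Wait on the barrier until every targeted core has picked up this remote call;
         * the call and its arguments are held in shared state (see init_ipi_args()), so
         * they should not be overwritten before all targets have seen them. */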
        ipi_wait(totalCoreBarrier);
    }
}

void doMaskReschedule(word_t mask)
{
    /* make sure the current core is not set in the mask */
    mask &= ~BIT(getCurrentCPUIndex());
    if (mask != 0) {
        ipi_send_mask(CORE_IRQ_TO_IRQT(0, irq_reschedule_ipi), mask, false);
    }
}

void generic_ipi_send_mask(irq_t ipi, word_t mask, bool_t isBlocking)
{
    word_t nr_target_cores = 0;
    uint16_t target_cores[CONFIG_MAX_NUM_NODES];

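    /* Walk the mask from the highest set bit down. For a blocking send, first mark each
     * target's pending-IPI flag and record the target, deferring the actual IPIs until
     * after the memory barrier below so targets observe the flag before the interrupt
     * arrives. Non-blocking IPIs are sent immediately. */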
    while (mask) {
        int index = wordBits - 1 - clzl(mask);
        if (isBlocking) {
            big_kernel_lock.node_owners[index].ipi = 1;
            target_cores[nr_target_cores] = index;
            nr_target_cores++;
        } else {
            ipi_send_target(ipi, cpuIndexToID(index));
        }
        mask &= ~BIT(index);
    }

    if (nr_target_cores > 0) {
        /* sending IPIs... */
        IPI_MEM_BARRIER;
        for (int i = 0; i < nr_target_cores; i++) {
            ipi_send_target(ipi, cpuIndexToID(target_cores[i]));
        }
    }
}
#endif /* ENABLE_SMP_SUPPORT */