/*
 * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230)
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */

#include <config.h>
#include <mode/smp/ipi.h>
#include <smp/ipi.h>
#include <smp/lock.h>

#ifdef ENABLE_SMP_SUPPORT

static IpiModeRemoteCall_t remoteCall;   /* the remote call being requested */

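/*
 * Stash the requested call and its arguments where the target cores can read
 * them. The caller is assumed to hold the big kernel lock, which is what
 * makes these plain stores to the shared variables safe; ipi_args and
 * totalCoreBarrier are assumed to be declared by the generic SMP IPI code
 * (smp/ipi.h).
 */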
static inline void init_ipi_args(IpiRemoteCall_t func,
                                 word_t data1, word_t data2, word_t data3,
                                 word_t mask)
{
    remoteCall = (IpiModeRemoteCall_t)func;
    ipi_args[0] = data1;
    ipi_args[1] = data2;
    ipi_args[2] = data3;

    /* get number of cores involved in this IPI */
    totalCoreBarrier = popcountl(mask);
}
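
/*
 * A minimal sketch of the sender side, assuming the generic SMP code (e.g.
 * doRemoteMaskOp) drives these helpers while holding the big kernel lock:
 *
 *     init_ipi_args(func, data1, data2, data3, mask);  // publish call + args
 *     ipi_send_mask(irq_remote_call_ipi, mask, true);  // interrupt targets
 *     ipi_wait(totalCoreBarrier);                      // rendezvous with them
 */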

static void handleRemoteCall(IpiModeRemoteCall_t call, word_t arg0,
                             word_t arg1, word_t arg2, bool_t irqPath)
{
    /* We can get spurious irq_remote_call_ipi calls, e.g. when handling an
     * IPI in the lock while a hardware IPI is still pending. Guard against
     * spurious IPIs! */
    if (clh_is_ipi_pending(getCurrentCPUIndex())) {
        switch ((IpiRemoteCall_t)call) {
        case IpiRemoteCall_Stall:
            ipiStallCoreCallback(irqPath);
            break;

        case IpiRemoteCall_InvalidatePageStructureCacheASID:
            invalidateLocalPageStructureCacheASID(arg0, arg1);
            break;

        case IpiRemoteCall_InvalidateTranslationSingle:
            invalidateLocalTranslationSingle(arg0);
            break;

        case IpiRemoteCall_InvalidateTranslationSingleASID:
            invalidateLocalTranslationSingleASID(arg0, arg1);
            break;

        case IpiRemoteCall_InvalidateTranslationAll:
            invalidateLocalTranslationAll();
            break;

        case IpiRemoteCall_switchFpuOwner:
            switchLocalFpuOwner((user_fpu_state_t *)arg0);
            break;

#ifdef CONFIG_VTX
        case IpiRemoteCall_ClearCurrentVCPU:
            clearCurrentVCPU();
            break;
        case IpiRemoteCall_VMCheckBoundNotification:
            VMCheckBoundNotification((tcb_t *)arg0);
            break;
#endif
        default:
            Mode_handleRemoteCall(call, arg0, arg1, arg2);
            break;
        }

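        /* Acknowledge the IPI by clearing this core's flag in the lock's
         * per-node state, then join the rendezvous barrier so the sender and
         * all targets leave the remote call together. */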
        big_kernel_lock.node_owners[getCurrentCPUIndex()].ipi = 0;
        ipi_wait(totalCoreBarrier);
    }
}

/* make sure the CPU IDs of all cores fit in a single bitmask word */
compile_assert(invalid_number_of_supported_nodes, CONFIG_MAX_NUM_NODES <= wordBits);

#ifdef CONFIG_USE_LOGICAL_IDS
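/*
 * With CONFIG_USE_LOGICAL_IDS the APIC is assumed to run in logical
 * destination mode, where a single ICR write can address every core in a
 * cluster at once, so the requested cores are batched by cluster before
 * any IPI is sent.
 */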
static void x86_ipi_send_mask(interrupt_t ipi, word_t mask, bool_t isBlocking)
{
    word_t nr_target_clusters = 0;
    word_t target_clusters[CONFIG_MAX_NUM_NODES];

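    /* Peel cores off 'mask' from the highest set bit down: for each new
     * cluster, OR together the logical IDs of all requested cores in that
     * cluster (marking their pending-IPI flags if this call blocks), then
     * clear the whole cluster from 'mask' and move on. */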
    do {
        int core = wordBits - 1 - clzl(mask);
        target_clusters[nr_target_clusters] = 0;

        /* get the mask of all other cores in the bitmask that are in the same cluster as 'core' */
        word_t sub_mask = mask & cpu_mapping.other_indexes_in_cluster[core];
        target_clusters[nr_target_clusters] |= cpu_mapping.index_to_logical_id[core];
        if (isBlocking) {
            big_kernel_lock.node_owners[core].ipi = 1;
        }

        /* check if there are any other cores in this cluster */
        while (sub_mask) {
            int index = wordBits - 1 - clzl(sub_mask);
            target_clusters[nr_target_clusters] |= cpu_mapping.index_to_logical_id[index];
            if (isBlocking) {
                big_kernel_lock.node_owners[index].ipi = 1;
            }
            sub_mask &= ~BIT(index);
        }

        mask &= ~(cpu_mapping.other_indexes_in_cluster[core] | BIT(core));
        nr_target_clusters++;
    } while (mask != 0);

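    /* IPI_ICR_BARRIER is assumed to order the flag and argument stores above
     * before the ICR writes below, so no target can run its handler against
     * stale state. */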
    /* broadcast IPIs to clusters... */
    IPI_ICR_BARRIER;
    for (int i = 0; i < nr_target_clusters; i++) {
        apic_send_ipi_cluster(ipi, target_clusters[i]);
    }
}
#endif /* CONFIG_USE_LOGICAL_IDS */

void ipi_send_mask(irq_t ipi, word_t mask, bool_t isBlocking)
{
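    /* translate the kernel IRQ number into the hardware interrupt vector */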
    interrupt_t interrupt_ipi = ipi + IRQ_INT_OFFSET;

#ifdef CONFIG_USE_LOGICAL_IDS
    x86_ipi_send_mask(interrupt_ipi, mask, isBlocking);
#else
    generic_ipi_send_mask(interrupt_ipi, mask, isBlocking);
#endif /* CONFIG_USE_LOGICAL_IDS */
}
#endif /* ENABLE_SMP_SUPPORT */