// Copyright 2016 The Fuchsia Authors
//
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file or at
// https://opensource.org/licenses/MIT

#include <assert.h>
#include <err.h>
#include <list.h>
#include <trace.h>

#include <kernel/dpc.h>
#include <kernel/event.h>
#include <kernel/percpu.h>
#include <kernel/spinlock.h>
#include <lk/init.h>

static spin_lock_t dpc_lock = SPIN_LOCK_INITIAL_VALUE;

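// Queue |dpc| on the current CPU's DPC list and signal the worker thread.
// Returns ZX_ERR_ALREADY_EXISTS if the DPC is already queued; if |reschedule|
// is true, the signal also requests a reschedule.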
zx_status_t dpc_queue(dpc_t* dpc, bool reschedule) {
    DEBUG_ASSERT(dpc);
    DEBUG_ASSERT(dpc->func);

    // disable interrupts before taking the lock
    spin_lock_saved_state_t state;
    spin_lock_irqsave(&dpc_lock, state);

    if (list_in_list(&dpc->node)) {
        spin_unlock_irqrestore(&dpc_lock, state);
        return ZX_ERR_ALREADY_EXISTS;
    }

    struct percpu* cpu = get_local_percpu();

    // put the dpc at the tail of the list and signal the worker
    list_add_tail(&cpu->dpc_list, &dpc->node);

    spin_unlock_irqrestore(&dpc_lock, state);

    event_signal(&cpu->dpc_event, reschedule);

    return ZX_OK;
}

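// As dpc_queue(), but for callers already running with interrupts disabled in
// a thread-locked context; the worker is signaled via event_signal_thread_locked().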
zx_status_t dpc_queue_thread_locked(dpc_t* dpc) {
    DEBUG_ASSERT(dpc);
    DEBUG_ASSERT(dpc->func);

    // interrupts are already disabled
    spin_lock(&dpc_lock);

    if (list_in_list(&dpc->node)) {
        spin_unlock(&dpc_lock);
        return ZX_ERR_ALREADY_EXISTS;
    }

    struct percpu* cpu = get_local_percpu();

    // put the dpc at the tail of the list and signal the worker
    list_add_tail(&cpu->dpc_list, &dpc->node);
    event_signal_thread_locked(&cpu->dpc_event);

    spin_unlock(&dpc_lock);

    return ZX_OK;
}

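// Stop the DPC worker thread for |cpu_id|: mark it as stopping, wake it, and
// join it. Any DPCs still queued remain on the CPU's list until
// dpc_shutdown_transition_off_cpu() moves them.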
void dpc_shutdown(uint cpu_id) {
    DEBUG_ASSERT(cpu_id < SMP_MAX_CPUS);

    spin_lock_saved_state_t state;
    spin_lock_irqsave(&dpc_lock, state);

    DEBUG_ASSERT(!percpu[cpu_id].dpc_stop);

    // Ask the DPC thread to terminate.
    percpu[cpu_id].dpc_stop = true;

    // Take the thread pointer so we can join outside the spinlock.
    thread_t* t = percpu[cpu_id].dpc_thread;
    percpu[cpu_id].dpc_thread = nullptr;

    spin_unlock_irqrestore(&dpc_lock, state);

    // Wake it.
    event_signal(&percpu[cpu_id].dpc_event, false);

    // Wait for it to terminate.
    int ret = 0;
    zx_status_t status = thread_join(t, &ret, ZX_TIME_INFINITE);
    DEBUG_ASSERT(status == ZX_OK);
    DEBUG_ASSERT(ret == 0);
}

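// Called from another CPU once |cpu_id|'s DPC thread has been shut down: move
// its pending DPCs to the current CPU's list and reset its state so DPC
// processing can restart if the CPU comes back online.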
void dpc_shutdown_transition_off_cpu(uint cpu_id) {
    DEBUG_ASSERT(cpu_id < SMP_MAX_CPUS);

    spin_lock_saved_state_t state;
    spin_lock_irqsave(&dpc_lock, state);

    uint cur_cpu = arch_curr_cpu_num();
    DEBUG_ASSERT(cpu_id != cur_cpu);

    // The DPC thread should already be stopped.
    DEBUG_ASSERT(percpu[cpu_id].dpc_stop);
    DEBUG_ASSERT(percpu[cpu_id].dpc_thread == nullptr);

    list_node_t* src_list = &percpu[cpu_id].dpc_list;
    list_node_t* dst_list = &percpu[cur_cpu].dpc_list;

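    // Move any DPCs still queued on the dead CPU over to the current CPU.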
    dpc_t* dpc;
    while ((dpc = list_remove_head_type(src_list, dpc_t, node))) {
        list_add_tail(dst_list, &dpc->node);
    }

    // Reset the state so we can restart DPC processing if the CPU comes back online.
    DEBUG_ASSERT(list_is_empty(&percpu[cpu_id].dpc_list));
    percpu[cpu_id].dpc_stop = false;
    event_destroy(&percpu[cpu_id].dpc_event);

    spin_unlock_irqrestore(&dpc_lock, state);
}

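// Per-CPU worker: blocks on the DPC event, then pops and runs queued DPCs one
// at a time, exiting when dpc_stop is set.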
static int dpc_thread(void* arg) {
    dpc_t dpc_local;

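    // Briefly disable interrupts so the lookup of this CPU's percpu state and
    // the capture of its event/list pointers happen without interruption.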
    spin_lock_saved_state_t state;
    arch_interrupt_save(&state, SPIN_LOCK_FLAG_INTERRUPTS);

    struct percpu* cpu = get_local_percpu();
    event_t* event = &cpu->dpc_event;
    list_node_t* list = &cpu->dpc_list;

    arch_interrupt_restore(state, SPIN_LOCK_FLAG_INTERRUPTS);

    for (;;) {
        // wait for a dpc to fire
        __UNUSED zx_status_t err = event_wait(event);
        DEBUG_ASSERT(err == ZX_OK);

        spin_lock_irqsave(&dpc_lock, state);

        if (cpu->dpc_stop) {
            spin_unlock_irqrestore(&dpc_lock, state);
            return 0;
        }

        // pop a dpc off the list and make a local copy so the owner can free or
        // re-queue it once we drop the lock
        dpc_t* dpc = list_remove_head_type(list, dpc_t, node);

        // if the list is now empty, unsignal the event so we block until it is
        // signaled again
        if (!dpc) {
            event_unsignal(event);
            dpc_local.func = NULL;
        } else {
            dpc_local = *dpc;
        }

        spin_unlock_irqrestore(&dpc_lock, state);

        // call the dpc
        if (dpc_local.func) {
            dpc_local.func(&dpc_local);
        }
    }

    return 0;
}

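// Initialize DPC state for the calling CPU: its list, event, and a worker
// thread pinned to this CPU. A no-op if the CPU's DPC state was already set up
// by a previous hotplug event.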
void dpc_init_for_cpu(void) {
    struct percpu* cpu = get_local_percpu();
    uint cpu_num = arch_curr_cpu_num();

    // the cpu's dpc state was initialized on a previous hotplug event
    if (event_initialized(&cpu->dpc_event)) {
        return;
    }

    list_initialize(&cpu->dpc_list);
    event_init(&cpu->dpc_event, false, 0);
    cpu->dpc_stop = false;

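    // create the worker thread, pin it to this CPU, and start it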
    char name[10];
    snprintf(name, sizeof(name), "dpc-%u", cpu_num);
    cpu->dpc_thread = thread_create(name, &dpc_thread, NULL, DPC_THREAD_PRIORITY);
    thread_set_cpu_affinity(cpu->dpc_thread, cpu_num_to_mask(cpu_num));
    thread_resume(cpu->dpc_thread);
}

static void dpc_init(unsigned int level) {
    // initialize dpc for the main CPU
    dpc_init_for_cpu();
}

LK_INIT_HOOK(dpc, dpc_init, LK_INIT_LEVEL_THREADING);