Lines Matching refs:cur_cpu_num

Every hit below is a source line referencing cur_cpu_num, prefixed with its line number in the file and suffixed with its enclosing function. The krhino_* names suggest the AliOS Things Rhino kernel scheduler. The hits span three blocks: two alternative definitions of core_sched() (evidently mutually exclusive build variants, since the local cur_cpu_num is declared twice, at lines 89 and 169) and the cross-CPU dispatch helper task_sched_to_cpu().
89 uint8_t cur_cpu_num; in core_sched() local
95 cur_cpu_num = cpu_cur_get(); in core_sched()
97 if (g_per_cpu[cur_cpu_num].dis_sched > 0u) { in core_sched()
98 g_per_cpu[cur_cpu_num].dis_sched = 0u; in core_sched()
103 if (g_intrpt_nested_level[cur_cpu_num] > 0u) { in core_sched()
108 if (g_sched_lock[cur_cpu_num] > 0u) { in core_sched()
113 preferred_task = preferred_cpu_ready_task_get(&g_ready_queue, cur_cpu_num); in core_sched()
116 if (preferred_task == &g_idle_task[cur_cpu_num]) { in core_sched()
117 if (g_active_task[cur_cpu_num]->sched_policy == KSCHED_CFS) { in core_sched()
118 if (g_active_task[cur_cpu_num]->task_state == K_RDY) { in core_sched()
119 cur_task_exec_time = g_active_task[cur_cpu_num]->task_time_this_run + in core_sched()
120 (LR_COUNT_GET() - g_active_task[cur_cpu_num]->task_time_start); in core_sched()
125 cfs_node_insert(&g_active_task[cur_cpu_num]->node, cur_task_exec_time); in core_sched()
130 preferred_task = &g_idle_task[cur_cpu_num]; in core_sched()
133 if (g_active_task[cur_cpu_num]->sched_policy == KSCHED_CFS) { in core_sched()
134 if (g_active_task[cur_cpu_num]->task_state == K_RDY) { in core_sched()
135 cur_task_exec_time = g_active_task[cur_cpu_num]->task_time_this_run + in core_sched()
136 (LR_COUNT_GET() - g_active_task[cur_cpu_num]->task_time_start); in core_sched()
137 cfs_node_insert(&g_active_task[cur_cpu_num]->node, cur_task_exec_time); in core_sched()
148 if (preferred_task == g_active_task[cur_cpu_num]) { in core_sched()
153 TRACE_TASK_SWITCH(g_active_task[cur_cpu_num], preferred_task); in core_sched()
156 krhino_task_switch_hook(g_active_task[cur_cpu_num], preferred_task); in core_sched()
159 g_active_task[cur_cpu_num]->cur_exc = 0; in core_sched()
160 preferred_task->cpu_num = cur_cpu_num; in core_sched()
162 g_preferred_ready_task[cur_cpu_num] = preferred_task; in core_sched()
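
The block repeated at lines 117-125 and 133-137 (and again in the second variant below) is the CFS run-time accounting step: a KSCHED_CFS task that is still ready (K_RDY) is charged for the slice it just ran, and its node is re-inserted into the CFS run queue keyed by its accumulated execution time. A minimal compilable sketch of just that step, assuming nothing beyond the names visible in the listing (the stub bodies, the lr_timer_t width, and the constant values are guesses, not the Rhino source):

#include <stdint.h>

typedef uint64_t lr_timer_t;                   /* assumed width of LR_COUNT_GET() */

typedef struct { lr_timer_t key; } cfs_node_t; /* stand-in for the CFS tree node */

typedef struct {
    uint8_t    task_state;          /* K_RDY, ... */
    uint8_t    sched_policy;        /* KSCHED_CFS, ... */
    lr_timer_t task_time_start;     /* timestamp when the task was dispatched */
    lr_timer_t task_time_this_run;  /* run time accumulated in earlier slices */
    cfs_node_t node;
} ktask_t;

enum { K_RDY = 1, KSCHED_CFS = 2 };             /* assumed constant values */

static lr_timer_t LR_COUNT_GET(void) { return 0u; } /* stub for the cycle counter */

static void cfs_node_insert(cfs_node_t *node, lr_timer_t key)
{
    node->key = key;  /* stub: the real call inserts into an ordered run queue */
}

/* Charge a still-ready CFS task for the slice it just ran and re-queue it
 * keyed by its total execution time, so the task that has run least sorts
 * first for the next pick. */
static void cfs_account_and_requeue(ktask_t *active)
{
    if ((active->sched_policy == KSCHED_CFS) && (active->task_state == K_RDY)) {
        lr_timer_t exec_time = active->task_time_this_run +
                               (LR_COUNT_GET() - active->task_time_start);
        cfs_node_insert(&active->node, exec_time);
    }
}

Keying the queue by total run time is what makes the policy fair: the next candidate is always the task that has run the least. The listing then continues with the second definition of core_sched().
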
169 uint8_t cur_cpu_num; in core_sched() local
174 cur_cpu_num = cpu_cur_get(); in core_sched()
176 if (g_intrpt_nested_level[cur_cpu_num] > 0u) { in core_sched()
180 if (g_sched_lock[cur_cpu_num] > 0u) { in core_sched()
184 preferred_task = preferred_cpu_ready_task_get(&g_ready_queue, cur_cpu_num); in core_sched()
187 if (preferred_task == &g_idle_task[cur_cpu_num]) { in core_sched()
188 if (g_active_task[cur_cpu_num]->sched_policy == KSCHED_CFS) { in core_sched()
189 if (g_active_task[cur_cpu_num]->task_state == K_RDY) { in core_sched()
190 cur_task_exec_time = g_active_task[cur_cpu_num]->task_time_this_run + in core_sched()
191 (LR_COUNT_GET() - g_active_task[cur_cpu_num]->task_time_start); in core_sched()
195 cfs_node_insert(&g_active_task[cur_cpu_num]->node, cur_task_exec_time); in core_sched()
200 preferred_task = &g_idle_task[cur_cpu_num]; in core_sched()
203 if (g_active_task[cur_cpu_num]->sched_policy == KSCHED_CFS) { in core_sched()
204 if (g_active_task[cur_cpu_num]->task_state == K_RDY) { in core_sched()
205 cur_task_exec_time = g_active_task[cur_cpu_num]->task_time_this_run + in core_sched()
206 (LR_COUNT_GET() - g_active_task[cur_cpu_num]->task_time_start); in core_sched()
207 cfs_node_insert(&g_active_task[cur_cpu_num]->node, cur_task_exec_time); in core_sched()
218 if (preferred_task == g_active_task[cur_cpu_num]) { in core_sched()
222 g_preferred_ready_task[cur_cpu_num] = preferred_task; in core_sched()
224 TRACE_TASK_SWITCH(g_active_task[cur_cpu_num], g_preferred_ready_task[cur_cpu_num]); in core_sched()
227 krhino_task_switch_hook(g_active_task[cur_cpu_num], g_preferred_ready_task[cur_cpu_num]); in core_sched()
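
Both core_sched() variants open with the same guards (lines 95-113 and 174-184): no reschedule while the CPU is inside an interrupt handler or while the scheduler lock is held, and the first variant, apparently the SMP build, additionally consumes a one-shot dis_sched flag set by task_sched_to_cpu() below. A sketch of that guard sequence, with the per-CPU globals stubbed (the core count and the cpu_cur_get() body are assumptions):

#include <stdint.h>

#define CPU_NUM 2u  /* assumed core count */

static struct { uint8_t dis_sched; } g_per_cpu[CPU_NUM];
static uint8_t g_intrpt_nested_level[CPU_NUM];
static uint8_t g_sched_lock[CPU_NUM];

static uint8_t cpu_cur_get(void) { return 0u; } /* stub: id of the running CPU */

/* Returns nonzero when core_sched() may proceed on this CPU. */
static int resched_allowed(void)
{
    uint8_t cpu = cpu_cur_get();

    if (g_per_cpu[cpu].dis_sched > 0u) {   /* one-shot suppression, SMP variant */
        g_per_cpu[cpu].dis_sched = 0u;
        return 0;
    }
    if (g_intrpt_nested_level[cpu] > 0u) { /* still inside an ISR */
        return 0;
    }
    if (g_sched_lock[cpu] > 0u) {          /* scheduler lock held */
        return 0;
    }
    return 1;
}

Past the guards, both variants fetch the preferred ready task, return early if it is already the active task (lines 148 and 218), and otherwise publish it in g_preferred_ready_task for the context-switch path; the first variant also stamps the outgoing and incoming tasks (cur_exc and cpu_num, lines 159-160).
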
283 static void task_sched_to_cpu(runqueue_t *rq, ktask_t *task, uint8_t cur_cpu_num) in task_sched_to_cpu() argument
293 if (task->cpu_num != cur_cpu_num) { in task_sched_to_cpu()
309 if (low_pri_cpu_num != cur_cpu_num) { in task_sched_to_cpu()
310 if (task->prio < g_active_task[cur_cpu_num]->prio) { in task_sched_to_cpu()
311 g_per_cpu[cur_cpu_num].dis_sched = 1u; in task_sched_to_cpu()
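
task_sched_to_cpu() is the producer side of that dis_sched handshake. From the matched lines: a task that belongs to another CPU is handed to it (line 293), and when the lowest-priority CPU chosen for a local task is not the current one, yet the task would also preempt the current CPU (line 310), the next local reschedule is suppressed so only the target CPU picks the task up. A sketch of that decision; find_lowest_prio_cpu() and cpu_signal() are hypothetical stand-ins for the lines the search did not match:

#include <stdint.h>

#define CPU_NUM 2u  /* assumed core count */

typedef struct {
    uint8_t prio;    /* smaller value = higher priority, as line 310 suggests */
    uint8_t cpu_num; /* CPU the task is bound to / last ran on */
} ktask_t;

static struct { uint8_t dis_sched; } g_per_cpu[CPU_NUM];
static ktask_t *g_active_task[CPU_NUM];

/* Hypothetical stand-ins for code outside the matched lines. */
static uint8_t find_lowest_prio_cpu(void) { return 1u; }
static void    cpu_signal(uint8_t cpu)    { (void)cpu; /* cross-core IPI */ }

static void task_sched_to_cpu_sketch(ktask_t *task, uint8_t cur_cpu_num)
{
    if (task->cpu_num != cur_cpu_num) {
        /* The task belongs to another CPU: ask that CPU to reschedule. */
        cpu_signal(task->cpu_num);
        return;
    }

    uint8_t low_pri_cpu_num = find_lowest_prio_cpu();

    if (low_pri_cpu_num != cur_cpu_num) {
        if (task->prio < g_active_task[cur_cpu_num]->prio) {
            /* The task would also preempt locally, but another CPU was
             * chosen to run it; skip the next local reschedule once
             * (the flag is consumed at line 97 above). */
            g_per_cpu[cur_cpu_num].dis_sched = 1u;
        }
        cpu_signal(low_pri_cpu_num);
    }
}

Presumably the one-shot flag keeps the current CPU from rescheduling onto the new task before the signalled CPU can, at the cost of one skipped local reschedule.
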