Lines matching refs:node (SPU scheduler, arch/powerpc/platforms/cell/spufs/sched.c)

139 	int node;  in spu_update_sched_info()  local
142 node = ctx->spu->node; in spu_update_sched_info()
147 mutex_lock(&cbe_spu_info[node].list_mutex); in spu_update_sched_info()
149 mutex_unlock(&cbe_spu_info[node].list_mutex); in spu_update_sched_info()
155 static int __node_allowed(struct spu_context *ctx, int node) in __node_allowed() argument
157 if (nr_cpus_node(node)) { in __node_allowed()
158 const struct cpumask *mask = cpumask_of_node(node); in __node_allowed()
167 static int node_allowed(struct spu_context *ctx, int node) in node_allowed() argument
172 rval = __node_allowed(ctx, node); in node_allowed()
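
The __node_allowed()/node_allowed() pair above gates every node scan in this file: a context may only be placed on a NUMA node whose CPUs intersect the context's allowed CPU mask. Below is a minimal sketch of that check, assuming only a cpus_allowed cpumask in the context; the locking that the node_allowed() wrapper adds around it is left out, and the struct name is invented for the sketch.

#include <linux/cpumask.h>
#include <linux/topology.h>

/*
 * Cut-down stand-in for struct spu_context; only the field the node
 * check needs is shown (an assumption made for this sketch).
 */
struct spu_context_sketch {
	cpumask_t cpus_allowed;
};

/*
 * A node is usable only if it has CPUs and at least one of them is in
 * the context's allowed mask, mirroring the __node_allowed() lines above.
 */
int node_usable(struct spu_context_sketch *ctx, int node)
{
	if (nr_cpus_node(node)) {
		const struct cpumask *mask = cpumask_of_node(node);

		if (cpumask_intersects(mask, &ctx->cpus_allowed))
			return 1;
	}
	return 0;
}
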
180 int node; in do_notify_spus_active() local
185 for_each_online_node(node) { in do_notify_spus_active()
188 mutex_lock(&cbe_spu_info[node].list_mutex); in do_notify_spus_active()
189 list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) { in do_notify_spus_active()
198 mutex_unlock(&cbe_spu_info[node].list_mutex); in do_notify_spus_active()
214 atomic_inc(&cbe_spu_info[spu->node].reserved_spus); in spu_bind_context()
249 BUG_ON(!mutex_is_locked(&cbe_spu_info[spu->node].list_mutex)); in sched_spu()
292 int node, n; in aff_ref_location() local
298 node = cpu_to_node(raw_smp_processor_id()); in aff_ref_location()
299 for (n = 0; n < MAX_NUMNODES; n++, node++) { in aff_ref_location()
311 node = (node < MAX_NUMNODES) ? node : 0; in aff_ref_location()
312 if (!node_allowed(ctx, node)) in aff_ref_location()
316 mutex_lock(&cbe_spu_info[node].list_mutex); in aff_ref_location()
317 list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) { in aff_ref_location()
324 mutex_unlock(&cbe_spu_info[node].list_mutex); in aff_ref_location()
328 list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) { in aff_ref_location()
331 mutex_unlock(&cbe_spu_info[node].list_mutex); in aff_ref_location()
335 mutex_unlock(&cbe_spu_info[node].list_mutex); in aff_ref_location()
363 static struct spu *ctx_location(struct spu *ref, int offset, int node) in ctx_location() argument
370 BUG_ON(spu->node != node); in ctx_location()
378 BUG_ON(spu->node != node); in ctx_location()
428 atomic_dec(&cbe_spu_info[spu->node].reserved_spus); in spu_unbind_context()
556 int node, n; in spu_get_idle() local
566 node = aff_ref_spu->node; in spu_get_idle()
568 mutex_lock(&cbe_spu_info[node].list_mutex); in spu_get_idle()
569 spu = ctx_location(aff_ref_spu, ctx->aff_offset, node); in spu_get_idle()
572 mutex_unlock(&cbe_spu_info[node].list_mutex); in spu_get_idle()
579 node = cpu_to_node(raw_smp_processor_id()); in spu_get_idle()
580 for (n = 0; n < MAX_NUMNODES; n++, node++) { in spu_get_idle()
581 node = (node < MAX_NUMNODES) ? node : 0; in spu_get_idle()
582 if (!node_allowed(ctx, node)) in spu_get_idle()
585 mutex_lock(&cbe_spu_info[node].list_mutex); in spu_get_idle()
586 list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) { in spu_get_idle()
590 mutex_unlock(&cbe_spu_info[node].list_mutex); in spu_get_idle()
599 mutex_unlock(&cbe_spu_info[node].list_mutex); in spu_get_idle()
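
aff_ref_location(), spu_get_idle() and find_victim() all share one scan shape: start from the node of the current CPU, visit all MAX_NUMNODES slots with wrap-around, skip nodes the context is not allowed on, and walk each node's SPU list under cbe_spu_info[node].list_mutex. The sketch below follows that shape under stated assumptions: spu_usable() is a hypothetical stand-in for the per-caller test, the per-node array is a simplified model of cbe_spu_info[], and the real callers claim the SPU they find before dropping the mutex.

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/nodemask.h>
#include <linux/smp.h>
#include <linux/topology.h>

/* Cut-down per-node bookkeeping, modelled on cbe_spu_info[] (assumption). */
struct spu_node_sketch {
	struct mutex list_mutex;
	struct list_head spus;
};

extern struct spu_node_sketch spu_nodes[MAX_NUMNODES];

/* Cut-down SPU: just enough to sit on a per-node list. */
struct spu_sketch {
	int node;
	struct list_head cbe_list;
};

struct spu_context_sketch;				/* as in the earlier sketch */
extern int node_usable(struct spu_context_sketch *ctx, int node);
extern int spu_usable(struct spu_sketch *spu);		/* hypothetical per-caller test */

static struct spu_sketch *scan_nodes(struct spu_context_sketch *ctx)
{
	struct spu_sketch *spu;
	int node, n;

	/* Prefer the node of the CPU we are running on, then wrap around. */
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_usable(ctx, node))
			continue;

		mutex_lock(&spu_nodes[node].list_mutex);
		list_for_each_entry(spu, &spu_nodes[node].spus, cbe_list) {
			if (spu_usable(spu)) {
				/* the real callers claim the SPU here, still under the lock */
				mutex_unlock(&spu_nodes[node].list_mutex);
				return spu;
			}
		}
		mutex_unlock(&spu_nodes[node].list_mutex);
	}
	return NULL;
}
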
615 int node, n; in find_victim() local
627 node = cpu_to_node(raw_smp_processor_id()); in find_victim()
628 for (n = 0; n < MAX_NUMNODES; n++, node++) { in find_victim()
629 node = (node < MAX_NUMNODES) ? node : 0; in find_victim()
630 if (!node_allowed(ctx, node)) in find_victim()
633 mutex_lock(&cbe_spu_info[node].list_mutex); in find_victim()
634 list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) { in find_victim()
645 mutex_unlock(&cbe_spu_info[node].list_mutex); in find_victim()
679 mutex_lock(&cbe_spu_info[node].list_mutex); in find_victim()
680 cbe_spu_info[node].nr_active--; in find_victim()
682 mutex_unlock(&cbe_spu_info[node].list_mutex); in find_victim()
701 int node = spu->node; in __spu_schedule() local
706 mutex_lock(&cbe_spu_info[node].list_mutex); in __spu_schedule()
709 cbe_spu_info[node].nr_active++; in __spu_schedule()
713 mutex_unlock(&cbe_spu_info[node].list_mutex); in __spu_schedule()
747 int node = spu->node; in spu_unschedule() local
749 mutex_lock(&cbe_spu_info[node].list_mutex); in spu_unschedule()
750 cbe_spu_info[node].nr_active--; in spu_unschedule()
756 mutex_unlock(&cbe_spu_info[node].list_mutex); in spu_unschedule()
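
__spu_schedule() and spu_unschedule() bracket a context's time on an SPU with symmetric bookkeeping: the node's nr_active counter is updated under the same list_mutex that protects the node's SPU list. A minimal sketch of that pattern follows; the bind/unbind hooks are hypothetical stand-ins for the real spu_bind_context()/spu_unbind_context() work, and the per-node struct is again a simplified assumption.

#include <linux/mutex.h>

/* Per-node active-context count, modelled on cbe_spu_info[].nr_active (assumption). */
struct spu_node_sched {
	struct mutex list_mutex;
	int nr_active;
};

extern struct spu_node_sched spu_node_sched[];

struct spu_min { int node; };
struct spu_ctx_min;

/* Hypothetical stand-ins for the real bind/unbind work. */
extern void bind_ctx(struct spu_min *spu, struct spu_ctx_min *ctx);
extern void unbind_ctx(struct spu_min *spu, struct spu_ctx_min *ctx);

static void node_schedule(struct spu_min *spu, struct spu_ctx_min *ctx)
{
	int node = spu->node;

	mutex_lock(&spu_node_sched[node].list_mutex);
	bind_ctx(spu, ctx);
	spu_node_sched[node].nr_active++;	/* one more running context on this node */
	mutex_unlock(&spu_node_sched[node].list_mutex);
}

static void node_unschedule(struct spu_min *spu, struct spu_ctx_min *ctx)
{
	int node = spu->node;

	mutex_lock(&spu_node_sched[node].list_mutex);
	spu_node_sched[node].nr_active--;	/* the context leaves the SPU */
	unbind_ctx(spu, ctx);
	mutex_unlock(&spu_node_sched[node].list_mutex);
}
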
819 static struct spu_context *grab_runnable_context(int prio, int node) in grab_runnable_context() argument
831 if (__node_allowed(ctx, node)) { in grab_runnable_context()
850 new = grab_runnable_context(max_prio, spu->node); in __spu_deactivate()
923 new = grab_runnable_context(ctx->prio + 1, spu->node); in spusched_tick()
951 int nr_active = 0, node; in count_active_contexts() local
953 for (node = 0; node < MAX_NUMNODES; node++) in count_active_contexts()
954 nr_active += cbe_spu_info[node].nr_active; in count_active_contexts()
991 int node; in spusched_thread() local
996 for (node = 0; node < MAX_NUMNODES; node++) { in spusched_thread()
997 struct mutex *mtx = &cbe_spu_info[node].list_mutex; in spusched_thread()
1000 list_for_each_entry(spu, &cbe_spu_info[node].spus, in spusched_thread()
1026 int node; in spuctx_switch_state() local
1047 node = spu->node; in spuctx_switch_state()
1049 atomic_dec(&cbe_spu_info[node].busy_spus); in spuctx_switch_state()
1051 atomic_inc(&cbe_spu_info[node].busy_spus); in spuctx_switch_state()
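
spuctx_switch_state() also touches a second per-node counter: busy_spus is an atomic_t, decremented when a context leaves the busy state and incremented when one enters it, so the update needs no list_mutex. A small sketch of that accounting, with the real old-state/new-state comparison reduced to two booleans (an assumption for this sketch):

#include <linux/atomic.h>
#include <linux/types.h>

/* Per-node busy counter, modelled on cbe_spu_info[].busy_spus (assumption). */
struct spu_node_stats {
	atomic_t busy_spus;
};

extern struct spu_node_stats spu_node_stats[];

struct spu_min2 { int node; };

/* "was_busy"/"is_busy" stand in for the real state test in spuctx_switch_state(). */
static void account_busy(struct spu_min2 *spu, bool was_busy, bool is_busy)
{
	int node = spu->node;

	if (was_busy && !is_busy)
		atomic_dec(&spu_node_stats[node].busy_spus);
	else if (!was_busy && is_busy)
		atomic_inc(&spu_node_stats[node].busy_spus);
}
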
1125 int node; in spu_sched_exit() local
1133 for (node = 0; node < MAX_NUMNODES; node++) { in spu_sched_exit()
1134 mutex_lock(&cbe_spu_info[node].list_mutex); in spu_sched_exit()
1135 list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) in spu_sched_exit()
1138 mutex_unlock(&cbe_spu_info[node].list_mutex); in spu_sched_exit()