Lines matching refs: node
151 int node; in spu_update_sched_info() local
154 node = ctx->spu->node; in spu_update_sched_info()
159 mutex_lock(&cbe_spu_info[node].list_mutex); in spu_update_sched_info()
161 mutex_unlock(&cbe_spu_info[node].list_mutex); in spu_update_sched_info()
167 static int __node_allowed(struct spu_context *ctx, int node) in __node_allowed() argument
169 if (nr_cpus_node(node)) { in __node_allowed()
170 const struct cpumask *mask = cpumask_of_node(node); in __node_allowed()
179 static int node_allowed(struct spu_context *ctx, int node) in node_allowed() argument
184 rval = __node_allowed(ctx, node); in node_allowed()
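
The two helpers above gate node selection on the context's CPU affinity: a node is only eligible if it has CPUs and at least one of them is allowed for the context. A minimal sketch of the unlocked check, assuming the context keeps its allowed CPUs in a cpus_allowed mask (node_allowed() wraps the same test in locking):

static int __node_allowed(struct spu_context *ctx, int node)
{
        /* Only consider nodes that actually have CPUs. */
        if (nr_cpus_node(node)) {
                const struct cpumask *mask = cpumask_of_node(node);

                /* Eligible if the node shares at least one allowed CPU. */
                if (cpumask_intersects(mask, &ctx->cpus_allowed))
                        return 1;
        }
        return 0;
}
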
192 int node; in do_notify_spus_active() local
200 for_each_online_node(node) { in do_notify_spus_active()
203 mutex_lock(&cbe_spu_info[node].list_mutex); in do_notify_spus_active()
204 list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) { in do_notify_spus_active()
213 mutex_unlock(&cbe_spu_info[node].list_mutex); in do_notify_spus_active()
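
do_notify_spus_active() also shows the traversal idiom used throughout the file: every node owns a list of SPUs in cbe_spu_info[node].spus, chained through cbe_list and protected by that node's list_mutex. A stripped-down sketch with the per-SPU work left as a comment (notify_active_spus is a hypothetical name, not the kernel function):

static void notify_active_spus(void)
{
        struct spu *spu;
        int node;

        for_each_online_node(node) {
                /* The per-node list is only walked under its own mutex. */
                mutex_lock(&cbe_spu_info[node].list_mutex);
                list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
                        /* act on each SPU owned by this node, e.g. wake
                         * its context's waiters */
                }
                mutex_unlock(&cbe_spu_info[node].list_mutex);
        }
}
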
229 atomic_inc(&cbe_spu_info[spu->node].reserved_spus); in spu_bind_context()
265 BUG_ON(!mutex_is_locked(&cbe_spu_info[spu->node].list_mutex)); in sched_spu()
308 int node, n; in aff_ref_location() local
314 node = cpu_to_node(raw_smp_processor_id()); in aff_ref_location()
315 for (n = 0; n < MAX_NUMNODES; n++, node++) { in aff_ref_location()
327 node = (node < MAX_NUMNODES) ? node : 0; in aff_ref_location()
328 if (!node_allowed(ctx, node)) in aff_ref_location()
332 mutex_lock(&cbe_spu_info[node].list_mutex); in aff_ref_location()
333 list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) { in aff_ref_location()
340 mutex_unlock(&cbe_spu_info[node].list_mutex); in aff_ref_location()
344 list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) { in aff_ref_location()
347 mutex_unlock(&cbe_spu_info[node].list_mutex); in aff_ref_location()
351 mutex_unlock(&cbe_spu_info[node].list_mutex); in aff_ref_location()
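
aff_ref_location() above, and spu_get_idle() and find_victim() below, all walk nodes the same way: start from the node of the CPU the caller is running on, wrap around after MAX_NUMNODES, skip nodes the context is not allowed on, and only then take that node's list_mutex to scan its SPUs. A condensed sketch of the loop skeleton (find_free_spu and the inner test are placeholders; each caller applies its own criterion):

static struct spu *find_free_spu(struct spu_context *ctx)
{
        struct spu *spu;
        int n, node;

        /* Prefer the node the caller is currently running on. */
        node = cpu_to_node(raw_smp_processor_id());
        for (n = 0; n < MAX_NUMNODES; n++, node++) {
                node = (node < MAX_NUMNODES) ? node : 0;   /* wrap around */
                if (!node_allowed(ctx, node))
                        continue;

                mutex_lock(&cbe_spu_info[node].list_mutex);
                list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
                        if (0 /* caller-specific test, e.g. the SPU is free */) {
                                mutex_unlock(&cbe_spu_info[node].list_mutex);
                                return spu;
                        }
                }
                mutex_unlock(&cbe_spu_info[node].list_mutex);
        }
        return NULL;
}
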
380 static struct spu *ctx_location(struct spu *ref, int offset, int node) in ctx_location() argument
387 BUG_ON(spu->node != node); in ctx_location()
395 BUG_ON(spu->node != node); in ctx_location()
445 atomic_dec(&cbe_spu_info[spu->node].reserved_spus); in spu_unbind_context()
574 int node, n; in spu_get_idle() local
584 node = aff_ref_spu->node; in spu_get_idle()
586 mutex_lock(&cbe_spu_info[node].list_mutex); in spu_get_idle()
587 spu = ctx_location(aff_ref_spu, ctx->aff_offset, node); in spu_get_idle()
590 mutex_unlock(&cbe_spu_info[node].list_mutex); in spu_get_idle()
597 node = cpu_to_node(raw_smp_processor_id()); in spu_get_idle()
598 for (n = 0; n < MAX_NUMNODES; n++, node++) { in spu_get_idle()
599 node = (node < MAX_NUMNODES) ? node : 0; in spu_get_idle()
600 if (!node_allowed(ctx, node)) in spu_get_idle()
603 mutex_lock(&cbe_spu_info[node].list_mutex); in spu_get_idle()
604 list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) { in spu_get_idle()
608 mutex_unlock(&cbe_spu_info[node].list_mutex); in spu_get_idle()
617 mutex_unlock(&cbe_spu_info[node].list_mutex); in spu_get_idle()
633 int node, n; in find_victim() local
645 node = cpu_to_node(raw_smp_processor_id()); in find_victim()
646 for (n = 0; n < MAX_NUMNODES; n++, node++) { in find_victim()
647 node = (node < MAX_NUMNODES) ? node : 0; in find_victim()
648 if (!node_allowed(ctx, node)) in find_victim()
651 mutex_lock(&cbe_spu_info[node].list_mutex); in find_victim()
652 list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) { in find_victim()
663 mutex_unlock(&cbe_spu_info[node].list_mutex); in find_victim()
697 mutex_lock(&cbe_spu_info[node].list_mutex); in find_victim()
698 cbe_spu_info[node].nr_active--; in find_victim()
700 mutex_unlock(&cbe_spu_info[node].list_mutex); in find_victim()
719 int node = spu->node; in __spu_schedule() local
724 mutex_lock(&cbe_spu_info[node].list_mutex); in __spu_schedule()
727 cbe_spu_info[node].nr_active++; in __spu_schedule()
731 mutex_unlock(&cbe_spu_info[node].list_mutex); in __spu_schedule()
765 int node = spu->node; in spu_unschedule() local
767 mutex_lock(&cbe_spu_info[node].list_mutex); in spu_unschedule()
768 cbe_spu_info[node].nr_active--; in spu_unschedule()
774 mutex_unlock(&cbe_spu_info[node].list_mutex); in spu_unschedule()
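
__spu_schedule() and spu_unschedule() keep the per-node active count, cbe_spu_info[node].nr_active, in step with context binding, always under that node's list_mutex. A reduced sketch of the unschedule side (spu_unschedule_sketch is a hypothetical name; the real function also updates allocation state and context-switch statistics):

static void spu_unschedule_sketch(struct spu *spu, struct spu_context *ctx)
{
        int node = spu->node;

        mutex_lock(&cbe_spu_info[node].list_mutex);
        cbe_spu_info[node].nr_active--;         /* one fewer running context */
        spu_unbind_context(spu, ctx);
        mutex_unlock(&cbe_spu_info[node].list_mutex);
}
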
837 static struct spu_context *grab_runnable_context(int prio, int node) in grab_runnable_context() argument
849 if (__node_allowed(ctx, node)) { in grab_runnable_context()
868 new = grab_runnable_context(max_prio, spu->node); in __spu_deactivate()
941 new = grab_runnable_context(ctx->prio + 1, spu->node); in spusched_tick()
969 int nr_active = 0, node; in count_active_contexts() local
971 for (node = 0; node < MAX_NUMNODES; node++) in count_active_contexts()
972 nr_active += cbe_spu_info[node].nr_active; in count_active_contexts()
1009 int node; in spusched_thread() local
1014 for (node = 0; node < MAX_NUMNODES; node++) { in spusched_thread()
1015 struct mutex *mtx = &cbe_spu_info[node].list_mutex; in spusched_thread()
1018 list_for_each_entry(spu, &cbe_spu_info[node].spus, in spusched_thread()
1044 int node; in spuctx_switch_state() local
1065 node = spu->node; in spuctx_switch_state()
1067 atomic_dec(&cbe_spu_info[node].busy_spus); in spuctx_switch_state()
1069 atomic_inc(&cbe_spu_info[node].busy_spus); in spuctx_switch_state()
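
Unlike nr_active, the busy_spus counter (and reserved_spus, adjusted in spu_bind_context() and spu_unbind_context() above) is an atomic_t and is updated without taking list_mutex. A sketch of the accounting implied by spuctx_switch_state(), phrased with plain flags for entering and leaving user-mode execution (account_busy_spus is a hypothetical helper name):

static void account_busy_spus(struct spu *spu, int was_user, int is_user)
{
        int node = spu->node;

        if (was_user)           /* leaving user-mode execution */
                atomic_dec(&cbe_spu_info[node].busy_spus);
        if (is_user)            /* entering user-mode execution */
                atomic_inc(&cbe_spu_info[node].busy_spus);
}
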
1156 int node; in spu_sched_exit() local
1164 for (node = 0; node < MAX_NUMNODES; node++) { in spu_sched_exit()
1165 mutex_lock(&cbe_spu_info[node].list_mutex); in spu_sched_exit()
1166 list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) in spu_sched_exit()
1169 mutex_unlock(&cbe_spu_info[node].list_mutex); in spu_sched_exit()