Lines Matching refs:ctx

85 void spu_set_timeslice(struct spu_context *ctx)  in spu_set_timeslice()  argument
87 if (ctx->prio < NORMAL_PRIO) in spu_set_timeslice()
88 ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE * 4, ctx->prio); in spu_set_timeslice()
90 ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE, ctx->prio); in spu_set_timeslice()
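The two branches above (lines 87-90) scale the default time slice by the context's priority, with contexts below NORMAL_PRIO starting from a four times larger base. Below is a minimal standalone sketch of that mapping; the concrete constants and the body of scale_prio() are assumptions for illustration, since only the names NORMAL_PRIO, DEF_SPU_TIMESLICE and SCALE_PRIO appear in the listing.

#include <stdio.h>

/* Assumed values for illustration only; not taken from the listing. */
#define MAX_PRIO          140
#define MAX_USER_PRIO     40
#define NORMAL_PRIO       120
#define MIN_SPU_TIMESLICE 1      /* never less than one scheduler tick */
#define DEF_SPU_TIMESLICE 10     /* default slice, in scheduler ticks */

/* Hypothetical reconstruction of SCALE_PRIO: a better (numerically lower)
 * priority gets a proportionally longer slice, floored at the minimum. */
static int scale_prio(int base, int prio)
{
    int slice = base * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2);
    return slice > MIN_SPU_TIMESLICE ? slice : MIN_SPU_TIMESLICE;
}

static int set_timeslice(int prio)
{
    if (prio < NORMAL_PRIO)                     /* boosted/real-time contexts */
        return scale_prio(DEF_SPU_TIMESLICE * 4, prio);
    return scale_prio(DEF_SPU_TIMESLICE, prio);
}

int main(void)
{
    for (int prio = 100; prio < MAX_PRIO; prio += 10)
        printf("prio %3d -> time_slice %d ticks\n", prio, set_timeslice(prio));
    return 0;
}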
96 void __spu_update_sched_info(struct spu_context *ctx) in __spu_update_sched_info() argument
102 BUG_ON(!list_empty(&ctx->rq)); in __spu_update_sched_info()
109 ctx->tid = current->pid; in __spu_update_sched_info()
118 ctx->prio = current->prio; in __spu_update_sched_info()
120 ctx->prio = current->static_prio; in __spu_update_sched_info()
121 ctx->policy = current->policy; in __spu_update_sched_info()
131 cpumask_copy(&ctx->cpus_allowed, current->cpus_ptr); in __spu_update_sched_info()
134 ctx->last_ran = raw_smp_processor_id(); in __spu_update_sched_info()
137 void spu_update_sched_info(struct spu_context *ctx) in spu_update_sched_info() argument
141 if (ctx->state == SPU_STATE_RUNNABLE) { in spu_update_sched_info()
142 node = ctx->spu->node; in spu_update_sched_info()
148 __spu_update_sched_info(ctx); in spu_update_sched_info()
151 __spu_update_sched_info(ctx); in spu_update_sched_info()
155 static int __node_allowed(struct spu_context *ctx, int node) in __node_allowed() argument
160 if (cpumask_intersects(mask, &ctx->cpus_allowed)) in __node_allowed()
167 static int node_allowed(struct spu_context *ctx, int node) in node_allowed() argument
172 rval = __node_allowed(ctx, node); in node_allowed()
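node_allowed() (lines 167-172) wraps __node_allowed(), which checks whether the CPUs of a NUMA node intersect the context's allowed mask (copied from current->cpus_ptr in __spu_update_sched_info() above). A toy version of that intersection test follows; the node-to-CPU mapping is invented for illustration:

#include <stdio.h>

/* Toy CPU masks: one bit per CPU, 8 CPUs, 4 per node (illustrative only). */
typedef unsigned char cpumask_t;

static cpumask_t node_to_cpumask(int node)
{
    return node == 0 ? 0x0f : 0xf0;       /* CPUs 0-3 on node 0, 4-7 on node 1 */
}

/* Equivalent of __node_allowed(): a node is usable only if at least one of
 * its CPUs is in the context's allowed mask. */
static int node_allowed(cpumask_t cpus_allowed, int node)
{
    return (node_to_cpumask(node) & cpus_allowed) != 0;
}

int main(void)
{
    cpumask_t allowed = 0x03;             /* task pinned to CPUs 0 and 1 */
    printf("node 0 allowed: %d\n", node_allowed(allowed, 0));   /* 1 */
    printf("node 1 allowed: %d\n", node_allowed(allowed, 1));   /* 0 */
    return 0;
}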
191 struct spu_context *ctx = spu->ctx; in do_notify_spus_active() local
193 &ctx->sched_flags); in do_notify_spus_active()
195 wake_up_all(&ctx->stop_wq); in do_notify_spus_active()
207 static void spu_bind_context(struct spu *spu, struct spu_context *ctx) in spu_bind_context() argument
209 spu_context_trace(spu_bind_context__enter, ctx, spu); in spu_bind_context()
211 spuctx_switch_state(ctx, SPU_UTIL_SYSTEM); in spu_bind_context()
213 if (ctx->flags & SPU_CREATE_NOSCHED) in spu_bind_context()
216 ctx->stats.slb_flt_base = spu->stats.slb_flt; in spu_bind_context()
217 ctx->stats.class2_intr_base = spu->stats.class2_intr; in spu_bind_context()
219 spu_associate_mm(spu, ctx->owner); in spu_bind_context()
222 spu->ctx = ctx; in spu_bind_context()
224 ctx->spu = spu; in spu_bind_context()
225 ctx->ops = &spu_hw_ops; in spu_bind_context()
234 spu_unmap_mappings(ctx); in spu_bind_context()
236 spu_switch_log_notify(spu, ctx, SWITCH_LOG_START, 0); in spu_bind_context()
237 spu_restore(&ctx->csa, spu); in spu_bind_context()
239 ctx->state = SPU_STATE_RUNNABLE; in spu_bind_context()
241 spuctx_switch_state(ctx, SPU_UTIL_USER); in spu_bind_context()
251 return (!spu->ctx || !(spu->ctx->flags & SPU_CREATE_NOSCHED)); in sched_spu()
256 struct spu_context *ctx; in aff_merge_remaining_ctxs() local
258 list_for_each_entry(ctx, &gang->aff_list_head, aff_list) { in aff_merge_remaining_ctxs()
259 if (list_empty(&ctx->aff_list)) in aff_merge_remaining_ctxs()
260 list_add(&ctx->aff_list, &gang->aff_list_head); in aff_merge_remaining_ctxs()
267 struct spu_context *ctx; in aff_set_offsets() local
271 list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list, in aff_set_offsets()
273 if (&ctx->aff_list == &gang->aff_list_head) in aff_set_offsets()
275 ctx->aff_offset = offset--; in aff_set_offsets()
279 list_for_each_entry(ctx, gang->aff_ref_ctx->aff_list.prev, aff_list) { in aff_set_offsets()
280 if (&ctx->aff_list == &gang->aff_list_head) in aff_set_offsets()
282 ctx->aff_offset = offset++; in aff_set_offsets()
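Lines 271-282 walk the gang's affinity list in both directions from the reference context, assigning decreasing negative offsets on one side and increasing positive offsets on the other, so each context records how many SPUs away from the reference SPU it should be placed (ctx_location() in spu_get_idle() later consumes this). A stripped-down sketch of that numbering on an array rather than a kernel list; all of it is illustrative, not the real spufs structures:

#include <stdio.h>

#define GANG_SIZE 5

int main(void)
{
    int aff_offset[GANG_SIZE];
    int ref = 2;                     /* index of the affinity reference context */
    int offset;

    /* Contexts on one side of the reference count down from -1 ... */
    offset = -1;
    for (int i = ref - 1; i >= 0; i--)
        aff_offset[i] = offset--;

    /* ... contexts on the other side count up from +1. */
    offset = 1;
    for (int i = ref + 1; i < GANG_SIZE; i++)
        aff_offset[i] = offset++;

    aff_offset[ref] = 0;             /* the reference context sits on the reference SPU */

    for (int i = 0; i < GANG_SIZE; i++)
        printf("ctx %d -> aff_offset %+d\n", i, aff_offset[i]);
    return 0;
}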
288 static struct spu *aff_ref_location(struct spu_context *ctx, int mem_aff, in aff_ref_location() argument
312 if (!node_allowed(ctx, node)) in aff_ref_location()
318 if (spu->ctx && spu->ctx->gang && !spu->ctx->aff_offset in aff_ref_location()
319 && spu->ctx->gang->aff_ref_spu) in aff_ref_location()
320 available_spus -= spu->ctx->gang->contexts; in aff_ref_location()
323 if (available_spus < ctx->gang->contexts) { in aff_ref_location()
343 struct spu_context *ctx; in aff_set_ref_point_location() local
353 list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list, in aff_set_ref_point_location()
355 if (&ctx->aff_list == &gang->aff_list_head) in aff_set_ref_point_location()
357 lowest_offset = ctx->aff_offset; in aff_set_ref_point_location()
394 static int has_affinity(struct spu_context *ctx) in has_affinity() argument
396 struct spu_gang *gang = ctx->gang; in has_affinity()
398 if (list_empty(&ctx->aff_list)) in has_affinity()
401 if (atomic_read(&ctx->gang->aff_sched_count) == 0) in has_affinity()
402 ctx->gang->aff_ref_spu = NULL; in has_affinity()
420 static void spu_unbind_context(struct spu *spu, struct spu_context *ctx) in spu_unbind_context() argument
424 spu_context_trace(spu_unbind_context__enter, ctx, spu); in spu_unbind_context()
426 spuctx_switch_state(ctx, SPU_UTIL_SYSTEM); in spu_unbind_context()
428 if (spu->ctx->flags & SPU_CREATE_NOSCHED) in spu_unbind_context()
431 if (ctx->gang) in spu_unbind_context()
437 atomic_dec_if_positive(&ctx->gang->aff_sched_count); in spu_unbind_context()
439 spu_unmap_mappings(ctx); in spu_unbind_context()
440 spu_save(&ctx->csa, spu); in spu_unbind_context()
441 spu_switch_log_notify(spu, ctx, SWITCH_LOG_STOP, 0); in spu_unbind_context()
445 ctx->state = SPU_STATE_SAVED; in spu_unbind_context()
452 ctx->ops = &spu_backing_ops; in spu_unbind_context()
454 spu->ctx = NULL; in spu_unbind_context()
459 ctx->stats.slb_flt += in spu_unbind_context()
460 (spu->stats.slb_flt - ctx->stats.slb_flt_base); in spu_unbind_context()
461 ctx->stats.class2_intr += in spu_unbind_context()
462 (spu->stats.class2_intr - ctx->stats.class2_intr_base); in spu_unbind_context()
465 spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED); in spu_unbind_context()
466 ctx->spu = NULL; in spu_unbind_context()
468 if (spu_stopped(ctx, &status)) in spu_unbind_context()
469 wake_up_all(&ctx->stop_wq); in spu_unbind_context()
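The bind/unbind pair uses a snapshot-and-delta pattern for per-context fault statistics: spu_bind_context() records the SPU's running counters as *_base (lines 216-217), and spu_unbind_context() folds the difference since that snapshot into the context's own counters (lines 459-462), so a context is only charged for faults taken while it was actually loaded. A minimal sketch of the same pattern, with hypothetical names:

#include <stdio.h>

/* Hypothetical counters standing in for spu->stats and ctx->stats. */
struct hw_stats  { unsigned long slb_flt; };
struct ctx_stats { unsigned long slb_flt, slb_flt_base; };

/* On bind: remember where the hardware counter currently stands. */
static void bind_snapshot(struct ctx_stats *c, const struct hw_stats *hw)
{
    c->slb_flt_base = hw->slb_flt;
}

/* On unbind: credit only the faults that happened while this context ran. */
static void unbind_accumulate(struct ctx_stats *c, const struct hw_stats *hw)
{
    c->slb_flt += hw->slb_flt - c->slb_flt_base;
}

int main(void)
{
    struct hw_stats  hw  = { .slb_flt = 1000 };   /* faults taken by the SPU so far */
    struct ctx_stats ctx = { 0 };

    bind_snapshot(&ctx, &hw);
    hw.slb_flt += 42;                             /* faults while our context was loaded */
    unbind_accumulate(&ctx, &hw);

    printf("faults charged to context: %lu\n", ctx.slb_flt);    /* 42 */
    return 0;
}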
476 static void __spu_add_to_rq(struct spu_context *ctx) in __spu_add_to_rq() argument
491 if (list_empty(&ctx->rq)) { in __spu_add_to_rq()
492 list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]); in __spu_add_to_rq()
493 set_bit(ctx->prio, spu_prio->bitmap); in __spu_add_to_rq()
499 static void spu_add_to_rq(struct spu_context *ctx) in spu_add_to_rq() argument
502 __spu_add_to_rq(ctx); in spu_add_to_rq()
506 static void __spu_del_from_rq(struct spu_context *ctx) in __spu_del_from_rq() argument
508 int prio = ctx->prio; in __spu_del_from_rq()
510 if (!list_empty(&ctx->rq)) { in __spu_del_from_rq()
513 list_del_init(&ctx->rq); in __spu_del_from_rq()
520 void spu_del_from_rq(struct spu_context *ctx) in spu_del_from_rq() argument
523 __spu_del_from_rq(ctx); in spu_del_from_rq()
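__spu_add_to_rq() and __spu_del_from_rq() keep one list per priority plus a bitmap of non-empty priorities, so the best waiting context can be found with a single first-set-bit scan; grab_runnable_context() further down performs exactly that scan (with an additional node check via __node_allowed()). A self-contained sketch of the structure and both halves of the operation; sizes and helper names are assumptions, not the real spufs definitions:

#include <stdio.h>
#include <stdbool.h>

#define MAX_PRIO 140

struct ctx {
    int prio;
    struct ctx *next;              /* stand-in for the kernel's list_head rq */
};

/* Stand-in for spu_prio: one FIFO per priority plus a "non-empty" flag per
 * priority (the kernel uses a real bitmap and a find-first-bit scan). */
static struct ctx *runq[MAX_PRIO];
static bool        runq_bit[MAX_PRIO];

static void add_to_rq(struct ctx *c)
{
    struct ctx **p = &runq[c->prio];

    while (*p)                     /* append, keeping FIFO order per priority */
        p = &(*p)->next;
    *p = c;
    c->next = NULL;
    runq_bit[c->prio] = true;
}

static void del_from_rq(struct ctx *c)
{
    struct ctx **p = &runq[c->prio];

    while (*p && *p != c)
        p = &(*p)->next;
    if (*p)
        *p = c->next;
    if (!runq[c->prio])            /* list drained: clear the bit */
        runq_bit[c->prio] = false;
}

/* Like grab_runnable_context(): best waiting context strictly below 'ceiling'. */
static struct ctx *grab_runnable(int ceiling)
{
    for (int prio = 0; prio < ceiling; prio++) {
        if (runq_bit[prio]) {
            struct ctx *c = runq[prio];
            del_from_rq(c);
            return c;
        }
    }
    return NULL;
}

int main(void)
{
    struct ctx a = { .prio = 125 }, b = { .prio = 110 };

    add_to_rq(&a);
    add_to_rq(&b);
    printf("picked prio %d\n", grab_runnable(MAX_PRIO)->prio);   /* 110 first */
    printf("picked prio %d\n", grab_runnable(MAX_PRIO)->prio);   /* then 125 */
    return 0;
}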
527 static void spu_prio_wait(struct spu_context *ctx) in spu_prio_wait() argument
536 BUG_ON(!(ctx->flags & SPU_CREATE_NOSCHED)); in spu_prio_wait()
539 prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE); in spu_prio_wait()
541 __spu_add_to_rq(ctx); in spu_prio_wait()
543 mutex_unlock(&ctx->state_mutex); in spu_prio_wait()
545 mutex_lock(&ctx->state_mutex); in spu_prio_wait()
547 __spu_del_from_rq(ctx); in spu_prio_wait()
551 remove_wait_queue(&ctx->stop_wq, &wait); in spu_prio_wait()
554 static struct spu *spu_get_idle(struct spu_context *ctx) in spu_get_idle() argument
559 spu_context_nospu_trace(spu_get_idle__enter, ctx); in spu_get_idle()
561 if (ctx->gang) { in spu_get_idle()
562 mutex_lock(&ctx->gang->aff_mutex); in spu_get_idle()
563 if (has_affinity(ctx)) { in spu_get_idle()
564 aff_ref_spu = ctx->gang->aff_ref_spu; in spu_get_idle()
565 atomic_inc(&ctx->gang->aff_sched_count); in spu_get_idle()
566 mutex_unlock(&ctx->gang->aff_mutex); in spu_get_idle()
570 spu = ctx_location(aff_ref_spu, ctx->aff_offset, node); in spu_get_idle()
575 atomic_dec(&ctx->gang->aff_sched_count); in spu_get_idle()
578 mutex_unlock(&ctx->gang->aff_mutex); in spu_get_idle()
583 if (!node_allowed(ctx, node)) in spu_get_idle()
595 spu_context_nospu_trace(spu_get_idle__not_found, ctx); in spu_get_idle()
601 spu_context_trace(spu_get_idle__found, ctx, spu); in spu_get_idle()
612 static struct spu *find_victim(struct spu_context *ctx) in find_victim() argument
618 spu_context_nospu_trace(spu_find_victim__enter, ctx); in find_victim()
631 if (!node_allowed(ctx, node)) in find_victim()
636 struct spu_context *tmp = spu->ctx; in find_victim()
638 if (tmp && tmp->prio > ctx->prio && in find_victim()
641 victim = spu->ctx; in find_victim()
666 if (!spu || victim->prio <= ctx->prio) { in find_victim()
678 spu_context_trace(__spu_deactivate__unload, ctx, spu); in find_victim()
700 static void __spu_schedule(struct spu *spu, struct spu_context *ctx) in __spu_schedule() argument
705 spu_set_timeslice(ctx); in __spu_schedule()
708 if (spu->ctx == NULL) { in __spu_schedule()
709 spu_bind_context(spu, ctx); in __spu_schedule()
717 wake_up_all(&ctx->run_wq); in __spu_schedule()
719 spu_add_to_rq(ctx); in __spu_schedule()
722 static void spu_schedule(struct spu *spu, struct spu_context *ctx) in spu_schedule() argument
726 mutex_lock(&ctx->state_mutex); in spu_schedule()
727 if (ctx->state == SPU_STATE_SAVED) in spu_schedule()
728 __spu_schedule(spu, ctx); in spu_schedule()
729 spu_release(ctx); in spu_schedule()
745 static void spu_unschedule(struct spu *spu, struct spu_context *ctx, in spu_unschedule() argument
754 spu_unbind_context(spu, ctx); in spu_unschedule()
755 ctx->stats.invol_ctx_switch++; in spu_unschedule()
769 int spu_activate(struct spu_context *ctx, unsigned long flags) in spu_activate() argument
779 if (ctx->spu) in spu_activate()
786 spu = spu_get_idle(ctx); in spu_activate()
791 if (!spu && rt_prio(ctx->prio)) in spu_activate()
792 spu = find_victim(ctx); in spu_activate()
796 runcntl = ctx->ops->runcntl_read(ctx); in spu_activate()
797 __spu_schedule(spu, ctx); in spu_activate()
799 spuctx_switch_state(ctx, SPU_UTIL_USER); in spu_activate()
804 if (ctx->flags & SPU_CREATE_NOSCHED) { in spu_activate()
805 spu_prio_wait(ctx); in spu_activate()
809 spu_add_to_rq(ctx); in spu_activate()
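spu_activate() tries, in order: an idle SPU via spu_get_idle(); failing that, and only for real-time priorities (the rt_prio() check at line 791), stealing an SPU from a lower-priority victim via find_victim(); and if neither works, either sleeping in spu_prio_wait() for SPU_CREATE_NOSCHED contexts or queueing on the runqueue. A condensed sketch of that decision flow; every function here is a placeholder, not the real spufs API:

#include <stdio.h>
#include <stdbool.h>

struct spu     { int id; };
struct context { int prio; bool nosched; };

/* Placeholder policy helpers standing in for the kernel's versions. */
static bool rt_prio(int prio)                  { return prio < 100; }
static struct spu *get_idle(struct context *c) { (void)c; return NULL; }  /* pretend none idle */

static struct spu *find_victim(struct context *c)
{
    static struct spu stolen = { .id = 7 };
    return rt_prio(c->prio) ? &stolen : NULL;   /* only RT contexts may preempt */
}

static const char *activate(struct context *c)
{
    struct spu *spu = get_idle(c);

    if (!spu && rt_prio(c->prio))               /* no idle SPU: RT may steal one */
        spu = find_victim(c);
    if (spu)
        return "scheduled on an SPU";
    return c->nosched ? "waits in spu_prio_wait()" : "queued on the runqueue";
}

int main(void)
{
    struct context rt   = { .prio = 50 };
    struct context norm = { .prio = 120 };

    printf("RT context:     %s\n", activate(&rt));
    printf("normal context: %s\n", activate(&norm));
    return 0;
}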
822 struct spu_context *ctx; in grab_runnable_context() local
830 list_for_each_entry(ctx, rq, rq) { in grab_runnable_context()
832 if (__node_allowed(ctx, node)) { in grab_runnable_context()
833 __spu_del_from_rq(ctx); in grab_runnable_context()
839 ctx = NULL; in grab_runnable_context()
842 return ctx; in grab_runnable_context()
845 static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio) in __spu_deactivate() argument
847 struct spu *spu = ctx->spu; in __spu_deactivate()
853 spu_unschedule(spu, ctx, new == NULL); in __spu_deactivate()
858 spu_release(ctx); in __spu_deactivate()
862 mutex_lock(&ctx->state_mutex); in __spu_deactivate()
878 void spu_deactivate(struct spu_context *ctx) in spu_deactivate() argument
880 spu_context_nospu_trace(spu_deactivate__enter, ctx); in spu_deactivate()
881 __spu_deactivate(ctx, 1, MAX_PRIO); in spu_deactivate()
892 void spu_yield(struct spu_context *ctx) in spu_yield() argument
894 spu_context_nospu_trace(spu_yield__enter, ctx); in spu_yield()
895 if (!(ctx->flags & SPU_CREATE_NOSCHED)) { in spu_yield()
896 mutex_lock(&ctx->state_mutex); in spu_yield()
897 __spu_deactivate(ctx, 0, MAX_PRIO); in spu_yield()
898 mutex_unlock(&ctx->state_mutex); in spu_yield()
902 static noinline void spusched_tick(struct spu_context *ctx) in spusched_tick() argument
907 if (spu_acquire(ctx)) in spusched_tick()
910 if (ctx->state != SPU_STATE_RUNNABLE) in spusched_tick()
912 if (ctx->flags & SPU_CREATE_NOSCHED) in spusched_tick()
914 if (ctx->policy == SCHED_FIFO) in spusched_tick()
917 if (--ctx->time_slice && test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags)) in spusched_tick()
920 spu = ctx->spu; in spusched_tick()
922 spu_context_trace(spusched_tick__preempt, ctx, spu); in spusched_tick()
924 new = grab_runnable_context(ctx->prio + 1, spu->node); in spusched_tick()
926 spu_unschedule(spu, ctx, 0); in spusched_tick()
927 if (test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags)) in spusched_tick()
928 spu_add_to_rq(ctx); in spusched_tick()
930 spu_context_nospu_trace(spusched_tick__newslice, ctx); in spusched_tick()
931 if (!ctx->time_slice) in spusched_tick()
932 ctx->time_slice++; in spusched_tick()
935 spu_release(ctx); in spusched_tick()
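spusched_tick() decrements the running context's time slice and, once it reaches zero, asks grab_runnable_context(ctx->prio + 1, ...) for a waiter; because the scan stops at that ceiling, a running context is only preempted by a waiter of equal or better priority, otherwise it simply receives another slice (lines 930-932). A compact sketch of that decision, with grab_runnable() a stub standing in for the real runqueue lookup:

#include <stdio.h>
#include <stdbool.h>

struct context { int prio; int time_slice; };

/* Stub: pretend a prio-90 context is waiting.  It is returned only if its
 * priority number is below the ceiling, i.e. it is at least as important
 * as the context currently on the SPU. */
static struct context *grab_runnable(int ceiling)
{
    static struct context waiter = { .prio = 90, .time_slice = 0 };
    return waiter.prio < ceiling ? &waiter : NULL;
}

/* One scheduler tick for a loaded context: returns true if it keeps the SPU. */
static bool tick(struct context *ctx)
{
    if (--ctx->time_slice > 0)
        return true;                       /* slice not used up yet */

    if (grab_runnable(ctx->prio + 1))      /* someone at least as important waits */
        return false;                      /* preempt: unbind and re-queue */

    ctx->time_slice = 1;                   /* nobody better waiting: new slice */
    return true;
}

int main(void)
{
    struct context low  = { .prio = 120, .time_slice = 1 };
    struct context high = { .prio = 50,  .time_slice = 1 };

    printf("prio 120 keeps its SPU after the tick: %d\n", tick(&low));   /* 0 */
    printf("prio  50 keeps its SPU after the tick: %d\n", tick(&high));  /* 1 */
    return 0;
}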
1003 struct spu_context *ctx = spu->ctx; in spusched_thread() local
1005 if (ctx) { in spusched_thread()
1006 get_spu_context(ctx); in spusched_thread()
1008 spusched_tick(ctx); in spusched_thread()
1010 put_spu_context(ctx); in spusched_thread()
1020 void spuctx_switch_state(struct spu_context *ctx, in spuctx_switch_state() argument
1030 delta = curtime - ctx->stats.tstamp; in spuctx_switch_state()
1032 WARN_ON(!mutex_is_locked(&ctx->state_mutex)); in spuctx_switch_state()
1035 spu = ctx->spu; in spuctx_switch_state()
1036 old_state = ctx->stats.util_state; in spuctx_switch_state()
1037 ctx->stats.util_state = new_state; in spuctx_switch_state()
1038 ctx->stats.tstamp = curtime; in spuctx_switch_state()
1044 ctx->stats.times[old_state] += delta; in spuctx_switch_state()