Lines Matching refs:vc

651 __csched_vcpu_check(struct vcpu *vc) in __csched_vcpu_check() argument
653 struct csched_vcpu * const svc = CSCHED_VCPU(vc); in __csched_vcpu_check()
656 BUG_ON( svc->vcpu != vc ); in __csched_vcpu_check()
657 BUG_ON( sdom != CSCHED_DOM(vc->domain) ); in __csched_vcpu_check()
660 BUG_ON( is_idle_vcpu(vc) ); in __csched_vcpu_check()
661 BUG_ON( sdom->dom != vc->domain ); in __csched_vcpu_check()
665 BUG_ON( !is_idle_vcpu(vc) ); in __csched_vcpu_check()
707 __csched_vcpu_is_migrateable(struct vcpu *vc, int dest_cpu, cpumask_t *mask) in __csched_vcpu_is_migrateable() argument
716 ASSERT(!vc->is_running); in __csched_vcpu_is_migrateable()
718 return !__csched_vcpu_is_cache_hot(vc) && in __csched_vcpu_is_migrateable()
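
For orientation, a stand-alone model of the predicate whose hits appear at 707-718: a vcpu may be stolen to dest_cpu only if it is not currently running, is not cache-hot, and dest_cpu is in the given mask. The cpumask type, the cache-hot heuristic, and the helper names below are simplified stand-ins, not the Xen definitions.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t cpumask_t;      /* simplified stand-in for Xen's cpumask_t */

struct vcpu {
    bool is_running;
    uint64_t last_run_ns;        /* illustrative field behind the cache-hot test */
};

/* Placeholder for __csched_vcpu_is_cache_hot(): a vcpu that ran very
 * recently is treated as cache-hot and left where it is. */
static bool vcpu_is_cache_hot(const struct vcpu *vc, uint64_t now_ns)
{
    return now_ns - vc->last_run_ns < 1000000;   /* ~1 ms, arbitrary threshold */
}

/* Mirrors the shape of __csched_vcpu_is_migrateable() at 707-718. */
static bool vcpu_is_migrateable(const struct vcpu *vc, int dest_cpu,
                                cpumask_t mask, uint64_t now_ns)
{
    assert(!vc->is_running);                     /* ASSERT at line 716 */
    return !vcpu_is_cache_hot(vc, now_ns) &&     /* test at line 718 */
           (mask & (1ull << dest_cpu)) != 0;     /* dest_cpu allowed by the mask */
}

int main(void)
{
    struct vcpu vc = { .is_running = false, .last_run_ns = 0 };
    /* Prints 1: the vcpu is cold and CPU 2 is in the mask 0x4. */
    printf("%d\n", vcpu_is_migrateable(&vc, 2, 0x4, 5000000));
    return 0;
}
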
723 _csched_cpu_pick(const struct scheduler *ops, struct vcpu *vc, bool_t commit) in _csched_cpu_pick() argument
729 int cpu = vc->processor; in _csched_cpu_pick()
733 online = cpupool_domain_cpumask(vc->domain); in _csched_cpu_pick()
734 cpumask_and(&cpus, vc->cpu_hard_affinity, online); in _csched_cpu_pick()
757 && !has_soft_affinity(vc, &cpus) ) in _csched_cpu_pick()
761 affinity_balance_cpumask(vc, balance_step, &cpus); in _csched_cpu_pick()
765 cpu = cpumask_test_cpu(vc->processor, &cpus) in _csched_cpu_pick()
766 ? vc->processor in _csched_cpu_pick()
767 : cpumask_cycle(vc->processor, &cpus); in _csched_cpu_pick()
787 if ( vc->processor == cpu && is_runq_idle(cpu) ) in _csched_cpu_pick()
863 TRACE_3D(TRC_CSCHED_PICKED_CPU, vc->domain->domain_id, vc->vcpu_id, cpu); in _csched_cpu_pick()
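
The hits at 757-787 show the affinity-balancing part of the pick logic: constrain the candidate set to a soft- or hard-affinity mask, keep vc->processor if it is still in the mask, and otherwise cycle to the next allowed CPU. A minimal stand-alone sketch of that keep-or-cycle step follows; the 64-bit mask and the mask_cycle() helper are simplified stand-ins for Xen's cpumask_t and cpumask_cycle().

#include <stdint.h>
#include <stdio.h>

typedef uint64_t cpumask_t;      /* simplified stand-in for Xen's cpumask_t */

/* Next set bit strictly after 'start', wrapping around: a toy
 * equivalent of cpumask_cycle() for a 64-CPU mask. */
static int mask_cycle(int start, cpumask_t mask)
{
    for ( int i = 1; i <= 64; i++ )
    {
        int cpu = (start + i) % 64;
        if ( mask & (1ull << cpu) )
            return cpu;
    }
    return -1;                   /* empty mask */
}

/* Mirrors lines 765-767: prefer the current processor when it is still
 * allowed, otherwise rotate to another CPU in the affinity mask. */
static int pick_in_mask(int current_cpu, cpumask_t cpus)
{
    return (cpus & (1ull << current_cpu))
           ? current_cpu
           : mask_cycle(current_cpu, cpus);
}

int main(void)
{
    cpumask_t cpus = 0xF0;                      /* CPUs 4-7 allowed */
    printf("%d\n", pick_in_mask(2, cpus));      /* 2 not allowed -> 4 */
    printf("%d\n", pick_in_mask(5, cpus));      /* 5 allowed -> stays 5 */
    return 0;
}
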
869 csched_cpu_pick(const struct scheduler *ops, struct vcpu *vc) in csched_cpu_pick() argument
871 struct csched_vcpu *svc = CSCHED_VCPU(vc); in csched_cpu_pick()
881 return _csched_cpu_pick(ops, vc, 1); in csched_cpu_pick()
1005 csched_alloc_vdata(const struct scheduler *ops, struct vcpu *vc, void *dd) in csched_alloc_vdata() argument
1017 svc->vcpu = vc; in csched_alloc_vdata()
1018 svc->pri = is_idle_domain(vc->domain) ? in csched_alloc_vdata()
1026 csched_vcpu_insert(const struct scheduler *ops, struct vcpu *vc) in csched_vcpu_insert() argument
1028 struct csched_vcpu *svc = vc->sched_priv; in csched_vcpu_insert()
1031 BUG_ON( is_idle_vcpu(vc) ); in csched_vcpu_insert()
1034 lock = vcpu_schedule_lock_irq(vc); in csched_vcpu_insert()
1036 vc->processor = csched_cpu_pick(ops, vc); in csched_vcpu_insert()
1040 lock = vcpu_schedule_lock_irq(vc); in csched_vcpu_insert()
1042 if ( !__vcpu_on_runq(svc) && vcpu_runnable(vc) && !vc->is_running ) in csched_vcpu_insert()
1045 vcpu_schedule_unlock_irq(lock, vc); in csched_vcpu_insert()
1061 csched_vcpu_remove(const struct scheduler *ops, struct vcpu *vc) in csched_vcpu_remove() argument
1064 struct csched_vcpu * const svc = CSCHED_VCPU(vc); in csched_vcpu_remove()
1088 csched_vcpu_sleep(const struct scheduler *ops, struct vcpu *vc) in csched_vcpu_sleep() argument
1090 struct csched_vcpu * const svc = CSCHED_VCPU(vc); in csched_vcpu_sleep()
1091 unsigned int cpu = vc->processor; in csched_vcpu_sleep()
1095 BUG_ON( is_idle_vcpu(vc) ); in csched_vcpu_sleep()
1097 if ( curr_on_cpu(cpu) == vc ) in csched_vcpu_sleep()
1112 csched_vcpu_wake(const struct scheduler *ops, struct vcpu *vc) in csched_vcpu_wake() argument
1114 struct csched_vcpu * const svc = CSCHED_VCPU(vc); in csched_vcpu_wake()
1117 BUG_ON( is_idle_vcpu(vc) ); in csched_vcpu_wake()
1119 if ( unlikely(curr_on_cpu(vc->processor) == vc) ) in csched_vcpu_wake()
1130 if ( likely(vcpu_runnable(vc)) ) in csched_vcpu_wake()
1162 TRACE_2D(TRC_CSCHED_BOOST_START, vc->domain->domain_id, vc->vcpu_id); in csched_vcpu_wake()
1173 csched_vcpu_yield(const struct scheduler *ops, struct vcpu *vc) in csched_vcpu_yield() argument
1175 struct csched_vcpu * const svc = CSCHED_VCPU(vc); in csched_vcpu_yield()
1616 struct vcpu *vc; in csched_runq_steal() local
1639 vc = speer->vcpu; in csched_runq_steal()
1640 BUG_ON( is_idle_vcpu(vc) ); in csched_runq_steal()
1656 if ( vc->is_running || in csched_runq_steal()
1658 && !has_soft_affinity(vc, vc->cpu_hard_affinity)) ) in csched_runq_steal()
1661 affinity_balance_cpumask(vc, balance_step, cpumask_scratch); in csched_runq_steal()
1662 if ( __csched_vcpu_is_migrateable(vc, cpu, cpumask_scratch) ) in csched_runq_steal()
1666 vc->domain->domain_id, vc->vcpu_id); in csched_runq_steal()
1669 WARN_ON(vc->is_urgent); in csched_runq_steal()
1671 vc->processor = cpu; in csched_runq_steal()
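
Nearly every hit above leans on the same relationship: the generic struct vcpu carries an opaque sched_priv pointer, csched_alloc_vdata() points it at the credit scheduler's per-vcpu state, and CSCHED_VCPU() casts it back wherever that state is needed, with __csched_vcpu_check() sanity-checking the back-pointer. A stand-alone model of that pattern; the struct layouts here are illustrative, not the real Xen definitions.

#include <assert.h>
#include <stdio.h>

/* Illustrative stand-ins for the Xen structures in the listing. */
struct vcpu {
    int vcpu_id;
    void *sched_priv;            /* opaque per-scheduler data */
};

struct csched_vcpu {
    struct vcpu *vcpu;           /* back-pointer, checked by BUG_ON at 656 */
    int pri;                     /* priority; idle vcpus get a special value */
};

/* Same shape as the CSCHED_VCPU() accessor used throughout the hits. */
#define CSCHED_VCPU(vc) ((struct csched_vcpu *)(vc)->sched_priv)

int main(void)
{
    struct csched_vcpu svc = { .pri = 0 };
    struct vcpu vc = { .vcpu_id = 3, .sched_priv = &svc };

    svc.vcpu = &vc;                              /* as in csched_alloc_vdata(), line 1017 */
    assert(CSCHED_VCPU(&vc)->vcpu == &vc);       /* as in __csched_vcpu_check(), line 656 */
    printf("vcpu %d uses csched_vcpu at %p\n",
           vc.vcpu_id, (void *)CSCHED_VCPU(&vc));
    return 0;
}
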