Lines Matching refs:new_cpu
518 static void vcpu_move_locked(struct vcpu *v, unsigned int new_cpu) in vcpu_move_locked() argument
527 if ( unlikely(v->is_urgent) && (old_cpu != new_cpu) ) in vcpu_move_locked()
529 atomic_inc(&per_cpu(schedule_data, new_cpu).urgent_count); in vcpu_move_locked()
538 SCHED_OP(vcpu_scheduler(v), migrate, v, new_cpu); in vcpu_move_locked()
540 v->processor = new_cpu; in vcpu_move_locked()
550 static void vcpu_move_nosched(struct vcpu *v, unsigned int new_cpu) in vcpu_move_nosched() argument
560 new_lock = per_cpu(schedule_data, new_cpu).schedule_lock; in vcpu_move_nosched()
563 ASSERT(new_cpu != v->processor); in vcpu_move_nosched()
564 vcpu_move_locked(v, new_cpu); in vcpu_move_nosched()
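Taken together, the vcpu_move_locked() and vcpu_move_nosched() hits above outline the low-level move: transfer the vCPU's urgent-count contribution between the two per-CPU schedule_data entries, run the scheduler's migrate hook, update v->processor, and, in the nosched variant, do all of that under the destination CPU's scheduler lock. The following is a minimal, self-contained sketch of that shape only; sched_pcpu, pcpu_data, NR_CPUS, the reduced struct vcpu, and the use of pthread mutexes and C11 atomics in place of Xen's per-runqueue spinlocks and atomic_t are illustrative assumptions, not Xen's real definitions.

#include <assert.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

#define NR_CPUS 8                    /* illustrative only, not Xen's value */

/* Stand-in for per_cpu(schedule_data, cpu); the mutexes are assumed to be
 * initialised with pthread_mutex_init() during bring-up. */
struct sched_pcpu {
    pthread_mutex_t schedule_lock;   /* per-runqueue lock stand-in */
    atomic_int      urgent_count;    /* urgent vCPUs charged to this pCPU */
};

static struct sched_pcpu pcpu_data[NR_CPUS];

struct vcpu {
    unsigned int processor;          /* pCPU this vCPU is assigned to */
    bool         is_urgent;          /* vCPU is in an "urgent" wait */
};

/* Caller already holds the scheduler lock(s) covering the move. */
static void vcpu_move_locked(struct vcpu *v, unsigned int new_cpu)
{
    unsigned int old_cpu = v->processor;

    /* Re-charge this vCPU's urgent-count contribution to the new pCPU. */
    if (v->is_urgent && old_cpu != new_cpu) {
        atomic_fetch_add(&pcpu_data[new_cpu].urgent_count, 1);
        atomic_fetch_sub(&pcpu_data[old_cpu].urgent_count, 1);
    }

    /* The real function also invokes the per-scheduler migrate hook
     * (SCHED_OP(vcpu_scheduler(v), migrate, v, new_cpu)) at this point. */
    v->processor = new_cpu;
}

/* Move without going through the scheduler proper: take the destination
 * CPU's lock, insist this really is a cross-CPU move, then commit.
 * (The real function presumably holds the source CPU's lock as well.) */
static void vcpu_move_nosched(struct vcpu *v, unsigned int new_cpu)
{
    pthread_mutex_t *new_lock = &pcpu_data[new_cpu].schedule_lock;

    pthread_mutex_lock(new_lock);
    assert(new_cpu != v->processor);
    vcpu_move_locked(v, new_cpu);
    pthread_mutex_unlock(new_lock);
}

The urgent count is kept per pCPU, so the point of the first hunk is simply that a cross-CPU move must decrement the old entry and increment the new one; v->processor is only rewritten after the scheduler has had its say through the migrate hook.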
573 unsigned int old_cpu, new_cpu; in vcpu_migrate() local
577 old_cpu = new_cpu = v->processor; in vcpu_migrate()
586 new_lock = per_cpu(schedule_data, new_cpu).schedule_lock; in vcpu_migrate()
598 (new_lock == per_cpu(schedule_data, new_cpu).schedule_lock) && in vcpu_migrate()
599 cpumask_test_cpu(new_cpu, v->cpu_hard_affinity) && in vcpu_migrate()
600 cpumask_test_cpu(new_cpu, v->domain->cpupool->cpu_valid) ) in vcpu_migrate()
604 new_cpu = SCHED_OP(vcpu_scheduler(v), pick_cpu, v); in vcpu_migrate()
605 if ( (new_lock == per_cpu(schedule_data, new_cpu).schedule_lock) && in vcpu_migrate()
606 cpumask_test_cpu(new_cpu, v->domain->cpupool->cpu_valid) ) in vcpu_migrate()
634 vcpu_move_locked(v, new_cpu); in vcpu_migrate()
638 if ( old_cpu != new_cpu ) in vcpu_migrate()
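The vcpu_migrate() hits show the more delicate part: new_cpu starts out as the current processor, the matching schedule_lock is sampled and taken, the scheduler's pick_cpu hook may then propose a different new_cpu, and that proposal is only committed (the vcpu_move_locked() call at 634) once it has been revalidated under the lock. Below is a self-contained sketch of that revalidation as a single predicate; may_commit_move, the 64-bit cpumask_t and the reduced sched_pcpu are hypothetical stand-ins, not Xen's types. Note that in the hits above the check after pick_cpu only re-tests the cpupool mask, presumably because pick_cpu already honours hard affinity; the sketch folds both checks into one.

#include <stdbool.h>
#include <stdint.h>

typedef uint64_t cpumask_t;          /* up to 64 CPUs in this sketch */

static bool cpumask_test_cpu(unsigned int cpu, cpumask_t mask)
{
    return (mask >> cpu) & 1;
}

/* Stand-in for per_cpu(schedule_data, cpu): only the lock pointer matters
 * here, because that pointer can be redirected (CPUs may come to share a
 * runqueue lock or change pools) while it was not being held. */
struct sched_pcpu {
    void *schedule_lock;
};

/* True if the tentatively chosen new_cpu may be committed while holding
 * held_new_lock: its per-CPU lock is still the one being held, and it is
 * still allowed by hard affinity and by the cpupool's valid mask. */
static bool may_commit_move(const struct sched_pcpu *pcpu,
                            unsigned int new_cpu,
                            const void *held_new_lock,
                            cpumask_t hard_affinity,
                            cpumask_t cpupool_cpu_valid)
{
    return pcpu[new_cpu].schedule_lock == held_new_lock &&
           cpumask_test_cpu(new_cpu, hard_affinity) &&
           cpumask_test_cpu(new_cpu, cpupool_cpu_valid);
}

If the predicate fails, the caller has no choice but to drop the locks, resample them, and try again, which is exactly why old_cpu and new_cpu are tracked separately and compared once more (the hit at 638) before any post-move work is done.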
723 unsigned int new_cpu; in cpu_disable_scheduler() local
801 new_cpu = cpumask_first(&online_affinity); in cpu_disable_scheduler()
802 vcpu_move_nosched(v, new_cpu); in cpu_disable_scheduler()
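Finally, the cpu_disable_scheduler() hits show the fallback used when a pCPU is taken away: the vCPU's still-online affinity is computed, its first CPU is chosen, and the vCPU is moved there with vcpu_move_nosched() rather than through the full vcpu_migrate() path. A sketch of that pick follows, again with a hypothetical 64-bit cpumask_t and helper names (pick_fallback_cpu, CPU_NONE) that are not Xen's.

#include <stdint.h>

typedef uint64_t cpumask_t;          /* up to 64 CPUs in this sketch */

#define CPU_NONE 64u                 /* "no CPU found" sentinel */

/* Lowest-numbered CPU present in the mask, CPU_NONE if it is empty. */
static unsigned int cpumask_first(cpumask_t mask)
{
    for (unsigned int cpu = 0; cpu < 64; cpu++)
        if ((mask >> cpu) & 1)
            return cpu;
    return CPU_NONE;
}

/* A vCPU stranded on a disappearing pCPU is sent to the first CPU that is
 * both in its hard affinity and still online. The surrounding Xen code
 * deals with an empty intersection separately; this sketch assumes the
 * intersection is non-empty. */
static unsigned int pick_fallback_cpu(cpumask_t hard_affinity, cpumask_t online)
{
    cpumask_t online_affinity = hard_affinity & online;

    return cpumask_first(online_affinity);
}

Going through the nosched variant skips the scheduler's pick_cpu hook entirely, which matches what the hits show: the destination is derived directly from the affinity mask rather than asked of the scheduler.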