Lines Matching refs:rq

87 struct rq;
99 extern void calc_global_load_tick(struct rq *this_rq);
100 extern long calc_load_fold_active(struct rq *this_rq, long adjust);
102 extern void call_trace_sched_update_nr_running(struct rq *rq, int count);
599 struct rq *rq; /* CPU runqueue to which this cfs_rq is attached */ member
671 struct rq *rq; member
866 extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
926 struct rq { struct
1104 struct rq *core; argument
1122 static inline struct rq *rq_of(struct cfs_rq *cfs_rq) in rq_of() argument
1124 return cfs_rq->rq; in rq_of()
1129 static inline struct rq *rq_of(struct cfs_rq *cfs_rq) in rq_of()
1131 return container_of(cfs_rq, struct rq, cfs); in rq_of()
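The two rq_of() variants above take different routes to the owning runqueue: one returns the back-pointer stored in the cfs_rq (the "struct rq *rq" member near the top of this listing), the other recovers the enclosing struct rq with container_of() because there the cfs_rq is embedded directly in the runqueue. A minimal self-contained sketch of the container_of() pointer arithmetic, using hypothetical toy_* types rather than the kernel structures:

#include <stddef.h>
#include <stdio.h>

/* Toy stand-ins for struct cfs_rq / struct rq. */
struct toy_cfs_rq { int nr_running; };
struct toy_rq     { int cpu; struct toy_cfs_rq cfs; };

/* container_of(): subtract the member offset to recover the parent object. */
#define toy_container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

static struct toy_rq *toy_rq_of(struct toy_cfs_rq *cfs_rq)
{
        return toy_container_of(cfs_rq, struct toy_rq, cfs);
}

int main(void)
{
        struct toy_rq rq = { .cpu = 3 };

        printf("cpu = %d\n", toy_rq_of(&rq.cfs)->cpu);  /* prints 3 */
        return 0;
}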
1135 static inline int cpu_of(struct rq *rq) in cpu_of() argument
1138 return rq->cpu; in cpu_of()
1161 static inline bool sched_core_enabled(struct rq *rq) in sched_core_enabled() argument
1163 return static_branch_unlikely(&__sched_core_enabled) && rq->core_enabled; in sched_core_enabled()
1175 static inline raw_spinlock_t *rq_lockp(struct rq *rq) in rq_lockp() argument
1177 if (sched_core_enabled(rq)) in rq_lockp()
1178 return &rq->core->__lock; in rq_lockp()
1180 return &rq->__lock; in rq_lockp()
1183 static inline raw_spinlock_t *__rq_lockp(struct rq *rq) in __rq_lockp() argument
1185 if (rq->core_enabled) in __rq_lockp()
1186 return &rq->core->__lock; in __rq_lockp()
1188 return &rq->__lock; in __rq_lockp()
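rq_lockp()/__rq_lockp() above pick between the runqueue's own __lock and the lock of rq->core, so that all SMT siblings of a core serialize on one shared lock when core scheduling is enabled. A self-contained sketch of that lock-selection indirection, with made-up toy_* types and pthread mutexes standing in for raw spinlocks (not the kernel implementation):

#include <pthread.h>
#include <stdbool.h>

struct toy_core_rq {
        pthread_mutex_t lock;           /* this CPU's own lock */
        struct toy_core_rq *core;       /* leader rq of the SMT core */
        bool core_enabled;              /* core scheduling active? */
};

/* Pick the lock that actually protects this runqueue. */
static pthread_mutex_t *toy_rq_lockp(struct toy_core_rq *rq)
{
        if (rq->core_enabled)
                return &rq->core->lock; /* siblings share the leader's lock */
        return &rq->lock;
}

int main(void)
{
        struct toy_core_rq leader = {
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .core = &leader,
                .core_enabled = true,
        };
        struct toy_core_rq sibling = {
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .core = &leader,
                .core_enabled = true,
        };

        pthread_mutex_lock(toy_rq_lockp(&sibling));     /* takes leader.lock */
        pthread_mutex_unlock(toy_rq_lockp(&sibling));
        return 0;
}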
1199 static inline bool sched_cpu_cookie_match(struct rq *rq, struct task_struct *p) in sched_cpu_cookie_match() argument
1202 if (!sched_core_enabled(rq)) in sched_cpu_cookie_match()
1205 return rq->core->core_cookie == p->core_cookie; in sched_cpu_cookie_match()
1208 static inline bool sched_core_cookie_match(struct rq *rq, struct task_struct *p) in sched_core_cookie_match() argument
1214 if (!sched_core_enabled(rq)) in sched_core_cookie_match()
1217 for_each_cpu(cpu, cpu_smt_mask(cpu_of(rq))) { in sched_core_cookie_match()
1228 return idle_core || rq->core->core_cookie == p->core_cookie; in sched_core_cookie_match()
1231 static inline bool sched_group_cookie_match(struct rq *rq, in sched_group_cookie_match() argument
1238 if (!sched_core_enabled(rq)) in sched_group_cookie_match()
1242 if (sched_core_cookie_match(rq, p)) in sched_group_cookie_match()
1248 extern void queue_core_balance(struct rq *rq);
1255 extern void sched_core_enqueue(struct rq *rq, struct task_struct *p);
1256 extern void sched_core_dequeue(struct rq *rq, struct task_struct *p);
1263 static inline bool sched_core_enabled(struct rq *rq) in sched_core_enabled() argument
1273 static inline raw_spinlock_t *rq_lockp(struct rq *rq) in rq_lockp() argument
1275 return &rq->__lock; in rq_lockp()
1278 static inline raw_spinlock_t *__rq_lockp(struct rq *rq) in __rq_lockp() argument
1280 return &rq->__lock; in __rq_lockp()
1283 static inline void queue_core_balance(struct rq *rq) in queue_core_balance() argument
1287 static inline bool sched_cpu_cookie_match(struct rq *rq, struct task_struct *p) in sched_cpu_cookie_match() argument
1292 static inline bool sched_core_cookie_match(struct rq *rq, struct task_struct *p) in sched_core_cookie_match() argument
1297 static inline bool sched_group_cookie_match(struct rq *rq, in sched_group_cookie_match() argument
1305 static inline void lockdep_assert_rq_held(struct rq *rq) in lockdep_assert_rq_held() argument
1307 lockdep_assert_held(__rq_lockp(rq)); in lockdep_assert_rq_held()
1310 extern void raw_spin_rq_lock_nested(struct rq *rq, int subclass);
1311 extern bool raw_spin_rq_trylock(struct rq *rq);
1312 extern void raw_spin_rq_unlock(struct rq *rq);
1314 static inline void raw_spin_rq_lock(struct rq *rq) in raw_spin_rq_lock() argument
1316 raw_spin_rq_lock_nested(rq, 0); in raw_spin_rq_lock()
1319 static inline void raw_spin_rq_lock_irq(struct rq *rq) in raw_spin_rq_lock_irq() argument
1322 raw_spin_rq_lock(rq); in raw_spin_rq_lock_irq()
1325 static inline void raw_spin_rq_unlock_irq(struct rq *rq) in raw_spin_rq_unlock_irq() argument
1327 raw_spin_rq_unlock(rq); in raw_spin_rq_unlock_irq()
1331 static inline unsigned long _raw_spin_rq_lock_irqsave(struct rq *rq) in _raw_spin_rq_lock_irqsave() argument
1335 raw_spin_rq_lock(rq); in _raw_spin_rq_lock_irqsave()
1339 static inline void raw_spin_rq_unlock_irqrestore(struct rq *rq, unsigned long flags) in raw_spin_rq_unlock_irqrestore() argument
1341 raw_spin_rq_unlock(rq); in raw_spin_rq_unlock_irqrestore()
1345 #define raw_spin_rq_lock_irqsave(rq, flags) \ argument
1347 flags = _raw_spin_rq_lock_irqsave(rq); \
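raw_spin_rq_lock_irqsave() is a macro around _raw_spin_rq_lock_irqsave() so the saved IRQ flags can be assigned to an lvalue in the caller's scope. A hedged usage sketch of the save/restore pairing, assuming kernel context where these helpers are visible; toy_touch_rq() and its body are illustrative only, not kernel code:

/* Sketch: lock a runqueue from a context where IRQ state must be preserved. */
static void toy_touch_rq(struct rq *rq)
{
        unsigned long flags;

        raw_spin_rq_lock_irqsave(rq, flags);    /* save IRQ state, then lock */
        /* ... rq is now stable: safe to read or modify its fields ... */
        raw_spin_rq_unlock_irqrestore(rq, flags);
}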
1351 extern void __update_idle_core(struct rq *rq);
1353 static inline void update_idle_core(struct rq *rq) in update_idle_core() argument
1356 __update_idle_core(rq); in update_idle_core()
1360 static inline void update_idle_core(struct rq *rq) { } in update_idle_core() argument
1363 DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
1410 struct rq *rq = task_rq(p); in cfs_rq_of() local
1412 return &rq->cfs; in cfs_rq_of()
1422 extern void update_rq_clock(struct rq *rq);
1451 static inline void assert_clock_updated(struct rq *rq) in assert_clock_updated() argument
1457 SCHED_WARN_ON(rq->clock_update_flags < RQCF_ACT_SKIP); in assert_clock_updated()
1460 static inline u64 rq_clock(struct rq *rq) in rq_clock() argument
1462 lockdep_assert_rq_held(rq); in rq_clock()
1463 assert_clock_updated(rq); in rq_clock()
1465 return rq->clock; in rq_clock()
1468 static inline u64 rq_clock_task(struct rq *rq) in rq_clock_task() argument
1470 lockdep_assert_rq_held(rq); in rq_clock_task()
1471 assert_clock_updated(rq); in rq_clock_task()
1473 return rq->clock_task; in rq_clock_task()
1489 static inline u64 rq_clock_thermal(struct rq *rq) in rq_clock_thermal() argument
1491 return rq_clock_task(rq) >> sched_thermal_decay_shift; in rq_clock_thermal()
1494 static inline void rq_clock_skip_update(struct rq *rq) in rq_clock_skip_update() argument
1496 lockdep_assert_rq_held(rq); in rq_clock_skip_update()
1497 rq->clock_update_flags |= RQCF_REQ_SKIP; in rq_clock_skip_update()
1504 static inline void rq_clock_cancel_skipupdate(struct rq *rq) in rq_clock_cancel_skipupdate() argument
1506 lockdep_assert_rq_held(rq); in rq_clock_cancel_skipupdate()
1507 rq->clock_update_flags &= ~RQCF_REQ_SKIP; in rq_clock_cancel_skipupdate()
1535 static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf) in rq_pin_lock() argument
1537 rf->cookie = lockdep_pin_lock(__rq_lockp(rq)); in rq_pin_lock()
1540 rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP); in rq_pin_lock()
1543 SCHED_WARN_ON(rq->balance_callback && rq->balance_callback != &balance_push_callback); in rq_pin_lock()
1548 static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf) in rq_unpin_lock() argument
1551 if (rq->clock_update_flags > RQCF_ACT_SKIP) in rq_unpin_lock()
1555 lockdep_unpin_lock(__rq_lockp(rq), rf->cookie); in rq_unpin_lock()
1558 static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf) in rq_repin_lock() argument
1560 lockdep_repin_lock(__rq_lockp(rq), rf->cookie); in rq_repin_lock()
1566 rq->clock_update_flags |= rf->clock_update_flags; in rq_repin_lock()
1570 struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
1571 __acquires(rq->lock);
1573 struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
1575 __acquires(rq->lock);
1577 static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf) in __task_rq_unlock() argument
1578 __releases(rq->lock) in __task_rq_unlock()
1580 rq_unpin_lock(rq, rf); in __task_rq_unlock()
1581 raw_spin_rq_unlock(rq); in __task_rq_unlock()
1585 task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf) in task_rq_unlock() argument
1586 __releases(rq->lock) in task_rq_unlock()
1589 rq_unpin_lock(rq, rf); in task_rq_unlock()
1590 raw_spin_rq_unlock(rq); in task_rq_unlock()
1595 rq_lock_irqsave(struct rq *rq, struct rq_flags *rf) in rq_lock_irqsave() argument
1596 __acquires(rq->lock) in rq_lock_irqsave()
1598 raw_spin_rq_lock_irqsave(rq, rf->flags); in rq_lock_irqsave()
1599 rq_pin_lock(rq, rf); in rq_lock_irqsave()
1603 rq_lock_irq(struct rq *rq, struct rq_flags *rf) in rq_lock_irq() argument
1604 __acquires(rq->lock) in rq_lock_irq()
1606 raw_spin_rq_lock_irq(rq); in rq_lock_irq()
1607 rq_pin_lock(rq, rf); in rq_lock_irq()
1611 rq_lock(struct rq *rq, struct rq_flags *rf) in rq_lock() argument
1612 __acquires(rq->lock) in rq_lock()
1614 raw_spin_rq_lock(rq); in rq_lock()
1615 rq_pin_lock(rq, rf); in rq_lock()
1619 rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf) in rq_unlock_irqrestore() argument
1620 __releases(rq->lock) in rq_unlock_irqrestore()
1622 rq_unpin_lock(rq, rf); in rq_unlock_irqrestore()
1623 raw_spin_rq_unlock_irqrestore(rq, rf->flags); in rq_unlock_irqrestore()
1627 rq_unlock_irq(struct rq *rq, struct rq_flags *rf) in rq_unlock_irq() argument
1628 __releases(rq->lock) in rq_unlock_irq()
1630 rq_unpin_lock(rq, rf); in rq_unlock_irq()
1631 raw_spin_rq_unlock_irq(rq); in rq_unlock_irq()
1635 rq_unlock(struct rq *rq, struct rq_flags *rf) in rq_unlock() argument
1636 __releases(rq->lock) in rq_unlock()
1638 rq_unpin_lock(rq, rf); in rq_unlock()
1639 raw_spin_rq_unlock(rq); in rq_unlock()
1642 static inline struct rq *
1644 __acquires(rq->lock) in this_rq_lock_irq()
1646 struct rq *rq; in this_rq_lock_irq() local
1649 rq = this_rq(); in this_rq_lock_irq()
1650 rq_lock(rq, rf); in this_rq_lock_irq()
1651 return rq; in this_rq_lock_irq()
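The rq_lock()/rq_unlock() family pairs the raw runqueue lock with lockdep pinning through struct rq_flags, and task_rq_lock() takes the lock of whichever runqueue the task is currently on. A hedged usage sketch of the common pattern, assuming kernel context; toy_inspect_task_rq() is made up and illustrative only:

static void toy_inspect_task_rq(struct task_struct *p)
{
        struct rq_flags rf;
        struct rq *rq;

        rq = task_rq_lock(p, &rf);      /* lock and pin the rq p is on */
        update_rq_clock(rq);            /* rq_clock*() reads below are valid */
        /* ... examine or adjust p's scheduling state under the lock ... */
        task_rq_unlock(rq, p, &rf);     /* unpin and release */
}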
1700 queue_balance_callback(struct rq *rq, in queue_balance_callback() argument
1702 void (*func)(struct rq *rq)) in queue_balance_callback() argument
1704 lockdep_assert_rq_held(rq); in queue_balance_callback()
1706 if (unlikely(head->next || rq->balance_callback == &balance_push_callback)) in queue_balance_callback()
1710 head->next = rq->balance_callback; in queue_balance_callback()
1711 rq->balance_callback = head; in queue_balance_callback()
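queue_balance_callback() pushes a callback head onto a per-runqueue list that is drained later, and refuses to queue a head that is already pending (or anything while balance_push_callback is installed). A self-contained toy version of the queue-once LIFO idea; the toy uses an explicit queued flag where the kernel relies on head->next and a sentinel, and all toy_* names are made up:

#include <stdio.h>

struct toy_cb {
        struct toy_cb *next;
        void (*func)(void);
        int queued;
};

struct toy_cb_rq {
        struct toy_cb *balance_callback;        /* LIFO list of pending work */
};

/* Queue at most once per drain. */
void toy_queue_balance_callback(struct toy_cb_rq *rq, struct toy_cb *head,
                                void (*func)(void))
{
        if (head->queued)
                return;
        head->queued = 1;
        head->func = func;
        head->next = rq->balance_callback;
        rq->balance_callback = head;
}

/* Drain, as the kernel does once the rq lock is about to be dropped. */
void toy_run_balance_callbacks(struct toy_cb_rq *rq)
{
        struct toy_cb *head = rq->balance_callback;

        rq->balance_callback = NULL;
        while (head) {
                struct toy_cb *next = head->next;

                head->queued = 0;
                head->func();
                head = next;
        }
}

static void say_hi(void) { puts("balance callback ran"); }

int main(void)
{
        struct toy_cb_rq rq = { 0 };
        struct toy_cb cb = { 0 };

        toy_queue_balance_callback(&rq, &cb, say_hi);
        toy_queue_balance_callback(&rq, &cb, say_hi);   /* ignored: already queued */
        toy_run_balance_callbacks(&rq);                 /* prints once */
        return 0;
}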
2002 static inline int task_current(struct rq *rq, struct task_struct *p) in task_current() argument
2004 return rq->curr == p; in task_current()
2007 static inline int task_running(struct rq *rq, struct task_struct *p) in task_running() argument
2012 return task_current(rq, p); in task_running()
2101 void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
2102 void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
2103 void (*yield_task) (struct rq *rq);
2104 bool (*yield_to_task)(struct rq *rq, struct task_struct *p);
2106 void (*check_preempt_curr)(struct rq *rq, struct task_struct *p, int flags);
2108 struct task_struct *(*pick_next_task)(struct rq *rq);
2110 void (*put_prev_task)(struct rq *rq, struct task_struct *p);
2111 void (*set_next_task)(struct rq *rq, struct task_struct *p, bool first);
2114 int (*balance)(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
2117 struct task_struct * (*pick_task)(struct rq *rq);
2121 void (*task_woken)(struct rq *this_rq, struct task_struct *task);
2127 void (*rq_online)(struct rq *rq);
2128 void (*rq_offline)(struct rq *rq);
2130 struct rq *(*find_lock_rq)(struct task_struct *p, struct rq *rq);
2133 void (*task_tick)(struct rq *rq, struct task_struct *p, int queued);
2142 void (*switched_from)(struct rq *this_rq, struct task_struct *task);
2143 void (*switched_to) (struct rq *this_rq, struct task_struct *task);
2144 void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
2147 unsigned int (*get_rr_interval)(struct rq *rq,
2150 void (*update_curr)(struct rq *rq);
2160 static inline void put_prev_task(struct rq *rq, struct task_struct *prev) in put_prev_task() argument
2162 WARN_ON_ONCE(rq->curr != prev); in put_prev_task()
2163 prev->sched_class->put_prev_task(rq, prev); in put_prev_task()
2166 static inline void set_next_task(struct rq *rq, struct task_struct *next) in set_next_task() argument
2168 next->sched_class->set_next_task(rq, next, false); in set_next_task()
2204 static inline bool sched_stop_runnable(struct rq *rq) in sched_stop_runnable() argument
2206 return rq->stop && task_on_rq_queued(rq->stop); in sched_stop_runnable()
2209 static inline bool sched_dl_runnable(struct rq *rq) in sched_dl_runnable() argument
2211 return rq->dl.dl_nr_running > 0; in sched_dl_runnable()
2214 static inline bool sched_rt_runnable(struct rq *rq) in sched_rt_runnable() argument
2216 return rq->rt.rt_queued > 0; in sched_rt_runnable()
2219 static inline bool sched_fair_runnable(struct rq *rq) in sched_fair_runnable() argument
2221 return rq->cfs.nr_running > 0; in sched_fair_runnable()
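The sched_*_runnable() helpers above let callers test whole scheduling classes for runnable work; the classes rank stop, deadline, rt, fair, idle from highest to lowest priority. A hedged sketch of how such checks compose in that order, assuming kernel context; toy_next_class() is illustrative and not the real pick path, which iterates the sched_class list:

static const char *toy_next_class(struct rq *rq)
{
        if (sched_stop_runnable(rq))
                return "stop";
        if (sched_dl_runnable(rq))
                return "deadline";
        if (sched_rt_runnable(rq))
                return "rt";
        if (sched_fair_runnable(rq))
                return "fair";
        return "idle";
}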
2224 extern struct task_struct *pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_f…
2225 extern struct task_struct *pick_next_task_idle(struct rq *rq);
2236 extern void trigger_load_balance(struct rq *rq);
2240 static inline struct task_struct *get_push_task(struct rq *rq) in get_push_task() argument
2242 struct task_struct *p = rq->curr; in get_push_task()
2244 lockdep_assert_rq_held(rq); in get_push_task()
2246 if (rq->push_busy) in get_push_task()
2255 rq->push_busy = true; in get_push_task()
2264 static inline void idle_set_state(struct rq *rq, in idle_set_state() argument
2267 rq->idle_state = idle_state; in idle_set_state()
2270 static inline struct cpuidle_state *idle_get_state(struct rq *rq) in idle_get_state() argument
2274 return rq->idle_state; in idle_get_state()
2277 static inline void idle_set_state(struct rq *rq, in idle_set_state() argument
2282 static inline struct cpuidle_state *idle_get_state(struct rq *rq) in idle_get_state() argument
2300 extern void resched_curr(struct rq *rq);
2322 extern bool sched_can_stop_tick(struct rq *rq);
2330 static inline void sched_update_tick_dependency(struct rq *rq) in sched_update_tick_dependency() argument
2332 int cpu = cpu_of(rq); in sched_update_tick_dependency()
2337 if (sched_can_stop_tick(rq)) in sched_update_tick_dependency()
2344 static inline void sched_update_tick_dependency(struct rq *rq) { } in sched_update_tick_dependency() argument
2347 static inline void add_nr_running(struct rq *rq, unsigned count) in add_nr_running() argument
2349 unsigned prev_nr = rq->nr_running; in add_nr_running()
2351 rq->nr_running = prev_nr + count; in add_nr_running()
2353 call_trace_sched_update_nr_running(rq, count); in add_nr_running()
2357 if (prev_nr < 2 && rq->nr_running >= 2) { in add_nr_running()
2358 if (!READ_ONCE(rq->rd->overload)) in add_nr_running()
2359 WRITE_ONCE(rq->rd->overload, 1); in add_nr_running()
2363 sched_update_tick_dependency(rq); in add_nr_running()
2366 static inline void sub_nr_running(struct rq *rq, unsigned count) in sub_nr_running() argument
2368 rq->nr_running -= count; in sub_nr_running()
2370 call_trace_sched_update_nr_running(rq, -count); in sub_nr_running()
2374 sched_update_tick_dependency(rq); in sub_nr_running()
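add_nr_running() marks the root domain overloaded once a second task becomes runnable, and it checks the flag before writing it so the shared rd->overload cache line is not dirtied on every enqueue. A self-contained sketch of that check-before-write pattern, with C11 relaxed atomics standing in for READ_ONCE()/WRITE_ONCE() and made-up toy_* types:

#include <stdatomic.h>

struct toy_root_domain { atomic_int overload; };
struct toy_lb_rq { unsigned int nr_running; struct toy_root_domain *rd; };

static void toy_add_nr_running(struct toy_lb_rq *rq, unsigned int count)
{
        unsigned int prev_nr = rq->nr_running;

        rq->nr_running = prev_nr + count;

        /* Crossed into "more than one runnable task": publish the shared
         * overload flag, but only if it is not already set, so the common
         * case does not touch the root-domain cache line at all. */
        if (prev_nr < 2 && rq->nr_running >= 2) {
                if (!atomic_load_explicit(&rq->rd->overload, memory_order_relaxed))
                        atomic_store_explicit(&rq->rd->overload, 1, memory_order_relaxed);
        }
}

int main(void)
{
        struct toy_root_domain rd = { 0 };
        struct toy_lb_rq rq = { .nr_running = 1, .rd = &rd };

        toy_add_nr_running(&rq, 1);     /* second runnable task: sets overload */
        return atomic_load(&rd.overload) == 1 ? 0 : 1;
}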
2377 extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
2378 extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);
2380 extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
2408 static inline int hrtick_enabled(struct rq *rq) in hrtick_enabled() argument
2410 if (!cpu_active(cpu_of(rq))) in hrtick_enabled()
2412 return hrtimer_is_hres_active(&rq->hrtick_timer); in hrtick_enabled()
2415 static inline int hrtick_enabled_fair(struct rq *rq) in hrtick_enabled_fair() argument
2419 return hrtick_enabled(rq); in hrtick_enabled_fair()
2422 static inline int hrtick_enabled_dl(struct rq *rq) in hrtick_enabled_dl() argument
2426 return hrtick_enabled(rq); in hrtick_enabled_dl()
2429 void hrtick_start(struct rq *rq, u64 delay);
2433 static inline int hrtick_enabled_fair(struct rq *rq) in hrtick_enabled_fair() argument
2438 static inline int hrtick_enabled_dl(struct rq *rq) in hrtick_enabled_dl() argument
2443 static inline int hrtick_enabled(struct rq *rq) in hrtick_enabled() argument
2478 static inline bool rq_order_less(struct rq *rq1, struct rq *rq2) in rq_order_less()
2504 extern void double_rq_lock(struct rq *rq1, struct rq *rq2);
2516 static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) in _double_lock_balance()
2535 static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) in _double_lock_balance()
2562 static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest) in double_lock_balance()
2569 static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest) in double_unlock_balance()
2610 static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2) in double_rq_unlock()
2621 extern void set_rq_online (struct rq *rq);
2622 extern void set_rq_offline(struct rq *rq);
2633 static inline void double_rq_lock(struct rq *rq1, struct rq *rq2) in double_rq_lock()
2649 static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2) in double_rq_unlock()
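rq_order_less() and double_rq_lock() exist so that any two runqueue locks are always taken in one global order, which rules out ABBA deadlock when two CPUs lock each other's runqueues for load balancing. A self-contained sketch of ordered double-locking keyed on CPU id, with pthread mutexes standing in for raw spinlocks and made-up toy_* types (not the kernel code):

#include <pthread.h>

struct toy_pair_rq { int cpu; pthread_mutex_t lock; };

static int toy_rq_order_less(struct toy_pair_rq *a, struct toy_pair_rq *b)
{
        return a->cpu < b->cpu;         /* any stable total order works */
}

/* Always lock the "smaller" runqueue first; the same rq is locked once. */
static void toy_double_rq_lock(struct toy_pair_rq *rq1, struct toy_pair_rq *rq2)
{
        if (rq1 == rq2) {
                pthread_mutex_lock(&rq1->lock);
                return;
        }
        if (!toy_rq_order_less(rq1, rq2)) {
                struct toy_pair_rq *tmp = rq1; rq1 = rq2; rq2 = tmp;
        }
        pthread_mutex_lock(&rq1->lock);
        pthread_mutex_lock(&rq2->lock);
}

static void toy_double_rq_unlock(struct toy_pair_rq *rq1, struct toy_pair_rq *rq2)
{
        pthread_mutex_unlock(&rq1->lock);
        if (rq2 != rq1)
                pthread_mutex_unlock(&rq2->lock);
}

int main(void)
{
        struct toy_pair_rq a = { 0, PTHREAD_MUTEX_INITIALIZER };
        struct toy_pair_rq b = { 1, PTHREAD_MUTEX_INITIALIZER };

        toy_double_rq_lock(&b, &a);     /* acquired in a, b order either way */
        toy_double_rq_unlock(&b, &a);
        return 0;
}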
2711 extern void nohz_balance_exit_idle(struct rq *rq);
2713 static inline void nohz_balance_exit_idle(struct rq *rq) { } in nohz_balance_exit_idle() argument
2732 struct rq *rq = cpu_rq(i); in __dl_update() local
2734 rq->dl.extra_bw += bw; in __dl_update()
2803 static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) in cpufreq_update_util() argument
2808 cpu_of(rq))); in cpufreq_update_util()
2810 data->func(data, rq_clock(rq), flags); in cpufreq_update_util()
2813 static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {} in cpufreq_update_util() argument
2837 unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util, in uclamp_rq_util_with() argument
2854 if (rq->uclamp_flags & UCLAMP_FLAG_IDLE) in uclamp_rq_util_with()
2858 min_util = max_t(unsigned long, min_util, READ_ONCE(rq->uclamp[UCLAMP_MIN].value)); in uclamp_rq_util_with()
2859 max_util = max_t(unsigned long, max_util, READ_ONCE(rq->uclamp[UCLAMP_MAX].value)); in uclamp_rq_util_with()
2886 unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util, in uclamp_rq_util_with() argument
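uclamp_rq_util_with() above folds the runqueue-wide UCLAMP_MIN/UCLAMP_MAX aggregates into the caller's bounds and then restricts the utilization to that window. A minimal sketch of the final clamp step, under the assumption that an inverted window resolves toward the minimum; toy_uclamp_util() is illustrative, not the kernel function, which also handles the per-task clamps and the UCLAMP_FLAG_IDLE case shown earlier:

static unsigned long toy_uclamp_util(unsigned long util,
                                     unsigned long min_util,
                                     unsigned long max_util)
{
        if (min_util >= max_util)       /* inverted window: minimum wins */
                return min_util;
        if (util < min_util)
                return min_util;
        if (util > max_util)
                return max_util;
        return util;
}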
2931 static inline unsigned long cpu_bw_dl(struct rq *rq) in cpu_bw_dl() argument
2933 return (rq->dl.running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT; in cpu_bw_dl()
2936 static inline unsigned long cpu_util_dl(struct rq *rq) in cpu_util_dl() argument
2938 return READ_ONCE(rq->avg_dl.util_avg); in cpu_util_dl()
2941 static inline unsigned long cpu_util_cfs(struct rq *rq) in cpu_util_cfs() argument
2943 unsigned long util = READ_ONCE(rq->cfs.avg.util_avg); in cpu_util_cfs()
2947 READ_ONCE(rq->cfs.avg.util_est.enqueued)); in cpu_util_cfs()
2953 static inline unsigned long cpu_util_rt(struct rq *rq) in cpu_util_rt() argument
2955 return READ_ONCE(rq->avg_rt.util_avg); in cpu_util_rt()
2960 static inline unsigned long cpu_util_irq(struct rq *rq) in cpu_util_irq() argument
2962 return rq->avg_irq.util_avg; in cpu_util_irq()
2975 static inline unsigned long cpu_util_irq(struct rq *rq) in cpu_util_irq() argument
3012 static inline void membarrier_switch_mm(struct rq *rq, in membarrier_switch_mm() argument
3022 if (READ_ONCE(rq->membarrier_state) == membarrier_state) in membarrier_switch_mm()
3025 WRITE_ONCE(rq->membarrier_state, membarrier_state); in membarrier_switch_mm()
3028 static inline void membarrier_switch_mm(struct rq *rq, in membarrier_switch_mm() argument