Lines matching refs:rq (all hits are in kernel/sched/stats.h)

11 rq_sched_info_arrive(struct rq *rq, unsigned long long delta)  in rq_sched_info_arrive()  argument
13 if (rq) { in rq_sched_info_arrive()
14 rq->rq_sched_info.run_delay += delta; in rq_sched_info_arrive()
15 rq->rq_sched_info.pcount++; in rq_sched_info_arrive()
23 rq_sched_info_depart(struct rq *rq, unsigned long long delta) in rq_sched_info_depart() argument
25 if (rq) in rq_sched_info_depart()
26 rq->rq_cpu_time += delta; in rq_sched_info_depart()
30 rq_sched_info_dequeue(struct rq *rq, unsigned long long delta) in rq_sched_info_dequeue() argument
32 if (rq) in rq_sched_info_dequeue()
33 rq->rq_sched_info.run_delay += delta; in rq_sched_info_dequeue()
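The three rq_sched_info_* helpers above (lines 11-33 of the listing) only accumulate per-runqueue counters. Below is a minimal, self-contained sketch of that accounting, assuming simplified stand-in types: struct rq_sketch and its fields mirror the names shown in the listing, but the real struct rq and its callers live in the scheduler core, not here.

#include <stdio.h>

/* Simplified stand-ins for the kernel types; field names follow the listing. */
struct rq_sketch {
	struct {
		unsigned long long run_delay;  /* total time tasks sat runnable but not running */
		unsigned long pcount;          /* how many tasks have run on this runqueue */
	} rq_sched_info;
	unsigned long long rq_cpu_time;        /* total time tasks actually ran here */
};

/* Task reaches the CPU: charge the time it waited, count the arrival. */
static void rq_sched_info_arrive(struct rq_sketch *rq, unsigned long long delta)
{
	if (rq) {
		rq->rq_sched_info.run_delay += delta;
		rq->rq_sched_info.pcount++;
	}
}

/* Task leaves the CPU: charge the time it spent running. */
static void rq_sched_info_depart(struct rq_sketch *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_cpu_time += delta;
}

/* Task is dequeued while still waiting: charge the wait time only. */
static void rq_sched_info_dequeue(struct rq_sketch *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_sched_info.run_delay += delta;
}

int main(void)
{
	struct rq_sketch rq = { { 0, 0 }, 0 };

	rq_sched_info_arrive(&rq, 1500);    /* waited 1500 ns before running */
	rq_sched_info_depart(&rq, 30000);   /* then ran for 30000 ns */
	rq_sched_info_dequeue(&rq, 700);    /* another task left after waiting 700 ns */

	printf("run_delay=%llu pcount=%lu cpu_time=%llu\n",
	       rq.rq_sched_info.run_delay, rq.rq_sched_info.pcount,
	       rq.rq_cpu_time);
	return 0;
}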
45 void __update_stats_wait_start(struct rq *rq, struct task_struct *p,
48 void __update_stats_wait_end(struct rq *rq, struct task_struct *p,
50 void __update_stats_enqueue_sleeper(struct rq *rq, struct task_struct *p,
70 static inline void rq_sched_info_arrive (struct rq *rq, unsigned long long delta) { } in rq_sched_info_arrive() argument
71 static inline void rq_sched_info_dequeue(struct rq *rq, unsigned long long delta) { } in rq_sched_info_dequeue() argument
72 static inline void rq_sched_info_depart (struct rq *rq, unsigned long long delta) { } in rq_sched_info_depart() argument
83 # define __update_stats_wait_start(rq, p, stats) do { } while (0) argument
84 # define __update_stats_wait_end(rq, p, stats) do { } while (0) argument
85 # define __update_stats_enqueue_sleeper(rq, p, stats) do { } while (0) argument
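Lines 70-85 are the disabled-statistics counterparts of the helpers above: empty static inlines and do { } while (0) macros that keep every call site valid while compiling the accounting away. A small self-contained sketch of that pattern follows, assuming a stand-in MY_SCHEDSTATS switch in place of the kernel's CONFIG_SCHEDSTATS and a heavily simplified struct rq.

#include <stdio.h>

struct rq { unsigned long long run_delay; };

#define MY_SCHEDSTATS 1    /* flip to 0: the calls below become no-ops */

#if MY_SCHEDSTATS
/* Enabled variant: the helpers do real accounting. */
static inline void rq_sched_info_dequeue(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->run_delay += delta;
}
# define update_stats_wait_start(rq) \
	printf("wait starts on rq %p\n", (void *)(rq))
#else
/* Disabled variant: an empty inline plus a do { } while (0) macro, so the
 * call sites still parse (and still take a trailing semicolon) but
 * generate no code. */
static inline void rq_sched_info_dequeue(struct rq *rq, unsigned long long delta) { }
# define update_stats_wait_start(rq) do { } while (0)
#endif

int main(void)
{
	struct rq rq = { 0 };

	update_stats_wait_start(&rq);
	rq_sched_info_dequeue(&rq, 250);
	printf("run_delay=%llu\n", rq.run_delay);
	return 0;
}

Either way the callers stay identical, which is why the listing shows the same names twice: once with bodies, once as stubs.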
167 struct rq *rq; in psi_ttwu_dequeue() local
175 rq = __task_rq_lock(p, &rf); in psi_ttwu_dequeue()
178 __task_rq_unlock(rq, &rf); in psi_ttwu_dequeue()
208 static inline void sched_info_dequeue(struct rq *rq, struct task_struct *t) in sched_info_dequeue() argument
215 delta = rq_clock(rq) - t->sched_info.last_queued; in sched_info_dequeue()
219 rq_sched_info_dequeue(rq, delta); in sched_info_dequeue()
227 static void sched_info_arrive(struct rq *rq, struct task_struct *t) in sched_info_arrive() argument
234 now = rq_clock(rq); in sched_info_arrive()
241 rq_sched_info_arrive(rq, delta); in sched_info_arrive()
249 static inline void sched_info_enqueue(struct rq *rq, struct task_struct *t) in sched_info_enqueue() argument
252 t->sched_info.last_queued = rq_clock(rq); in sched_info_enqueue()
263 static inline void sched_info_depart(struct rq *rq, struct task_struct *t) in sched_info_depart() argument
265 unsigned long long delta = rq_clock(rq) - t->sched_info.last_arrival; in sched_info_depart()
267 rq_sched_info_depart(rq, delta); in sched_info_depart()
270 sched_info_enqueue(rq, t); in sched_info_depart()
279 sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next) in sched_info_switch() argument
286 if (prev != rq->idle) in sched_info_switch()
287 sched_info_depart(rq, prev); in sched_info_switch()
289 if (next != rq->idle) in sched_info_switch()
290 sched_info_arrive(rq, next); in sched_info_switch()
294 # define sched_info_enqueue(rq, t) do { } while (0) argument
295 # define sched_info_dequeue(rq, t) do { } while (0) argument
296 # define sched_info_switch(rq, t, next) do { } while (0) argument
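Lines 208-296 show the task-level bookkeeping: sched_info_enqueue() stamps last_queued, sched_info_arrive() turns the gap since last_queued into run_delay, sched_info_depart() closes the on-CPU interval that began at last_arrival, and sched_info_switch() applies depart/arrive to the outgoing and incoming tasks while skipping the idle task. The sketch below walks through that flow with simplified stand-in types and a fake clock; the field names follow the listing, but the idle handling, locking, and the rq-level forwarding are reduced to comments.

#include <stdio.h>

/* Simplified stand-ins; only the field names come from the listing. */
struct sched_info {
	unsigned long long last_queued;   /* when the task last became runnable */
	unsigned long long last_arrival;  /* when the task last got the CPU */
	unsigned long long run_delay;     /* cumulative time spent waiting */
};

struct task { struct sched_info sched_info; const char *name; };
struct rq   { unsigned long long clock; struct task *idle; };

static unsigned long long rq_clock(struct rq *rq) { return rq->clock; }

/* Task becomes runnable: remember when it started waiting. */
static void sched_info_enqueue(struct rq *rq, struct task *t)
{
	t->sched_info.last_queued = rq_clock(rq);
}

/* Task gets the CPU: the gap since last_queued is its wait time.
 * (The real helper also forwards delta to rq_sched_info_arrive().) */
static void sched_info_arrive(struct rq *rq, struct task *t)
{
	unsigned long long now = rq_clock(rq);
	unsigned long long delta = now - t->sched_info.last_queued;

	t->sched_info.run_delay += delta;
	t->sched_info.last_arrival = now;
}

/* Task is switched out: in the listing this also charges
 * now - last_arrival to the runqueue via rq_sched_info_depart(),
 * then starts a new waiting interval. */
static void sched_info_depart(struct rq *rq, struct task *t)
{
	sched_info_enqueue(rq, t);
}

/* Context switch: account for both sides, skipping the idle task. */
static void sched_info_switch(struct rq *rq, struct task *prev, struct task *next)
{
	if (prev != rq->idle)
		sched_info_depart(rq, prev);
	if (next != rq->idle)
		sched_info_arrive(rq, next);
}

int main(void)
{
	struct task idle = { { 0, 0, 0 }, "idle" };
	struct task a    = { { 0, 0, 0 }, "a" };
	struct task b    = { { 0, 0, 0 }, "b" };
	struct rq rq = { 0, &idle };

	rq.clock = 100;  sched_info_enqueue(&rq, &a);          /* a starts waiting   */
	rq.clock = 400;  sched_info_switch(&rq, &idle, &a);    /* a ran after 300 ns */
	rq.clock = 900;  sched_info_enqueue(&rq, &b);          /* b starts waiting   */
	rq.clock = 1000; sched_info_switch(&rq, &a, &b);       /* b ran after 100 ns */

	printf("a.run_delay=%llu b.run_delay=%llu\n",
	       a.sched_info.run_delay, b.sched_info.run_delay);
	return 0;
}

When CONFIG_SCHED_INFO is off, the do { } while (0) macros at lines 294-296 make this whole flow disappear, just like the schedstats stubs earlier in the listing.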