Lines Matching refs:rt_rq

130 void init_rt_rq(struct rt_rq *rt_rq)  in init_rt_rq()  argument
135 array = &rt_rq->active; in init_rt_rq()
144 rt_rq->highest_prio.curr = MAX_RT_PRIO-1; in init_rt_rq()
145 rt_rq->highest_prio.next = MAX_RT_PRIO-1; in init_rt_rq()
146 rt_rq->rt_nr_migratory = 0; in init_rt_rq()
147 rt_rq->overloaded = 0; in init_rt_rq()
148 plist_head_init(&rt_rq->pushable_tasks); in init_rt_rq()
151 rt_rq->rt_queued = 0; in init_rt_rq()
153 rt_rq->rt_time = 0; in init_rt_rq()
154 rt_rq->rt_throttled = 0; in init_rt_rq()
155 rt_rq->rt_runtime = 0; in init_rt_rq()
156 raw_spin_lock_init(&rt_rq->rt_runtime_lock); in init_rt_rq()
175 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq) in rq_of_rt_rq() argument
177 return rt_rq->rq; in rq_of_rt_rq()
180 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se) in rt_rq_of_se()
182 return rt_se->rt_rq; in rt_rq_of_se()
187 struct rt_rq *rt_rq = rt_se->rt_rq; in rq_of_rt_se() local
189 return rt_rq->rq; in rq_of_rt_se()
204 if (tg->rt_rq) in free_rt_sched_group()
205 kfree(tg->rt_rq[i]); in free_rt_sched_group()
210 kfree(tg->rt_rq); in free_rt_sched_group()
214 void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq, in init_tg_rt_entry() argument
220 rt_rq->highest_prio.curr = MAX_RT_PRIO-1; in init_tg_rt_entry()
221 rt_rq->rt_nr_boosted = 0; in init_tg_rt_entry()
222 rt_rq->rq = rq; in init_tg_rt_entry()
223 rt_rq->tg = tg; in init_tg_rt_entry()
225 tg->rt_rq[cpu] = rt_rq; in init_tg_rt_entry()
232 rt_se->rt_rq = &rq->rt; in init_tg_rt_entry()
234 rt_se->rt_rq = parent->my_q; in init_tg_rt_entry()
236 rt_se->my_q = rt_rq; in init_tg_rt_entry()
243 struct rt_rq *rt_rq; in alloc_rt_sched_group() local
247 tg->rt_rq = kcalloc(nr_cpu_ids, sizeof(rt_rq), GFP_KERNEL); in alloc_rt_sched_group()
248 if (!tg->rt_rq) in alloc_rt_sched_group()
258 rt_rq = kzalloc_node(sizeof(struct rt_rq), in alloc_rt_sched_group()
260 if (!rt_rq) in alloc_rt_sched_group()
268 init_rt_rq(rt_rq); in alloc_rt_sched_group()
269 rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime; in alloc_rt_sched_group()
270 init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]); in alloc_rt_sched_group()
276 kfree(rt_rq); in alloc_rt_sched_group()
290 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq) in rq_of_rt_rq() argument
292 return container_of(rt_rq, struct rq, rt); in rq_of_rt_rq()
302 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se) in rt_rq_of_se()
361 static void update_rt_migration(struct rt_rq *rt_rq) in update_rt_migration() argument
363 if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) { in update_rt_migration()
364 if (!rt_rq->overloaded) { in update_rt_migration()
365 rt_set_overload(rq_of_rt_rq(rt_rq)); in update_rt_migration()
366 rt_rq->overloaded = 1; in update_rt_migration()
368 } else if (rt_rq->overloaded) { in update_rt_migration()
369 rt_clear_overload(rq_of_rt_rq(rt_rq)); in update_rt_migration()
370 rt_rq->overloaded = 0; in update_rt_migration()
374 static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) in inc_rt_migration() argument
382 rt_rq = &rq_of_rt_rq(rt_rq)->rt; in inc_rt_migration()
384 rt_rq->rt_nr_total++; in inc_rt_migration()
386 rt_rq->rt_nr_migratory++; in inc_rt_migration()
388 update_rt_migration(rt_rq); in inc_rt_migration()
391 static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) in dec_rt_migration() argument
399 rt_rq = &rq_of_rt_rq(rt_rq)->rt; in dec_rt_migration()
401 rt_rq->rt_nr_total--; in dec_rt_migration()
403 rt_rq->rt_nr_migratory--; in dec_rt_migration()
405 update_rt_migration(rt_rq); in dec_rt_migration()
468 void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) in inc_rt_migration() argument
473 void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) in dec_rt_migration() argument
482 static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
483 static void dequeue_top_rt_rq(struct rt_rq *rt_rq, unsigned int count);
531 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq) in sched_rt_runtime() argument
533 if (!rt_rq->tg) in sched_rt_runtime()
536 return rt_rq->rt_runtime; in sched_rt_runtime()
539 static inline u64 sched_rt_period(struct rt_rq *rt_rq) in sched_rt_period() argument
541 return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period); in sched_rt_period()
559 #define for_each_rt_rq(rt_rq, iter, rq) \ argument
562 (rt_rq = iter->rt_rq[cpu_of(rq)]);)
567 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se) in group_rt_rq()
575 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq) in sched_rt_rq_enqueue() argument
577 struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr; in sched_rt_rq_enqueue()
578 struct rq *rq = rq_of_rt_rq(rt_rq); in sched_rt_rq_enqueue()
583 rt_se = rt_rq->tg->rt_se[cpu]; in sched_rt_rq_enqueue()
585 if (rt_rq->rt_nr_running) { in sched_rt_rq_enqueue()
587 enqueue_top_rt_rq(rt_rq); in sched_rt_rq_enqueue()
591 if (rt_rq->highest_prio.curr < curr->prio) in sched_rt_rq_enqueue()
596 static void sched_rt_rq_dequeue(struct rt_rq *rt_rq) in sched_rt_rq_dequeue() argument
599 int cpu = cpu_of(rq_of_rt_rq(rt_rq)); in sched_rt_rq_dequeue()
601 rt_se = rt_rq->tg->rt_se[cpu]; in sched_rt_rq_dequeue()
604 dequeue_top_rt_rq(rt_rq, rt_rq->rt_nr_running); in sched_rt_rq_dequeue()
606 cpufreq_update_util(rq_of_rt_rq(rt_rq), 0); in sched_rt_rq_dequeue()
612 static inline int rt_rq_throttled(struct rt_rq *rt_rq) in rt_rq_throttled() argument
614 return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted; in rt_rq_throttled()
619 struct rt_rq *rt_rq = group_rt_rq(rt_se); in rt_se_boosted() local
622 if (rt_rq) in rt_se_boosted()
623 return !!rt_rq->rt_nr_boosted; in rt_se_boosted()
642 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu) in sched_rt_period_rt_rq()
644 return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu]; in sched_rt_period_rt_rq()
647 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq) in sched_rt_bandwidth() argument
649 return &rt_rq->tg->rt_bandwidth; in sched_rt_bandwidth()
654 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq) in sched_rt_runtime() argument
656 return rt_rq->rt_runtime; in sched_rt_runtime()
659 static inline u64 sched_rt_period(struct rt_rq *rt_rq) in sched_rt_period() argument
664 typedef struct rt_rq *rt_rq_iter_t;
666 #define for_each_rt_rq(rt_rq, iter, rq) \ argument
667 for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
672 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se) in group_rt_rq()
677 static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq) in sched_rt_rq_enqueue() argument
679 struct rq *rq = rq_of_rt_rq(rt_rq); in sched_rt_rq_enqueue()
681 if (!rt_rq->rt_nr_running) in sched_rt_rq_enqueue()
684 enqueue_top_rt_rq(rt_rq); in sched_rt_rq_enqueue()
688 static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq) in sched_rt_rq_dequeue() argument
690 dequeue_top_rt_rq(rt_rq, rt_rq->rt_nr_running); in sched_rt_rq_dequeue()
693 static inline int rt_rq_throttled(struct rt_rq *rt_rq) in rt_rq_throttled() argument
695 return rt_rq->rt_throttled; in rt_rq_throttled()
704 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu) in sched_rt_period_rt_rq()
709 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq) in sched_rt_bandwidth() argument
716 bool sched_rt_bandwidth_account(struct rt_rq *rt_rq) in sched_rt_bandwidth_account() argument
718 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); in sched_rt_bandwidth_account()
721 rt_rq->rt_time < rt_b->rt_runtime); in sched_rt_bandwidth_account()
728 static void do_balance_runtime(struct rt_rq *rt_rq) in do_balance_runtime() argument
730 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); in do_balance_runtime()
731 struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd; in do_balance_runtime()
740 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i); in do_balance_runtime()
743 if (iter == rt_rq) in do_balance_runtime()
762 if (rt_rq->rt_runtime + diff > rt_period) in do_balance_runtime()
763 diff = rt_period - rt_rq->rt_runtime; in do_balance_runtime()
765 rt_rq->rt_runtime += diff; in do_balance_runtime()
766 if (rt_rq->rt_runtime == rt_period) { in do_balance_runtime()
784 struct rt_rq *rt_rq; in __disable_runtime() local
789 for_each_rt_rq(rt_rq, iter, rq) { in __disable_runtime()
790 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); in __disable_runtime()
795 raw_spin_lock(&rt_rq->rt_runtime_lock); in __disable_runtime()
801 if (rt_rq->rt_runtime == RUNTIME_INF || in __disable_runtime()
802 rt_rq->rt_runtime == rt_b->rt_runtime) in __disable_runtime()
804 raw_spin_unlock(&rt_rq->rt_runtime_lock); in __disable_runtime()
811 want = rt_b->rt_runtime - rt_rq->rt_runtime; in __disable_runtime()
817 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i); in __disable_runtime()
823 if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF) in __disable_runtime()
841 raw_spin_lock(&rt_rq->rt_runtime_lock); in __disable_runtime()
852 rt_rq->rt_runtime = RUNTIME_INF; in __disable_runtime()
853 rt_rq->rt_throttled = 0; in __disable_runtime()
854 raw_spin_unlock(&rt_rq->rt_runtime_lock); in __disable_runtime()
858 sched_rt_rq_enqueue(rt_rq); in __disable_runtime()
865 struct rt_rq *rt_rq; in __enable_runtime() local
873 for_each_rt_rq(rt_rq, iter, rq) { in __enable_runtime()
874 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); in __enable_runtime()
877 raw_spin_lock(&rt_rq->rt_runtime_lock); in __enable_runtime()
878 rt_rq->rt_runtime = rt_b->rt_runtime; in __enable_runtime()
879 rt_rq->rt_time = 0; in __enable_runtime()
880 rt_rq->rt_throttled = 0; in __enable_runtime()
881 raw_spin_unlock(&rt_rq->rt_runtime_lock); in __enable_runtime()
886 static void balance_runtime(struct rt_rq *rt_rq) in balance_runtime() argument
891 if (rt_rq->rt_time > rt_rq->rt_runtime) { in balance_runtime()
892 raw_spin_unlock(&rt_rq->rt_runtime_lock); in balance_runtime()
893 do_balance_runtime(rt_rq); in balance_runtime()
894 raw_spin_lock(&rt_rq->rt_runtime_lock); in balance_runtime()
898 static inline void balance_runtime(struct rt_rq *rt_rq) {} in balance_runtime() argument
922 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i); in do_sched_rt_period_timer() local
923 struct rq *rq = rq_of_rt_rq(rt_rq); in do_sched_rt_period_timer()
931 raw_spin_lock(&rt_rq->rt_runtime_lock); in do_sched_rt_period_timer()
932 if (!sched_feat(RT_RUNTIME_SHARE) && rt_rq->rt_runtime != RUNTIME_INF) in do_sched_rt_period_timer()
933 rt_rq->rt_runtime = rt_b->rt_runtime; in do_sched_rt_period_timer()
934 skip = !rt_rq->rt_time && !rt_rq->rt_nr_running; in do_sched_rt_period_timer()
935 raw_spin_unlock(&rt_rq->rt_runtime_lock); in do_sched_rt_period_timer()
942 if (rt_rq->rt_time) { in do_sched_rt_period_timer()
945 raw_spin_lock(&rt_rq->rt_runtime_lock); in do_sched_rt_period_timer()
946 if (rt_rq->rt_throttled) in do_sched_rt_period_timer()
947 balance_runtime(rt_rq); in do_sched_rt_period_timer()
948 runtime = rt_rq->rt_runtime; in do_sched_rt_period_timer()
949 rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime); in do_sched_rt_period_timer()
950 if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) { in do_sched_rt_period_timer()
951 rt_rq->rt_throttled = 0; in do_sched_rt_period_timer()
961 if (rt_rq->rt_nr_running && rq->curr == rq->idle) in do_sched_rt_period_timer()
964 if (rt_rq->rt_time || rt_rq->rt_nr_running) in do_sched_rt_period_timer()
966 raw_spin_unlock(&rt_rq->rt_runtime_lock); in do_sched_rt_period_timer()
967 } else if (rt_rq->rt_nr_running) { in do_sched_rt_period_timer()
969 if (!rt_rq_throttled(rt_rq)) in do_sched_rt_period_timer()
972 if (rt_rq->rt_throttled) in do_sched_rt_period_timer()
976 sched_rt_rq_enqueue(rt_rq); in do_sched_rt_period_timer()
989 struct rt_rq *rt_rq = group_rt_rq(rt_se); in rt_se_prio() local
991 if (rt_rq) in rt_se_prio()
992 return rt_rq->highest_prio.curr; in rt_se_prio()
998 static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq) in sched_rt_runtime_exceeded() argument
1000 u64 runtime = sched_rt_runtime(rt_rq); in sched_rt_runtime_exceeded()
1002 if (rt_rq->rt_throttled) in sched_rt_runtime_exceeded()
1003 return rt_rq_throttled(rt_rq); in sched_rt_runtime_exceeded()
1005 if (runtime >= sched_rt_period(rt_rq)) in sched_rt_runtime_exceeded()
1008 balance_runtime(rt_rq); in sched_rt_runtime_exceeded()
1009 runtime = sched_rt_runtime(rt_rq); in sched_rt_runtime_exceeded()
1013 if (rt_rq->rt_time > runtime) { in sched_rt_runtime_exceeded()
1014 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); in sched_rt_runtime_exceeded()
1021 rt_rq->rt_throttled = 1; in sched_rt_runtime_exceeded()
1029 rt_rq->rt_time = 0; in sched_rt_runtime_exceeded()
1032 if (rt_rq_throttled(rt_rq)) { in sched_rt_runtime_exceeded()
1033 sched_rt_rq_dequeue(rt_rq); in sched_rt_runtime_exceeded()
1071 struct rt_rq *rt_rq = rt_rq_of_se(rt_se); in update_curr_rt() local
1074 if (sched_rt_runtime(rt_rq) != RUNTIME_INF) { in update_curr_rt()
1075 raw_spin_lock(&rt_rq->rt_runtime_lock); in update_curr_rt()
1076 rt_rq->rt_time += delta_exec; in update_curr_rt()
1077 exceeded = sched_rt_runtime_exceeded(rt_rq); in update_curr_rt()
1080 raw_spin_unlock(&rt_rq->rt_runtime_lock); in update_curr_rt()
1082 do_start_rt_bandwidth(sched_rt_bandwidth(rt_rq)); in update_curr_rt()
1088 dequeue_top_rt_rq(struct rt_rq *rt_rq, unsigned int count) in dequeue_top_rt_rq() argument
1090 struct rq *rq = rq_of_rt_rq(rt_rq); in dequeue_top_rt_rq()
1092 BUG_ON(&rq->rt != rt_rq); in dequeue_top_rt_rq()
1094 if (!rt_rq->rt_queued) in dequeue_top_rt_rq()
1100 rt_rq->rt_queued = 0; in dequeue_top_rt_rq()
1105 enqueue_top_rt_rq(struct rt_rq *rt_rq) in enqueue_top_rt_rq() argument
1107 struct rq *rq = rq_of_rt_rq(rt_rq); in enqueue_top_rt_rq()
1109 BUG_ON(&rq->rt != rt_rq); in enqueue_top_rt_rq()
1111 if (rt_rq->rt_queued) in enqueue_top_rt_rq()
1114 if (rt_rq_throttled(rt_rq)) in enqueue_top_rt_rq()
1117 if (rt_rq->rt_nr_running) { in enqueue_top_rt_rq()
1118 add_nr_running(rq, rt_rq->rt_nr_running); in enqueue_top_rt_rq()
1119 rt_rq->rt_queued = 1; in enqueue_top_rt_rq()
1129 inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) in inc_rt_prio_smp() argument
1131 struct rq *rq = rq_of_rt_rq(rt_rq); in inc_rt_prio_smp()
1137 if (&rq->rt != rt_rq) in inc_rt_prio_smp()
1145 dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) in dec_rt_prio_smp() argument
1147 struct rq *rq = rq_of_rt_rq(rt_rq); in dec_rt_prio_smp()
1153 if (&rq->rt != rt_rq) in dec_rt_prio_smp()
1156 if (rq->online && rt_rq->highest_prio.curr != prev_prio) in dec_rt_prio_smp()
1157 cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr); in dec_rt_prio_smp()
1163 void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {} in inc_rt_prio_smp() argument
1165 void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {} in dec_rt_prio_smp() argument
1171 inc_rt_prio(struct rt_rq *rt_rq, int prio) in inc_rt_prio() argument
1173 int prev_prio = rt_rq->highest_prio.curr; in inc_rt_prio()
1176 rt_rq->highest_prio.curr = prio; in inc_rt_prio()
1178 inc_rt_prio_smp(rt_rq, prio, prev_prio); in inc_rt_prio()
1182 dec_rt_prio(struct rt_rq *rt_rq, int prio) in dec_rt_prio() argument
1184 int prev_prio = rt_rq->highest_prio.curr; in dec_rt_prio()
1186 if (rt_rq->rt_nr_running) { in dec_rt_prio()
1195 struct rt_prio_array *array = &rt_rq->active; in dec_rt_prio()
1197 rt_rq->highest_prio.curr = in dec_rt_prio()
1202 rt_rq->highest_prio.curr = MAX_RT_PRIO-1; in dec_rt_prio()
1205 dec_rt_prio_smp(rt_rq, prio, prev_prio); in dec_rt_prio()
1210 static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {} in inc_rt_prio() argument
1211 static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {} in dec_rt_prio() argument
1218 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) in inc_rt_group() argument
1221 rt_rq->rt_nr_boosted++; in inc_rt_group()
1223 if (rt_rq->tg) in inc_rt_group()
1224 start_rt_bandwidth(&rt_rq->tg->rt_bandwidth); in inc_rt_group()
1228 dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) in dec_rt_group() argument
1231 rt_rq->rt_nr_boosted--; in dec_rt_group()
1233 WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted); in dec_rt_group()
1239 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) in inc_rt_group() argument
1245 void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {} in dec_rt_group() argument
1252 struct rt_rq *group_rq = group_rt_rq(rt_se); in rt_se_nr_running()
1263 struct rt_rq *group_rq = group_rt_rq(rt_se); in rt_se_rr_nr_running()
1275 void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) in inc_rt_tasks() argument
1280 rt_rq->rt_nr_running += rt_se_nr_running(rt_se); in inc_rt_tasks()
1281 rt_rq->rr_nr_running += rt_se_rr_nr_running(rt_se); in inc_rt_tasks()
1283 inc_rt_prio(rt_rq, prio); in inc_rt_tasks()
1284 inc_rt_migration(rt_se, rt_rq); in inc_rt_tasks()
1285 inc_rt_group(rt_se, rt_rq); in inc_rt_tasks()
1289 void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) in dec_rt_tasks() argument
1292 WARN_ON(!rt_rq->rt_nr_running); in dec_rt_tasks()
1293 rt_rq->rt_nr_running -= rt_se_nr_running(rt_se); in dec_rt_tasks()
1294 rt_rq->rr_nr_running -= rt_se_rr_nr_running(rt_se); in dec_rt_tasks()
1296 dec_rt_prio(rt_rq, rt_se_prio(rt_se)); in dec_rt_tasks()
1297 dec_rt_migration(rt_se, rt_rq); in dec_rt_tasks()
1298 dec_rt_group(rt_se, rt_rq); in dec_rt_tasks()
1337 update_stats_wait_start_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se) in update_stats_wait_start_rt() argument
1352 __update_stats_wait_start(rq_of_rt_rq(rt_rq), p, stats); in update_stats_wait_start_rt()
1356 update_stats_enqueue_sleeper_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se) in update_stats_enqueue_sleeper_rt() argument
1371 __update_stats_enqueue_sleeper(rq_of_rt_rq(rt_rq), p, stats); in update_stats_enqueue_sleeper_rt()
1375 update_stats_enqueue_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, in update_stats_enqueue_rt() argument
1382 update_stats_enqueue_sleeper_rt(rt_rq, rt_se); in update_stats_enqueue_rt()
1386 update_stats_wait_end_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se) in update_stats_wait_end_rt() argument
1401 __update_stats_wait_end(rq_of_rt_rq(rt_rq), p, stats); in update_stats_wait_end_rt()
1405 update_stats_dequeue_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, in update_stats_dequeue_rt() argument
1422 rq_clock(rq_of_rt_rq(rt_rq))); in update_stats_dequeue_rt()
1426 rq_clock(rq_of_rt_rq(rt_rq))); in update_stats_dequeue_rt()
1432 struct rt_rq *rt_rq = rt_rq_of_se(rt_se); in __enqueue_rt_entity() local
1433 struct rt_prio_array *array = &rt_rq->active; in __enqueue_rt_entity()
1434 struct rt_rq *group_rq = group_rt_rq(rt_se); in __enqueue_rt_entity()
1461 inc_rt_tasks(rt_se, rt_rq); in __enqueue_rt_entity()
1466 struct rt_rq *rt_rq = rt_rq_of_se(rt_se); in __dequeue_rt_entity() local
1467 struct rt_prio_array *array = &rt_rq->active; in __dequeue_rt_entity()
1475 dec_rt_tasks(rt_se, rt_rq); in __dequeue_rt_entity()
1523 struct rt_rq *rt_rq = group_rt_rq(rt_se); in dequeue_rt_entity() local
1525 if (rt_rq && rt_rq->rt_nr_running) in dequeue_rt_entity()
1566 requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head) in requeue_rt_entity() argument
1569 struct rt_prio_array *array = &rt_rq->active; in requeue_rt_entity()
1582 struct rt_rq *rt_rq; in requeue_task_rt() local
1585 rt_rq = rt_rq_of_se(rt_se); in requeue_task_rt()
1586 requeue_rt_entity(rt_rq, rt_se, head); in requeue_task_rt()
1746 struct rt_rq *rt_rq = &rq->rt; in set_next_task_rt() local
1750 update_stats_wait_end_rt(rt_rq, rt_se); in set_next_task_rt()
1769 static struct sched_rt_entity *pick_next_rt_entity(struct rt_rq *rt_rq) in pick_next_rt_entity() argument
1771 struct rt_prio_array *array = &rt_rq->active; in pick_next_rt_entity()
1790 struct rt_rq *rt_rq = &rq->rt; in _pick_next_task_rt() local
1793 rt_se = pick_next_rt_entity(rt_rq); in _pick_next_task_rt()
1796 rt_rq = group_rt_rq(rt_se); in _pick_next_task_rt()
1797 } while (rt_rq); in _pick_next_task_rt()
1827 struct rt_rq *rt_rq = &rq->rt; in put_prev_task_rt() local
1830 update_stats_wait_start_rt(rt_rq, rt_se); in put_prev_task_rt()
2856 struct rt_rq *rt_rq = tg->rt_rq[i]; in tg_set_rt_bandwidth() local
2858 raw_spin_lock(&rt_rq->rt_runtime_lock); in tg_set_rt_bandwidth()
2859 rt_rq->rt_runtime = rt_runtime; in tg_set_rt_bandwidth()
2860 raw_spin_unlock(&rt_rq->rt_runtime_lock); in tg_set_rt_bandwidth()
2949 struct rt_rq *rt_rq = &cpu_rq(i)->rt; in sched_rt_global_constraints() local
2951 raw_spin_lock(&rt_rq->rt_runtime_lock); in sched_rt_global_constraints()
2952 rt_rq->rt_runtime = global_rt_runtime(); in sched_rt_global_constraints()
2953 raw_spin_unlock(&rt_rq->rt_runtime_lock); in sched_rt_global_constraints()
3053 struct rt_rq *rt_rq; in print_rt_stats() local
3056 for_each_rt_rq(rt_rq, iter, cpu_rq(cpu)) in print_rt_stats()
3057 print_rt_rq(m, cpu, rt_rq); in print_rt_stats()
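
The bandwidth-related references above (update_curr_rt, sched_rt_runtime_exceeded, balance_runtime, do_sched_rt_period_timer, sched_rt_rq_enqueue/dequeue) share one piece of bookkeeping: rt_time accumulates RT execution time under rt_runtime_lock, the queue is marked rt_throttled once rt_time exceeds rt_runtime within a period, and the period timer later subtracts overrun*runtime and clears rt_throttled when rt_time drops back below the budget. The user-space sketch below models only that arithmetic; the toy_rt_rq struct, the toy_* helpers, and the 1 s period / 0.95 s runtime figures are illustrative stand-ins, not the kernel's definitions.

#include <stdio.h>
#include <stdbool.h>

typedef unsigned long long u64;

struct toy_rt_rq {
	u64 rt_time;       /* RT time consumed in the current period */
	u64 rt_runtime;    /* budget available per period */
	int rt_throttled;  /* set once the budget is exhausted */
};

/* cf. sched_rt_runtime_exceeded(): throttle when rt_time > rt_runtime */
static bool toy_runtime_exceeded(struct toy_rt_rq *rt_rq, u64 rt_period)
{
	if (rt_rq->rt_throttled)
		return true;
	if (rt_rq->rt_runtime >= rt_period)   /* whole period available */
		return false;
	if (rt_rq->rt_time > rt_rq->rt_runtime) {
		rt_rq->rt_throttled = 1;
		return true;
	}
	return false;
}

/* cf. do_sched_rt_period_timer(): refund overrun*runtime, then unthrottle */
static void toy_period_timer(struct toy_rt_rq *rt_rq, u64 overrun)
{
	u64 runtime = rt_rq->rt_runtime;
	u64 refund = overrun * runtime;

	if (!rt_rq->rt_time)
		return;
	rt_rq->rt_time -= refund < rt_rq->rt_time ? refund : rt_rq->rt_time;
	if (rt_rq->rt_throttled && rt_rq->rt_time < runtime)
		rt_rq->rt_throttled = 0;
}

int main(void)
{
	/* illustrative numbers: 1 s period, 0.95 s budget, in nanoseconds */
	u64 period = 1000000000ULL;
	struct toy_rt_rq rq = { .rt_time = 0, .rt_runtime = 950000000ULL,
				.rt_throttled = 0 };

	rq.rt_time += 960000000ULL;	/* cf. update_curr_rt(): rt_time += delta_exec */
	printf("after overrun:  throttled=%d\n", toy_runtime_exceeded(&rq, period));

	toy_period_timer(&rq, 1);	/* one period has elapsed */
	printf("after refill:   throttled=%d rt_time=%llu ns\n",
	       rq.rt_throttled, rq.rt_time);
	return 0;
}

In the kernel the same exceeded check can first borrow runtime from other CPUs' rt_rqs (balance_runtime/do_balance_runtime) before throttling, and throttling also dequeues the top-level rt_rq (sched_rt_rq_dequeue); those paths are omitted from the sketch.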