Lines Matching refs:rt_rq

65 void init_rt_rq(struct rt_rq *rt_rq) in init_rt_rq() argument
70 array = &rt_rq->active; in init_rt_rq()
79 rt_rq->highest_prio.curr = MAX_RT_PRIO-1; in init_rt_rq()
80 rt_rq->highest_prio.next = MAX_RT_PRIO-1; in init_rt_rq()
81 rt_rq->overloaded = 0; in init_rt_rq()
82 plist_head_init(&rt_rq->pushable_tasks); in init_rt_rq()
85 rt_rq->rt_queued = 0; in init_rt_rq()
88 rt_rq->rt_time = 0; in init_rt_rq()
89 rt_rq->rt_throttled = 0; in init_rt_rq()
90 rt_rq->rt_runtime = 0; in init_rt_rq()
91 raw_spin_lock_init(&rt_rq->rt_runtime_lock); in init_rt_rq()
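
The init_rt_rq() references above (source lines 65-91) list every field a freshly initialised rt_rq starts out with: an empty priority state, no pushable tasks, and zeroed bandwidth accounting. A minimal user-space sketch of that initialisation, with the struct cut down to just the members visible in this listing (field types are assumptions, MAX_RT_PRIO is the kernel's value of 100, and the pushable_tasks plist from line 82 is left out of the model):

#include <stdio.h>

#define MAX_RT_PRIO 100   /* kernel value; lower number means higher RT priority */

struct rt_rq_model {
        struct { int curr, next; } highest_prio;
        int overloaded;
        unsigned int rt_queued;
        unsigned long long rt_time;      /* RT runtime consumed in the current period */
        int rt_throttled;
        unsigned long long rt_runtime;   /* allowed runtime, 0 until bandwidth is set */
};

static void init_rt_rq_model(struct rt_rq_model *rt_rq)
{
        /* mirrors source lines 79-91: everything starts empty and unthrottled */
        rt_rq->highest_prio.curr = MAX_RT_PRIO - 1;
        rt_rq->highest_prio.next = MAX_RT_PRIO - 1;
        rt_rq->overloaded = 0;
        rt_rq->rt_queued = 0;
        rt_rq->rt_time = 0;
        rt_rq->rt_throttled = 0;
        rt_rq->rt_runtime = 0;
}

int main(void)
{
        struct rt_rq_model r;

        init_rt_rq_model(&r);
        printf("highest prio %d, throttled %d\n", r.highest_prio.curr, r.rt_throttled);
        return 0;
}
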
178 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq) in rq_of_rt_rq() argument
180 return rt_rq->rq; in rq_of_rt_rq()
183 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se) in rt_rq_of_se()
185 return rt_se->rt_rq; in rt_rq_of_se()
190 struct rt_rq *rt_rq = rt_se->rt_rq; in rq_of_rt_se() local
192 return rt_rq->rq; in rq_of_rt_se()
206 if (tg->rt_rq) in free_rt_sched_group()
207 kfree(tg->rt_rq[i]); in free_rt_sched_group()
212 kfree(tg->rt_rq); in free_rt_sched_group()
216 void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq, in init_tg_rt_entry() argument
222 rt_rq->highest_prio.curr = MAX_RT_PRIO-1; in init_tg_rt_entry()
223 rt_rq->rt_nr_boosted = 0; in init_tg_rt_entry()
224 rt_rq->rq = rq; in init_tg_rt_entry()
225 rt_rq->tg = tg; in init_tg_rt_entry()
227 tg->rt_rq[cpu] = rt_rq; in init_tg_rt_entry()
234 rt_se->rt_rq = &rq->rt; in init_tg_rt_entry()
236 rt_se->rt_rq = parent->my_q; in init_tg_rt_entry()
238 rt_se->my_q = rt_rq; in init_tg_rt_entry()
245 struct rt_rq *rt_rq; in alloc_rt_sched_group() local
249 tg->rt_rq = kcalloc(nr_cpu_ids, sizeof(rt_rq), GFP_KERNEL); in alloc_rt_sched_group()
250 if (!tg->rt_rq) in alloc_rt_sched_group()
259 rt_rq = kzalloc_node(sizeof(struct rt_rq), in alloc_rt_sched_group()
261 if (!rt_rq) in alloc_rt_sched_group()
269 init_rt_rq(rt_rq); in alloc_rt_sched_group()
270 rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime; in alloc_rt_sched_group()
271 init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]); in alloc_rt_sched_group()
277 kfree(rt_rq); in alloc_rt_sched_group()
291 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq) in rq_of_rt_rq() argument
293 return container_of(rt_rq, struct rq, rt); in rq_of_rt_rq()
303 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se) in rt_rq_of_se()
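
Two builds of rq_of_rt_rq() appear in this listing: with group scheduling the rt_rq keeps an explicit back-pointer (source lines 178-185), while without it the root rt_rq is embedded in struct rq and its owner is recovered arithmetically (source lines 291-293). A self-contained sketch of the container_of() trick used by the second variant; the struct layouts here are stand-ins, not the kernel's:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct rt_rq { int rt_nr_running; };

/* stand-in for struct rq: the root rt_rq is embedded, not pointed to */
struct rq { int cpu; struct rt_rq rt; };

static struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
        /* mirrors source line 293: recover the enclosing rq from the embedded member */
        return container_of(rt_rq, struct rq, rt);
}

int main(void)
{
        struct rq rq = { .cpu = 3 };

        printf("cpu %d\n", rq_of_rt_rq(&rq.rt)->cpu);   /* prints: cpu 3 */
        return 0;
}
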
436 static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
437 static void dequeue_top_rt_rq(struct rt_rq *rt_rq, unsigned int count);
485 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq) in sched_rt_runtime() argument
487 if (!rt_rq->tg) in sched_rt_runtime()
490 return rt_rq->rt_runtime; in sched_rt_runtime()
493 static inline u64 sched_rt_period(struct rt_rq *rt_rq) in sched_rt_period() argument
495 return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period); in sched_rt_period()
513 #define for_each_rt_rq(rt_rq, iter, rq) \ argument
516 (rt_rq = iter->rt_rq[cpu_of(rq)]);)
521 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se) in group_rt_rq()
529 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq) in sched_rt_rq_enqueue() argument
531 struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr; in sched_rt_rq_enqueue()
532 struct rq *rq = rq_of_rt_rq(rt_rq); in sched_rt_rq_enqueue()
537 rt_se = rt_rq->tg->rt_se[cpu]; in sched_rt_rq_enqueue()
539 if (rt_rq->rt_nr_running) { in sched_rt_rq_enqueue()
541 enqueue_top_rt_rq(rt_rq); in sched_rt_rq_enqueue()
545 if (rt_rq->highest_prio.curr < curr->prio) in sched_rt_rq_enqueue()
550 static void sched_rt_rq_dequeue(struct rt_rq *rt_rq) in sched_rt_rq_dequeue() argument
553 int cpu = cpu_of(rq_of_rt_rq(rt_rq)); in sched_rt_rq_dequeue()
555 rt_se = rt_rq->tg->rt_se[cpu]; in sched_rt_rq_dequeue()
558 dequeue_top_rt_rq(rt_rq, rt_rq->rt_nr_running); in sched_rt_rq_dequeue()
560 cpufreq_update_util(rq_of_rt_rq(rt_rq), 0); in sched_rt_rq_dequeue()
566 static inline int rt_rq_throttled(struct rt_rq *rt_rq) in rt_rq_throttled() argument
568 return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted; in rt_rq_throttled()
573 struct rt_rq *rt_rq = group_rt_rq(rt_se); in rt_se_boosted() local
576 if (rt_rq) in rt_se_boosted()
577 return !!rt_rq->rt_nr_boosted; in rt_se_boosted()
596 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu) in sched_rt_period_rt_rq()
598 return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu]; in sched_rt_period_rt_rq()
601 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq) in sched_rt_bandwidth() argument
603 return &rt_rq->tg->rt_bandwidth; in sched_rt_bandwidth()
606 bool sched_rt_bandwidth_account(struct rt_rq *rt_rq) in sched_rt_bandwidth_account() argument
608 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); in sched_rt_bandwidth_account()
611 rt_rq->rt_time < rt_b->rt_runtime); in sched_rt_bandwidth_account()
618 static void do_balance_runtime(struct rt_rq *rt_rq) in do_balance_runtime() argument
620 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); in do_balance_runtime()
621 struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd; in do_balance_runtime()
630 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i); in do_balance_runtime()
633 if (iter == rt_rq) in do_balance_runtime()
652 if (rt_rq->rt_runtime + diff > rt_period) in do_balance_runtime()
653 diff = rt_period - rt_rq->rt_runtime; in do_balance_runtime()
655 rt_rq->rt_runtime += diff; in do_balance_runtime()
656 if (rt_rq->rt_runtime == rt_period) { in do_balance_runtime()
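
do_balance_runtime() (source lines 618-656) is the runtime-sharing path (the RT_RUNTIME_SHARE feature also seen at source line 822): a CPU that has exhausted its rt_runtime borrows spare runtime from sibling rt_rqs in the same root domain, never growing past the period length. The sketch below models only the arithmetic visible above; splitting each sibling's spare time evenly across the CPUs is an assumption about the parts of the loop this listing does not show:

#include <stdio.h>

typedef unsigned long long u64;

struct rt_rq_model { u64 rt_runtime, rt_time; };

static void balance_runtime_model(struct rt_rq_model *rt_rq,
                                  struct rt_rq_model *cpus, int nr, u64 rt_period)
{
        for (int i = 0; i < nr; i++) {
                struct rt_rq_model *iter = &cpus[i];

                if (iter == rt_rq)                      /* line 633: skip ourselves */
                        continue;
                if (iter->rt_runtime <= iter->rt_time)  /* sibling has no spare time */
                        continue;

                u64 diff = (iter->rt_runtime - iter->rt_time) / nr;

                if (rt_rq->rt_runtime + diff > rt_period)   /* lines 652-653: cap at period */
                        diff = rt_period - rt_rq->rt_runtime;
                iter->rt_runtime -= diff;
                rt_rq->rt_runtime += diff;                  /* line 655 */
                if (rt_rq->rt_runtime == rt_period)         /* line 656: fully topped up */
                        break;
        }
}

int main(void)
{
        /* cpu0 has used all of its 950us budget; cpu1 has 850us spare */
        struct rt_rq_model cpus[2] = { { 950000, 950000 }, { 950000, 100000 } };

        balance_runtime_model(&cpus[0], cpus, 2, 1000000);
        printf("cpu0 runtime now %llu\n", cpus[0].rt_runtime);  /* 1000000: one full period */
        return 0;
}
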
674 struct rt_rq *rt_rq; in __disable_runtime() local
679 for_each_rt_rq(rt_rq, iter, rq) { in __disable_runtime()
680 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); in __disable_runtime()
685 raw_spin_lock(&rt_rq->rt_runtime_lock); in __disable_runtime()
691 if (rt_rq->rt_runtime == RUNTIME_INF || in __disable_runtime()
692 rt_rq->rt_runtime == rt_b->rt_runtime) in __disable_runtime()
694 raw_spin_unlock(&rt_rq->rt_runtime_lock); in __disable_runtime()
701 want = rt_b->rt_runtime - rt_rq->rt_runtime; in __disable_runtime()
707 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i); in __disable_runtime()
713 if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF) in __disable_runtime()
731 raw_spin_lock(&rt_rq->rt_runtime_lock); in __disable_runtime()
742 rt_rq->rt_runtime = RUNTIME_INF; in __disable_runtime()
743 rt_rq->rt_throttled = 0; in __disable_runtime()
744 raw_spin_unlock(&rt_rq->rt_runtime_lock); in __disable_runtime()
748 sched_rt_rq_enqueue(rt_rq); in __disable_runtime()
755 struct rt_rq *rt_rq; in __enable_runtime() local
763 for_each_rt_rq(rt_rq, iter, rq) { in __enable_runtime()
764 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); in __enable_runtime()
767 raw_spin_lock(&rt_rq->rt_runtime_lock); in __enable_runtime()
768 rt_rq->rt_runtime = rt_b->rt_runtime; in __enable_runtime()
769 rt_rq->rt_time = 0; in __enable_runtime()
770 rt_rq->rt_throttled = 0; in __enable_runtime()
771 raw_spin_unlock(&rt_rq->rt_runtime_lock); in __enable_runtime()
776 static void balance_runtime(struct rt_rq *rt_rq) in balance_runtime() argument
781 if (rt_rq->rt_time > rt_rq->rt_runtime) { in balance_runtime()
782 raw_spin_unlock(&rt_rq->rt_runtime_lock); in balance_runtime()
783 do_balance_runtime(rt_rq); in balance_runtime()
784 raw_spin_lock(&rt_rq->rt_runtime_lock); in balance_runtime()
788 static inline void balance_runtime(struct rt_rq *rt_rq) {} in balance_runtime() argument
812 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i); in do_sched_rt_period_timer() local
813 struct rq *rq = rq_of_rt_rq(rt_rq); in do_sched_rt_period_timer()
821 raw_spin_lock(&rt_rq->rt_runtime_lock); in do_sched_rt_period_timer()
822 if (!sched_feat(RT_RUNTIME_SHARE) && rt_rq->rt_runtime != RUNTIME_INF) in do_sched_rt_period_timer()
823 rt_rq->rt_runtime = rt_b->rt_runtime; in do_sched_rt_period_timer()
824 skip = !rt_rq->rt_time && !rt_rq->rt_nr_running; in do_sched_rt_period_timer()
825 raw_spin_unlock(&rt_rq->rt_runtime_lock); in do_sched_rt_period_timer()
832 if (rt_rq->rt_time) { in do_sched_rt_period_timer()
835 raw_spin_lock(&rt_rq->rt_runtime_lock); in do_sched_rt_period_timer()
836 if (rt_rq->rt_throttled) in do_sched_rt_period_timer()
837 balance_runtime(rt_rq); in do_sched_rt_period_timer()
838 runtime = rt_rq->rt_runtime; in do_sched_rt_period_timer()
839 rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime); in do_sched_rt_period_timer()
840 if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) { in do_sched_rt_period_timer()
841 rt_rq->rt_throttled = 0; in do_sched_rt_period_timer()
851 if (rt_rq->rt_nr_running && rq->curr == rq->idle) in do_sched_rt_period_timer()
854 if (rt_rq->rt_time || rt_rq->rt_nr_running) in do_sched_rt_period_timer()
856 raw_spin_unlock(&rt_rq->rt_runtime_lock); in do_sched_rt_period_timer()
857 } else if (rt_rq->rt_nr_running) { in do_sched_rt_period_timer()
859 if (!rt_rq_throttled(rt_rq)) in do_sched_rt_period_timer()
862 if (rt_rq->rt_throttled) in do_sched_rt_period_timer()
866 sched_rt_rq_enqueue(rt_rq); in do_sched_rt_period_timer()
876 static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq) in sched_rt_runtime_exceeded() argument
878 u64 runtime = sched_rt_runtime(rt_rq); in sched_rt_runtime_exceeded()
880 if (rt_rq->rt_throttled) in sched_rt_runtime_exceeded()
881 return rt_rq_throttled(rt_rq); in sched_rt_runtime_exceeded()
883 if (runtime >= sched_rt_period(rt_rq)) in sched_rt_runtime_exceeded()
886 balance_runtime(rt_rq); in sched_rt_runtime_exceeded()
887 runtime = sched_rt_runtime(rt_rq); in sched_rt_runtime_exceeded()
891 if (rt_rq->rt_time > runtime) { in sched_rt_runtime_exceeded()
892 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); in sched_rt_runtime_exceeded()
899 rt_rq->rt_throttled = 1; in sched_rt_runtime_exceeded()
907 rt_rq->rt_time = 0; in sched_rt_runtime_exceeded()
910 if (rt_rq_throttled(rt_rq)) { in sched_rt_runtime_exceeded()
911 sched_rt_rq_dequeue(rt_rq); in sched_rt_runtime_exceeded()
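
update_curr_rt() (source lines 1008-1019) charges execution time to rt_rq->rt_time, sched_rt_runtime_exceeded() (source lines 876-911) throttles the rt_rq once that charge passes rt_runtime, and do_sched_rt_period_timer() (source lines 812-866) pays the debt back down each period and lifts the throttle. A compilable model of that accounting loop; RUNTIME_INF and the 950 ms per 1 s defaults used in main() are taken from the kernel's usual values and should be read as assumptions here:

#include <stdio.h>

typedef unsigned long long u64;

#define RUNTIME_INF ((u64)~0ULL)   /* stands in for the kernel's "no limit" marker */

struct rt_rq_model {
        u64 rt_time;       /* RT execution time consumed in the current period */
        u64 rt_runtime;    /* allowed runtime per period */
        int rt_throttled;
};

/* mirrors update_curr_rt() line 1013 plus sched_rt_runtime_exceeded() lines 876-911 */
static int account_rt_time(struct rt_rq_model *rt_rq, u64 delta_exec, u64 rt_period)
{
        if (rt_rq->rt_runtime == RUNTIME_INF)
                return 0;
        rt_rq->rt_time += delta_exec;
        if (rt_rq->rt_runtime >= rt_period)        /* line 883: a full period never throttles */
                return 0;
        if (rt_rq->rt_time > rt_rq->rt_runtime) {  /* line 891 */
                rt_rq->rt_throttled = 1;           /* line 899 */
                return 1;                          /* caller dequeues the rt_rq, line 911 */
        }
        return 0;
}

/* mirrors the per-period replenishment at lines 838-841 */
static void period_timer(struct rt_rq_model *rt_rq, u64 overrun)
{
        u64 runtime = rt_rq->rt_runtime;
        u64 decay = overrun * runtime;

        rt_rq->rt_time -= rt_rq->rt_time < decay ? rt_rq->rt_time : decay;
        if (rt_rq->rt_throttled && rt_rq->rt_time < runtime)
                rt_rq->rt_throttled = 0;           /* lines 840-841: lift the throttle */
}

int main(void)
{
        /* 950 ms of runtime per 1 s period, in nanoseconds */
        struct rt_rq_model r = { 0, 950000000ULL, 0 };

        account_rt_time(&r, 960000000ULL, 1000000000ULL);  /* ran 960 ms: throttled */
        printf("throttled=%d\n", r.rt_throttled);
        period_timer(&r, 1);                               /* next period: replenished */
        printf("throttled=%d rt_time=%llu\n", r.rt_throttled, r.rt_time);
        return 0;
}
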
921 typedef struct rt_rq *rt_rq_iter_t;
923 #define for_each_rt_rq(rt_rq, iter, rq) \ argument
924 for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
929 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se) in group_rt_rq()
934 static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq) in sched_rt_rq_enqueue() argument
936 struct rq *rq = rq_of_rt_rq(rt_rq); in sched_rt_rq_enqueue()
938 if (!rt_rq->rt_nr_running) in sched_rt_rq_enqueue()
941 enqueue_top_rt_rq(rt_rq); in sched_rt_rq_enqueue()
945 static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq) in sched_rt_rq_dequeue() argument
947 dequeue_top_rt_rq(rt_rq, rt_rq->rt_nr_running); in sched_rt_rq_dequeue()
950 static inline int rt_rq_throttled(struct rt_rq *rt_rq) in rt_rq_throttled() argument
961 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu) in sched_rt_period_rt_rq()
976 struct rt_rq *rt_rq = group_rt_rq(rt_se); in rt_se_prio() local
978 if (rt_rq) in rt_se_prio()
979 return rt_rq->highest_prio.curr; in rt_se_prio()
1008 struct rt_rq *rt_rq = rt_rq_of_se(rt_se); in update_curr_rt() local
1011 if (sched_rt_runtime(rt_rq) != RUNTIME_INF) { in update_curr_rt()
1012 raw_spin_lock(&rt_rq->rt_runtime_lock); in update_curr_rt()
1013 rt_rq->rt_time += delta_exec; in update_curr_rt()
1014 exceeded = sched_rt_runtime_exceeded(rt_rq); in update_curr_rt()
1017 raw_spin_unlock(&rt_rq->rt_runtime_lock); in update_curr_rt()
1019 do_start_rt_bandwidth(sched_rt_bandwidth(rt_rq)); in update_curr_rt()
1026 dequeue_top_rt_rq(struct rt_rq *rt_rq, unsigned int count) in dequeue_top_rt_rq() argument
1028 struct rq *rq = rq_of_rt_rq(rt_rq); in dequeue_top_rt_rq()
1030 BUG_ON(&rq->rt != rt_rq); in dequeue_top_rt_rq()
1032 if (!rt_rq->rt_queued) in dequeue_top_rt_rq()
1038 rt_rq->rt_queued = 0; in dequeue_top_rt_rq()
1043 enqueue_top_rt_rq(struct rt_rq *rt_rq) in enqueue_top_rt_rq() argument
1045 struct rq *rq = rq_of_rt_rq(rt_rq); in enqueue_top_rt_rq()
1047 BUG_ON(&rq->rt != rt_rq); in enqueue_top_rt_rq()
1049 if (rt_rq->rt_queued) in enqueue_top_rt_rq()
1052 if (rt_rq_throttled(rt_rq)) in enqueue_top_rt_rq()
1055 if (rt_rq->rt_nr_running) { in enqueue_top_rt_rq()
1056 add_nr_running(rq, rt_rq->rt_nr_running); in enqueue_top_rt_rq()
1057 rt_rq->rt_queued = 1; in enqueue_top_rt_rq()
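
dequeue_top_rt_rq() and enqueue_top_rt_rq() (source lines 1026-1057) show how only the root rt_rq feeds its rt_nr_running into the rq-wide nr_running count, with rt_queued guarding against double accounting and a throttled rt_rq contributing nothing. A small model of that bookkeeping, with the rq and rt_rq structs reduced to the fields used here:

#include <stdio.h>

struct rq_model { unsigned int nr_running; };

struct rt_rq_model {
        struct rq_model *rq;          /* back-pointer, as rq_of_rt_rq() would return */
        unsigned int rt_nr_running;
        unsigned int rt_queued;       /* 1 while counted in rq->nr_running */
        int rt_throttled;
};

static void dequeue_top(struct rt_rq_model *rt_rq, unsigned int count)
{
        if (!rt_rq->rt_queued)                    /* line 1032: nothing accounted yet */
                return;
        rt_rq->rq->nr_running -= count;
        rt_rq->rt_queued = 0;                     /* line 1038 */
}

static void enqueue_top(struct rt_rq_model *rt_rq)
{
        if (rt_rq->rt_queued)                     /* line 1049: already accounted */
                return;
        if (rt_rq->rt_throttled)                  /* line 1052: throttled rt_rq stays invisible */
                return;
        if (rt_rq->rt_nr_running) {               /* lines 1055-1057 */
                rt_rq->rq->nr_running += rt_rq->rt_nr_running;
                rt_rq->rt_queued = 1;
        }
}

int main(void)
{
        struct rq_model rq = { 0 };
        struct rt_rq_model rt = { &rq, 2, 0, 0 };

        enqueue_top(&rt);
        printf("nr_running=%u\n", rq.nr_running);   /* 2 */
        rt.rt_throttled = 1;
        dequeue_top(&rt, rt.rt_nr_running);
        printf("nr_running=%u\n", rq.nr_running);   /* 0 while throttled */
        return 0;
}
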
1067 inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) in inc_rt_prio_smp() argument
1069 struct rq *rq = rq_of_rt_rq(rt_rq); in inc_rt_prio_smp()
1075 if (&rq->rt != rt_rq) in inc_rt_prio_smp()
1083 dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) in dec_rt_prio_smp() argument
1085 struct rq *rq = rq_of_rt_rq(rt_rq); in dec_rt_prio_smp()
1091 if (&rq->rt != rt_rq) in dec_rt_prio_smp()
1094 if (rq->online && rt_rq->highest_prio.curr != prev_prio) in dec_rt_prio_smp()
1095 cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr); in dec_rt_prio_smp()
1101 void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {} in inc_rt_prio_smp() argument
1103 void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {} in dec_rt_prio_smp() argument
1109 inc_rt_prio(struct rt_rq *rt_rq, int prio) in inc_rt_prio() argument
1111 int prev_prio = rt_rq->highest_prio.curr; in inc_rt_prio()
1114 rt_rq->highest_prio.curr = prio; in inc_rt_prio()
1116 inc_rt_prio_smp(rt_rq, prio, prev_prio); in inc_rt_prio()
1120 dec_rt_prio(struct rt_rq *rt_rq, int prio) in dec_rt_prio() argument
1122 int prev_prio = rt_rq->highest_prio.curr; in dec_rt_prio()
1124 if (rt_rq->rt_nr_running) { in dec_rt_prio()
1133 struct rt_prio_array *array = &rt_rq->active; in dec_rt_prio()
1135 rt_rq->highest_prio.curr = in dec_rt_prio()
1140 rt_rq->highest_prio.curr = MAX_RT_PRIO-1; in dec_rt_prio()
1143 dec_rt_prio_smp(rt_rq, prio, prev_prio); in dec_rt_prio()
1148 static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {} in inc_rt_prio() argument
1149 static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {} in dec_rt_prio() argument
1156 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) in inc_rt_group() argument
1159 rt_rq->rt_nr_boosted++; in inc_rt_group()
1161 if (rt_rq->tg) in inc_rt_group()
1162 start_rt_bandwidth(&rt_rq->tg->rt_bandwidth); in inc_rt_group()
1166 dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) in dec_rt_group() argument
1169 rt_rq->rt_nr_boosted--; in dec_rt_group()
1171 WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted); in dec_rt_group()
1177 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) in inc_rt_group() argument
1182 void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {} in dec_rt_group() argument
1189 struct rt_rq *group_rq = group_rt_rq(rt_se); in rt_se_nr_running()
1200 struct rt_rq *group_rq = group_rt_rq(rt_se); in rt_se_rr_nr_running()
1212 void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) in inc_rt_tasks() argument
1217 rt_rq->rt_nr_running += rt_se_nr_running(rt_se); in inc_rt_tasks()
1218 rt_rq->rr_nr_running += rt_se_rr_nr_running(rt_se); in inc_rt_tasks()
1220 inc_rt_prio(rt_rq, prio); in inc_rt_tasks()
1221 inc_rt_group(rt_se, rt_rq); in inc_rt_tasks()
1225 void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) in dec_rt_tasks() argument
1228 WARN_ON(!rt_rq->rt_nr_running); in dec_rt_tasks()
1229 rt_rq->rt_nr_running -= rt_se_nr_running(rt_se); in dec_rt_tasks()
1230 rt_rq->rr_nr_running -= rt_se_rr_nr_running(rt_se); in dec_rt_tasks()
1232 dec_rt_prio(rt_rq, rt_se_prio(rt_se)); in dec_rt_tasks()
1233 dec_rt_group(rt_se, rt_rq); in dec_rt_tasks()
1272 update_stats_wait_start_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se) in update_stats_wait_start_rt() argument
1287 __update_stats_wait_start(rq_of_rt_rq(rt_rq), p, stats); in update_stats_wait_start_rt()
1291 update_stats_enqueue_sleeper_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se) in update_stats_enqueue_sleeper_rt() argument
1306 __update_stats_enqueue_sleeper(rq_of_rt_rq(rt_rq), p, stats); in update_stats_enqueue_sleeper_rt()
1310 update_stats_enqueue_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, in update_stats_enqueue_rt() argument
1317 update_stats_enqueue_sleeper_rt(rt_rq, rt_se); in update_stats_enqueue_rt()
1321 update_stats_wait_end_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se) in update_stats_wait_end_rt() argument
1336 __update_stats_wait_end(rq_of_rt_rq(rt_rq), p, stats); in update_stats_wait_end_rt()
1340 update_stats_dequeue_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, in update_stats_dequeue_rt() argument
1357 rq_clock(rq_of_rt_rq(rt_rq))); in update_stats_dequeue_rt()
1361 rq_clock(rq_of_rt_rq(rt_rq))); in update_stats_dequeue_rt()
1367 struct rt_rq *rt_rq = rt_rq_of_se(rt_se); in __enqueue_rt_entity() local
1368 struct rt_prio_array *array = &rt_rq->active; in __enqueue_rt_entity()
1369 struct rt_rq *group_rq = group_rt_rq(rt_se); in __enqueue_rt_entity()
1396 inc_rt_tasks(rt_se, rt_rq); in __enqueue_rt_entity()
1401 struct rt_rq *rt_rq = rt_rq_of_se(rt_se); in __dequeue_rt_entity() local
1402 struct rt_prio_array *array = &rt_rq->active; in __dequeue_rt_entity()
1410 dec_rt_tasks(rt_se, rt_rq); in __dequeue_rt_entity()
1458 struct rt_rq *rt_rq = group_rt_rq(rt_se); in dequeue_rt_entity() local
1460 if (rt_rq && rt_rq->rt_nr_running) in dequeue_rt_entity()
1503 requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head) in requeue_rt_entity() argument
1506 struct rt_prio_array *array = &rt_rq->active; in requeue_rt_entity()
1519 struct rt_rq *rt_rq; in requeue_task_rt() local
1522 rt_rq = rt_rq_of_se(rt_se); in requeue_task_rt()
1523 requeue_rt_entity(rt_rq, rt_se, head); in requeue_task_rt()
1683 struct rt_rq *rt_rq = &rq->rt; in set_next_task_rt() local
1687 update_stats_wait_end_rt(rt_rq, rt_se); in set_next_task_rt()
1706 static struct sched_rt_entity *pick_next_rt_entity(struct rt_rq *rt_rq) in pick_next_rt_entity() argument
1708 struct rt_prio_array *array = &rt_rq->active; in pick_next_rt_entity()
1727 struct rt_rq *rt_rq = &rq->rt; in _pick_next_task_rt() local
1730 rt_se = pick_next_rt_entity(rt_rq); in _pick_next_task_rt()
1733 rt_rq = group_rt_rq(rt_se); in _pick_next_task_rt()
1734 } while (rt_rq); in _pick_next_task_rt()
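
pick_next_rt_entity() (source lines 1706-1730) and dec_rt_prio() (source lines 1120-1143) both rely on the rt_prio_array: one queue per priority plus a bitmap of non-empty priorities, so the next entity and the new highest_prio.curr are each a first-set-bit scan away. The sketch below models the array with per-priority counters instead of the kernel's FIFO lists, a simplification made only to keep it short:

#include <stdio.h>

#define MAX_RT_PRIO 100   /* kernel value; prio 0 is the highest RT priority */

struct rt_prio_array_model {
        unsigned long long bitmap[2];        /* one bit per priority level */
        unsigned int nr_queued[MAX_RT_PRIO]; /* stands in for the per-priority task lists */
};

static void set_bit64(unsigned long long *bm, int bit)   { bm[bit / 64] |= 1ULL << (bit % 64); }
static void clear_bit64(unsigned long long *bm, int bit) { bm[bit / 64] &= ~(1ULL << (bit % 64)); }

/* analogue of the bitmap scan: lowest numbered (highest) runnable priority */
static int first_prio(const unsigned long long *bm)
{
        for (int w = 0; w < 2; w++)
                if (bm[w])
                        return w * 64 + __builtin_ctzll(bm[w]);
        return MAX_RT_PRIO;                  /* array is empty */
}

static void enqueue(struct rt_prio_array_model *a, int prio)
{
        a->nr_queued[prio]++;
        set_bit64(a->bitmap, prio);          /* like __enqueue_rt_entity() marking the real bitmap */
}

static void dequeue(struct rt_prio_array_model *a, int prio)
{
        if (--a->nr_queued[prio] == 0)
                clear_bit64(a->bitmap, prio); /* dec_rt_prio() then rescans, lines 1133-1140 */
}

int main(void)
{
        struct rt_prio_array_model a = { { 0, 0 }, { 0 } };

        enqueue(&a, 10);
        enqueue(&a, 40);
        printf("pick prio %d\n", first_prio(a.bitmap));   /* 10, as pick_next_rt_entity() would */
        dequeue(&a, 10);
        printf("new highest %d\n", first_prio(a.bitmap)); /* 40 */
        return 0;
}
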
1754 struct rt_rq *rt_rq = &rq->rt; in put_prev_task_rt() local
1757 update_stats_wait_start_rt(rt_rq, rt_se); in put_prev_task_rt()
2618 struct rt_rq *rt_rq; in task_is_throttled_rt() local
2621 rt_rq = task_group(p)->rt_rq[cpu]; in task_is_throttled_rt()
2623 rt_rq = &cpu_rq(cpu)->rt; in task_is_throttled_rt()
2626 return rt_rq_throttled(rt_rq); in task_is_throttled_rt()
2809 struct rt_rq *rt_rq = tg->rt_rq[i]; in tg_set_rt_bandwidth() local
2811 raw_spin_lock(&rt_rq->rt_runtime_lock); in tg_set_rt_bandwidth()
2812 rt_rq->rt_runtime = rt_runtime; in tg_set_rt_bandwidth()
2813 raw_spin_unlock(&rt_rq->rt_runtime_lock); in tg_set_rt_bandwidth()
2987 struct rt_rq *rt_rq; in print_rt_stats() local
2990 for_each_rt_rq(rt_rq, iter, cpu_rq(cpu)) in print_rt_stats()
2991 print_rt_rq(m, cpu, rt_rq); in print_rt_stats()