Lines matching refs: group (cross-references to the identifier "group" in kernel/sched/psi.c, listed by source line number with the enclosing function)
180 static void group_init(struct psi_group *group) in group_init() argument
184 group->enabled = true; in group_init()
186 seqcount_init(&per_cpu_ptr(group->pcpu, cpu)->seq); in group_init()
187 group->avg_last_update = sched_clock(); in group_init()
188 group->avg_next_update = group->avg_last_update + psi_period; in group_init()
189 INIT_DELAYED_WORK(&group->avgs_work, psi_avgs_work); in group_init()
190 mutex_init(&group->avgs_lock); in group_init()
192 atomic_set(&group->poll_scheduled, 0); in group_init()
193 mutex_init(&group->trigger_lock); in group_init()
194 INIT_LIST_HEAD(&group->triggers); in group_init()
195 group->poll_min_period = U32_MAX; in group_init()
196 group->polling_next_update = ULLONG_MAX; in group_init()
197 init_waitqueue_head(&group->poll_wait); in group_init()
198 timer_setup(&group->poll_timer, poll_timer_fn, 0); in group_init()
199 rcu_assign_pointer(group->poll_task, NULL); in group_init()
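group_init() arms both aggregation paths: the 2s averaging clock (avg_last_update/avg_next_update) and the dormant trigger-polling machinery, where poll_min_period = U32_MAX and polling_next_update = ULLONG_MAX mark it idle until a trigger exists. A minimal userspace model of the state being initialized, reconstructed only from the fields referenced above (field types are simplified assumptions, not the psi_types.h definitions):

#include <stdbool.h>
#include <stdint.h>

/* Simplified model of the psi_group state that group_init() prepares;
 * the real struct lives in include/linux/psi_types.h. */
struct psi_group_model {
	bool     enabled;             /* group->enabled = true */
	uint64_t avg_last_update;     /* sched_clock() at init */
	uint64_t avg_next_update;     /* last + psi_period (2s) */
	int      poll_scheduled;      /* 0: no poll work queued */
	uint64_t poll_min_period;     /* U32_MAX sentinel: no triggers yet */
	uint64_t polling_next_update; /* ULLONG_MAX: polling dormant */
};

static void model_init(struct psi_group_model *g, uint64_t now_ns,
		       uint64_t psi_period_ns)
{
	g->enabled = true;
	g->avg_last_update = now_ns;
	g->avg_next_update = now_ns + psi_period_ns;
	g->poll_scheduled = 0;
	g->poll_min_period = UINT32_MAX;
	g->polling_next_update = UINT64_MAX;
}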
241 static void get_recent_times(struct psi_group *group, int cpu, in get_recent_times() argument
245 struct psi_group_cpu *groupc = per_cpu_ptr(group->pcpu, cpu); in get_recent_times()
297 if (current_work() == &group->avgs_work.work) { in get_recent_times()
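get_recent_times() reads the per-CPU counters under the seqcount initialized at line 186, retrying if a writer raced with the read. A userspace sketch of that reader pattern using C11 atomics; the kernel uses read_seqcount_begin()/read_seqcount_retry(), and this analogue glosses over torn plain loads, which the retry loop makes harmless in practice:

#include <stdatomic.h>
#include <string.h>

struct sampled {
	atomic_uint seq;        /* odd while a writer is mid-update */
	unsigned long times[3];
};

static void read_times(struct sampled *s, unsigned long out[3])
{
	unsigned int begin;

	do {
		/* wait for a quiescent (even) sequence count */
		do {
			begin = atomic_load_explicit(&s->seq,
						     memory_order_acquire);
		} while (begin & 1);

		memcpy(out, s->times, sizeof(s->times));

		/* retry if the writer bumped seq while we copied */
	} while (atomic_load_explicit(&s->seq,
				      memory_order_acquire) != begin);
}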
332 static void collect_percpu_times(struct psi_group *group, in collect_percpu_times() argument
355 get_recent_times(group, cpu, aggregator, times, in collect_percpu_times()
380 group->total[aggregator][s] += in collect_percpu_times()
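collect_percpu_times() folds the per-CPU deltas into group->total[], weighting each CPU by its non-idle time so that idle CPUs do not dilute the pressure numbers. A simplified model of that weighting (function and parameter names here are mine):

#include <stdint.h>

/* Each CPU's stall time counts in proportion to its non-idle time,
 * mirroring the deltas[] / nonidle_total arithmetic in
 * collect_percpu_times(). */
static uint64_t aggregate_stall(const uint64_t stall[],
				const uint64_t nonidle[], int ncpu)
{
	uint64_t weighted = 0, nonidle_total = 0;
	int cpu;

	for (cpu = 0; cpu < ncpu; cpu++) {
		nonidle_total += nonidle[cpu];
		weighted += stall[cpu] * nonidle[cpu];
	}

	/* guard against a sample window where every CPU was idle */
	return weighted / (nonidle_total ? nonidle_total : 1);
}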
387 static u64 update_averages(struct psi_group *group, u64 now) in update_averages() argument
395 expires = group->avg_next_update; in update_averages()
407 period = now - (group->avg_last_update + (missed_periods * psi_period)); in update_averages()
408 group->avg_last_update = now; in update_averages()
413 sample = group->total[PSI_AVGS][s] - group->avg_total[s]; in update_averages()
433 group->avg_total[s] += sample; in update_averages()
434 calc_avgs(group->avg[s], missed_periods, sample, period); in update_averages()
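update_averages() turns the stall-time delta since the last period (line 413) into a percentage of the elapsed period and folds it into the 10s/60s/300s running averages; calc_avgs() does this in fixed point with precomputed decay factors, and missed periods decay the average toward zero. A floating-point model of the same math, with constants following PSI_FREQ = 2s and the 10s window (compile with -lm):

#include <math.h>

#define PSI_WIN_S  10.0   /* 10s averaging window */
#define PSI_FREQ_S  2.0   /* sampling period */

static double update_avg(double avg, double sample_pct, int missed_periods)
{
	double e = exp(-PSI_FREQ_S / PSI_WIN_S);

	/* periods with no samples decay the average toward zero */
	while (missed_periods-- > 0)
		avg *= e;

	/* exponentially weighted moving average of the new sample */
	return avg * e + sample_pct * (1.0 - e);
}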
443 struct psi_group *group; in psi_avgs_work() local
448 group = container_of(dwork, struct psi_group, avgs_work); in psi_avgs_work()
450 mutex_lock(&group->avgs_lock); in psi_avgs_work()
454 collect_percpu_times(group, PSI_AVGS, &changed_states); in psi_avgs_work()
462 if (now >= group->avg_next_update) in psi_avgs_work()
463 group->avg_next_update = update_averages(group, now); in psi_avgs_work()
467 group->avg_next_update - now) + 1); in psi_avgs_work()
470 mutex_unlock(&group->avgs_lock); in psi_avgs_work()
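psi_avgs_work() collects the per-CPU times, updates the averages once the deadline passes, and re-arms itself only while there was recent activity; for an idle group the 2s tick lapses, and psi_group_change() restarts it on the next state change (line 858 below). A control-flow model of the re-arm decision, where UINT64_MAX stands for "do not re-arm":

#include <stdbool.h>
#include <stdint.h>

#define PSI_PERIOD_NS 2000000000ULL	/* 2s averaging period */

static uint64_t avgs_tick(uint64_t now, uint64_t *avg_next_update,
			  bool activity)
{
	if (now >= *avg_next_update)
		*avg_next_update = now + PSI_PERIOD_NS; /* update_averages() */

	return activity ? *avg_next_update : UINT64_MAX;
}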
519 static void init_triggers(struct psi_group *group, u64 now) in init_triggers() argument
523 list_for_each_entry(t, &group->triggers, node) in init_triggers()
525 group->total[PSI_POLL][t->state], 0); in init_triggers()
526 memcpy(group->polling_total, group->total[PSI_POLL], in init_triggers()
527 sizeof(group->polling_total)); in init_triggers()
528 group->polling_next_update = now + group->poll_min_period; in init_triggers()
531 static u64 update_triggers(struct psi_group *group, u64 now) in update_triggers() argument
535 u64 *total = group->total[PSI_POLL]; in update_triggers()
541 list_for_each_entry(t, &group->triggers, node) { in update_triggers()
545 new_stall = group->polling_total[t->state] != total[t->state]; in update_triggers()
587 memcpy(group->polling_total, total, in update_triggers()
588 sizeof(group->polling_total)); in update_triggers()
590 return now + group->poll_min_period; in update_triggers()
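update_triggers() compares each trigger's stall-time growth inside its window against the trigger's threshold, rate-limited to one event per window; the polling_total snapshot (memcpy at lines 587-588) is what detects fresh stall activity on the next pass. A model of the per-trigger decision, with the growth value, computed by window_update() in the kernel, passed in as a parameter:

#include <stdbool.h>
#include <stdint.h>

struct trig_model {
	uint64_t threshold_ns;  /* stall time that fires the trigger */
	uint64_t win_ns;        /* trigger window size */
	uint64_t last_event;    /* one-event-per-window rate limiting */
};

static bool should_fire(struct trig_model *t, uint64_t growth_ns,
			uint64_t now)
{
	if (growth_ns < t->threshold_ns)
		return false;
	if (now < t->last_event + t->win_ns)    /* rate limit */
		return false;
	t->last_event = now;    /* kernel then wakes the trigger waitqueue */
	return true;
}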
594 static void psi_schedule_poll_work(struct psi_group *group, unsigned long delay, in psi_schedule_poll_work() argument
603 if (atomic_xchg(&group->poll_scheduled, 1) && !force) in psi_schedule_poll_work()
608 task = rcu_dereference(group->poll_task); in psi_schedule_poll_work()
614 mod_timer(&group->poll_timer, jiffies + delay); in psi_schedule_poll_work()
616 atomic_set(&group->poll_scheduled, 0); in psi_schedule_poll_work()
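The atomic_xchg() at line 603 makes scheduling idempotent: the first caller flips poll_scheduled from 0 to 1 and arms the timer, while concurrent callers see 1 and back off (unless forced). The same idiom in portable C11:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int poll_scheduled;

static bool try_schedule_poll(bool force)
{
	if (atomic_exchange(&poll_scheduled, 1) && !force)
		return false;   /* someone already queued the work */

	/* ...arm the timer here; if no poll task exists, the kernel
	 * resets the flag instead, as at line 616 above... */
	return true;
}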
621 static void psi_poll_work(struct psi_group *group) in psi_poll_work() argument
627 mutex_lock(&group->trigger_lock); in psi_poll_work()
631 if (now > group->polling_until) { in psi_poll_work()
641 atomic_set(&group->poll_scheduled, 0); in psi_poll_work()
668 collect_percpu_times(group, PSI_POLL, &changed_states); in psi_poll_work()
670 if (changed_states & group->poll_states) { in psi_poll_work()
672 if (now > group->polling_until) in psi_poll_work()
673 init_triggers(group, now); in psi_poll_work()
680 group->polling_until = now + in psi_poll_work()
681 group->poll_min_period * UPDATES_PER_WINDOW; in psi_poll_work()
684 if (now > group->polling_until) { in psi_poll_work()
685 group->polling_next_update = ULLONG_MAX; in psi_poll_work()
689 if (now >= group->polling_next_update) in psi_poll_work()
690 group->polling_next_update = update_triggers(group, now); in psi_poll_work()
692 psi_schedule_poll_work(group, in psi_poll_work()
693 nsecs_to_jiffies(group->polling_next_update - now) + 1, in psi_poll_work()
697 mutex_unlock(&group->trigger_lock); in psi_poll_work()
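psi_poll_work() adds hysteresis: any change in a monitored state extends polling_until by a full window (UPDATES_PER_WINDOW, 10 in psi.c, times the shortest trigger period), so polling briefly outlives a pressure spike instead of thrashing on and off; once the deadline passes quietly, polling_next_update returns to ULLONG_MAX and the group goes dormant. A model of that decision:

#include <stdint.h>

#define UPDATES_PER_WINDOW 10   /* matches the kernel constant */

static uint64_t next_poll(uint64_t now, uint64_t *polling_until,
			  uint64_t min_period_ns, int changed_states)
{
	if (changed_states)     /* keep polling one more full window */
		*polling_until = now + min_period_ns * UPDATES_PER_WINDOW;

	if (now > *polling_until)
		return UINT64_MAX;      /* dormant until a state change */

	/* simplified: the kernel gets this from update_triggers() */
	return now + min_period_ns;
}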
702 struct psi_group *group = (struct psi_group *)data; in psi_poll_worker() local
707 wait_event_interruptible(group->poll_wait, in psi_poll_worker()
708 atomic_cmpxchg(&group->poll_wakeup, 1, 0) || in psi_poll_worker()
713 psi_poll_work(group); in psi_poll_worker()
720 struct psi_group *group = from_timer(group, t, poll_timer); in poll_timer_fn() local
722 atomic_set(&group->poll_wakeup, 1); in poll_timer_fn()
723 wake_up_interruptible(&group->poll_wait); in poll_timer_fn()
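poll_timer_fn() and psi_poll_worker() hand off through an atomic flag plus a waitqueue: the timer sets poll_wakeup and wakes the kthread, which consumes the flag with a compare-and-swap. A pthread analogue of the handshake:

#include <pthread.h>
#include <stdatomic.h>

static atomic_int wakeup;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;

static void timer_side(void)              /* poll_timer_fn() */
{
	atomic_store(&wakeup, 1);
	pthread_mutex_lock(&lock);
	pthread_cond_signal(&cond);       /* wake_up_interruptible() */
	pthread_mutex_unlock(&lock);
}

static void worker_wait(void)             /* psi_poll_worker() */
{
	int expected = 1;

	pthread_mutex_lock(&lock);
	/* atomically claim the wakeup (1 -> 0), else sleep */
	while (!atomic_compare_exchange_strong(&wakeup, &expected, 0)) {
		expected = 1;
		pthread_cond_wait(&cond, &lock);
	}
	pthread_mutex_unlock(&lock);
}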
755 static void psi_group_change(struct psi_group *group, int cpu, in psi_group_change() argument
764 groupc = per_cpu_ptr(group->pcpu, cpu); in psi_group_change()
815 if (!group->enabled) { in psi_group_change()
854 if (state_mask & group->poll_states) in psi_group_change()
855 psi_schedule_poll_work(group, 1, false); in psi_group_change()
857 if (wake_clock && !delayed_work_pending(&group->avgs_work)) in psi_group_change()
858 schedule_delayed_work(&group->avgs_work, PSI_FREQ); in psi_group_change()
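psi_group_change() applies the clear/set bitmasks to the per-CPU task counts, recomputes the state mask, and kicks both aggregators: poll work immediately when a monitored state is involved, and the 2s averaging work if it had lapsed. The bookkeeping core modeled in isolation (the count of four task states, IOWAIT/MEMSTALL/RUNNING/MEMSTALL_RUNNING in kernels of this era, is an assumption here):

#include <stdint.h>

#define NR_TASK_COUNTS 4

static void apply_change(unsigned int tasks[NR_TASK_COUNTS],
			 unsigned int clear, unsigned int set)
{
	unsigned int t;

	for (t = 0; t < NR_TASK_COUNTS; t++) {
		if (clear & (1u << t))
			tasks[t]--;     /* kernel also warns on underflow */
		if (set & (1u << t))
			tasks[t]++;
	}
}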
888 struct psi_group *group; in psi_task_change() local
898 group = task_psi_group(task); in psi_task_change()
900 psi_group_change(group, cpu, clear, set, now, true); in psi_task_change()
901 } while ((group = group->parent)); in psi_task_change()
907 struct psi_group *group, *common = NULL; in psi_task_switch() local
918 group = task_psi_group(next); in psi_task_switch()
920 if (per_cpu_ptr(group->pcpu, cpu)->state_mask & in psi_task_switch()
922 common = group; in psi_task_switch()
926 psi_group_change(group, cpu, 0, TSK_ONCPU, now, true); in psi_task_switch()
927 } while ((group = group->parent)); in psi_task_switch()
960 group = task_psi_group(prev); in psi_task_switch()
962 if (group == common) in psi_task_switch()
964 psi_group_change(group, cpu, clear, set, now, wake_clock); in psi_task_switch()
965 } while ((group = group->parent)); in psi_task_switch()
975 for (; group; group = group->parent) in psi_task_switch()
976 psi_group_change(group, cpu, clear, set, now, wake_clock); in psi_task_switch()
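The two hierarchy walks above implement a common-ancestor optimization: TSK_ONCPU is set up next's hierarchy only until an ancestor that already has it (shared with prev), and the clear pass over prev's hierarchy stops at that same group, so shared ancestors see no spurious off/on flicker during a context switch. A standalone model of the walk:

#include <stddef.h>

struct grp {
	struct grp *parent;
	int oncpu;      /* stand-in for PSI_ONCPU in state_mask */
};

static void switch_oncpu(struct grp *prev_leaf, struct grp *next_leaf)
{
	struct grp *g, *common = NULL;

	/* set pass: stop at the first ancestor already on-CPU */
	for (g = next_leaf; g; g = g->parent) {
		if (g->oncpu) {
			common = g;
			break;
		}
		g->oncpu = 1;
	}

	/* clear pass: only prev's private part of the tree */
	for (g = prev_leaf; g && g != common; g = g->parent)
		g->oncpu = 0;
}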
985 struct psi_group *group; in psi_account_irqtime() local
994 group = task_psi_group(task); in psi_account_irqtime()
996 if (!group->enabled) in psi_account_irqtime()
999 groupc = per_cpu_ptr(group->pcpu, cpu); in psi_account_irqtime()
1008 if (group->poll_states & (1 << PSI_IRQ_FULL)) in psi_account_irqtime()
1009 psi_schedule_poll_work(group, 1, false); in psi_account_irqtime()
1010 } while ((group = group->parent)); in psi_account_irqtime()
1175 void psi_cgroup_restart(struct psi_group *group) in psi_cgroup_restart() argument
1194 if (!group->enabled) in psi_cgroup_restart()
1204 psi_group_change(group, cpu, 0, 0, now, true); in psi_cgroup_restart()
1210 int psi_show(struct seq_file *m, struct psi_group *group, enum psi_res res) in psi_show() argument
1220 mutex_lock(&group->avgs_lock); in psi_show()
1222 collect_percpu_times(group, PSI_AVGS, NULL); in psi_show()
1223 if (now >= group->avg_next_update) in psi_show()
1224 group->avg_next_update = update_averages(group, now); in psi_show()
1225 mutex_unlock(&group->avgs_lock); in psi_show()
1237 if (!(group == &psi_system && res == PSI_CPU && full)) { in psi_show()
1239 avg[w] = group->avg[res * 2 + full][w]; in psi_show()
1240 total = div_u64(group->total[PSI_AVGS][res * 2 + full], in psi_show()
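psi_show() exports the same averages through /proc/pressure/{cpu,memory,io} and the per-cgroup pressure files; the exclusion at line 1237 exists because "full" is undefined for CPU at the system level, so that line is reported as zeros. Reading the interface from userspace:

#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/pressure/memory", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* e.g. "some avg10=0.12 avg60=0.05 avg300=0.01 total=417963" */
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}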
1255 struct psi_trigger *psi_trigger_create(struct psi_group *group, in psi_trigger_create() argument
1293 t->group = group; in psi_trigger_create()
1298 group->total[PSI_POLL][t->state], 0); in psi_trigger_create()
1305 mutex_lock(&group->trigger_lock); in psi_trigger_create()
1307 if (!rcu_access_pointer(group->poll_task)) { in psi_trigger_create()
1310 task = kthread_create(psi_poll_worker, group, "psimon"); in psi_trigger_create()
1313 mutex_unlock(&group->trigger_lock); in psi_trigger_create()
1316 atomic_set(&group->poll_wakeup, 0); in psi_trigger_create()
1318 rcu_assign_pointer(group->poll_task, task); in psi_trigger_create()
1321 list_add(&t->node, &group->triggers); in psi_trigger_create()
1322 group->poll_min_period = min(group->poll_min_period, in psi_trigger_create()
1324 group->nr_triggers[t->state]++; in psi_trigger_create()
1325 group->poll_states |= (1 << t->state); in psi_trigger_create()
1327 mutex_unlock(&group->trigger_lock); in psi_trigger_create()
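psi_trigger_create() parses a trigger written to those same files, lazily spawns the "psimon" kthread on first use, and lowers poll_min_period to the new trigger's update period. The userspace side is documented in Documentation/accounting/psi.rst: write "some|full <threshold_us> <window_us>" and poll() for POLLPRI:

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* fire when >= 150ms of "some" memory stall per 1s window */
	const char trig[] = "some 150000 1000000";
	struct pollfd fds;

	fds.fd = open("/proc/pressure/memory", O_RDWR | O_NONBLOCK);
	if (fds.fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fds.fd, trig, strlen(trig) + 1) < 0) {
		perror("write");
		return 1;
	}
	fds.events = POLLPRI;

	for (;;) {
		if (poll(&fds, 1, -1) < 0) {
			perror("poll");
			return 1;
		}
		if (fds.revents & POLLERR) {
			fprintf(stderr, "event source is gone\n");
			return 1;
		}
		if (fds.revents & POLLPRI)
			printf("memory pressure event\n");
	}
}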
1334 struct psi_group *group; in psi_trigger_destroy() local
1344 group = t->group; in psi_trigger_destroy()
1352 mutex_lock(&group->trigger_lock); in psi_trigger_destroy()
1359 group->nr_triggers[t->state]--; in psi_trigger_destroy()
1360 if (!group->nr_triggers[t->state]) in psi_trigger_destroy()
1361 group->poll_states &= ~(1 << t->state); in psi_trigger_destroy()
1363 list_for_each_entry(tmp, &group->triggers, node) in psi_trigger_destroy()
1366 group->poll_min_period = period; in psi_trigger_destroy()
1368 if (group->poll_states == 0) { in psi_trigger_destroy()
1369 group->polling_until = 0; in psi_trigger_destroy()
1371 group->poll_task, in psi_trigger_destroy()
1372 lockdep_is_held(&group->trigger_lock)); in psi_trigger_destroy()
1373 rcu_assign_pointer(group->poll_task, NULL); in psi_trigger_destroy()
1374 del_timer(&group->poll_timer); in psi_trigger_destroy()
1378 mutex_unlock(&group->trigger_lock); in psi_trigger_destroy()
1396 atomic_set(&group->poll_scheduled, 0); in psi_trigger_destroy()
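psi_trigger_destroy() unwinds the create path: drop the trigger, recompute nr_triggers/poll_states/poll_min_period from the survivors, and tear down the psimon kthread once the last monitored state is gone. The kthread is stopped only after trigger_lock is released, to avoid deadlocking against psi_poll_work(), which is why the poll_scheduled reset at line 1396 sits outside the locked region. The min-period recomputation in isolation (a plain array stands in for the trigger list):

#include <stdint.h>

static uint64_t recompute_min_period(const uint64_t period_ns[], int n)
{
	uint64_t min = UINT64_MAX;  /* no triggers left: stay dormant */
	int i;

	for (i = 0; i < n; i++)
		if (period_ns[i] < min)
			min = period_ns[i];
	return min;
}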