Lines Matching refs:rps
150 static u8 wait_for_freq(struct intel_rps *rps, u8 freq, int timeout_ms) in wait_for_freq() argument
165 act = read_cagf(rps); in wait_for_freq()
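For context, wait_for_freq() above polls the actual hardware frequency (CAGF) until it reaches the requested pstate or the timeout expires. A minimal sketch of that polling idiom, assuming read_cagf() returns the current actual frequency in pstate units; the real helper additionally waits for the reading to stabilise rather than returning on the first match:

	/* Sketch only: poll CAGF until it reports freq or timeout_ms elapses. */
	static u8 wait_for_freq_sketch(struct intel_rps *rps, u8 freq, int timeout_ms)
	{
		const unsigned long end = jiffies + msecs_to_jiffies(timeout_ms);
		u8 act;

		do {
			act = read_cagf(rps);		/* actual frequency reported by hw */
			if (act == freq)
				break;
			usleep_range(1000, 2000);	/* give the PCU time to react */
		} while (!time_after(jiffies, end));

		return act;	/* last observed frequency, matching or not */
	}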
187 static u8 rps_set_check(struct intel_rps *rps, u8 freq) in rps_set_check() argument
189 mutex_lock(&rps->lock); in rps_set_check()
190 GEM_BUG_ON(!intel_rps_is_active(rps)); in rps_set_check()
191 if (wait_for(!intel_rps_set(rps, freq), 50)) { in rps_set_check()
192 mutex_unlock(&rps->lock); in rps_set_check()
195 GEM_BUG_ON(rps->last_freq != freq); in rps_set_check()
196 mutex_unlock(&rps->lock); in rps_set_check()
198 return wait_for_freq(rps, freq, 50); in rps_set_check()
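Taken together, the rps_set_check() fragments above reconstruct the set-and-verify helper almost in full; only the early-return value on failure is not shown in the listing and is assumed here:

	static u8 rps_set_check(struct intel_rps *rps, u8 freq)
	{
		mutex_lock(&rps->lock);
		GEM_BUG_ON(!intel_rps_is_active(rps));

		/* Request the new pstate; bail out if the PCU refuses within 50ms. */
		if (wait_for(!intel_rps_set(rps, freq), 50)) {
			mutex_unlock(&rps->lock);
			return 0;	/* assumed failure value, not shown in the listing */
		}
		GEM_BUG_ON(rps->last_freq != freq);
		mutex_unlock(&rps->lock);

		/* Then wait for the hardware to actually report that frequency. */
		return wait_for_freq(rps, freq, 50);
	}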
201 static void show_pstate_limits(struct intel_rps *rps) in show_pstate_limits() argument
203 struct drm_i915_private *i915 = rps_to_i915(rps); in show_pstate_limits()
208 intel_uncore_read(rps_to_uncore(rps), in show_pstate_limits()
213 intel_uncore_read(rps_to_uncore(rps), in show_pstate_limits()
221 struct intel_rps *rps = &gt->rps; in live_rps_clock_interval() local
228 if (!intel_rps_is_enabled(rps) || GRAPHICS_VER(gt->i915) < 6) in live_rps_clock_interval()
235 saved_work = rps->work.func; in live_rps_clock_interval()
236 rps->work.func = dummy_rps_work; in live_rps_clock_interval()
239 intel_rps_disable(&gt->rps); in live_rps_clock_interval()
356 intel_rps_enable(&gt->rps); in live_rps_clock_interval()
362 rps->work.func = saved_work; in live_rps_clock_interval()
373 struct intel_rps *rps = &gt->rps; in live_rps_control() local
387 if (!intel_rps_is_enabled(rps)) in live_rps_control()
397 saved_work = rps->work.func; in live_rps_control()
398 rps->work.func = dummy_rps_work; in live_rps_control()
432 if (rps_set_check(rps, rps->min_freq) != rps->min_freq) { in live_rps_control()
434 engine->name, rps->min_freq, read_cagf(rps)); in live_rps_control()
437 show_pstate_limits(rps); in live_rps_control()
442 for (f = rps->min_freq + 1; f < rps->max_freq; f++) { in live_rps_control()
443 if (rps_set_check(rps, f) < f) in live_rps_control()
447 limit = rps_set_check(rps, f); in live_rps_control()
449 if (rps_set_check(rps, rps->min_freq) != rps->min_freq) { in live_rps_control()
451 engine->name, rps->min_freq, read_cagf(rps)); in live_rps_control()
454 show_pstate_limits(rps); in live_rps_control()
460 max = rps_set_check(rps, limit); in live_rps_control()
464 min = rps_set_check(rps, rps->min_freq); in live_rps_control()
472 rps->min_freq, intel_gpu_freq(rps, rps->min_freq), in live_rps_control()
473 rps->max_freq, intel_gpu_freq(rps, rps->max_freq), in live_rps_control()
474 limit, intel_gpu_freq(rps, limit), in live_rps_control()
477 if (limit == rps->min_freq) { in live_rps_control()
480 show_pstate_limits(rps); in live_rps_control()
495 rps->work.func = saved_work; in live_rps_control()
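live_rps_control() walks every pstate between min and max to find the highest frequency the PCU will actually grant. A condensed sketch of that sweep, reconstructed from the fragments above; the helper name, the surrounding spinner and the error reporting are illustrative or omitted:

	static int sweep_pstates_sketch(struct intel_rps *rps, u8 *limit)
	{
		u8 f;

		if (rps_set_check(rps, rps->min_freq) != rps->min_freq)
			return -EINVAL;		/* could not hold the floor pstate */

		for (f = rps->min_freq + 1; f < rps->max_freq; f++) {
			if (rps_set_check(rps, f) < f)
				break;		/* PCU clamped the request */
		}
		*limit = rps_set_check(rps, f);	/* highest pstate actually granted */

		if (rps_set_check(rps, rps->min_freq) != rps->min_freq)
			return -EINVAL;		/* and we must be able to drop back down */

		return 0;
	}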
500 static void show_pcu_config(struct intel_rps *rps) in show_pcu_config() argument
502 struct drm_i915_private *i915 = rps_to_i915(rps); in show_pcu_config()
510 min_gpu_freq = rps->min_freq; in show_pcu_config()
511 max_gpu_freq = rps->max_freq; in show_pcu_config()
518 wakeref = intel_runtime_pm_get(rps_to_uncore(rps)->rpm); in show_pcu_config()
524 snb_pcode_read(rps_to_gt(rps)->uncore, GEN6_PCODE_READ_MIN_FREQ_TABLE, in show_pcu_config()
533 intel_runtime_pm_put(rps_to_uncore(rps)->rpm, wakeref); in show_pcu_config()
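show_pcu_config() dumps the PCU frequency table over [min_gpu_freq, max_gpu_freq] while holding a runtime-PM wakeref. A sketch of that loop body, based on the fragments above and using the surrounding function's locals; the printed column scaling is hardware dependent and left out:

	/* Sketch: query the PCU for the IA/ring frequencies paired with each pstate. */
	wakeref = intel_runtime_pm_get(rps_to_uncore(rps)->rpm);
	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
		u32 ia_freq = gpu_freq;

		snb_pcode_read(rps_to_gt(rps)->uncore, GEN6_PCODE_READ_MIN_FREQ_TABLE,
			       &ia_freq, NULL);

		pr_info("%5d  %5d  %5d\n",
			gpu_freq, (ia_freq >> 0) & 0xff, (ia_freq >> 8) & 0xff);
	}
	intel_runtime_pm_put(rps_to_uncore(rps)->rpm, wakeref);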
549 static u64 measure_frequency_at(struct intel_rps *rps, u32 *cntr, int *freq) in measure_frequency_at() argument
554 *freq = rps_set_check(rps, *freq); in measure_frequency_at()
557 *freq = (*freq + read_cagf(rps)) / 2; in measure_frequency_at()
578 static u64 measure_cs_frequency_at(struct intel_rps *rps, in measure_cs_frequency_at() argument
585 *freq = rps_set_check(rps, *freq); in measure_cs_frequency_at()
588 *freq = (*freq + read_cagf(rps)) / 2; in measure_cs_frequency_at()
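measure_frequency_at() and measure_cs_frequency_at() share one idiom: pin the pstate with rps_set_check(), sample a free-running counter for a fixed window, then report the average of the requested pstate and the CAGF read afterwards so any drift during the window is split evenly. A simplified sketch of that pattern, assuming cntr points at a CPU-visible counter incremented by the GPU; the sample window and single-sample measurement are illustrative (the real helpers take the median of several samples):

	static u64 measure_counter_at_sketch(struct intel_rps *rps, u32 *cntr, int *freq)
	{
		u64 dc, dt;
		u32 c0;

		*freq = rps_set_check(rps, *freq);	/* pin the requested pstate */

		dt = ktime_get_raw_ns();
		c0 = READ_ONCE(*cntr);
		usleep_range(10000, 11000);		/* illustrative sample window */
		dc = READ_ONCE(*cntr) - c0;		/* u32 wrap-safe delta */
		dt = ktime_get_raw_ns() - dt;

		*freq = (*freq + read_cagf(rps)) / 2;	/* split request/actual drift */

		return div64_u64(dc * NSEC_PER_SEC, dt);	/* counter ticks per second */
	}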
604 struct intel_rps *rps = &gt->rps; in live_rps_frequency_cs() local
616 if (!intel_rps_is_enabled(rps)) in live_rps_frequency_cs()
626 saved_work = rps->work.func; in live_rps_frequency_cs()
627 rps->work.func = dummy_rps_work; in live_rps_frequency_cs()
671 min.freq = rps->min_freq; in live_rps_frequency_cs()
672 min.count = measure_cs_frequency_at(rps, engine, &min.freq); in live_rps_frequency_cs()
674 max.freq = rps->max_freq; in live_rps_frequency_cs()
675 max.count = measure_cs_frequency_at(rps, engine, &max.freq); in live_rps_frequency_cs()
679 min.count, intel_gpu_freq(rps, min.freq), in live_rps_frequency_cs()
680 max.count, intel_gpu_freq(rps, max.freq), in live_rps_frequency_cs()
693 show_pcu_config(rps); in live_rps_frequency_cs()
695 for (f = min.freq + 1; f <= rps->max_freq; f++) { in live_rps_frequency_cs()
699 count = measure_cs_frequency_at(rps, engine, &act); in live_rps_frequency_cs()
705 act, intel_gpu_freq(rps, act), count, in live_rps_frequency_cs()
731 rps->work.func = saved_work; in live_rps_frequency_cs()
743 struct intel_rps *rps = &gt->rps; in live_rps_frequency_srm() local
755 if (!intel_rps_is_enabled(rps)) in live_rps_frequency_srm()
765 saved_work = rps->work.func; in live_rps_frequency_srm()
766 rps->work.func = dummy_rps_work; in live_rps_frequency_srm()
809 min.freq = rps->min_freq; in live_rps_frequency_srm()
810 min.count = measure_frequency_at(rps, cntr, &min.freq); in live_rps_frequency_srm()
812 max.freq = rps->max_freq; in live_rps_frequency_srm()
813 max.count = measure_frequency_at(rps, cntr, &max.freq); in live_rps_frequency_srm()
817 min.count, intel_gpu_freq(rps, min.freq), in live_rps_frequency_srm()
818 max.count, intel_gpu_freq(rps, max.freq), in live_rps_frequency_srm()
831 show_pcu_config(rps); in live_rps_frequency_srm()
833 for (f = min.freq + 1; f <= rps->max_freq; f++) { in live_rps_frequency_srm()
837 count = measure_frequency_at(rps, cntr, &act); in live_rps_frequency_srm()
843 act, intel_gpu_freq(rps, act), count, in live_rps_frequency_srm()
869 rps->work.func = saved_work; in live_rps_frequency_srm()
877 static void sleep_for_ei(struct intel_rps *rps, int timeout_us) in sleep_for_ei() argument
883 rps_disable_interrupts(rps); in sleep_for_ei()
884 GEM_BUG_ON(rps->pm_iir); in sleep_for_ei()
885 rps_enable_interrupts(rps); in sleep_for_ei()
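sleep_for_ei() resets the interrupt bookkeeping between measurements: the listing shows the disable/assert/enable sequence, and the sleeps bracketing it are reconstructed here as assumptions (roughly one evaluation interval before the reset to flush a stale EI, and a little over one afterwards so the hardware can sample a fresh one):

	static void sleep_for_ei(struct intel_rps *rps, int timeout_us)
	{
		/* Flush any previous, partially sampled EI (assumed bounds). */
		usleep_range(timeout_us, 2 * timeout_us);

		/* Reset the interrupt status so only fresh events are latched. */
		rps_disable_interrupts(rps);
		GEM_BUG_ON(rps->pm_iir);
		rps_enable_interrupts(rps);

		/* And wait for the hardware to complete at least one new EI. */
		usleep_range(2 * timeout_us, 3 * timeout_us);
	}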
891 static int __rps_up_interrupt(struct intel_rps *rps, in __rps_up_interrupt() argument
902 rps_set_check(rps, rps->min_freq); in __rps_up_interrupt()
919 if (!intel_rps_is_active(rps)) { in __rps_up_interrupt()
927 if (!(rps->pm_events & GEN6_PM_RP_UP_THRESHOLD)) { in __rps_up_interrupt()
934 if (rps->last_freq != rps->min_freq) { in __rps_up_interrupt()
945 sleep_for_ei(rps, timeout); in __rps_up_interrupt()
951 if (rps->cur_freq != rps->min_freq) { in __rps_up_interrupt()
953 engine->name, intel_rps_read_actual_frequency(rps)); in __rps_up_interrupt()
957 if (!(rps->pm_iir & GEN6_PM_RP_UP_THRESHOLD)) { in __rps_up_interrupt()
959 engine->name, rps->pm_iir, in __rps_up_interrupt()
969 static int __rps_down_interrupt(struct intel_rps *rps, in __rps_down_interrupt() argument
975 rps_set_check(rps, rps->max_freq); in __rps_down_interrupt()
977 if (!(rps->pm_events & GEN6_PM_RP_DOWN_THRESHOLD)) { in __rps_down_interrupt()
983 if (rps->last_freq != rps->max_freq) { in __rps_down_interrupt()
993 sleep_for_ei(rps, timeout); in __rps_down_interrupt()
995 if (rps->cur_freq != rps->max_freq) { in __rps_down_interrupt()
998 intel_rps_read_actual_frequency(rps)); in __rps_down_interrupt()
1002 if (!(rps->pm_iir & (GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT))) { in __rps_down_interrupt()
1004 engine->name, rps->pm_iir, in __rps_down_interrupt()
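Both interrupt helpers follow the same shape: park the request at one extreme, confirm the matching threshold event is unmasked and the request stuck, sleep for at least one evaluation interval, then look for the raised bit in rps->pm_iir (the worker having been swapped for dummy_rps_work, it cannot consume the interrupt first). A trimmed sketch of the down-threshold variant; the helper name, return codes and omitted error messages are illustrative:

	static int rps_down_interrupt_sketch(struct intel_rps *rps, int timeout_us)
	{
		rps_set_check(rps, rps->max_freq);	/* park the request at max, GT idle */

		if (!(rps->pm_events & GEN6_PM_RP_DOWN_THRESHOLD))
			return -EINVAL;			/* down event not unmasked */
		if (rps->last_freq != rps->max_freq)
			return -EIO;			/* the request did not stick */

		sleep_for_ei(rps, timeout_us);		/* let the hw sample an idle EI */

		if (rps->cur_freq != rps->max_freq)
			return -EIO;			/* someone else moved the frequency */

		/* The stubbed worker cannot clear pm_iir, so the bit must still be latched. */
		if (!(rps->pm_iir & (GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT)))
			return -EINVAL;

		return 0;
	}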
1020 struct intel_rps *rps = &gt->rps; in live_rps_interrupt() local
1032 if (!intel_rps_has_interrupts(rps) || GRAPHICS_VER(gt->i915) < 6) in live_rps_interrupt()
1036 pm_events = rps->pm_events; in live_rps_interrupt()
1047 saved_work = rps->work.func; in live_rps_interrupt()
1048 rps->work.func = dummy_rps_work; in live_rps_interrupt()
1054 GEM_BUG_ON(intel_rps_is_active(rps)); in live_rps_interrupt()
1058 err = __rps_up_interrupt(rps, engine, &spin); in live_rps_interrupt()
1072 err = __rps_down_interrupt(rps, engine); in live_rps_interrupt()
1088 rps->work.func = saved_work; in live_rps_interrupt()
1106 static u64 measure_power(struct intel_rps *rps, int *freq) in measure_power() argument
1114 *freq = (*freq + intel_rps_read_actual_frequency(rps)) / 2; in measure_power()
1121 static u64 measure_power_at(struct intel_rps *rps, int *freq) in measure_power_at() argument
1123 *freq = rps_set_check(rps, *freq); in measure_power_at()
1124 return measure_power(rps, freq); in measure_power_at()
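measure_power_at() is fully reconstructable from the fragments above: pin the pstate, then hand off to measure_power(), which samples energy over a fixed window and folds the observed CAGF back into *freq:

	static u64 measure_power_at(struct intel_rps *rps, int *freq)
	{
		*freq = rps_set_check(rps, *freq);	/* pin the requested pstate */
		return measure_power(rps, freq);	/* sample energy; on return *freq is
							 * the average of request and CAGF */
	}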
1130 struct intel_rps *rps = &gt->rps; in live_rps_power() local
1143 if (!intel_rps_is_enabled(rps) || GRAPHICS_VER(gt->i915) < 6) in live_rps_power()
1153 saved_work = rps->work.func; in live_rps_power()
1154 rps->work.func = dummy_rps_work; in live_rps_power()
1189 max.freq = rps->max_freq; in live_rps_power()
1190 max.power = measure_power_at(rps, &max.freq); in live_rps_power()
1192 min.freq = rps->min_freq; in live_rps_power()
1193 min.power = measure_power_at(rps, &min.freq); in live_rps_power()
1200 min.power, intel_gpu_freq(rps, min.freq), in live_rps_power()
1201 max.power, intel_gpu_freq(rps, max.freq)); in live_rps_power()
1205 min.freq, intel_gpu_freq(rps, min.freq), in live_rps_power()
1206 max.freq, intel_gpu_freq(rps, max.freq)); in live_rps_power()
1226 rps->work.func = saved_work; in live_rps_power()
1234 struct intel_rps *rps = &gt->rps; in live_rps_dynamic() local
1247 if (!intel_rps_is_enabled(rps) || GRAPHICS_VER(gt->i915) < 6) in live_rps_dynamic()
1253 if (intel_rps_has_interrupts(rps)) in live_rps_dynamic()
1255 if (intel_rps_uses_timer(rps)) in live_rps_dynamic()
1269 GEM_BUG_ON(intel_rps_is_active(rps)); in live_rps_dynamic()
1270 rps->cur_freq = rps->min_freq; in live_rps_dynamic()
1274 GEM_BUG_ON(rps->last_freq != rps->min_freq); in live_rps_dynamic()
1287 max.freq = wait_for_freq(rps, rps->max_freq, 500); in live_rps_dynamic()
1293 min.freq = wait_for_freq(rps, rps->min_freq, 2000); in live_rps_dynamic()
1298 max.freq, intel_gpu_freq(rps, max.freq), in live_rps_dynamic()
1300 min.freq, intel_gpu_freq(rps, min.freq), in live_rps_dynamic()
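Finally, live_rps_dynamic() exercises autotuning end to end: with a spinner queued the actual frequency must ramp to max within 500ms, and once the GT idles it must decay back to min within 2s. A trimmed sketch of that sequence, with the spinner setup and error reporting omitted (timeouts taken from the listing):

	GEM_BUG_ON(intel_rps_is_active(rps));
	rps->cur_freq = rps->min_freq;			/* start the test from the floor */

	/* ... submit a spinner so RPS sees a busy GT ... */
	GEM_BUG_ON(rps->last_freq != rps->min_freq);

	max.freq = wait_for_freq(rps, rps->max_freq, 500);	/* expect ramp-up under load */

	/* ... terminate the spinner and wait for the GT to park ... */
	min.freq = wait_for_freq(rps, rps->min_freq, 2000);	/* expect decay when idle */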