/******************************************************************************
 * xc_pm.c - Libxc API for Xen Power Management (Px/Cx/Tx, etc.) statistics
 *
 * Copyright (c) 2008, Liu Jinsong <jinsong.liu@intel.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation;
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; If not, see <http://www.gnu.org/licenses/>.
 *
 */

#include <stdbool.h>
#include "xc_private.h"

#include <xen-tools/common-macros.h>

/*
 * Get PM statistics info
 */
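/* Return in *max_px the number of Px states reported for @cpuid. */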
int xc_pm_get_max_px(xc_interface *xch, int cpuid, int *max_px)
{
    struct xen_sysctl sysctl = {};
    int ret;

    sysctl.cmd = XEN_SYSCTL_get_pmstat;
    sysctl.u.get_pmstat.type = PMSTAT_get_max_px;
    sysctl.u.get_pmstat.cpuid = cpuid;
    ret = xc_sysctl(xch, &sysctl);
    if ( ret )
        return ret;

    *max_px = sysctl.u.get_pmstat.u.getpx.total;
    return ret;
}

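/*
 * Fetch Px statistics for @cpuid.
 *
 * The caller must set pxpt->total (e.g. from xc_pm_get_max_px()) and supply
 * pxpt->trans_pt (total * total uint64_t entries) and pxpt->pt (total
 * struct xc_px_val entries); both buffers are bounced to the hypervisor and
 * filled on success, along with the total/usable/last/cur fields.
 */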
int xc_pm_get_pxstat(xc_interface *xch, int cpuid, struct xc_px_stat *pxpt)
{
    struct xen_sysctl sysctl = {};
    DECLARE_NAMED_HYPERCALL_BOUNCE(trans, pxpt->trans_pt,
                                   pxpt->total * pxpt->total * sizeof(uint64_t),
                                   XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
    DECLARE_NAMED_HYPERCALL_BOUNCE(pt, pxpt->pt,
                                   pxpt->total * sizeof(struct xc_px_val),
                                   XC_HYPERCALL_BUFFER_BOUNCE_BOTH);

    int ret;

    if ( !pxpt->trans_pt || !pxpt->pt )
    {
        errno = EINVAL;
        return -1;
    }

    if ( xc_hypercall_bounce_pre(xch, trans) )
        return -1;

    if ( xc_hypercall_bounce_pre(xch, pt) )
    {
        xc_hypercall_bounce_post(xch, trans);
        return -1;
    }

    sysctl.cmd = XEN_SYSCTL_get_pmstat;
    sysctl.u.get_pmstat.type = PMSTAT_get_pxstat;
    sysctl.u.get_pmstat.cpuid = cpuid;
    sysctl.u.get_pmstat.u.getpx.total = pxpt->total;
    set_xen_guest_handle(sysctl.u.get_pmstat.u.getpx.trans_pt, trans);
    set_xen_guest_handle(sysctl.u.get_pmstat.u.getpx.pt, pt);

    ret = xc_sysctl(xch, &sysctl);
    if ( ret )
    {
        xc_hypercall_bounce_post(xch, trans);
        xc_hypercall_bounce_post(xch, pt);
        return ret;
    }

    pxpt->total = sysctl.u.get_pmstat.u.getpx.total;
    pxpt->usable = sysctl.u.get_pmstat.u.getpx.usable;
    pxpt->last = sysctl.u.get_pmstat.u.getpx.last;
    pxpt->cur = sysctl.u.get_pmstat.u.getpx.cur;

    xc_hypercall_bounce_post(xch, trans);
    xc_hypercall_bounce_post(xch, pt);

    return ret;
}

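/* Reset the Px statistics gathered by the hypervisor for @cpuid. */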
int xc_pm_reset_pxstat(xc_interface *xch, int cpuid)
{
    struct xen_sysctl sysctl = {};

    sysctl.cmd = XEN_SYSCTL_get_pmstat;
    sysctl.u.get_pmstat.type = PMSTAT_reset_pxstat;
    sysctl.u.get_pmstat.cpuid = cpuid;

    return xc_sysctl(xch, &sysctl);
}

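/* Return in *max_cx the number of Cx states reported for @cpuid. */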
int xc_pm_get_max_cx(xc_interface *xch, int cpuid, int *max_cx)
{
    struct xen_sysctl sysctl = {};
    int ret = 0;

    sysctl.cmd = XEN_SYSCTL_get_pmstat;
    sysctl.u.get_pmstat.type = PMSTAT_get_max_cx;
    sysctl.u.get_pmstat.cpuid = cpuid;
    if ( (ret = xc_sysctl(xch, &sysctl)) != 0 )
        return ret;

    *max_cx = sysctl.u.get_pmstat.u.getcx.nr;
    return ret;
}

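/*
 * Fetch Cx statistics for @cpuid.
 *
 * The caller sets cxpt->nr, cxpt->nr_pc and cxpt->nr_cc to the sizes of the
 * triggers/residencies, pc and cc arrays it provides; on success those
 * arrays are filled and the counts are updated to what the hypervisor
 * reported.
 */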
int xc_pm_get_cxstat(xc_interface *xch, int cpuid, struct xc_cx_stat *cxpt)
{
    struct xen_sysctl sysctl = {};
    DECLARE_NAMED_HYPERCALL_BOUNCE(triggers, cxpt->triggers,
                                   cxpt->nr * sizeof(*cxpt->triggers),
                                   XC_HYPERCALL_BUFFER_BOUNCE_OUT);
    DECLARE_NAMED_HYPERCALL_BOUNCE(residencies, cxpt->residencies,
                                   cxpt->nr * sizeof(*cxpt->residencies),
                                   XC_HYPERCALL_BUFFER_BOUNCE_OUT);
    DECLARE_NAMED_HYPERCALL_BOUNCE(pc, cxpt->pc,
                                   cxpt->nr_pc * sizeof(*cxpt->pc),
                                   XC_HYPERCALL_BUFFER_BOUNCE_OUT);
    DECLARE_NAMED_HYPERCALL_BOUNCE(cc, cxpt->cc,
                                   cxpt->nr_cc * sizeof(*cxpt->cc),
                                   XC_HYPERCALL_BUFFER_BOUNCE_OUT);
    int ret = -1;

    if ( xc_hypercall_bounce_pre(xch, triggers) )
        goto unlock_0;
    if ( xc_hypercall_bounce_pre(xch, residencies) )
        goto unlock_1;
    if ( xc_hypercall_bounce_pre(xch, pc) )
        goto unlock_2;
    if ( xc_hypercall_bounce_pre(xch, cc) )
        goto unlock_3;

    sysctl.cmd = XEN_SYSCTL_get_pmstat;
    sysctl.u.get_pmstat.type = PMSTAT_get_cxstat;
    sysctl.u.get_pmstat.cpuid = cpuid;
    sysctl.u.get_pmstat.u.getcx.nr = cxpt->nr;
    sysctl.u.get_pmstat.u.getcx.nr_pc = cxpt->nr_pc;
    sysctl.u.get_pmstat.u.getcx.nr_cc = cxpt->nr_cc;
    set_xen_guest_handle(sysctl.u.get_pmstat.u.getcx.triggers, triggers);
    set_xen_guest_handle(sysctl.u.get_pmstat.u.getcx.residencies, residencies);
    set_xen_guest_handle(sysctl.u.get_pmstat.u.getcx.pc, pc);
    set_xen_guest_handle(sysctl.u.get_pmstat.u.getcx.cc, cc);

    if ( (ret = xc_sysctl(xch, &sysctl)) )
        goto unlock_4;

    cxpt->nr = sysctl.u.get_pmstat.u.getcx.nr;
    cxpt->last = sysctl.u.get_pmstat.u.getcx.last;
    cxpt->idle_time = sysctl.u.get_pmstat.u.getcx.idle_time;
    cxpt->nr_pc = sysctl.u.get_pmstat.u.getcx.nr_pc;
    cxpt->nr_cc = sysctl.u.get_pmstat.u.getcx.nr_cc;

unlock_4:
    xc_hypercall_bounce_post(xch, cc);
unlock_3:
    xc_hypercall_bounce_post(xch, pc);
unlock_2:
    xc_hypercall_bounce_post(xch, residencies);
unlock_1:
    xc_hypercall_bounce_post(xch, triggers);
unlock_0:
    return ret;
}

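/* Reset the Cx statistics gathered by the hypervisor for @cpuid. */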
int xc_pm_reset_cxstat(xc_interface *xch, int cpuid)
{
    struct xen_sysctl sysctl = {};

    sysctl.cmd = XEN_SYSCTL_get_pmstat;
    sysctl.u.get_pmstat.type = PMSTAT_reset_cxstat;
    sysctl.u.get_pmstat.cpuid = cpuid;

    return xc_sysctl(xch, &sysctl);
}


/*
 * 1. Get PM parameters
 * 2. Provide user PM control
 */
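/*
 * Query the cpufreq parameters of @cpuid.
 *
 * A typical caller invokes this twice: first with cpu_num/freq_num/gov_num
 * set to 0 (or too small), in which case the sysctl fails with EAGAIN and
 * the required counts are copied back into user_para; then again with the
 * affected_cpus, scaling_available_frequencies and
 * scaling_available_governors arrays allocated to those sizes.
 */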
int xc_get_cpufreq_para(xc_interface *xch, int cpuid,
                        struct xc_get_cpufreq_para *user_para)
{
    struct xen_sysctl sysctl = {};
    int ret = 0;
    struct xen_get_cpufreq_para *sys_para = &sysctl.u.pm_op.u.get_para;
    DECLARE_NAMED_HYPERCALL_BOUNCE(affected_cpus,
                                   user_para->affected_cpus,
                                   user_para->cpu_num * sizeof(uint32_t),
                                   XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
    DECLARE_NAMED_HYPERCALL_BOUNCE(scaling_available_frequencies,
                                   user_para->scaling_available_frequencies,
                                   user_para->freq_num * sizeof(uint32_t),
                                   XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
    DECLARE_NAMED_HYPERCALL_BOUNCE(scaling_available_governors,
                                   user_para->scaling_available_governors,
                                   user_para->gov_num * CPUFREQ_NAME_LEN * sizeof(char),
                                   XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
    bool has_num = user_para->cpu_num && user_para->freq_num;

    if ( has_num )
    {
        if ( (!user_para->affected_cpus) ||
             (!user_para->scaling_available_frequencies) ||
             (user_para->gov_num && !user_para->scaling_available_governors) )
        {
            errno = EINVAL;
            return -1;
        }
        ret = xc_hypercall_bounce_pre(xch, affected_cpus);
        if ( ret )
            return ret;
        ret = xc_hypercall_bounce_pre(xch, scaling_available_frequencies);
        if ( ret )
            goto unlock_2;
        if ( user_para->gov_num )
            ret = xc_hypercall_bounce_pre(xch, scaling_available_governors);
        if ( ret )
            goto unlock_3;

        set_xen_guest_handle(sys_para->affected_cpus, affected_cpus);
        set_xen_guest_handle(sys_para->scaling_available_frequencies,
                             scaling_available_frequencies);
        if ( user_para->gov_num )
            set_xen_guest_handle(sys_para->scaling_available_governors,
                                 scaling_available_governors);
    }

    sysctl.cmd = XEN_SYSCTL_pm_op;
    sysctl.u.pm_op.cmd = GET_CPUFREQ_PARA;
    sysctl.u.pm_op.cpuid = cpuid;
    sys_para->cpu_num = user_para->cpu_num;
    sys_para->freq_num = user_para->freq_num;
    sys_para->gov_num = user_para->gov_num;

    ret = xc_sysctl(xch, &sysctl);
    if ( ret )
    {
        if ( errno == EAGAIN )
        {
            user_para->cpu_num = sys_para->cpu_num;
            user_para->freq_num = sys_para->freq_num;
            user_para->gov_num = sys_para->gov_num;
        }

        if ( has_num )
            goto unlock_4;
        return ret;
    }
    else
    {
        user_para->cpuinfo_cur_freq = sys_para->cpuinfo_cur_freq;
        user_para->cpuinfo_max_freq = sys_para->cpuinfo_max_freq;
        user_para->cpuinfo_min_freq = sys_para->cpuinfo_min_freq;
        user_para->turbo_enabled = sys_para->turbo_enabled;

        memcpy(user_para->scaling_driver,
               sys_para->scaling_driver, CPUFREQ_NAME_LEN);

        /*
         * Copy to user_para no matter what cpufreq driver/governor.
         *
         * First sanity check layout of the union subject to memcpy() below.
         */
        BUILD_BUG_ON(sizeof(user_para->u) != sizeof(sys_para->u));

#define CHK_FIELD(fld) \
        BUILD_BUG_ON(offsetof(typeof(user_para->u), fld) != \
                     offsetof(typeof(sys_para->u), fld))

        CHK_FIELD(s.scaling_cur_freq);
        CHK_FIELD(s.scaling_governor);
        CHK_FIELD(s.scaling_max_freq);
        CHK_FIELD(s.scaling_min_freq);
        CHK_FIELD(s.u.userspace);
        CHK_FIELD(s.u.ondemand);
        CHK_FIELD(cppc_para);

#undef CHK_FIELD

        memcpy(&user_para->u, &sys_para->u, sizeof(sys_para->u));
    }

unlock_4:
    xc_hypercall_bounce_post(xch, scaling_available_governors);
unlock_3:
    xc_hypercall_bounce_post(xch, scaling_available_frequencies);
unlock_2:
    xc_hypercall_bounce_post(xch, affected_cpus);

    return ret;
}

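/* Switch @cpuid to the scaling governor named by @govname. */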
int xc_set_cpufreq_gov(xc_interface *xch, int cpuid, char *govname)
{
    struct xen_sysctl sysctl = {};
    char *scaling_governor = sysctl.u.pm_op.u.set_gov.scaling_governor;

    if ( !xch || !govname )
    {
        errno = EINVAL;
        return -1;
    }
    sysctl.cmd = XEN_SYSCTL_pm_op;
    sysctl.u.pm_op.cmd = SET_CPUFREQ_GOV;
    sysctl.u.pm_op.cpuid = cpuid;
    strncpy(scaling_governor, govname, CPUFREQ_NAME_LEN - 1);
    scaling_governor[CPUFREQ_NAME_LEN - 1] = '\0';

    return xc_sysctl(xch, &sysctl);
}

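/* Set a single cpufreq tunable (@ctrl_type) of @cpuid to @ctrl_value. */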
int xc_set_cpufreq_para(xc_interface *xch, int cpuid,
                        int ctrl_type, int ctrl_value)
{
    struct xen_sysctl sysctl = {};

    if ( !xch )
    {
        errno = EINVAL;
        return -1;
    }
    sysctl.cmd = XEN_SYSCTL_pm_op;
    sysctl.u.pm_op.cmd = SET_CPUFREQ_PARA;
    sysctl.u.pm_op.cpuid = cpuid;
    sysctl.u.pm_op.u.set_para.ctrl_type = ctrl_type;
    sysctl.u.pm_op.u.set_para.ctrl_value = ctrl_value;

    return xc_sysctl(xch, &sysctl);
}

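/*
 * Update the CPPC parameters of @cpuid.  On return *set_cppc is overwritten
 * with the values the hypervisor reports back.
 */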
int xc_set_cpufreq_cppc(xc_interface *xch, int cpuid,
                        xc_set_cppc_para_t *set_cppc)
{
    struct xen_sysctl sysctl = {};
    int ret;

    if ( !xch || !set_cppc )
    {
        errno = EINVAL;
        return -1;
    }
    sysctl.cmd = XEN_SYSCTL_pm_op;
    sysctl.u.pm_op.cmd = SET_CPUFREQ_CPPC;
    sysctl.u.pm_op.cpuid = cpuid;
    sysctl.u.pm_op.u.set_cppc = *set_cppc;

    ret = xc_sysctl(xch, &sysctl);

    *set_cppc = sysctl.u.pm_op.u.set_cppc;

    return ret;
}

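/* Return in *avg_freq the average frequency reported for @cpuid. */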
int xc_get_cpufreq_avgfreq(xc_interface *xch, int cpuid, int *avg_freq)
{
    int ret = 0;
    struct xen_sysctl sysctl = {};

    if ( !xch || !avg_freq )
    {
        errno = EINVAL;
        return -1;
    }
    sysctl.cmd = XEN_SYSCTL_pm_op;
    sysctl.u.pm_op.cmd = GET_CPUFREQ_AVGFREQ;
    sysctl.u.pm_op.cpuid = cpuid;
    ret = xc_sysctl(xch, &sysctl);

    *avg_freq = sysctl.u.pm_op.u.get_avgfreq;

    return ret;
}

/*
 * value: 0 - disable sched_smt_power_savings
 *        1 - enable sched_smt_power_savings
 */
int xc_set_sched_opt_smt(xc_interface *xch, uint32_t value)
{
    int rc;
    struct xen_sysctl sysctl = {};

    sysctl.cmd = XEN_SYSCTL_pm_op;
    sysctl.u.pm_op.cmd = XEN_SYSCTL_pm_op_set_sched_opt_smt;
    sysctl.u.pm_op.cpuid = 0;
    sysctl.u.pm_op.u.set_sched_opt_smt = value;
    rc = do_sysctl(xch, &sysctl);

    return rc;
}

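/*
 * Common helper for the max C-state / C-substate getters below.  The @type
 * selector (0 for the C-state limit, 1 for the C-substate limit) is passed
 * to the hypervisor in the sysctl's cpuid field.
 */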
static int get_max_cstate(xc_interface *xch, uint32_t *value, uint32_t type)
{
    int rc;
    struct xen_sysctl sysctl = {};

    if ( !xch || !value )
    {
        errno = EINVAL;
        return -1;
    }
    sysctl.cmd = XEN_SYSCTL_pm_op;
    sysctl.u.pm_op.cmd = XEN_SYSCTL_pm_op_get_max_cstate;
    sysctl.u.pm_op.cpuid = type;
    sysctl.u.pm_op.u.get_max_cstate = 0;
    rc = do_sysctl(xch, &sysctl);
    *value = sysctl.u.pm_op.u.get_max_cstate;

    return rc;
}

int xc_get_cpuidle_max_cstate(xc_interface *xch, uint32_t *value)
{
    return get_max_cstate(xch, value, 0);
}

int xc_get_cpuidle_max_csubstate(xc_interface *xch, uint32_t *value)
{
    return get_max_cstate(xch, value, 1);
}

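/*
 * Counterpart of get_max_cstate(): set the C-state (type 0) or C-substate
 * (type 1) limit to @value.
 */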
static int set_max_cstate(xc_interface *xch, uint32_t value, uint32_t type)
{
    struct xen_sysctl sysctl = {};

    if ( !xch )
    {
        errno = EINVAL;
        return -1;
    }
    sysctl.cmd = XEN_SYSCTL_pm_op;
    sysctl.u.pm_op.cmd = XEN_SYSCTL_pm_op_set_max_cstate;
    sysctl.u.pm_op.cpuid = type;
    sysctl.u.pm_op.u.set_max_cstate = value;

    return do_sysctl(xch, &sysctl);
}

int xc_set_cpuidle_max_cstate(xc_interface *xch, uint32_t value)
{
    return set_max_cstate(xch, value, 0);
}

int xc_set_cpuidle_max_csubstate(xc_interface *xch, uint32_t value)
{
    return set_max_cstate(xch, value, 1);
}

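/* Enable turbo mode on @cpuid. */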
int xc_enable_turbo(xc_interface *xch, int cpuid)
{
    struct xen_sysctl sysctl = {};

    if ( !xch )
    {
        errno = EINVAL;
        return -1;
    }
    sysctl.cmd = XEN_SYSCTL_pm_op;
    sysctl.u.pm_op.cmd = XEN_SYSCTL_pm_op_enable_turbo;
    sysctl.u.pm_op.cpuid = cpuid;
    return do_sysctl(xch, &sysctl);
}

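/* Disable turbo mode on @cpuid. */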
int xc_disable_turbo(xc_interface *xch, int cpuid)
{
    struct xen_sysctl sysctl = {};

    if ( !xch )
    {
        errno = EINVAL;
        return -1;
    }
    sysctl.cmd = XEN_SYSCTL_pm_op;
    sysctl.u.pm_op.cmd = XEN_SYSCTL_pm_op_disable_turbo;
    sysctl.u.pm_op.cpuid = cpuid;
    return do_sysctl(xch, &sysctl);
}