/******************************************************************************
 * xc_pm.c - Libxc API for Xen Power Management (Px/Cx/Tx, etc.) statistics
 *
 * Copyright (c) 2008, Liu Jinsong <jinsong.liu@intel.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation;
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; If not, see <http://www.gnu.org/licenses/>.
 *
 */

#include <stdbool.h>
#include "xc_private.h"

#include <xen-tools/common-macros.h>

/*
 * Get PM statistic info
 */
int xc_pm_get_max_px(xc_interface *xch, int cpuid, int *max_px)
{
    struct xen_sysctl sysctl = {};
    int ret;

    sysctl.cmd = XEN_SYSCTL_get_pmstat;
    sysctl.u.get_pmstat.type = PMSTAT_get_max_px;
    sysctl.u.get_pmstat.cpuid = cpuid;
    ret = xc_sysctl(xch, &sysctl);
    if ( ret )
        return ret;

    *max_px = sysctl.u.get_pmstat.u.getpx.total;
    return ret;
}

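/*
 * Caller-side usage sketch (illustrative only, not part of the library; xch
 * and cpu are placeholder variables): xc_pm_get_pxstat() does not allocate
 * the Px arrays itself.  The caller queries the state count first and sizes
 * pxpt->trans_pt as a max_px * max_px matrix of uint64_t and pxpt->pt as
 * max_px entries of struct xc_px_val, matching the bounce sizes set below:
 *
 *     int max_px;
 *     struct xc_px_stat st = {};
 *
 *     if ( xc_pm_get_max_px(xch, cpu, &max_px) == 0 )
 *     {
 *         st.trans_pt = calloc(max_px * max_px, sizeof(uint64_t));
 *         st.pt = calloc(max_px, sizeof(struct xc_px_val));
 *         if ( st.trans_pt && st.pt )
 *             xc_pm_get_pxstat(xch, cpu, &st);
 *         free(st.trans_pt);
 *         free(st.pt);
 *     }
 */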
int xc_pm_get_pxstat(xc_interface *xch, int cpuid, struct xc_px_stat *pxpt)
{
    struct xen_sysctl sysctl = {};
    /* Sizes unknown until xc_pm_get_max_px */
    DECLARE_NAMED_HYPERCALL_BOUNCE(trans, pxpt->trans_pt, 0, XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
    DECLARE_NAMED_HYPERCALL_BOUNCE(pt, pxpt->pt, 0, XC_HYPERCALL_BUFFER_BOUNCE_BOTH);

    int max_px, ret;

    if ( !pxpt->trans_pt || !pxpt->pt )
    {
        errno = EINVAL;
        return -1;
    }
    if ( (ret = xc_pm_get_max_px(xch, cpuid, &max_px)) != 0 )
        return ret;

    HYPERCALL_BOUNCE_SET_SIZE(trans, max_px * max_px * sizeof(uint64_t));
    HYPERCALL_BOUNCE_SET_SIZE(pt, max_px * sizeof(struct xc_px_val));

    if ( xc_hypercall_bounce_pre(xch, trans) )
        return -1;

    if ( xc_hypercall_bounce_pre(xch, pt) )
    {
        xc_hypercall_bounce_post(xch, trans);
        return -1;
    }

    sysctl.cmd = XEN_SYSCTL_get_pmstat;
    sysctl.u.get_pmstat.type = PMSTAT_get_pxstat;
    sysctl.u.get_pmstat.cpuid = cpuid;
    sysctl.u.get_pmstat.u.getpx.total = max_px;
    set_xen_guest_handle(sysctl.u.get_pmstat.u.getpx.trans_pt, trans);
    set_xen_guest_handle(sysctl.u.get_pmstat.u.getpx.pt, pt);

    ret = xc_sysctl(xch, &sysctl);
    if ( ret )
    {
        xc_hypercall_bounce_post(xch, trans);
        xc_hypercall_bounce_post(xch, pt);
        return ret;
    }

    pxpt->total = sysctl.u.get_pmstat.u.getpx.total;
    pxpt->usable = sysctl.u.get_pmstat.u.getpx.usable;
    pxpt->last = sysctl.u.get_pmstat.u.getpx.last;
    pxpt->cur = sysctl.u.get_pmstat.u.getpx.cur;

    xc_hypercall_bounce_post(xch, trans);
    xc_hypercall_bounce_post(xch, pt);

    return ret;
}

int xc_pm_reset_pxstat(xc_interface *xch, int cpuid)
{
    struct xen_sysctl sysctl = {};

    sysctl.cmd = XEN_SYSCTL_get_pmstat;
    sysctl.u.get_pmstat.type = PMSTAT_reset_pxstat;
    sysctl.u.get_pmstat.cpuid = cpuid;

    return xc_sysctl(xch, &sysctl);
}

int xc_pm_get_max_cx(xc_interface *xch, int cpuid, int *max_cx)
{
    struct xen_sysctl sysctl = {};
    int ret = 0;

    sysctl.cmd = XEN_SYSCTL_get_pmstat;
    sysctl.u.get_pmstat.type = PMSTAT_get_max_cx;
    sysctl.u.get_pmstat.cpuid = cpuid;
    if ( (ret = xc_sysctl(xch, &sysctl)) != 0 )
        return ret;

    *max_cx = sysctl.u.get_pmstat.u.getcx.nr;
    return ret;
}

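/*
 * Caller-side usage sketch (illustrative only; xch and cpu are placeholders):
 * xc_pm_get_cxstat() fills caller-provided arrays.  Before calling, set
 * cxpt->nr (typically from xc_pm_get_max_cx()), cxpt->nr_pc and cxpt->nr_cc
 * to the number of entries allocated in cxpt->triggers, cxpt->residencies,
 * cxpt->pc and cxpt->cc; on success those counts are updated to what the
 * hypervisor actually reported.  The nr_pc/nr_cc value of 8 below is an
 * arbitrary placeholder capacity, not a value defined by this API:
 *
 *     int max_cx;
 *     struct xc_cx_stat st = {};
 *
 *     if ( xc_pm_get_max_cx(xch, cpu, &max_cx) == 0 )
 *     {
 *         st.nr = max_cx;
 *         st.nr_pc = 8;
 *         st.nr_cc = 8;
 *         st.triggers = calloc(st.nr, sizeof(*st.triggers));
 *         st.residencies = calloc(st.nr, sizeof(*st.residencies));
 *         st.pc = calloc(st.nr_pc, sizeof(*st.pc));
 *         st.cc = calloc(st.nr_cc, sizeof(*st.cc));
 *         if ( st.triggers && st.residencies && st.pc && st.cc )
 *             xc_pm_get_cxstat(xch, cpu, &st);
 *         ...
 *     }
 */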
int xc_pm_get_cxstat(xc_interface *xch, int cpuid, struct xc_cx_stat *cxpt)
{
    struct xen_sysctl sysctl = {};
    DECLARE_NAMED_HYPERCALL_BOUNCE(triggers, cxpt->triggers,
                                   cxpt->nr * sizeof(*cxpt->triggers),
                                   XC_HYPERCALL_BUFFER_BOUNCE_OUT);
    DECLARE_NAMED_HYPERCALL_BOUNCE(residencies, cxpt->residencies,
                                   cxpt->nr * sizeof(*cxpt->residencies),
                                   XC_HYPERCALL_BUFFER_BOUNCE_OUT);
    DECLARE_NAMED_HYPERCALL_BOUNCE(pc, cxpt->pc,
                                   cxpt->nr_pc * sizeof(*cxpt->pc),
                                   XC_HYPERCALL_BUFFER_BOUNCE_OUT);
    DECLARE_NAMED_HYPERCALL_BOUNCE(cc, cxpt->cc,
                                   cxpt->nr_cc * sizeof(*cxpt->cc),
                                   XC_HYPERCALL_BUFFER_BOUNCE_OUT);
    int ret = -1;

    if ( xc_hypercall_bounce_pre(xch, triggers) )
        goto unlock_0;
    if ( xc_hypercall_bounce_pre(xch, residencies) )
        goto unlock_1;
    if ( xc_hypercall_bounce_pre(xch, pc) )
        goto unlock_2;
    if ( xc_hypercall_bounce_pre(xch, cc) )
        goto unlock_3;

    sysctl.cmd = XEN_SYSCTL_get_pmstat;
    sysctl.u.get_pmstat.type = PMSTAT_get_cxstat;
    sysctl.u.get_pmstat.cpuid = cpuid;
    sysctl.u.get_pmstat.u.getcx.nr = cxpt->nr;
    sysctl.u.get_pmstat.u.getcx.nr_pc = cxpt->nr_pc;
    sysctl.u.get_pmstat.u.getcx.nr_cc = cxpt->nr_cc;
    set_xen_guest_handle(sysctl.u.get_pmstat.u.getcx.triggers, triggers);
    set_xen_guest_handle(sysctl.u.get_pmstat.u.getcx.residencies, residencies);
    set_xen_guest_handle(sysctl.u.get_pmstat.u.getcx.pc, pc);
    set_xen_guest_handle(sysctl.u.get_pmstat.u.getcx.cc, cc);

    if ( (ret = xc_sysctl(xch, &sysctl)) )
        goto unlock_4;

    cxpt->nr = sysctl.u.get_pmstat.u.getcx.nr;
    cxpt->last = sysctl.u.get_pmstat.u.getcx.last;
    cxpt->idle_time = sysctl.u.get_pmstat.u.getcx.idle_time;
    cxpt->nr_pc = sysctl.u.get_pmstat.u.getcx.nr_pc;
    cxpt->nr_cc = sysctl.u.get_pmstat.u.getcx.nr_cc;

 unlock_4:
    xc_hypercall_bounce_post(xch, cc);
 unlock_3:
    xc_hypercall_bounce_post(xch, pc);
 unlock_2:
    xc_hypercall_bounce_post(xch, residencies);
 unlock_1:
    xc_hypercall_bounce_post(xch, triggers);
 unlock_0:
    return ret;
}

int xc_pm_reset_cxstat(xc_interface *xch, int cpuid)
{
    struct xen_sysctl sysctl = {};

    sysctl.cmd = XEN_SYSCTL_get_pmstat;
    sysctl.u.get_pmstat.type = PMSTAT_reset_cxstat;
    sysctl.u.get_pmstat.cpuid = cpuid;

    return xc_sysctl(xch, &sysctl);
}


/*
 * 1. Get PM parameter
 * 2. Provide user PM control
 */
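/*
 * Caller-side usage sketch (illustrative only; xch and cpu are placeholders):
 * xc_get_cpufreq_para() is normally called twice.  A first call with
 * cpu_num/freq_num/gov_num left at zero fails with EAGAIN but reports the
 * required counts back through user_para, after which the caller allocates
 * affected_cpus (cpu_num uint32_t entries), scaling_available_frequencies
 * (freq_num uint32_t entries) and scaling_available_governors
 * (gov_num * CPUFREQ_NAME_LEN chars) and retries:
 *
 *     struct xc_get_cpufreq_para p = {};
 *
 *     if ( xc_get_cpufreq_para(xch, cpu, &p) == -EAGAIN )
 *     {
 *         p.affected_cpus = calloc(p.cpu_num, sizeof(uint32_t));
 *         p.scaling_available_frequencies = calloc(p.freq_num, sizeof(uint32_t));
 *         p.scaling_available_governors = calloc(p.gov_num, CPUFREQ_NAME_LEN);
 *         if ( p.affected_cpus && p.scaling_available_frequencies )
 *             xc_get_cpufreq_para(xch, cpu, &p);
 *         ...
 *     }
 */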
int xc_get_cpufreq_para(xc_interface *xch, int cpuid,
                        struct xc_get_cpufreq_para *user_para)
{
    struct xen_sysctl sysctl = {};
    int ret = 0;
    struct xen_get_cpufreq_para *sys_para = &sysctl.u.pm_op.u.get_para;
    DECLARE_NAMED_HYPERCALL_BOUNCE(affected_cpus,
                                   user_para->affected_cpus,
                                   user_para->cpu_num * sizeof(uint32_t), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
    DECLARE_NAMED_HYPERCALL_BOUNCE(scaling_available_frequencies,
                                   user_para->scaling_available_frequencies,
                                   user_para->freq_num * sizeof(uint32_t), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
    DECLARE_NAMED_HYPERCALL_BOUNCE(scaling_available_governors,
                                   user_para->scaling_available_governors,
                                   user_para->gov_num * CPUFREQ_NAME_LEN * sizeof(char), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);

    bool has_num = user_para->cpu_num &&
                   user_para->freq_num;

    if ( has_num )
    {
        if ( (!user_para->affected_cpus) ||
             (!user_para->scaling_available_frequencies) ||
             (user_para->gov_num && !user_para->scaling_available_governors) )
        {
            errno = EINVAL;
            return -1;
        }
        if ( xc_hypercall_bounce_pre(xch, affected_cpus) )
            goto unlock_1;
        if ( xc_hypercall_bounce_pre(xch, scaling_available_frequencies) )
            goto unlock_2;
        if ( user_para->gov_num &&
             xc_hypercall_bounce_pre(xch, scaling_available_governors) )
            goto unlock_3;

        set_xen_guest_handle(sys_para->affected_cpus, affected_cpus);
        set_xen_guest_handle(sys_para->scaling_available_frequencies, scaling_available_frequencies);
        if ( user_para->gov_num )
            set_xen_guest_handle(sys_para->scaling_available_governors,
                                 scaling_available_governors);
    }

    sysctl.cmd = XEN_SYSCTL_pm_op;
    sysctl.u.pm_op.cmd = GET_CPUFREQ_PARA;
    sysctl.u.pm_op.cpuid = cpuid;
    sys_para->cpu_num = user_para->cpu_num;
    sys_para->freq_num = user_para->freq_num;
    sys_para->gov_num = user_para->gov_num;

    ret = xc_sysctl(xch, &sysctl);
    if ( ret )
    {
        if ( errno == EAGAIN )
        {
            user_para->cpu_num = sys_para->cpu_num;
            user_para->freq_num = sys_para->freq_num;
            user_para->gov_num = sys_para->gov_num;
            ret = -errno;
        }

        if ( has_num )
            goto unlock_4;
        goto unlock_1;
    }
    else
    {
        user_para->cpuinfo_cur_freq = sys_para->cpuinfo_cur_freq;
        user_para->cpuinfo_max_freq = sys_para->cpuinfo_max_freq;
        user_para->cpuinfo_min_freq = sys_para->cpuinfo_min_freq;
        user_para->turbo_enabled = sys_para->turbo_enabled;

        memcpy(user_para->scaling_driver,
               sys_para->scaling_driver, CPUFREQ_NAME_LEN);

        /*
         * Copy to user_para no matter what cpufreq driver/governor.
         *
         * First sanity check layout of the union subject to memcpy() below.
         */
        BUILD_BUG_ON(sizeof(user_para->u) != sizeof(sys_para->u));

#define CHK_FIELD(fld) \
        BUILD_BUG_ON(offsetof(typeof(user_para->u), fld) != \
                     offsetof(typeof(sys_para->u), fld))

        CHK_FIELD(s.scaling_cur_freq);
        CHK_FIELD(s.scaling_governor);
        CHK_FIELD(s.scaling_max_freq);
        CHK_FIELD(s.scaling_min_freq);
        CHK_FIELD(s.u.userspace);
        CHK_FIELD(s.u.ondemand);
        CHK_FIELD(cppc_para);

#undef CHK_FIELD

        memcpy(&user_para->u, &sys_para->u, sizeof(sys_para->u));
    }

 unlock_4:
    if ( user_para->gov_num )
        xc_hypercall_bounce_post(xch, scaling_available_governors);
 unlock_3:
    xc_hypercall_bounce_post(xch, scaling_available_frequencies);
 unlock_2:
    xc_hypercall_bounce_post(xch, affected_cpus);
 unlock_1:
    return ret;
}

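/*
 * Illustrative call only (the governor name is an example; availability
 * depends on the hypervisor's cpufreq driver):
 *
 *     xc_set_cpufreq_gov(xch, 0, "ondemand");
 */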
int xc_set_cpufreq_gov(xc_interface *xch, int cpuid, char *govname)
{
    struct xen_sysctl sysctl = {};
    char *scaling_governor = sysctl.u.pm_op.u.set_gov.scaling_governor;

    if ( !xch || !govname )
    {
        errno = EINVAL;
        return -1;
    }
    sysctl.cmd = XEN_SYSCTL_pm_op;
    sysctl.u.pm_op.cmd = SET_CPUFREQ_GOV;
    sysctl.u.pm_op.cpuid = cpuid;
    strncpy(scaling_governor, govname, CPUFREQ_NAME_LEN - 1);
    scaling_governor[CPUFREQ_NAME_LEN - 1] = '\0';

    return xc_sysctl(xch, &sysctl);
}

int xc_set_cpufreq_para(xc_interface *xch, int cpuid,
                        int ctrl_type, int ctrl_value)
{
    struct xen_sysctl sysctl = {};

    if ( !xch )
    {
        errno = EINVAL;
        return -1;
    }
    sysctl.cmd = XEN_SYSCTL_pm_op;
    sysctl.u.pm_op.cmd = SET_CPUFREQ_PARA;
    sysctl.u.pm_op.cpuid = cpuid;
    sysctl.u.pm_op.u.set_para.ctrl_type = ctrl_type;
    sysctl.u.pm_op.u.set_para.ctrl_value = ctrl_value;

    return xc_sysctl(xch, &sysctl);
}

int xc_set_cpufreq_cppc(xc_interface *xch, int cpuid,
                        xc_set_cppc_para_t *set_cppc)
{
    struct xen_sysctl sysctl = {};
    int ret;

    if ( !xch || !set_cppc )
    {
        errno = EINVAL;
        return -1;
    }
    sysctl.cmd = XEN_SYSCTL_pm_op;
    sysctl.u.pm_op.cmd = SET_CPUFREQ_CPPC;
    sysctl.u.pm_op.cpuid = cpuid;
    sysctl.u.pm_op.u.set_cppc = *set_cppc;

    ret = xc_sysctl(xch, &sysctl);

    *set_cppc = sysctl.u.pm_op.u.set_cppc;

    return ret;
}

int xc_get_cpufreq_avgfreq(xc_interface *xch, int cpuid, int *avg_freq)
{
    int ret = 0;
    struct xen_sysctl sysctl = {};

    if ( !xch || !avg_freq )
    {
        errno = EINVAL;
        return -1;
    }
    sysctl.cmd = XEN_SYSCTL_pm_op;
    sysctl.u.pm_op.cmd = GET_CPUFREQ_AVGFREQ;
    sysctl.u.pm_op.cpuid = cpuid;
    ret = xc_sysctl(xch, &sysctl);

    *avg_freq = sysctl.u.pm_op.u.get_avgfreq;

    return ret;
}

/*
 * value: 0 - disable sched_smt_power_savings
 *        1 - enable sched_smt_power_savings
 */
int xc_set_sched_opt_smt(xc_interface *xch, uint32_t value)
{
    int rc;
    struct xen_sysctl sysctl = {};

    sysctl.cmd = XEN_SYSCTL_pm_op;
    sysctl.u.pm_op.cmd = XEN_SYSCTL_pm_op_set_sched_opt_smt;
    sysctl.u.pm_op.cpuid = 0;
    sysctl.u.pm_op.u.set_sched_opt_smt = value;
    rc = do_sysctl(xch, &sysctl);

    return rc;
}

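/*
 * Internal helper shared by the cpuidle max C-(sub)state accessors below:
 * @type selects which limit is queried (0 for the max C-state, 1 for the max
 * C-substate, matching the wrappers further down) and is carried to the
 * hypervisor in the sysctl's cpuid field.
 */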
static int get_max_cstate(xc_interface *xch, uint32_t *value, uint32_t type)
{
    int rc;
    struct xen_sysctl sysctl = {};

    if ( !xch || !value )
    {
        errno = EINVAL;
        return -1;
    }
    sysctl.cmd = XEN_SYSCTL_pm_op;
    sysctl.u.pm_op.cmd = XEN_SYSCTL_pm_op_get_max_cstate;
    sysctl.u.pm_op.cpuid = type;
    sysctl.u.pm_op.u.get_max_cstate = 0;
    rc = do_sysctl(xch, &sysctl);
    *value = sysctl.u.pm_op.u.get_max_cstate;

    return rc;
}

int xc_get_cpuidle_max_cstate(xc_interface *xch, uint32_t *value)
{
    return get_max_cstate(xch, value, 0);
}

int xc_get_cpuidle_max_csubstate(xc_interface *xch, uint32_t *value)
{
    return get_max_cstate(xch, value, 1);
}

static int set_max_cstate(xc_interface *xch, uint32_t value, uint32_t type)
{
    struct xen_sysctl sysctl = {};

    if ( !xch )
    {
        errno = EINVAL;
        return -1;
    }
    sysctl.cmd = XEN_SYSCTL_pm_op;
    sysctl.u.pm_op.cmd = XEN_SYSCTL_pm_op_set_max_cstate;
    sysctl.u.pm_op.cpuid = type;
    sysctl.u.pm_op.u.set_max_cstate = value;

    return do_sysctl(xch, &sysctl);
}

int xc_set_cpuidle_max_cstate(xc_interface *xch, uint32_t value)
{
    return set_max_cstate(xch, value, 0);
}

int xc_set_cpuidle_max_csubstate(xc_interface *xch, uint32_t value)
{
    return set_max_cstate(xch, value, 1);
}

int xc_enable_turbo(xc_interface *xch, int cpuid)
{
    struct xen_sysctl sysctl = {};

    if ( !xch )
    {
        errno = EINVAL;
        return -1;
    }
    sysctl.cmd = XEN_SYSCTL_pm_op;
    sysctl.u.pm_op.cmd = XEN_SYSCTL_pm_op_enable_turbo;
    sysctl.u.pm_op.cpuid = cpuid;
    return do_sysctl(xch, &sysctl);
}

int xc_disable_turbo(xc_interface *xch, int cpuid)
{
    struct xen_sysctl sysctl = {};

    if ( !xch )
    {
        errno = EINVAL;
        return -1;
    }
    sysctl.cmd = XEN_SYSCTL_pm_op;
    sysctl.u.pm_op.cmd = XEN_SYSCTL_pm_op_disable_turbo;
    sysctl.u.pm_op.cpuid = cpuid;
    return do_sysctl(xch, &sysctl);
}