1 /******************************************************************************
2 * Arch-specific sysctl.c
3 *
4 * System management operations. For use by node control stack.
5 *
6 * Copyright (c) 2002-2006, K Fraser
7 */
8
9 #include <xen/types.h>
10 #include <xen/lib.h>
11 #include <xen/mm.h>
12 #include <xen/guest_access.h>
13 #include <xen/hypercall.h>
14 #include <public/sysctl.h>
15 #include <xen/sched.h>
16 #include <xen/event.h>
17 #include <xen/domain_page.h>
18 #include <asm/msr.h>
19 #include <xen/trace.h>
20 #include <xen/console.h>
21 #include <xen/iocap.h>
22 #include <asm/irq.h>
23 #include <asm/hvm/hvm.h>
24 #include <asm/hvm/support.h>
25 #include <asm/processor.h>
26 #include <asm/smp.h>
27 #include <asm/numa.h>
28 #include <xen/nodemask.h>
29 #include <xen/cpu.h>
30 #include <xsm/xsm.h>
31 #include <asm/psr.h>
32 #include <asm/cpuid.h>
33
/*
 * Result carrier for l3_cache_get(): bundles the lookup status and the
 * L3 cache size so both fit through the single void *arg of an
 * on_selected_cpus() callback.
 */
struct l3_cache_info {
    int ret;            /* 0 on success, else error from cpuid4_cache_lookup() */
    unsigned long size; /* L3 cache size in KB; valid only when ret == 0 */
};
38
l3_cache_get(void * arg)39 static void l3_cache_get(void *arg)
40 {
41 struct cpuid4_info info;
42 struct l3_cache_info *l3_info = arg;
43
44 l3_info->ret = cpuid4_cache_lookup(3, &info);
45 if ( !l3_info->ret )
46 l3_info->size = info.size / 1024; /* in KB unit */
47 }
48
cpu_up_helper(void * data)49 long cpu_up_helper(void *data)
50 {
51 int cpu = (unsigned long)data;
52 int ret = cpu_up(cpu);
53 if ( ret == -EBUSY )
54 {
55 /* On EBUSY, flush RCU work and have one more go. */
56 rcu_barrier();
57 ret = cpu_up(cpu);
58 }
59 return ret;
60 }
61
cpu_down_helper(void * data)62 long cpu_down_helper(void *data)
63 {
64 int cpu = (unsigned long)data;
65 int ret = cpu_down(cpu);
66 if ( ret == -EBUSY )
67 {
68 /* On EBUSY, flush RCU work and have one more go. */
69 rcu_barrier();
70 ret = cpu_down(cpu);
71 }
72 return ret;
73 }
74
arch_do_physinfo(struct xen_sysctl_physinfo * pi)75 void arch_do_physinfo(struct xen_sysctl_physinfo *pi)
76 {
77 memcpy(pi->hw_cap, boot_cpu_data.x86_capability,
78 min(sizeof(pi->hw_cap), sizeof(boot_cpu_data.x86_capability)));
79 if ( hvm_enabled )
80 pi->capabilities |= XEN_SYSCTL_PHYSCAP_hvm;
81 if ( iommu_enabled )
82 pi->capabilities |= XEN_SYSCTL_PHYSCAP_hvm_directio;
83 }
84
/*
 * x86-specific sysctl dispatcher, reached from the common do_sysctl()
 * for commands the common code does not handle itself.
 *
 * @sysctl:   hypervisor-side copy of the guest's request; output values
 *            are staged here before being copied back to the guest.
 * @u_sysctl: guest handle of the original request, used for copy-back.
 *
 * Returns 0 on success, a negative errno value on failure, or -ENOSYS
 * for commands not implemented here.
 */
long arch_do_sysctl(
    struct xen_sysctl *sysctl, XEN_GUEST_HANDLE_PARAM(xen_sysctl_t) u_sysctl)
{
    long ret = 0;

    switch ( sysctl->cmd )
    {

    case XEN_SYSCTL_cpu_hotplug:
    {
        unsigned int cpu = sysctl->u.cpu_hotplug.cpu;

        switch ( sysctl->u.cpu_hotplug.op )
        {
        case XEN_SYSCTL_CPU_HOTPLUG_ONLINE:
            ret = xsm_resource_plug_core(XSM_HOOK);
            if ( ret )
                break;
            /*
             * Hotplug work is continued on CPU0; the target CPU number is
             * packed into the opaque helper argument.
             */
            ret = continue_hypercall_on_cpu(
                0, cpu_up_helper, (void *)(unsigned long)cpu);
            break;
        case XEN_SYSCTL_CPU_HOTPLUG_OFFLINE:
            ret = xsm_resource_unplug_core(XSM_HOOK);
            if ( ret )
                break;
            ret = continue_hypercall_on_cpu(
                0, cpu_down_helper, (void *)(unsigned long)cpu);
            break;
        default:
            ret = -EINVAL;
            break;
        }
    }
    break;

    case XEN_SYSCTL_psr_cmt_op:
        /* Cache Monitoring Technology (CMT) queries. */
        if ( !psr_cmt_enabled() )
            return -ENODEV;

        /* No flags are currently defined; reject anything non-zero. */
        if ( sysctl->u.psr_cmt_op.flags != 0 )
            return -EINVAL;

        switch ( sysctl->u.psr_cmt_op.cmd )
        {
        case XEN_SYSCTL_PSR_CMT_enabled:
            /* Report whether L3 occupancy monitoring is available. */
            sysctl->u.psr_cmt_op.u.data =
                (psr_cmt->features & PSR_RESOURCE_TYPE_L3) &&
                (psr_cmt->l3.features & PSR_CMT_L3_OCCUPANCY);
            break;
        case XEN_SYSCTL_PSR_CMT_get_total_rmid:
            sysctl->u.psr_cmt_op.u.data = psr_cmt->rmid_max;
            break;
        case XEN_SYSCTL_PSR_CMT_get_l3_upscaling_factor:
            sysctl->u.psr_cmt_op.u.data = psr_cmt->l3.upscaling_factor;
            break;
        case XEN_SYSCTL_PSR_CMT_get_l3_cache_size:
        {
            struct l3_cache_info info;
            unsigned int cpu = sysctl->u.psr_cmt_op.u.l3_cache.cpu;

            if ( (cpu >= nr_cpu_ids) || !cpu_online(cpu) )
            {
                ret = -ENODEV;
                sysctl->u.psr_cmt_op.u.data = 0;
                break;
            }
            /*
             * CPUID must execute on the CPU being queried: run the lookup
             * locally if possible, otherwise via a synchronous IPI.
             */
            if ( cpu == smp_processor_id() )
                l3_cache_get(&info);
            else
                on_selected_cpus(cpumask_of(cpu), l3_cache_get, &info, 1);

            ret = info.ret;
            sysctl->u.psr_cmt_op.u.data = (ret ? 0 : info.size);
            break;
        }
        case XEN_SYSCTL_PSR_CMT_get_l3_event_mask:
            sysctl->u.psr_cmt_op.u.data = psr_cmt->l3.features;
            break;
        default:
            sysctl->u.psr_cmt_op.u.data = 0;
            ret = -ENOSYS;
            break;
        }

        /* Copy the whole (possibly updated) sysctl back to the guest. */
        if ( __copy_to_guest(u_sysctl, sysctl, 1) )
            ret = -EFAULT;

        break;

    case XEN_SYSCTL_psr_cat_op:
        /* Cache Allocation Technology (CAT) queries. */
        switch ( sysctl->u.psr_cat_op.cmd )
        {
            /*
             * Declared inside the switch (before any case label) so both
             * branches share it; it carries no initializer, so this is
             * well-defined C.
             */
            uint32_t data[PSR_INFO_ARRAY_SIZE];

        case XEN_SYSCTL_PSR_CAT_get_l3_info:
        {
            ret = psr_get_info(sysctl->u.psr_cat_op.target,
                               PSR_CBM_TYPE_L3, data, ARRAY_SIZE(data));
            if ( ret )
                break;

            sysctl->u.psr_cat_op.u.cat_info.cos_max =
                                      data[PSR_INFO_IDX_COS_MAX];
            sysctl->u.psr_cat_op.u.cat_info.cbm_len =
                                      data[PSR_INFO_IDX_CAT_CBM_LEN];
            sysctl->u.psr_cat_op.u.cat_info.flags =
                                      data[PSR_INFO_IDX_CAT_FLAG];

            if ( __copy_field_to_guest(u_sysctl, sysctl, u.psr_cat_op) )
                ret = -EFAULT;
            break;
        }

        case XEN_SYSCTL_PSR_CAT_get_l2_info:
        {
            /* Same shape as the L3 case above, for the L2 resource. */
            ret = psr_get_info(sysctl->u.psr_cat_op.target,
                               PSR_CBM_TYPE_L2, data, ARRAY_SIZE(data));
            if ( ret )
                break;

            sysctl->u.psr_cat_op.u.cat_info.cos_max =
                                      data[PSR_INFO_IDX_COS_MAX];
            sysctl->u.psr_cat_op.u.cat_info.cbm_len =
                                      data[PSR_INFO_IDX_CAT_CBM_LEN];
            sysctl->u.psr_cat_op.u.cat_info.flags =
                                      data[PSR_INFO_IDX_CAT_FLAG];

            if ( __copy_field_to_guest(u_sysctl, sysctl, u.psr_cat_op) )
                ret = -EFAULT;
            break;
        }

        default:
            ret = -EOPNOTSUPP;
            break;
        }
        break;

    case XEN_SYSCTL_get_cpu_levelling_caps:
        sysctl->u.cpu_levelling_caps.caps = levelling_caps;
        if ( __copy_field_to_guest(u_sysctl, sysctl, u.cpu_levelling_caps.caps) )
            ret = -EFAULT;
        break;

    case XEN_SYSCTL_get_cpu_featureset:
    {
        /* Index (from the ABI enum) -> CPUID policy to report. */
        static const struct cpuid_policy *const policy_table[] = {
            [XEN_SYSCTL_cpu_featureset_raw]  = &raw_cpuid_policy,
            [XEN_SYSCTL_cpu_featureset_host] = &host_cpuid_policy,
            [XEN_SYSCTL_cpu_featureset_pv]   = &pv_max_cpuid_policy,
            [XEN_SYSCTL_cpu_featureset_hvm]  = &hvm_max_cpuid_policy,
        };
        const struct cpuid_policy *p = NULL;
        uint32_t featureset[FSCAPINTS];
        unsigned int nr;

        /* Request for maximum number of features? */
        if ( guest_handle_is_null(sysctl->u.cpu_featureset.features) )
        {
            sysctl->u.cpu_featureset.nr_features = FSCAPINTS;
            if ( __copy_field_to_guest(u_sysctl, sysctl,
                                       u.cpu_featureset.nr_features) )
                ret = -EFAULT;
            break;
        }

        /* Clip the number of entries. */
        nr = min_t(unsigned int, sysctl->u.cpu_featureset.nr_features,
                   FSCAPINTS);

        /* Look up requested featureset. */
        if ( sysctl->u.cpu_featureset.index < ARRAY_SIZE(policy_table) )
            p = policy_table[sysctl->u.cpu_featureset.index];

        /* Bad featureset index? */
        if ( !p )
            ret = -EINVAL;
        else
            cpuid_policy_to_featureset(p, featureset);

        /* Copy the requested featureset into place. */
        if ( !ret && copy_to_guest(sysctl->u.cpu_featureset.features,
                                   featureset, nr) )
            ret = -EFAULT;

        /* Inform the caller of how many features we wrote. */
        sysctl->u.cpu_featureset.nr_features = nr;
        if ( !ret && __copy_field_to_guest(u_sysctl, sysctl,
                                           u.cpu_featureset.nr_features) )
            ret = -EFAULT;

        /* Inform the caller if there was more data to provide. */
        if ( !ret && nr < FSCAPINTS )
            ret = -ENOBUFS;

        break;
    }

    default:
        ret = -ENOSYS;
        break;
    }

    return ret;
}
290
291 /*
292 * Local variables:
293 * mode: C
294 * c-file-style: "BSD"
295 * c-basic-offset: 4
296 * tab-width: 4
297 * indent-tabs-mode: nil
298 * End:
299 */
300