/*
 * utility.c - misc functions for cpufreq driver and Px statistic
 *
 * Copyright (C) 2001 Russell King
 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 *
 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
 *            Added handling for CPU hotplug
 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
 *            Fix handling for CPU hotplug -- affected CPUs
 * Feb 2008 - Liu Jinsong <jinsong.liu@intel.com>
 *            1. Merge cpufreq.c and freq_table.c of linux 2.6.23
 *               and port them to the Xen hypervisor
 *            2. Add some Px statistic interface functions
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <xen/errno.h>
#include <xen/cpumask.h>
#include <xen/types.h>
#include <xen/spinlock.h>
#include <xen/percpu.h>
#include <xen/sched.h>
#include <xen/timer.h>
#include <xen/trace.h>
#include <acpi/cpufreq/cpufreq.h>
#include <public/sysctl.h>

struct cpufreq_driver *cpufreq_driver;
struct processor_pminfo *__read_mostly processor_pminfo[NR_CPUS];
DEFINE_PER_CPU_READ_MOSTLY(struct cpufreq_policy *, cpufreq_cpu_policy);

DEFINE_PER_CPU(spinlock_t, cpufreq_statistic_lock);
/*********************************************************************
 *                      Px STATISTIC INFO                            *
 *********************************************************************/

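/*
 * Accumulate the time spent in P-state @state on @cpu since the last
 * update.  Busy time is derived as (wall-clock delta) - (idle-time
 * delta); a negative delta (e.g. after an idle-accounting hiccup) is
 * discarded rather than subtracted from the residency.
 */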
void cpufreq_residency_update(unsigned int cpu, uint8_t state)
{
    uint64_t now, total_idle_ns;
    int64_t delta;
    struct pm_px *pxpt = per_cpu(cpufreq_statistic_data, cpu);

    total_idle_ns = get_cpu_idle_time(cpu);
    now = NOW();

    delta = (now - pxpt->prev_state_wall) -
            (total_idle_ns - pxpt->prev_idle_wall);

    if ( likely(delta >= 0) )
        pxpt->u.pt[state].residency += delta;

    pxpt->prev_state_wall = now;
    pxpt->prev_idle_wall = total_idle_ns;
}

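/*
 * Record a P-state transition on @cpu from state @from to state @to:
 * bump the destination state's transition count, credit the elapsed
 * residency to the source state, and update the from->to cell of the
 * transition matrix.  All updates happen under the per-CPU statistic
 * lock.
 */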
void cpufreq_statistic_update(unsigned int cpu, uint8_t from, uint8_t to)
{
    struct pm_px *pxpt;
    struct processor_pminfo *pmpt = processor_pminfo[cpu];
    spinlock_t *cpufreq_statistic_lock =
        &per_cpu(cpufreq_statistic_lock, cpu);

    spin_lock(cpufreq_statistic_lock);

    pxpt = per_cpu(cpufreq_statistic_data, cpu);
    if ( !pxpt || !pmpt ) {
        spin_unlock(cpufreq_statistic_lock);
        return;
    }

    pxpt->u.last = from;
    pxpt->u.cur = to;
    pxpt->u.pt[to].count++;

    cpufreq_residency_update(cpu, from);

    (*(pxpt->u.trans_pt + from * pmpt->perf.state_count + to))++;

    spin_unlock(cpufreq_statistic_lock);
}

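/*
 * Allocate and initialise the Px statistic data for @cpuid: the
 * per-state table (count, residency, frequency) and the
 * state_count x state_count transition matrix.  Returns 0 on success
 * (including when the data already exists), -EINVAL if no P-state
 * information is available, or -ENOMEM on allocation failure.
 */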
int cpufreq_statistic_init(unsigned int cpuid)
{
    uint32_t i, count;
    struct pm_px *pxpt;
    const struct processor_pminfo *pmpt = processor_pminfo[cpuid];
    spinlock_t *cpufreq_statistic_lock =
        &per_cpu(cpufreq_statistic_lock, cpuid);

    spin_lock_init(cpufreq_statistic_lock);

    if ( !pmpt )
        return -EINVAL;

    spin_lock(cpufreq_statistic_lock);

    pxpt = per_cpu(cpufreq_statistic_data, cpuid);
    if ( pxpt ) {
        spin_unlock(cpufreq_statistic_lock);
        return 0;
    }

    count = pmpt->perf.state_count;

    pxpt = xzalloc(struct pm_px);
    if ( !pxpt ) {
        spin_unlock(cpufreq_statistic_lock);
        return -ENOMEM;
    }

    pxpt->u.trans_pt = xzalloc_array(uint64_t, count * count);
    if ( !pxpt->u.trans_pt ) {
        xfree(pxpt);
        spin_unlock(cpufreq_statistic_lock);
        return -ENOMEM;
    }

    pxpt->u.pt = xzalloc_array(struct pm_px_val, count);
    if ( !pxpt->u.pt ) {
        xfree(pxpt->u.trans_pt);
        xfree(pxpt);
        spin_unlock(cpufreq_statistic_lock);
        return -ENOMEM;
    }

    /*
     * Publish the data only once every allocation has succeeded, so
     * that the error paths above cannot leave a dangling per-CPU
     * pointer behind.
     */
    per_cpu(cpufreq_statistic_data, cpuid) = pxpt;

    pxpt->u.total = pmpt->perf.state_count;
    pxpt->u.usable = pmpt->perf.state_count - pmpt->perf.platform_limit;

    for (i = 0; i < pmpt->perf.state_count; i++)
        pxpt->u.pt[i].freq = pmpt->perf.states[i].core_frequency;

    pxpt->prev_state_wall = NOW();
    pxpt->prev_idle_wall = get_cpu_idle_time(cpuid);

    spin_unlock(cpufreq_statistic_lock);

    return 0;
}

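/*
 * Free the Px statistic data for @cpuid and clear the per-CPU
 * pointer.  Safe to call when no data was ever allocated.
 */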
void cpufreq_statistic_exit(unsigned int cpuid)
{
    struct pm_px *pxpt;
    spinlock_t *cpufreq_statistic_lock =
        &per_cpu(cpufreq_statistic_lock, cpuid);

    spin_lock(cpufreq_statistic_lock);

    pxpt = per_cpu(cpufreq_statistic_data, cpuid);
    if ( !pxpt ) {
        spin_unlock(cpufreq_statistic_lock);
        return;
    }

    xfree(pxpt->u.trans_pt);
    xfree(pxpt->u.pt);
    xfree(pxpt);
    per_cpu(cpufreq_statistic_data, cpuid) = NULL;

    spin_unlock(cpufreq_statistic_lock);
}

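/*
 * Zero all per-state counts, residencies and the transition matrix
 * for @cpuid, and restart the residency bookkeeping from now.
 */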
void cpufreq_statistic_reset(unsigned int cpuid)
{
    uint32_t i, j, count;
    struct pm_px *pxpt;
    const struct processor_pminfo *pmpt = processor_pminfo[cpuid];
    spinlock_t *cpufreq_statistic_lock =
        &per_cpu(cpufreq_statistic_lock, cpuid);

    spin_lock(cpufreq_statistic_lock);

    pxpt = per_cpu(cpufreq_statistic_data, cpuid);
    if ( !pmpt || !pxpt || !pxpt->u.pt || !pxpt->u.trans_pt ) {
        spin_unlock(cpufreq_statistic_lock);
        return;
    }

    count = pmpt->perf.state_count;

    for (i = 0; i < count; i++) {
        pxpt->u.pt[i].residency = 0;
        pxpt->u.pt[i].count = 0;

        for (j = 0; j < count; j++)
            *(pxpt->u.trans_pt + i * count + j) = 0;
    }

    pxpt->prev_state_wall = NOW();
    pxpt->prev_idle_wall = get_cpu_idle_time(cpuid);

    spin_unlock(cpufreq_statistic_lock);
}


/*********************************************************************
 *                   FREQUENCY TABLE HELPERS                         *
 *********************************************************************/

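/*
 * Derive the cpuinfo limits of @policy from @table: the minimum,
 * maximum and second-highest valid frequencies.  When no valid
 * frequency below the maximum exists, second_max_freq falls back to
 * max_freq.  Returns -EINVAL if the table has no valid entry at all.
 */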
int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
                                    struct cpufreq_frequency_table *table)
{
    unsigned int min_freq = ~0;
    unsigned int max_freq = 0;
    unsigned int second_max_freq = 0;
    unsigned int i;

    for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
        unsigned int freq = table[i].frequency;
        if (freq == CPUFREQ_ENTRY_INVALID)
            continue;
        if (freq < min_freq)
            min_freq = freq;
        if (freq > max_freq)
            max_freq = freq;
    }
    for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
        unsigned int freq = table[i].frequency;
        if (freq == CPUFREQ_ENTRY_INVALID || freq == max_freq)
            continue;
        if (freq > second_max_freq)
            second_max_freq = freq;
    }
    if (second_max_freq == 0)
        second_max_freq = max_freq;
    if (cpufreq_verbose)
        printk("max_freq: %u second_max_freq: %u\n",
               max_freq, second_max_freq);

    policy->min = policy->cpuinfo.min_freq = min_freq;
    policy->max = policy->cpuinfo.max_freq = max_freq;
    policy->cpuinfo.second_max_freq = second_max_freq;

    if (policy->min == ~0)
        return -EINVAL;
    else
        return 0;
}

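/*
 * Clamp @policy's min/max to the cpuinfo limits and make sure at
 * least one valid table frequency lies within the resulting range;
 * if none does, raise policy->max to the next larger valid frequency
 * (subject to a final clamp).  Returns -EINVAL if the policy's CPU
 * is offline.
 */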
int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
                                   struct cpufreq_frequency_table *table)
{
    unsigned int next_larger = ~0;
    unsigned int i;
    unsigned int count = 0;

    if (!cpu_online(policy->cpu))
        return -EINVAL;

    cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
                                 policy->cpuinfo.max_freq);

    for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
        unsigned int freq = table[i].frequency;
        if (freq == CPUFREQ_ENTRY_INVALID)
            continue;
        if ((freq >= policy->min) && (freq <= policy->max))
            count++;
        else if ((next_larger > freq) && (freq > policy->max))
            next_larger = freq;
    }

    if (!count)
        policy->max = next_larger;

    cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
                                 policy->cpuinfo.max_freq);

    return 0;
}

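/*
 * Pick the table index matching @target_freq under @relation:
 * CPUFREQ_RELATION_H selects the highest frequency at or below the
 * target, CPUFREQ_RELATION_L the lowest frequency at or above it.
 * If no entry satisfies the relation, the closest entry on the other
 * side of the target is used instead.  Returns -EINVAL if the CPU is
 * offline or no valid in-range entry exists.
 */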
int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
                                   struct cpufreq_frequency_table *table,
                                   unsigned int target_freq,
                                   unsigned int relation,
                                   unsigned int *index)
{
    struct cpufreq_frequency_table optimal = {
        .index = ~0,
        .frequency = 0,
    };
    struct cpufreq_frequency_table suboptimal = {
        .index = ~0,
        .frequency = 0,
    };
    unsigned int i;

    switch (relation) {
    case CPUFREQ_RELATION_H:
        suboptimal.frequency = ~0;
        break;
    case CPUFREQ_RELATION_L:
        optimal.frequency = ~0;
        break;
    }

    if (!cpu_online(policy->cpu))
        return -EINVAL;

    for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
        unsigned int freq = table[i].frequency;
        if (freq == CPUFREQ_ENTRY_INVALID)
            continue;
        if ((freq < policy->min) || (freq > policy->max))
            continue;
        switch (relation) {
        case CPUFREQ_RELATION_H:
            if (freq <= target_freq) {
                if (freq >= optimal.frequency) {
                    optimal.frequency = freq;
                    optimal.index = i;
                }
            } else {
                if (freq <= suboptimal.frequency) {
                    suboptimal.frequency = freq;
                    suboptimal.index = i;
                }
            }
            break;
        case CPUFREQ_RELATION_L:
            if (freq >= target_freq) {
                if (freq <= optimal.frequency) {
                    optimal.frequency = freq;
                    optimal.index = i;
                }
            } else {
                if (freq >= suboptimal.frequency) {
                    suboptimal.frequency = freq;
                    suboptimal.index = i;
                }
            }
            break;
        }
    }
    if (optimal.index > i) {
        if (suboptimal.index > i)
            return -EINVAL;
        *index = suboptimal.index;
    } else
        *index = optimal.index;

    return 0;
}


/*********************************************************************
 *                          GOVERNORS                                *
 *********************************************************************/

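/*
 * Ask the cpufreq driver to switch @policy's CPU to @target_freq,
 * interpreted according to @relation.  On success, emit a trace
 * record carrying the previous and new frequencies (each scaled
 * down by a factor of 1000).
 */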
int __cpufreq_driver_target(struct cpufreq_policy *policy,
                            unsigned int target_freq,
                            unsigned int relation)
{
    int retval = -EINVAL;

    if (cpu_online(policy->cpu) && cpufreq_driver->target)
    {
        unsigned int prev_freq = policy->cur;

        retval = cpufreq_driver->target(policy, target_freq, relation);
        if ( retval == 0 )
            TRACE_2D(TRC_PM_FREQ_CHANGE, prev_freq/1000, policy->cur/1000);
    }

    return retval;
}

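/*
 * Return the average frequency of @cpu as reported by the driver's
 * getavg hook, falling back to the currently set frequency when the
 * hook is absent or fails.  Returns 0 for an offline CPU or one
 * without a policy.
 */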
int cpufreq_driver_getavg(unsigned int cpu, unsigned int flag)
{
    struct cpufreq_policy *policy;
    int freq_avg;

    if (!cpu_online(cpu) || !(policy = per_cpu(cpufreq_cpu_policy, cpu)))
        return 0;

    if (cpufreq_driver->getavg)
    {
        freq_avg = cpufreq_driver->getavg(cpu, flag);
        if (freq_avg > 0)
            return freq_avg;
    }

    return policy->cur;
}

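/*
 * Enable or disable turbo mode on @cpuid.  The new state is applied
 * through the driver's update hook when one is present, and rolled
 * back if that hook fails.
 */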
int cpufreq_update_turbo(int cpuid, int new_state)
{
    struct cpufreq_policy *policy;
    int curr_state;
    int ret = 0;

    if (new_state != CPUFREQ_TURBO_ENABLED &&
        new_state != CPUFREQ_TURBO_DISABLED)
        return -EINVAL;

    policy = per_cpu(cpufreq_cpu_policy, cpuid);
    if (!policy)
        return -EACCES;

    if (policy->turbo == CPUFREQ_TURBO_UNSUPPORTED)
        return -EOPNOTSUPP;

    curr_state = policy->turbo;
    if (curr_state == new_state)
        return 0;

    policy->turbo = new_state;
    if (cpufreq_driver->update)
    {
        ret = cpufreq_driver->update(cpuid, policy);
        if (ret)
            policy->turbo = curr_state;
    }

    return ret;
}

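/*
 * Return 1 if turbo mode is currently enabled for @cpuid, 0 if it is
 * disabled, unsupported, or no policy exists for that CPU.
 */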
int cpufreq_get_turbo_status(int cpuid)
{
    struct cpufreq_policy *policy;

    policy = per_cpu(cpufreq_cpu_policy, cpuid);
    return policy && policy->turbo == CPUFREQ_TURBO_ENABLED;
}

/*********************************************************************
 *                           POLICY                                  *
 *********************************************************************/

/*
 * Verify @policy against driver constraints and, if acceptable,
 * apply its limits, and possibly a new governor, to the live policy.
 *
 * data   : current policy.
 * policy : policy to be set.
 */
int __cpufreq_set_policy(struct cpufreq_policy *data,
                         struct cpufreq_policy *policy)
{
    int ret = 0;

    memcpy(&policy->cpuinfo, &data->cpuinfo, sizeof(struct cpufreq_cpuinfo));

    if (policy->min > data->min && policy->min > policy->max)
        return -EINVAL;

    /* verify the cpu speed can be set within this limit */
    ret = cpufreq_driver->verify(policy);
    if (ret)
        return ret;

    data->min = policy->min;
    data->max = policy->max;
    data->limits = policy->limits;
    if (cpufreq_driver->setpolicy)
        return cpufreq_driver->setpolicy(data);

    if (policy->governor != data->governor) {
        /* save old, working values */
        struct cpufreq_governor *old_gov = data->governor;

        /* end old governor */
        if (data->governor)
            __cpufreq_governor(data, CPUFREQ_GOV_STOP);

        /* start new governor */
        data->governor = policy->governor;
        if (__cpufreq_governor(data, CPUFREQ_GOV_START)) {
            printk(KERN_WARNING "Failed to change to %s governor\n",
                   data->governor->name);

            /* new governor failed, so re-start old one */
            data->governor = old_gov;
            if (old_gov) {
                __cpufreq_governor(data, CPUFREQ_GOV_START);
                printk(KERN_WARNING "Still staying with %s governor\n",
                       data->governor->name);
            }
            return -EINVAL;
        }
        /* might be a policy change, too, so fall through */
    }

    return __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
}