/*
 * utility.c - misc functions for cpufreq driver and Px statistic
 *
 * Copyright (C) 2001 Russell King
 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 *
 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
 *            Added handling for CPU hotplug
 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
 *            Fix handling for CPU hotplug -- affected CPUs
 * Feb 2008 - Liu Jinsong <jinsong.liu@intel.com>
 *            1. Merge cpufreq.c and freq_table.c of linux 2.6.23
 *               and porting to Xen hypervisor
 *            2. Some Px statistic interface functions
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <xen/errno.h>
#include <xen/cpumask.h>
#include <xen/types.h>
#include <xen/spinlock.h>
#include <xen/percpu.h>
#include <xen/sched.h>
#include <xen/timer.h>
#include <xen/trace.h>
#include <acpi/cpufreq/cpufreq.h>
#include <public/sysctl.h>

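/*
 * Registered low-level cpufreq driver, per-CPU ACPI Px information, and
 * the per-CPU policy currently in effect.
 */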
struct cpufreq_driver __read_mostly cpufreq_driver;
struct processor_pminfo *__read_mostly processor_pminfo[NR_CPUS];
DEFINE_PER_CPU_READ_MOSTLY(struct cpufreq_policy *, cpufreq_cpu_policy);

/*********************************************************************
 *                    FREQUENCY TABLE HELPERS                        *
 *********************************************************************/

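/*
 * Scan a driver-provided frequency table and record the lowest, highest
 * and second-highest valid frequencies in the policy and its cpuinfo
 * limits.  Returns -EINVAL if the table contains no valid entry.
 */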
int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
                                    struct cpufreq_frequency_table *table)
{
    unsigned int min_freq = ~0;
    unsigned int max_freq = 0;
    unsigned int second_max_freq = 0;
    unsigned int i;

    for (i=0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
        unsigned int freq = table[i].frequency;
        if (freq == CPUFREQ_ENTRY_INVALID)
            continue;
        if (freq < min_freq)
            min_freq = freq;
        if (freq > max_freq)
            max_freq = freq;
    }
    for (i=0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
        unsigned int freq = table[i].frequency;
        if (freq == CPUFREQ_ENTRY_INVALID || freq == max_freq)
            continue;
        if (freq > second_max_freq)
            second_max_freq = freq;
    }
    if (second_max_freq == 0)
        second_max_freq = max_freq;
    if (cpufreq_verbose)
        printk("max_freq: %u second_max_freq: %u\n",
               max_freq, second_max_freq);

    policy->min = policy->cpuinfo.min_freq = min_freq;
    policy->max = policy->cpuinfo.max_freq = max_freq;
    policy->cpuinfo.perf_freq = max_freq;
    policy->cpuinfo.second_max_freq = second_max_freq;

    if (policy->min == ~0)
        return -EINVAL;
    else
        return 0;
}

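/*
 * Clamp policy->min and policy->max to the cpuinfo limits and make sure
 * at least one valid table entry falls inside the resulting range; if
 * none does, raise policy->max to the next larger valid frequency.
 */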
int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
                                   struct cpufreq_frequency_table *table)
{
    unsigned int next_larger = ~0;
    unsigned int i;
    unsigned int count = 0;

    if (!cpu_online(policy->cpu))
        return -EINVAL;

    cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
                                 policy->cpuinfo.max_freq);

    for (i=0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
        unsigned int freq = table[i].frequency;
        if (freq == CPUFREQ_ENTRY_INVALID)
            continue;
        if ((freq >= policy->min) && (freq <= policy->max))
            count++;
        else if ((next_larger > freq) && (freq > policy->max))
            next_larger = freq;
    }

    if (!count)
        policy->max = next_larger;

    cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
                                 policy->cpuinfo.max_freq);

    return 0;
}

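/*
 * Pick the table index whose frequency best matches target_freq within
 * [policy->min, policy->max]: with CPUFREQ_RELATION_H the highest
 * frequency at or below the target, with CPUFREQ_RELATION_L the lowest
 * frequency at or above it, falling back to the closest entry on the
 * other side of the target.  A typical (illustrative) caller might do:
 *
 *     unsigned int idx;
 *     if (!cpufreq_frequency_table_target(policy, table, freq,
 *                                         CPUFREQ_RELATION_L, &idx))
 *         __cpufreq_driver_target(policy, table[idx].frequency,
 *                                 CPUFREQ_RELATION_L);
 */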
int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
                                   struct cpufreq_frequency_table *table,
                                   unsigned int target_freq,
                                   unsigned int relation,
                                   unsigned int *index)
{
    struct cpufreq_frequency_table optimal = {
        .index = ~0,
        .frequency = 0,
    };
    struct cpufreq_frequency_table suboptimal = {
        .index = ~0,
        .frequency = 0,
    };
    unsigned int i;

    switch (relation) {
    case CPUFREQ_RELATION_H:
        suboptimal.frequency = ~0;
        break;
    case CPUFREQ_RELATION_L:
        optimal.frequency = ~0;
        break;
    }

    if (!cpu_online(policy->cpu))
        return -EINVAL;

    for (i=0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
        unsigned int freq = table[i].frequency;
        if (freq == CPUFREQ_ENTRY_INVALID)
            continue;
        if ((freq < policy->min) || (freq > policy->max))
            continue;
        switch(relation) {
        case CPUFREQ_RELATION_H:
            if (freq <= target_freq) {
                if (freq >= optimal.frequency) {
                    optimal.frequency = freq;
                    optimal.index = i;
                }
            } else {
                if (freq <= suboptimal.frequency) {
                    suboptimal.frequency = freq;
                    suboptimal.index = i;
                }
            }
            break;
        case CPUFREQ_RELATION_L:
            if (freq >= target_freq) {
                if (freq <= optimal.frequency) {
                    optimal.frequency = freq;
                    optimal.index = i;
                }
            } else {
                if (freq >= suboptimal.frequency) {
                    suboptimal.frequency = freq;
                    suboptimal.index = i;
                }
            }
            break;
        }
    }
    if (optimal.index > i) {
        if (suboptimal.index > i)
            return -EINVAL;
        *index = suboptimal.index;
    } else
        *index = optimal.index;

    return 0;
}


/*********************************************************************
 *                          GOVERNORS                                *
 *********************************************************************/

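/*
 * Ask the registered driver to switch policy->cpu to target_freq
 * according to the given relation, emitting a TRC_PM_FREQ_CHANGE trace
 * record on success.  Returns -EINVAL if the CPU is offline or no
 * driver target hook is registered.
 */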
int __cpufreq_driver_target(struct cpufreq_policy *policy,
                            unsigned int target_freq,
                            unsigned int relation)
{
    int retval = -EINVAL;

    if (cpu_online(policy->cpu) && cpufreq_driver.target)
    {
        unsigned int prev_freq = policy->cur;

        retval = alternative_call(cpufreq_driver.target,
                                  policy, target_freq, relation);
        if ( retval == 0 )
            TRACE_TIME(TRC_PM_FREQ_CHANGE, prev_freq / 1000, policy->cur / 1000);
    }

    return retval;
}

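/*
 * Return the measured average frequency of the given CPU as reported by
 * get_measured_perf(), or the currently set frequency (policy->cur) if
 * no measurement is available; 0 if the CPU is offline or has no policy.
 */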
int cpufreq_driver_getavg(unsigned int cpu, unsigned int flag)
{
    struct cpufreq_policy *policy;
    int freq_avg;

    if (!cpu_online(cpu) || !(policy = per_cpu(cpufreq_cpu_policy, cpu)))
        return 0;

    freq_avg = get_measured_perf(cpu, flag);
    if ( freq_avg > 0 )
        return freq_avg;

    return policy->cur;
}

/*********************************************************************
 *                            POLICY                                 *
 *********************************************************************/

/*
 * Validate the requested limits against the driver, install them in the
 * active policy, and switch or re-notify the governor as needed.
 *
 * data   : the policy currently in effect for this CPU.
 * policy : the new policy to be applied.
 */
int __cpufreq_set_policy(struct cpufreq_policy *data,
                         struct cpufreq_policy *policy)
{
    int ret = 0;

    memcpy(&policy->cpuinfo, &data->cpuinfo, sizeof(struct cpufreq_cpuinfo));

    if (policy->min > data->min && policy->min > policy->max)
        return -EINVAL;

    /* verify the cpu speed can be set within this limit */
    ret = alternative_call(cpufreq_driver.verify, policy);
    if (ret)
        return ret;

    data->min = policy->min;
    data->max = policy->max;
    data->limits = policy->limits;
    if (cpufreq_driver.setpolicy)
        return alternative_call(cpufreq_driver.setpolicy, data);

    if (policy->governor != data->governor) {
        /* save old, working values */
        struct cpufreq_governor *old_gov = data->governor;

        /* end old governor */
        if (data->governor)
            __cpufreq_governor(data, CPUFREQ_GOV_STOP);

        /* start new governor */
        data->governor = policy->governor;
        if (__cpufreq_governor(data, CPUFREQ_GOV_START)) {
            printk(KERN_WARNING "Failed to change to %s governor\n",
                   data->governor->name);

            /* new governor failed, so re-start old one */
            data->governor = old_gov;
            if (old_gov) {
                __cpufreq_governor(data, CPUFREQ_GOV_START);
                printk(KERN_WARNING "Staying with %s governor\n",
                       data->governor->name);
            }
            return -EINVAL;
        }
        /* might be a policy change, too, so fall through */
    }

    return __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
}