// SPDX-License-Identifier: GPL-2.0
/*
 * Fast batching percpu counters.
 */

#include <linux/percpu_counter.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/debugobjects.h>

#ifdef CONFIG_HOTPLUG_CPU
static LIST_HEAD(percpu_counters);
static DEFINE_SPINLOCK(percpu_counters_lock);
#endif

#ifdef CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER

static const struct debug_obj_descr percpu_counter_debug_descr;

static bool percpu_counter_fixup_free(void *addr, enum debug_obj_state state)
{
	struct percpu_counter *fbc = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		percpu_counter_destroy(fbc);
		debug_object_free(fbc, &percpu_counter_debug_descr);
		return true;
	default:
		return false;
	}
}

static const struct debug_obj_descr percpu_counter_debug_descr = {
	.name		= "percpu_counter",
	.fixup_free	= percpu_counter_fixup_free,
};

static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
{
	debug_object_init(fbc, &percpu_counter_debug_descr);
	debug_object_activate(fbc, &percpu_counter_debug_descr);
}

static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
{
	debug_object_deactivate(fbc, &percpu_counter_debug_descr);
	debug_object_free(fbc, &percpu_counter_debug_descr);
}

#else	/* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */
static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
{ }
static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
{ }
#endif	/* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */

void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
	int cpu;
	unsigned long flags;

	raw_spin_lock_irqsave(&fbc->lock, flags);
	for_each_possible_cpu(cpu) {
		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
		*pcount = 0;
	}
	fbc->count = amount;
	raw_spin_unlock_irqrestore(&fbc->lock, flags);
}
EXPORT_SYMBOL(percpu_counter_set);
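
/*
 * Usage sketch (assuming a caller-owned counter and no concurrent
 * updaters): percpu_counter_set() zeroes every possible CPU's slot under
 * fbc->lock, so immediately afterwards the cheap read and the precise sum
 * agree:
 *
 *	percpu_counter_set(&fbc, 0);
 *	WARN_ON(percpu_counter_read(&fbc) != 0);
 *	WARN_ON(percpu_counter_sum(&fbc) != 0);
 */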

/*
 * local_irq_save() is needed to make the function irq safe:
 * - The slow path would be ok as protected by an irq-safe spinlock.
 * - this_cpu_add would be ok as it is irq-safe by definition.
 * But:
 * The decision between the slow path and the fast path and the actual
 * update must be atomic as well. Otherwise a call in process context could
 * check the current values and decide that the fast path can be used. If
 * an interrupt now occurs before the this_cpu_add(), and the interrupt
 * updates this_cpu(*fbc->counters), then the this_cpu_add() that is
 * executed after the interrupt has completed can produce values larger
 * than "batch" or even overflow.
 */
void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
{
	s64 count;
	unsigned long flags;

	local_irq_save(flags);
	count = __this_cpu_read(*fbc->counters) + amount;
	if (abs(count) >= batch) {
		raw_spin_lock(&fbc->lock);
		fbc->count += count;
		__this_cpu_sub(*fbc->counters, count - amount);
		raw_spin_unlock(&fbc->lock);
	} else {
		this_cpu_add(*fbc->counters, amount);
	}
	local_irq_restore(flags);
}
EXPORT_SYMBOL(percpu_counter_add_batch);
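
/*
 * Usage sketch: most callers go through percpu_counter_add(), which passes
 * the global percpu_counter_batch as the batch. A per-cpu delta is folded
 * into fbc->count only once its absolute value reaches the batch, so the
 * cheap percpu_counter_read() may be off by roughly
 * batch * num_online_cpus():
 *
 *	percpu_counter_add(&fbc, 1);		// fast path, no lock taken
 *	approx = percpu_counter_read(&fbc);	// cheap, possibly stale
 *	exact  = percpu_counter_sum(&fbc);	// precise, takes fbc->lock
 */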

/*
 * For a percpu_counter with a big batch, the deviation of its count can be
 * large, and there are cases where the deviation must be reduced, e.g. when
 * the counter's batch is decreased at runtime to get better accuracy. That
 * can be achieved by running this sync function on each CPU.
 */
void percpu_counter_sync(struct percpu_counter *fbc)
{
	unsigned long flags;
	s64 count;

	raw_spin_lock_irqsave(&fbc->lock, flags);
	count = __this_cpu_read(*fbc->counters);
	fbc->count += count;
	__this_cpu_sub(*fbc->counters, count);
	raw_spin_unlock_irqrestore(&fbc->lock, flags);
}
EXPORT_SYMBOL(percpu_counter_sync);
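
/*
 * A minimal sketch of draining all CPUs (my_sync_one is a hypothetical
 * caller-side wrapper, not part of this file): percpu_counter_sync() only
 * folds in the local CPU's delta, so a caller that wants every delta folded
 * into fbc->count can run it on each CPU:
 *
 *	static void my_sync_one(void *info)
 *	{
 *		percpu_counter_sync(info);
 *	}
 *	...
 *	on_each_cpu(my_sync_one, &fbc, 1);	// wait == 1: block until done
 */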

static s64 __percpu_counter_sum_mask(struct percpu_counter *fbc,
			      const struct cpumask *cpu_mask)
{
	s64 ret;
	int cpu;
	unsigned long flags;

	raw_spin_lock_irqsave(&fbc->lock, flags);
	ret = fbc->count;
	for_each_cpu(cpu, cpu_mask) {
		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
		ret += *pcount;
	}
	raw_spin_unlock_irqrestore(&fbc->lock, flags);
	return ret;
}

/*
 * Add up all the per-cpu counts, return the result.  This is a more accurate
 * but much slower version of percpu_counter_read_positive().
 */
s64 __percpu_counter_sum(struct percpu_counter *fbc)
{
	return __percpu_counter_sum_mask(fbc, cpu_online_mask);
}
EXPORT_SYMBOL(__percpu_counter_sum);

/*
 * This is a slower version of percpu_counter_sum(), as it traverses all
 * possible CPUs. Use it only where accurate data is needed in the presence
 * of CPUs getting offlined.
 */
s64 percpu_counter_sum_all(struct percpu_counter *fbc)
{
	return __percpu_counter_sum_mask(fbc, cpu_possible_mask);
}
EXPORT_SYMBOL(percpu_counter_sum_all);
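
/*
 * Illustrative note: percpu_counter_sum() walks only cpu_online_mask, so a
 * delta still parked on a CPU that is going down (not yet folded back by
 * the CPUHP_PERCPU_CNT_DEAD callback below) would be missed; summing over
 * cpu_possible_mask closes that window at the cost of a longer walk.
 */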

int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
			  struct lock_class_key *key)
{
	unsigned long flags __maybe_unused;

	raw_spin_lock_init(&fbc->lock);
	lockdep_set_class(&fbc->lock, key);
	fbc->count = amount;
	fbc->counters = alloc_percpu_gfp(s32, gfp);
	if (!fbc->counters)
		return -ENOMEM;

	debug_percpu_counter_activate(fbc);

#ifdef CONFIG_HOTPLUG_CPU
	INIT_LIST_HEAD(&fbc->list);
	spin_lock_irqsave(&percpu_counters_lock, flags);
	list_add(&fbc->list, &percpu_counters);
	spin_unlock_irqrestore(&percpu_counters_lock, flags);
#endif
	return 0;
}
EXPORT_SYMBOL(__percpu_counter_init);
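
/*
 * A minimal lifecycle sketch (caller side, assuming process context where
 * GFP_KERNEL is allowed): callers use the percpu_counter_init() wrapper,
 * which supplies the lock_class_key for lockdep:
 *
 *	struct percpu_counter events;
 *
 *	if (percpu_counter_init(&events, 0, GFP_KERNEL))
 *		return -ENOMEM;
 *	percpu_counter_inc(&events);
 *	...
 *	percpu_counter_destroy(&events);
 */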

void percpu_counter_destroy(struct percpu_counter *fbc)
{
	unsigned long flags __maybe_unused;

	if (!fbc->counters)
		return;

	debug_percpu_counter_deactivate(fbc);

#ifdef CONFIG_HOTPLUG_CPU
	spin_lock_irqsave(&percpu_counters_lock, flags);
	list_del(&fbc->list);
	spin_unlock_irqrestore(&percpu_counters_lock, flags);
#endif
	free_percpu(fbc->counters);
	fbc->counters = NULL;
}
EXPORT_SYMBOL(percpu_counter_destroy);

int percpu_counter_batch __read_mostly = 32;
EXPORT_SYMBOL(percpu_counter_batch);

static int compute_batch_value(unsigned int cpu)
{
	int nr = num_online_cpus();

	percpu_counter_batch = max(32, nr*2);
	return 0;
}
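
/*
 * Worked example: on a 4-CPU machine, max(32, 4*2) leaves the batch at the
 * floor of 32; nr*2 only takes over past 16 online CPUs (e.g. 64 CPUs give
 * a batch of 128), so contention on fbc->lock stays roughly constant as
 * the machine grows.
 */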

static int percpu_counter_cpu_dead(unsigned int cpu)
{
#ifdef CONFIG_HOTPLUG_CPU
	struct percpu_counter *fbc;

	compute_batch_value(cpu);

	spin_lock_irq(&percpu_counters_lock);
	list_for_each_entry(fbc, &percpu_counters, list) {
		s32 *pcount;

		raw_spin_lock(&fbc->lock);
		pcount = per_cpu_ptr(fbc->counters, cpu);
		fbc->count += *pcount;
		*pcount = 0;
		raw_spin_unlock(&fbc->lock);
	}
	spin_unlock_irq(&percpu_counters_lock);
#endif
	return 0;
}
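
/*
 * Illustrative note: once this callback has run for a dead CPU, that CPU's
 * slot is zero and its delta lives in fbc->count, which is how sums over
 * cpu_online_mask stay accurate across ordinary hotplug.
 */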

/*
 * Compare counter against given value.
 * Return 1 if greater, 0 if equal and -1 if less.
 */
int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
{
	s64	count;

	count = percpu_counter_read(fbc);
	/* Check to see if rough count will be sufficient for comparison */
	if (abs(count - rhs) > (batch * num_online_cpus())) {
		if (count > rhs)
			return 1;
		else
			return -1;
	}
	/* Need to use precise count */
	count = percpu_counter_sum(fbc);
	if (count > rhs)
		return 1;
	else if (count < rhs)
		return -1;
	else
		return 0;
}
EXPORT_SYMBOL(__percpu_counter_compare);
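
/*
 * Usage sketch (hypothetical caller): the percpu_counter_compare() wrapper
 * passes percpu_counter_batch as the batch, so a limit check only falls
 * back to the expensive percpu_counter_sum() when the rough count is
 * within batch * num_online_cpus() of the boundary:
 *
 *	if (percpu_counter_compare(&used, limit) >= 0)
 *		return -ENOSPC;	// at or over the limit
 */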

static int __init percpu_counter_startup(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "lib/percpu_cnt:online",
				compute_batch_value, NULL);
	WARN_ON(ret < 0);
	ret = cpuhp_setup_state_nocalls(CPUHP_PERCPU_CNT_DEAD,
					"lib/percpu_cnt:dead", NULL,
					percpu_counter_cpu_dead);
	WARN_ON(ret < 0);
	return 0;
}
module_init(percpu_counter_startup);