/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_U64_STATS_SYNC_H
#define _LINUX_U64_STATS_SYNC_H

/*
 * Protect against 64-bit values tearing on 32-bit architectures. This is
 * typically used for statistics read/update in different subsystems.
 *
 * Key points:
 *
 * - Use a seqcount on 32-bit
 * - The whole thing is a no-op on 64-bit architectures.
 *
 * Usage constraints:
 *
 * 1) Write side must ensure mutual exclusion, or one seqcount update could
 *    be lost, thus blocking readers forever.
 *
 * 2) Write side must disable preemption, or a seqcount reader can preempt the
 *    writer and also spin forever.
 *
 * 3) Write side must use the _irqsave() variant if other writers, or a reader,
 *    can be invoked from an IRQ context. On 64-bit systems this variant does
 *    not disable interrupts.
 *
 * 4) If a reader fetches several counters, there is no guarantee the whole
 *    values are consistent w.r.t. each other (remember the second key point
 *    above: seqcounts are not used on 64-bit architectures).
 *
 * 5) Readers are allowed to sleep or be preempted/interrupted: they perform
 *    pure reads.
 *
 * Usage:
 *
 * A stats producer (writer) should use the following template, provided it
 * has already gained exclusive access to the counters (a lock is already
 * taken, or per cpu data is used in a non-preemptible context):
 *
 *   spin_lock_bh(...) or other synchronization to get exclusive access
 *   ...
 *   u64_stats_update_begin(&stats->syncp);
 *   u64_stats_add(&stats->bytes64, len); // non atomic operation
 *   u64_stats_inc(&stats->packets64);    // non atomic operation
 *   u64_stats_update_end(&stats->syncp);
 *
 * A consumer (reader) should use the following template to get a consistent
 * snapshot of each variable (but no guarantee across several of them):
 *
 *   u64 tbytes, tpackets;
 *   unsigned int start;
 *
 *   do {
 *           start = u64_stats_fetch_begin(&stats->syncp);
 *           tbytes = u64_stats_read(&stats->bytes64);     // non atomic operation
 *           tpackets = u64_stats_read(&stats->packets64); // non atomic operation
 *   } while (u64_stats_fetch_retry(&stats->syncp, start));
 *
 * Example of use in drivers/net/loopback.c, using per_cpu containers,
 * in BH disabled context; a fuller per-CPU sketch follows this comment.
 */
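
/*
 * A fuller per-CPU sketch, modeled on drivers/net/loopback.c (the variable
 * names below are illustrative, not a definitive reference): each CPU owns
 * its own counters, so the writer needs no lock, only a non-preemptible
 * (here BH disabled) context, and the reader sums one consistent snapshot
 * per CPU:
 *
 *   struct pcpu_lstats {
 *           u64_stats_t packets;
 *           u64_stats_t bytes;
 *           struct u64_stats_sync syncp;
 *   };
 *
 *   // Writer, in BH disabled context, on its own per-CPU instance:
 *   struct pcpu_lstats *lb_stats = this_cpu_ptr(dev->lstats);
 *
 *   u64_stats_update_begin(&lb_stats->syncp);
 *   u64_stats_add(&lb_stats->bytes, len);
 *   u64_stats_inc(&lb_stats->packets);
 *   u64_stats_update_end(&lb_stats->syncp);
 *
 *   // Reader, aggregating one per-CPU snapshot at a time:
 *   u64 total_bytes = 0, total_packets = 0;
 *   int cpu;
 *
 *   for_each_possible_cpu(cpu) {
 *           const struct pcpu_lstats *s = per_cpu_ptr(dev->lstats, cpu);
 *           u64 tbytes, tpackets;
 *           unsigned int start;
 *
 *           do {
 *                   start = u64_stats_fetch_begin(&s->syncp);
 *                   tbytes = u64_stats_read(&s->bytes);
 *                   tpackets = u64_stats_read(&s->packets);
 *           } while (u64_stats_fetch_retry(&s->syncp, start));
 *           total_bytes += tbytes;
 *           total_packets += tpackets;
 *   }
 */
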
#include <linux/seqlock.h>

struct u64_stats_sync {
#if BITS_PER_LONG == 32
	seqcount_t seq;
#endif
};
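
/*
 * A minimal sketch of embedding the sync object next to the counters it
 * protects (hypothetical structure; one syncp typically covers all the
 * u64_stats_t fields beside it, and must be initialized before first use):
 *
 *   struct my_stats {
 *           u64_stats_t packets64;
 *           u64_stats_t bytes64;
 *           struct u64_stats_sync syncp;
 *   };
 *
 *   u64_stats_init(&stats->syncp);
 */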

#if BITS_PER_LONG == 64
#include <asm/local64.h>

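/*
 * On 64-bit architectures the seqcount machinery is not needed (see the
 * key points above): a properly aligned u64 load or store cannot tear, so
 * the counters are plain local64_t values and every sync helper below is
 * a no-op.
 */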
typedef struct {
	local64_t v;
} u64_stats_t;

static inline u64 u64_stats_read(const u64_stats_t *p)
{
	return local64_read(&p->v);
}

static inline void u64_stats_set(u64_stats_t *p, u64 val)
{
	local64_set(&p->v, val);
}

static inline void u64_stats_add(u64_stats_t *p, unsigned long val)
{
	local64_add(val, &p->v);
}

static inline void u64_stats_inc(u64_stats_t *p)
{
	local64_inc(&p->v);
}

static inline void u64_stats_init(struct u64_stats_sync *syncp) { }
static inline void __u64_stats_update_begin(struct u64_stats_sync *syncp) { }
static inline void __u64_stats_update_end(struct u64_stats_sync *syncp) { }
static inline unsigned long __u64_stats_irqsave(void) { return 0; }
static inline void __u64_stats_irqrestore(unsigned long flags) { }
static inline unsigned int __u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
{
	return 0;
}
static inline bool __u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
					   unsigned int start)
{
	return false;
}

#else /* 64 bit */

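/*
 * On 32-bit architectures a u64 access can tear, so writers bracket their
 * updates with a seqcount and readers retry the fetch until they observe
 * an unchanged sequence (see <linux/seqlock.h>).
 */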
typedef struct {
	u64 v;
} u64_stats_t;

static inline u64 u64_stats_read(const u64_stats_t *p)
{
	return p->v;
}

static inline void u64_stats_set(u64_stats_t *p, u64 val)
{
	p->v = val;
}

static inline void u64_stats_add(u64_stats_t *p, unsigned long val)
{
	p->v += val;
}

static inline void u64_stats_inc(u64_stats_t *p)
{
	p->v++;
}

static inline void u64_stats_init(struct u64_stats_sync *syncp)
{
	seqcount_init(&syncp->seq);
}

static inline void __u64_stats_update_begin(struct u64_stats_sync *syncp)
{
	preempt_disable_nested();
	write_seqcount_begin(&syncp->seq);
}

static inline void __u64_stats_update_end(struct u64_stats_sync *syncp)
{
	write_seqcount_end(&syncp->seq);
	preempt_enable_nested();
}

static inline unsigned long __u64_stats_irqsave(void)
{
	unsigned long flags;

	local_irq_save(flags);
	return flags;
}

static inline void __u64_stats_irqrestore(unsigned long flags)
{
	local_irq_restore(flags);
}

static inline unsigned int __u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
{
	return read_seqcount_begin(&syncp->seq);
}

static inline bool __u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
					   unsigned int start)
{
	return read_seqcount_retry(&syncp->seq, start);
}
#endif /* !64 bit */

static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
{
	__u64_stats_update_begin(syncp);
}

static inline void u64_stats_update_end(struct u64_stats_sync *syncp)
{
	__u64_stats_update_end(syncp);
}

static inline unsigned long u64_stats_update_begin_irqsave(struct u64_stats_sync *syncp)
{
	unsigned long flags = __u64_stats_irqsave();

	__u64_stats_update_begin(syncp);
	return flags;
}

static inline void u64_stats_update_end_irqrestore(struct u64_stats_sync *syncp,
						   unsigned long flags)
{
	__u64_stats_update_end(syncp);
	__u64_stats_irqrestore(flags);
}

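/*
 * A minimal sketch of the _irqsave() variant from usage constraint #3
 * (assuming the hypothetical stats structure from the usage template):
 * use it when another writer, or a reader, can run from IRQ context.
 * On 64-bit it degenerates to plain begin/end and does not disable
 * interrupts:
 *
 *   unsigned long flags;
 *
 *   flags = u64_stats_update_begin_irqsave(&stats->syncp);
 *   u64_stats_inc(&stats->packets64);
 *   u64_stats_update_end_irqrestore(&stats->syncp, flags);
 */
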
static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
{
	return __u64_stats_fetch_begin(syncp);
}

static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
					 unsigned int start)
{
	return __u64_stats_fetch_retry(syncp, start);
}

#endif /* _LINUX_U64_STATS_SYNC_H */