/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/backing-dev.h
 *
 * low-level device information and state which is propagated up through
 * to high-level code.
 */

#ifndef _LINUX_BACKING_DEV_H
#define _LINUX_BACKING_DEV_H

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/writeback.h>
#include <linux/backing-dev-defs.h>
#include <linux/slab.h>

static inline struct backing_dev_info *bdi_get(struct backing_dev_info *bdi)
{
	kref_get(&bdi->refcnt);
	return bdi;
}

struct backing_dev_info *bdi_get_by_id(u64 id);
void bdi_put(struct backing_dev_info *bdi);

__printf(2, 3)
int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...);
__printf(2, 0)
int bdi_register_va(struct backing_dev_info *bdi, const char *fmt,
		    va_list args);
void bdi_set_owner(struct backing_dev_info *bdi, struct device *owner);
void bdi_unregister(struct backing_dev_info *bdi);

struct backing_dev_info *bdi_alloc(int node_id);
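
/*
 * A minimal lifecycle sketch (illustrative only, not lifted from an
 * in-tree driver): allocate a bdi, set its capabilities (see the
 * BDI_CAP_* flags below), register it under a device name, and drop
 * the allocation reference on teardown. "myblk" and @instance are
 * hypothetical.
 *
 *	struct backing_dev_info *bdi;
 *	int err;
 *
 *	bdi = bdi_alloc(NUMA_NO_NODE);
 *	if (!bdi)
 *		return -ENOMEM;
 *	bdi->capabilities = BDI_CAP_WRITEBACK | BDI_CAP_WRITEBACK_ACCT;
 *	err = bdi_register(bdi, "myblk%d", instance);
 *	if (err) {
 *		bdi_put(bdi);
 *		return err;
 *	}
 *	...
 *	bdi_unregister(bdi);
 *	bdi_put(bdi);
 */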

void wb_start_background_writeback(struct bdi_writeback *wb);
void wb_workfn(struct work_struct *work);
void wb_wakeup_delayed(struct bdi_writeback *wb);

void wb_wait_for_completion(struct wb_completion *done);

extern spinlock_t bdi_lock;
extern struct list_head bdi_list;

extern struct workqueue_struct *bdi_wq;
extern struct workqueue_struct *bdi_async_bio_wq;

static inline bool wb_has_dirty_io(struct bdi_writeback *wb)
{
	return test_bit(WB_has_dirty_io, &wb->state);
}

static inline bool bdi_has_dirty_io(struct backing_dev_info *bdi)
{
	/*
	 * @bdi->tot_write_bandwidth is guaranteed to be > 0 if there are
	 * any dirty wbs. See wb_update_write_bandwidth().
	 */
	return atomic_long_read(&bdi->tot_write_bandwidth);
}

static inline void wb_stat_mod(struct bdi_writeback *wb,
			       enum wb_stat_item item, s64 amount)
{
	percpu_counter_add_batch(&wb->stat[item], amount, WB_STAT_BATCH);
}

static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	wb_stat_mod(wb, item, 1);
}

static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	wb_stat_mod(wb, item, -1);
}

static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	return percpu_counter_read_positive(&wb->stat[item]);
}

static inline s64 wb_stat_sum(struct bdi_writeback *wb, enum wb_stat_item item)
{
	return percpu_counter_sum_positive(&wb->stat[item]);
}

extern void wb_writeout_inc(struct bdi_writeback *wb);

/*
 * maximal error of a stat counter.
 */
static inline unsigned long wb_stat_error(void)
{
#ifdef CONFIG_SMP
	return nr_cpu_ids * WB_STAT_BATCH;
#else
	return 1;
#endif
}
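
/*
 * Putting the stat helpers together (an illustrative sketch, not from
 * in-tree code): wb_stat() is cheap but may be off by up to
 * wb_stat_error(), so a caller comparing against a threshold typically
 * widens the threshold by that margin and only falls back to the exact
 * but expensive wb_stat_sum() when precision matters.
 *
 *	if (wb_stat(wb, WB_RECLAIMABLE) > limit - wb_stat_error())
 *		exact = wb_stat_sum(wb, WB_RECLAIMABLE);
 */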

/*
 * A bdi's min/max dirty ratio is a percentage scaled by BDI_RATIO_SCALE,
 * i.e. expressed as parts per 1000000 for finer granularity.
 */
#define BDI_RATIO_SCALE 10000

u64 bdi_get_min_bytes(struct backing_dev_info *bdi);
u64 bdi_get_max_bytes(struct backing_dev_info *bdi);
int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio);
int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);
int bdi_set_min_ratio_no_scale(struct backing_dev_info *bdi, unsigned int min_ratio);
int bdi_set_max_ratio_no_scale(struct backing_dev_info *bdi, unsigned int max_ratio);
int bdi_set_min_bytes(struct backing_dev_info *bdi, u64 min_bytes);
int bdi_set_max_bytes(struct backing_dev_info *bdi, u64 max_bytes);
int bdi_set_strict_limit(struct backing_dev_info *bdi, unsigned int strict_limit);
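
/*
 * Illustrative tuning sketch (not from in-tree code): cap a slow device
 * at 5% of the global dirty limit via the percentage interface. Each
 * setter returns 0 on success or a negative errno.
 *
 *	err = bdi_set_max_ratio(bdi, 5);
 *
 * or, expressed in bytes rather than as a percentage:
 *
 *	err = bdi_set_max_bytes(bdi, 64ULL << 20);
 */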

/*
 * Flags in backing_dev_info::capability
 *
 * BDI_CAP_WRITEBACK:		Supports dirty page writeback, and dirty pages
 *				should contribute to accounting
 * BDI_CAP_WRITEBACK_ACCT:	Automatically account writeback pages
 * BDI_CAP_STRICTLIMIT:		Keep number of dirty pages below bdi threshold
 */
#define BDI_CAP_WRITEBACK	(1 << 0)
#define BDI_CAP_WRITEBACK_ACCT	(1 << 1)
#define BDI_CAP_STRICTLIMIT	(1 << 2)

extern struct backing_dev_info noop_backing_dev_info;

int bdi_init(struct backing_dev_info *bdi);

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @wb: bdi_writeback of interest
 *
 * Determine whether there is writeback waiting to be handled against a
 * bdi_writeback.
 */
static inline bool writeback_in_progress(struct bdi_writeback *wb)
{
	return test_bit(WB_writeback_running, &wb->state);
}

struct backing_dev_info *inode_to_bdi(struct inode *inode);

static inline bool mapping_can_writeback(struct address_space *mapping)
{
	return inode_to_bdi(mapping->host)->capabilities & BDI_CAP_WRITEBACK;
}

#ifdef CONFIG_CGROUP_WRITEBACK

struct bdi_writeback *wb_get_lookup(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css);
struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css,
				    gfp_t gfp);
void wb_memcg_offline(struct mem_cgroup *memcg);
void wb_blkcg_offline(struct cgroup_subsys_state *css);

/**
 * inode_cgwb_enabled - test whether cgroup writeback is enabled on an inode
 * @inode: inode of interest
 *
 * Cgroup writeback requires support from the filesystem. Also, both memcg and
 * iocg have to be on the default hierarchy. Test whether all conditions are
 * met.
 *
 * Note that the test result may change dynamically on the same inode
 * depending on how memcg and iocg are configured.
 */
static inline bool inode_cgwb_enabled(struct inode *inode)
{
	struct backing_dev_info *bdi = inode_to_bdi(inode);

	return cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
		cgroup_subsys_on_dfl(io_cgrp_subsys) &&
		(bdi->capabilities & BDI_CAP_WRITEBACK) &&
		(inode->i_sb->s_iflags & SB_I_CGROUPWB);
}
/**
 * wb_find_current - find wb for %current on a bdi
 * @bdi: bdi of interest
 *
 * Find the wb of @bdi which matches both the memcg and blkcg of %current.
 * Must be called under rcu_read_lock() which protects the returned wb.
 * Returns NULL if not found.
 */
static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
	struct cgroup_subsys_state *memcg_css;
	struct bdi_writeback *wb;

	memcg_css = task_css(current, memory_cgrp_id);
	if (!memcg_css->parent)
		return &bdi->wb;

	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);

	/*
	 * %current's blkcg equals the effective blkcg of its memcg. No
	 * need to use the relatively expensive cgroup_get_e_css().
	 */
	if (likely(wb && wb->blkcg_css == task_css(current, io_cgrp_id)))
		return wb;
	return NULL;
}
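
/*
 * The required calling pattern, which wb_get_create_current() below
 * also uses internally: the returned wb is only stable inside the RCU
 * read section unless the caller takes its own reference first.
 *
 *	rcu_read_lock();
 *	wb = wb_find_current(bdi);
 *	if (wb && !wb_tryget(wb))
 *		wb = NULL;
 *	rcu_read_unlock();
 */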

/**
 * wb_get_create_current - get or create wb for %current on a bdi
 * @bdi: bdi of interest
 * @gfp: allocation mask
 *
 * Equivalent to wb_get_create() on %current's memcg. This function is
 * called from a relatively hot path and optimizes the common cases using
 * wb_find_current().
 */
static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
	struct bdi_writeback *wb;

	rcu_read_lock();
	wb = wb_find_current(bdi);
	if (wb && unlikely(!wb_tryget(wb)))
		wb = NULL;
	rcu_read_unlock();

	if (unlikely(!wb)) {
		struct cgroup_subsys_state *memcg_css;

		memcg_css = task_get_css(current, memory_cgrp_id);
		wb = wb_get_create(bdi, memcg_css, gfp);
		css_put(memcg_css);
	}
	return wb;
}

/**
 * inode_to_wb - determine the wb of an inode
 * @inode: inode of interest
 *
 * Returns the wb @inode is currently associated with. The caller must be
 * holding either @inode->i_lock, the i_pages lock, or the
 * associated wb's list_lock.
 */
static inline struct bdi_writeback *inode_to_wb(const struct inode *inode)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON_ONCE(debug_locks &&
		     (!lockdep_is_held(&inode->i_lock) &&
		      !lockdep_is_held(&inode->i_mapping->i_pages.xa_lock) &&
		      !lockdep_is_held(&inode->i_wb->list_lock)));
#endif
	return inode->i_wb;
}
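
/*
 * Illustrative sketch (not from in-tree code): any one of the three
 * locks named above satisfies the lockdep assertion in inode_to_wb().
 *
 *	spin_lock(&inode->i_lock);
 *	wb = inode_to_wb(inode);
 *	...
 *	spin_unlock(&inode->i_lock);
 */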

static inline struct bdi_writeback *inode_to_wb_wbc(
				struct inode *inode,
				struct writeback_control *wbc)
{
	/*
	 * If wbc does not have inode attached, it means cgroup writeback was
	 * disabled when wbc started. Just use the default wb in that case.
	 */
	return wbc->wb ? wbc->wb : &inode_to_bdi(inode)->wb;
}

/**
 * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction
 * @inode: target inode
 * @cookie: output param, to be passed to the end function
 *
 * The caller wants to access the wb associated with @inode but isn't
 * holding inode->i_lock, the i_pages lock or wb->list_lock. This
 * function determines the wb associated with @inode and ensures that the
 * association doesn't change until the transaction is finished with
 * unlocked_inode_to_wb_end().
 *
 * The caller must call unlocked_inode_to_wb_end() with *@cookie afterwards and
 * can't sleep during the transaction. IRQs may or may not be disabled on
 * return.
 */
static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
{
	rcu_read_lock();

	/*
	 * Paired with the store_release in inode_switch_wbs_work_fn() and
	 * ensures that we see the new wb if we see cleared I_WB_SWITCH.
	 */
	cookie->locked = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;

	if (unlikely(cookie->locked))
		xa_lock_irqsave(&inode->i_mapping->i_pages, cookie->flags);

	/*
	 * Protected by either !I_WB_SWITCH + rcu_read_lock() or the i_pages
	 * lock. inode_to_wb()'s lockdep assertion would trip here, so
	 * dereference i_wb directly.
	 */
	return inode->i_wb;
}

/**
 * unlocked_inode_to_wb_end - end inode wb access transaction
 * @inode: target inode
 * @cookie: @cookie from unlocked_inode_to_wb_begin()
 */
static inline void unlocked_inode_to_wb_end(struct inode *inode,
					    struct wb_lock_cookie *cookie)
{
	if (unlikely(cookie->locked))
		xa_unlock_irqrestore(&inode->i_mapping->i_pages, cookie->flags);

	rcu_read_unlock();
}
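
/*
 * Illustrative usage sketch (not from in-tree code): access an inode's
 * wb without holding any of the locks that normally pin the
 * association. The caller must not sleep between begin and end.
 *
 *	struct wb_lock_cookie cookie = {};
 *	struct bdi_writeback *wb;
 *
 *	wb = unlocked_inode_to_wb_begin(inode, &cookie);
 *	... nonsleeping access to wb ...
 *	unlocked_inode_to_wb_end(inode, &cookie);
 */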

#else	/* CONFIG_CGROUP_WRITEBACK */

static inline bool inode_cgwb_enabled(struct inode *inode)
{
	return false;
}

static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
	return &bdi->wb;
}

static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
	return &bdi->wb;
}

static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
{
	return &inode_to_bdi(inode)->wb;
}

static inline struct bdi_writeback *inode_to_wb_wbc(
				struct inode *inode,
				struct writeback_control *wbc)
{
	return inode_to_wb(inode);
}

static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
{
	return inode_to_wb(inode);
}

static inline void unlocked_inode_to_wb_end(struct inode *inode,
					    struct wb_lock_cookie *cookie)
{
}

static inline void wb_memcg_offline(struct mem_cgroup *memcg)
{
}

static inline void wb_blkcg_offline(struct cgroup_subsys_state *css)
{
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

const char *bdi_dev_name(struct backing_dev_info *bdi);

#endif	/* _LINUX_BACKING_DEV_H */