Lines matching refs:wb (a short sketch of the write-bandwidth update these hits feed follows the listing)
129 struct bdi_writeback *wb; member
155 #define GDTC_INIT(__wb) .wb = (__wb), \
161 #define MDTC_INIT(__wb, __gdtc) .wb = (__wb), \
181 static struct fprop_local_percpu *wb_memcg_completions(struct bdi_writeback *wb) in wb_memcg_completions() argument
183 return &wb->memcg_completions; in wb_memcg_completions()
186 static void wb_min_max_ratio(struct bdi_writeback *wb, in wb_min_max_ratio() argument
189 unsigned long this_bw = READ_ONCE(wb->avg_write_bandwidth); in wb_min_max_ratio()
190 unsigned long tot_bw = atomic_long_read(&wb->bdi->tot_write_bandwidth); in wb_min_max_ratio()
191 unsigned long long min = wb->bdi->min_ratio; in wb_min_max_ratio()
192 unsigned long long max = wb->bdi->max_ratio; in wb_min_max_ratio()
215 #define GDTC_INIT(__wb) .wb = (__wb), \
235 static struct fprop_local_percpu *wb_memcg_completions(struct bdi_writeback *wb) in wb_memcg_completions() argument
240 static void wb_min_max_ratio(struct bdi_writeback *wb, in wb_min_max_ratio() argument
243 *minp = wb->bdi->min_ratio; in wb_min_max_ratio()
244 *maxp = wb->bdi->max_ratio; in wb_min_max_ratio()
603 static inline void __wb_writeout_add(struct bdi_writeback *wb, long nr) in __wb_writeout_add() argument
607 wb_stat_mod(wb, WB_WRITTEN, nr); in __wb_writeout_add()
608 wb_domain_writeout_add(&global_wb_domain, &wb->completions, in __wb_writeout_add()
609 wb->bdi->max_prop_frac, nr); in __wb_writeout_add()
611 cgdom = mem_cgroup_wb_domain(wb); in __wb_writeout_add()
613 wb_domain_writeout_add(cgdom, wb_memcg_completions(wb), in __wb_writeout_add()
614 wb->bdi->max_prop_frac, nr); in __wb_writeout_add()
617 void wb_writeout_inc(struct bdi_writeback *wb) in wb_writeout_inc() argument
622 __wb_writeout_add(wb, 1); in wb_writeout_inc()
884 mem_cgroup_wb_stats(dtc->wb, &filepages, &headroom, &dtc->dirty, in domain_dirty_avail()
934 wb_min_max_ratio(dtc->wb, &wb_min_ratio, &wb_max_ratio); in __wb_calc_thresh()
943 unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh) in wb_calc_thresh() argument
945 struct dirty_throttle_control gdtc = { GDTC_INIT(wb) }; in wb_calc_thresh()
950 unsigned long cgwb_calc_thresh(struct bdi_writeback *wb) in cgwb_calc_thresh() argument
953 struct dirty_throttle_control mdtc = { MDTC_INIT(wb, &gdtc) }; in cgwb_calc_thresh()
1070 struct bdi_writeback *wb = dtc->wb; in wb_position_ratio() local
1071 unsigned long write_bw = READ_ONCE(wb->avg_write_bandwidth); in wb_position_ratio()
1120 if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) { in wb_position_ratio()
1247 static void wb_update_write_bandwidth(struct bdi_writeback *wb, in wb_update_write_bandwidth() argument
1252 unsigned long avg = wb->avg_write_bandwidth; in wb_update_write_bandwidth()
1253 unsigned long old = wb->write_bandwidth; in wb_update_write_bandwidth()
1266 bw = written - min(written, wb->written_stamp); in wb_update_write_bandwidth()
1273 bw += (u64)wb->write_bandwidth * (period - elapsed); in wb_update_write_bandwidth()
1288 if (wb_has_dirty_io(wb)) { in wb_update_write_bandwidth()
1289 long delta = avg - wb->avg_write_bandwidth; in wb_update_write_bandwidth()
1291 &wb->bdi->tot_write_bandwidth) <= 0); in wb_update_write_bandwidth()
1293 wb->write_bandwidth = bw; in wb_update_write_bandwidth()
1294 WRITE_ONCE(wb->avg_write_bandwidth, avg); in wb_update_write_bandwidth()
1355 struct bdi_writeback *wb = dtc->wb; in wb_update_dirty_ratelimit() local
1360 unsigned long write_bw = wb->avg_write_bandwidth; in wb_update_dirty_ratelimit()
1361 unsigned long dirty_ratelimit = wb->dirty_ratelimit; in wb_update_dirty_ratelimit()
1373 dirty_rate = (dirtied - wb->dirtied_stamp) * HZ / elapsed; in wb_update_dirty_ratelimit()
1467 if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) { in wb_update_dirty_ratelimit()
1476 x = min3(wb->balanced_dirty_ratelimit, in wb_update_dirty_ratelimit()
1481 x = max3(wb->balanced_dirty_ratelimit, in wb_update_dirty_ratelimit()
1503 WRITE_ONCE(wb->dirty_ratelimit, max(dirty_ratelimit, 1UL)); in wb_update_dirty_ratelimit()
1504 wb->balanced_dirty_ratelimit = balanced_dirty_ratelimit; in wb_update_dirty_ratelimit()
1506 trace_bdi_dirty_ratelimit(wb, dirty_rate, task_ratelimit); in wb_update_dirty_ratelimit()
1513 struct bdi_writeback *wb = gdtc->wb; in __wb_update_bandwidth() local
1519 spin_lock(&wb->list_lock); in __wb_update_bandwidth()
1527 elapsed = max(now - wb->bw_time_stamp, 1UL); in __wb_update_bandwidth()
1528 dirtied = percpu_counter_read(&wb->stat[WB_DIRTIED]); in __wb_update_bandwidth()
1529 written = percpu_counter_read(&wb->stat[WB_WRITTEN]); in __wb_update_bandwidth()
1544 wb_update_write_bandwidth(wb, elapsed, written); in __wb_update_bandwidth()
1546 wb->dirtied_stamp = dirtied; in __wb_update_bandwidth()
1547 wb->written_stamp = written; in __wb_update_bandwidth()
1548 WRITE_ONCE(wb->bw_time_stamp, now); in __wb_update_bandwidth()
1549 spin_unlock(&wb->list_lock); in __wb_update_bandwidth()
1552 void wb_update_bandwidth(struct bdi_writeback *wb) in wb_update_bandwidth() argument
1554 struct dirty_throttle_control gdtc = { GDTC_INIT(wb) }; in wb_update_bandwidth()
1562 static void wb_bandwidth_estimate_start(struct bdi_writeback *wb) in wb_bandwidth_estimate_start() argument
1565 unsigned long elapsed = now - READ_ONCE(wb->bw_time_stamp); in wb_bandwidth_estimate_start()
1568 !atomic_read(&wb->writeback_inodes)) { in wb_bandwidth_estimate_start()
1569 spin_lock(&wb->list_lock); in wb_bandwidth_estimate_start()
1570 wb->dirtied_stamp = wb_stat(wb, WB_DIRTIED); in wb_bandwidth_estimate_start()
1571 wb->written_stamp = wb_stat(wb, WB_WRITTEN); in wb_bandwidth_estimate_start()
1572 WRITE_ONCE(wb->bw_time_stamp, now); in wb_bandwidth_estimate_start()
1573 spin_unlock(&wb->list_lock); in wb_bandwidth_estimate_start()
1594 static unsigned long wb_max_pause(struct bdi_writeback *wb, in wb_max_pause() argument
1597 unsigned long bw = READ_ONCE(wb->avg_write_bandwidth); in wb_max_pause()
1613 static long wb_min_pause(struct bdi_writeback *wb, in wb_min_pause() argument
1619 long hi = ilog2(READ_ONCE(wb->avg_write_bandwidth)); in wb_min_pause()
1620 long lo = ilog2(READ_ONCE(wb->dirty_ratelimit)); in wb_min_pause()
1690 struct bdi_writeback *wb = dtc->wb; in wb_dirty_limits() local
1721 wb_reclaimable = wb_stat_sum(wb, WB_RECLAIMABLE); in wb_dirty_limits()
1722 dtc->wb_dirty = wb_reclaimable + wb_stat_sum(wb, WB_WRITEBACK); in wb_dirty_limits()
1724 wb_reclaimable = wb_stat(wb, WB_RECLAIMABLE); in wb_dirty_limits()
1725 dtc->wb_dirty = wb_reclaimable + wb_stat(wb, WB_WRITEBACK); in wb_dirty_limits()
1830 static int balance_dirty_pages(struct bdi_writeback *wb, in balance_dirty_pages() argument
1833 struct dirty_throttle_control gdtc_stor = { GDTC_INIT(wb) }; in balance_dirty_pages()
1834 struct dirty_throttle_control mdtc_stor = { MDTC_INIT(wb, &gdtc_stor) }; in balance_dirty_pages()
1847 struct backing_dev_info *bdi = wb->bdi; in balance_dirty_pages()
1876 !writeback_in_progress(wb)) in balance_dirty_pages()
1877 wb_start_background_writeback(wb); in balance_dirty_pages()
1900 if (unlikely(!writeback_in_progress(wb))) in balance_dirty_pages()
1901 wb_start_background_writeback(wb); in balance_dirty_pages()
1903 mem_cgroup_flush_foreign(wb); in balance_dirty_pages()
1928 wb->dirty_exceeded = gdtc->dirty_exceeded || in balance_dirty_pages()
1930 if (time_is_before_jiffies(READ_ONCE(wb->bw_time_stamp) + in balance_dirty_pages()
1935 dirty_ratelimit = READ_ONCE(wb->dirty_ratelimit); in balance_dirty_pages()
1938 max_pause = wb_max_pause(wb, sdtc->wb_dirty); in balance_dirty_pages()
1939 min_pause = wb_min_pause(wb, max_pause, in balance_dirty_pages()
1960 trace_balance_dirty_pages(wb, in balance_dirty_pages()
1989 trace_balance_dirty_pages(wb, in balance_dirty_pages()
2079 struct bdi_writeback *wb = NULL; in balance_dirty_pages_ratelimited_flags() local
2088 wb = wb_get_create_current(bdi, GFP_KERNEL); in balance_dirty_pages_ratelimited_flags()
2089 if (!wb) in balance_dirty_pages_ratelimited_flags()
2090 wb = &bdi->wb; in balance_dirty_pages_ratelimited_flags()
2093 if (wb->dirty_exceeded) in balance_dirty_pages_ratelimited_flags()
2125 ret = balance_dirty_pages(wb, current->nr_dirtied, flags); in balance_dirty_pages_ratelimited_flags()
2127 wb_put(wb); in balance_dirty_pages_ratelimited_flags()
2156 struct bdi_writeback *wb = dtc->wb; in wb_bg_dirty_limits() local
2160 dtc->wb_dirty = wb_stat_sum(wb, WB_RECLAIMABLE); in wb_bg_dirty_limits()
2162 dtc->wb_dirty = wb_stat(wb, WB_RECLAIMABLE); in wb_bg_dirty_limits()
2188 bool wb_over_bg_thresh(struct bdi_writeback *wb) in wb_over_bg_thresh() argument
2190 struct dirty_throttle_control gdtc = { GDTC_INIT(wb) }; in wb_over_bg_thresh()
2191 struct dirty_throttle_control mdtc = { MDTC_INIT(wb, &gdtc) }; in wb_over_bg_thresh()
2675 struct bdi_writeback *wb; in do_writepages() local
2679 wb = inode_to_wb_wbc(mapping->host, wbc); in do_writepages()
2680 wb_bandwidth_estimate_start(wb); in do_writepages()
2707 if (time_is_before_jiffies(READ_ONCE(wb->bw_time_stamp) + in do_writepages()
2709 wb_update_bandwidth(wb); in do_writepages()
2739 struct bdi_writeback *wb; in folio_account_dirtied() local
2743 wb = inode_to_wb(inode); in folio_account_dirtied()
2748 wb_stat_mod(wb, WB_RECLAIMABLE, nr); in folio_account_dirtied()
2749 wb_stat_mod(wb, WB_DIRTIED, nr); in folio_account_dirtied()
2754 mem_cgroup_track_foreign_dirty(folio, wb); in folio_account_dirtied()
2763 void folio_account_cleaned(struct folio *folio, struct bdi_writeback *wb) in folio_account_cleaned() argument
2769 wb_stat_mod(wb, WB_RECLAIMABLE, -nr); in folio_account_cleaned()
2865 struct bdi_writeback *wb; in folio_redirty_for_writepage() local
2868 wb = unlocked_inode_to_wb_begin(inode, &cookie); in folio_redirty_for_writepage()
2871 wb_stat_mod(wb, WB_DIRTIED, -nr); in folio_redirty_for_writepage()
2956 struct bdi_writeback *wb; in __folio_cancel_dirty() local
2960 wb = unlocked_inode_to_wb_begin(inode, &cookie); in __folio_cancel_dirty()
2963 folio_account_cleaned(folio, wb); in __folio_cancel_dirty()
2996 struct bdi_writeback *wb; in folio_clear_dirty_for_io() local
3034 wb = unlocked_inode_to_wb_begin(inode, &cookie); in folio_clear_dirty_for_io()
3039 wb_stat_mod(wb, WB_RECLAIMABLE, -nr); in folio_clear_dirty_for_io()
3049 static void wb_inode_writeback_start(struct bdi_writeback *wb) in wb_inode_writeback_start() argument
3051 atomic_inc(&wb->writeback_inodes); in wb_inode_writeback_start()
3054 static void wb_inode_writeback_end(struct bdi_writeback *wb) in wb_inode_writeback_end() argument
3057 atomic_dec(&wb->writeback_inodes); in wb_inode_writeback_end()
3065 spin_lock_irqsave(&wb->work_lock, flags); in wb_inode_writeback_end()
3066 if (test_bit(WB_registered, &wb->state)) in wb_inode_writeback_end()
3067 queue_delayed_work(bdi_wq, &wb->bw_dwork, BANDWIDTH_INTERVAL); in wb_inode_writeback_end()
3068 spin_unlock_irqrestore(&wb->work_lock, flags); in wb_inode_writeback_end()
3088 struct bdi_writeback *wb = inode_to_wb(inode); in __folio_end_writeback() local
3090 wb_stat_mod(wb, WB_WRITEBACK, -nr); in __folio_end_writeback()
3091 __wb_writeout_add(wb, nr); in __folio_end_writeback()
3093 wb_inode_writeback_end(wb); in __folio_end_writeback()
3137 struct bdi_writeback *wb = inode_to_wb(inode); in __folio_start_writeback() local
3139 wb_stat_mod(wb, WB_WRITEBACK, nr); in __folio_start_writeback()
3141 wb_inode_writeback_start(wb); in __folio_start_writeback()
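
The densest cluster of hits above is the bandwidth-estimation path (wb_update_write_bandwidth() and __wb_update_bandwidth()). Below is a minimal user-space sketch of the running average those lines maintain, under stated assumptions: the ~3 s period and the 1/8 adjustment step mirror the listed arithmetic, while struct wb_model, model_update_write_bandwidth() and the HZ value are hypothetical names chosen for illustration, not the kernel's definitions.

/*
 * Simplified model of the write-bandwidth estimate kept per bdi_writeback.
 * Not the kernel code: the kernel rounds the period up to a power of two so
 * it can divide with a shift, and reads the counters under wb->list_lock.
 */
#include <stdio.h>

#define HZ     100UL        /* assumed tick rate for this model          */
#define PERIOD (3UL * HZ)   /* averaging window of roughly three seconds */

struct wb_model {
	unsigned long write_bandwidth;     /* latest per-period estimate (pages/s) */
	unsigned long avg_write_bandwidth; /* smoothed long-term estimate          */
	unsigned long written_stamp;       /* WB_WRITTEN counter at the last update */
};

static void model_update_write_bandwidth(struct wb_model *wb,
					 unsigned long elapsed,
					 unsigned long written)
{
	unsigned long avg = wb->avg_write_bandwidth;
	unsigned long old = wb->write_bandwidth;
	unsigned long long bw;

	/* pages completed since the last sample, scaled to pages per second */
	bw = written - (written < wb->written_stamp ? written : wb->written_stamp);
	bw *= HZ;

	if (elapsed > PERIOD) {
		/* the sample already spans a full period: take it as-is */
		bw /= elapsed;
		avg = (unsigned long)bw;
	} else {
		/* blend the new sample with the previous estimate over PERIOD */
		bw += (unsigned long long)wb->write_bandwidth * (PERIOD - elapsed);
		bw /= PERIOD;

		/* let the long-term average drift toward the estimate in 1/8 steps */
		if (avg > old && old >= (unsigned long)bw)
			avg -= (avg - old) / 8;
		if (avg < old && old <= (unsigned long)bw)
			avg += (old - avg) / 8;
	}

	wb->write_bandwidth = (unsigned long)bw;
	wb->avg_write_bandwidth = avg;
	wb->written_stamp = written;
}

int main(void)
{
	struct wb_model wb = { .write_bandwidth = 1000, .avg_write_bandwidth = 1000 };
	unsigned long written = 0;

	/* feed ten 200 ms samples of ~500 pages each and watch the estimate settle */
	for (int i = 0; i < 10; i++) {
		written += 500;
		model_update_write_bandwidth(&wb, HZ / 5, written);
		printf("sample %2d: bw=%lu avg=%lu\n", i,
		       wb.write_bandwidth, wb.avg_write_bandwidth);
	}
	return 0;
}

Blending each sample against the previous estimate over a fixed period, and then letting the smoothed value chase it in 1/8 steps, is what keeps avg_write_bandwidth, which the listed wb_position_ratio(), wb_max_pause() and wb_min_pause() hits read, from bouncing on bursty devices.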