Lines matching refs: clock — every reference to `clock` in the bcachefs I/O clock code (fs/bcachefs/clock.c), grouped by function. Source lines that did not match the search are elided and marked with `...`.
void bch2_io_timer_add(struct io_clock *clock, struct io_timer *timer)
{
	spin_lock(&clock->timer_lock);

	if (time_after_eq64((u64) atomic64_read(&clock->now), timer->expire)) {
		/* already expired: drop the lock and fire the timer (elided) */
		spin_unlock(&clock->timer_lock);
		...
	}

	/* adding a timer that is already queued is a no-op */
	for (size_t i = 0; i < clock->timers.nr; i++)
		if (clock->timers.data[i] == timer)
			goto out;

	BUG_ON(!min_heap_push(&clock->timers, &timer, &callbacks, NULL));
out:
	spin_unlock(&clock->timer_lock);
}
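Nothing in the listing shows a caller, so here is a minimal sketch of how a timer would be armed against this API. Everything except bch2_io_timer_add() and the io_timer fields visible in this listing (fn, fn2, expire) is a made-up name for illustration; note that expire is in the clock's own units (sectors of IO), not wall time.

/* Hypothetical caller: fire a callback once the clock advances by
 * `delay` more sectors. The timer must stay alive until it fires or
 * is deleted, since the heap stores a raw pointer to it. */
static void example_timer_fn(struct io_timer *timer)
{
	/* invoked once clock->now has reached timer->expire */
	pr_info("io clock timer fired\n");
}

static void example_arm(struct io_clock *clock, u64 delay)
{
	static struct io_timer t;

	t.fn	 = example_timer_fn;
	t.fn2	 = NULL;	/* auxiliary debug pointer, printed by bch2_io_timers_to_text() */
	t.expire = atomic64_read(&clock->now) + delay;

	bch2_io_timer_add(clock, &t);
}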
void bch2_io_timer_del(struct io_clock *clock, struct io_timer *timer)
{
	spin_lock(&clock->timer_lock);

	for (size_t i = 0; i < clock->timers.nr; i++)
		if (clock->timers.data[i] == timer) {
			min_heap_del(&clock->timers, i, &callbacks, NULL);
			break;
		}

	spin_unlock(&clock->timer_lock);
}
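Because the heap holds raw io_timer pointers, a timer embedded in another object must be cancelled before that object is freed. As the loop above shows, deleting a timer that was never armed, or that already fired and was popped, simply falls through. A sketch of the pattern (the container type and function are hypothetical):

struct my_object {
	struct io_timer	timer;	/* hypothetical container */
	/* ... */
};

static void my_object_free(struct io_clock *clock, struct my_object *obj)
{
	/* safe even if obj->timer already fired or was never added */
	bch2_io_timer_del(clock, &obj->timer);
	kfree(obj);
}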
void bch2_io_clock_schedule_timeout(struct io_clock *clock, u64 until)
{
	/* sets up an on-stack wait entry whose timer wakes this task (elided) */
	...

	bch2_io_timer_add(clock, &wait.io_timer);
	...	/* sleep until the timer callback wakes us */
	bch2_io_timer_del(clock, &wait.io_timer);
}
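The listing elides the on-stack wait entry, but the `&wait.io_timer` argument shows its shape: an io_clock_wait object embedding the io_timer plus whatever the wake callback needs. A plausible reconstruction assuming only what is visible here; the layout and callback name are guesses, and stashing the caller's address in fn2 (which bch2_io_timers_to_text() prints below) is an assumption modeled on common kernel practice with _RET_IP_:

struct io_clock_wait {			/* assumed layout */
	struct io_timer		io_timer;
	struct task_struct	*task;
};

static void io_clock_wait_fn(struct io_timer *timer)	/* assumed callback */
{
	struct io_clock_wait *wait =
		container_of(timer, struct io_clock_wait, io_timer);

	wake_up_process(wait->task);
}

/* ...inside bch2_io_clock_schedule_timeout(): */
struct io_clock_wait wait = {
	.io_timer.expire	= until,
	.io_timer.fn		= io_clock_wait_fn,
	.io_timer.fn2		= (void *) _RET_IP_,	/* caller, for debug output */
	.task			= current,
};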
unsigned long bch2_kthread_io_clock_wait_once(struct io_clock *clock,
					      u64 io_until, unsigned long cpu_timeout)
{
	/* arms an on-stack wait entry, as above (elided) */
	...

	bch2_io_timer_add(clock, &wait.io_timer);
	...	/* schedule_timeout() updates cpu_timeout, minding kthread_should_stop() (elided) */
	bch2_io_timer_del(clock, &wait.io_timer);
	return cpu_timeout;	/* remaining CPU-clock timeout */
}
void bch2_kthread_io_clock_wait(struct io_clock *clock,
				u64 io_until, unsigned long cpu_timeout)
{
	/* also bails out on kthread_should_stop() and CPU-timeout expiry (elided) */
	while (... &&
	       atomic64_read(&clock->now) < io_until)
		cpu_timeout = bch2_kthread_io_clock_wait_once(clock, io_until, cpu_timeout);
}
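This is the entry point a throttled kthread would use: sleep until the IO clock (sectors of IO) reaches a target, optionally bounded by a CPU-time timeout. A usage sketch; the worker, its stride, and do_background_work() are illustrative, not from the source:

#include <linux/kthread.h>
#include <linux/sched.h>

void do_background_work(void);	/* hypothetical work function */

/* Hypothetical worker throttled to one unit of work per `stride`
 * sectors of foreground IO */
static int example_throttled_thread(void *arg)
{
	struct io_clock *clock = arg;
	u64 stride = 1024;

	while (!kthread_should_stop()) {
		u64 next = atomic64_read(&clock->now) + stride;

		do_background_work();

		/* sleep until the IO clock reaches `next`; no CPU-time cap */
		bch2_kthread_io_clock_wait(clock, next, MAX_SCHEDULE_TIMEOUT);
	}

	return 0;
}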
static struct io_timer *get_expired_timer(struct io_clock *clock, u64 now)
{
	struct io_timer *ret = NULL;

	if (clock->timers.nr &&
	    time_after_eq64(now, clock->timers.data[0]->expire)) {
		ret = *min_heap_peek(&clock->timers);
		min_heap_pop(&clock->timers, &callbacks, NULL);
	}

	return ret;
}
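The heap is keyed so that data[0] is always the timer with the smallest expire, which is what makes the peek-and-pop above correct. The `callbacks` object does not appear in the listing; assuming the kernel's <linux/min_heap.h> API, it is plausibly a less-than comparator on expire along these lines (the comparator name is a guess):

#include <linux/min_heap.h>

/* Sketch of the elided comparator: order timers by expiry, soonest
 * first. The heap elements are struct io_timer pointers, hence the
 * double dereference. */
static bool io_timer_less(const void *l, const void *r, void *args)
{
	struct io_timer * const *_l = l;
	struct io_timer * const *_r = r;

	return (*_l)->expire < (*_r)->expire;
}

static const struct min_heap_callbacks callbacks = {
	.less	= io_timer_less,
	.swp	= NULL,		/* NULL selects the default element swap */
};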
void __bch2_increment_clock(struct io_clock *clock, u64 sectors)
{
	struct io_timer *timer;
	u64 now = atomic64_add_return(sectors, &clock->now);

	spin_lock(&clock->timer_lock);
	/* fire every timer whose expiry the clock has now passed */
	while ((timer = get_expired_timer(clock, now)))
		timer->fn(timer);
	spin_unlock(&clock->timer_lock);
}
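Only the slow path appears here; the pcpu_buf allocated in bch2_io_clock_init() below, together with IO_CLOCK_PCPU_SECTORS and max_slop, implies a per-CPU fast path that batches small increments and only takes the shared atomic add and timer check once a CPU has accumulated enough sectors. A sketch of what that wrapper plausibly looks like (the wrapper name and inline placement are assumptions):

/* Assumed fast-path wrapper: batch increments per-CPU, flushing to the
 * shared clock once IO_CLOCK_PCPU_SECTORS have accumulated locally.
 * clock->now can therefore lag reality by up to clock->max_slop
 * (IO_CLOCK_PCPU_SECTORS * num_possible_cpus()) sectors. */
static inline void example_increment_clock(struct io_clock *clock, u64 sectors)
{
	if (this_cpu_add_return(*clock->pcpu_buf, sectors) >=
	    IO_CLOCK_PCPU_SECTORS)
		__bch2_increment_clock(clock,
				       this_cpu_xchg(*clock->pcpu_buf, 0));
}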
void bch2_io_timers_to_text(struct printbuf *out, struct io_clock *clock)
{
	spin_lock(&clock->timer_lock);
	u64 now = atomic64_read(&clock->now);

	/* prints the current clock value, then one line per pending timer */
	...
	for (unsigned i = 0; i < clock->timers.nr; i++)
		/* print call and format string elided in the listing: */
		...(out, ...,
		    clock->timers.data[i]->fn,
		    clock->timers.data[i]->fn2,
		    clock->timers.data[i]->expire);
	spin_unlock(&clock->timer_lock);
}
void bch2_io_clock_exit(struct io_clock *clock)
{
	free_heap(&clock->timers);
	free_percpu(clock->pcpu_buf);
}
int bch2_io_clock_init(struct io_clock *clock)
{
	atomic64_set(&clock->now, 0);
	spin_lock_init(&clock->timer_lock);

	clock->max_slop = IO_CLOCK_PCPU_SECTORS * num_possible_cpus();

	clock->pcpu_buf = alloc_percpu(*clock->pcpu_buf);
	if (!clock->pcpu_buf)
		return ...;	/* ENOMEM-style error, exact code elided */

	if (!init_heap(&clock->timers, NR_IO_TIMERS, GFP_KERNEL))
		return ...;	/* ENOMEM-style error, exact code elided */

	return 0;
}
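For completeness, a sketch of pairing init with the exit path above. That bch2_io_clock_exit() is safe after a partially failed init is an assumption (free_percpu(NULL) is a no-op, and the heap free would need the clock zero-initialized); the caller and its error handling are illustrative only:

/* Hypothetical caller: assumes a zero-initialized io_clock, so the
 * exit path can run even if init failed partway through. */
static int example_setup(struct io_clock *clock)
{
	int ret = bch2_io_clock_init(clock);
	if (ret) {
		bch2_io_clock_exit(clock);
		return ret;
	}

	/* ... use the clock ... */
	return 0;
}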