Lines matching refs: wp

638 static inline void __wp_update_state(struct write_point *wp, enum write_point_state state)  in __wp_update_state()  argument
640 if (state != wp->state) { in __wp_update_state()
647 wp->last_runtime = runtime; in __wp_update_state()
648 else if (wp->state == WRITE_POINT_runnable) in __wp_update_state()
649 wp->time[WRITE_POINT_running] += runtime - wp->last_runtime; in __wp_update_state()
651 if (wp->last_state_change && in __wp_update_state()
652 time_after64(now, wp->last_state_change)) in __wp_update_state()
653 wp->time[wp->state] += now - wp->last_state_change; in __wp_update_state()
654 wp->state = state; in __wp_update_state()
655 wp->last_state_change = now; in __wp_update_state()
659 static inline void wp_update_state(struct write_point *wp, bool running) in wp_update_state() argument
664 !list_empty(&wp->writes) ? WRITE_POINT_waiting_io in wp_update_state()
667 __wp_update_state(wp, state); in wp_update_state()
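
The two state helpers above (lines 638-667) keep per-write_point statistics: on every transition the time spent in the outgoing state is added to wp->time[old_state] (lines 651-653), and CPU time of a runnable owner is charged to WRITE_POINT_running via last_runtime (lines 647-649). Below is a minimal userspace sketch of that per-state accounting, assuming a simplified write_point with only the fields visible in these hits; the enum ordering is assumed, the names are illustrative, and the sum_exec_runtime-based charging of WRITE_POINT_running is left out.

#include <stdint.h>
#include <stdio.h>
#include <time.h>

enum wp_state {
	WP_stopped,
	WP_waiting_io,
	WP_waiting_work,
	WP_runnable,
	WP_running,
	WP_STATE_NR,
};

struct write_point {
	enum wp_state	state;
	uint64_t	last_state_change;	/* ns timestamp of the last transition */
	uint64_t	time[WP_STATE_NR];	/* ns accumulated per state */
};

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t) ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

/* Charge the time spent in the old state, then switch to the new one. */
static void wp_transition(struct write_point *wp, enum wp_state state)
{
	uint64_t now = now_ns();

	if (state == wp->state)
		return;

	if (wp->last_state_change && now > wp->last_state_change)
		wp->time[wp->state] += now - wp->last_state_change;

	wp->state		= state;
	wp->last_state_change	= now;
}

int main(void)
{
	struct write_point wp = { .state = WP_stopped };

	wp_transition(&wp, WP_waiting_io);	/* data IO in flight */
	wp_transition(&wp, WP_waiting_work);	/* IO done, index update queued */
	wp_transition(&wp, WP_stopped);		/* queue drained */

	for (int i = 0; i < WP_STATE_NR; i++)
		printf("state %d: %llu ns\n", i, (unsigned long long) wp.time[i]);
	return 0;
}
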
673 struct write_point *wp = op->wp; in CLOSURE_CALLBACK() local
681 spin_lock_irqsave(&wp->writes_lock, flags); in CLOSURE_CALLBACK()
682 if (wp->state == WRITE_POINT_waiting_io) in CLOSURE_CALLBACK()
683 __wp_update_state(wp, WRITE_POINT_waiting_work); in CLOSURE_CALLBACK()
684 list_add_tail(&op->wp_list, &wp->writes); in CLOSURE_CALLBACK()
685 spin_unlock_irqrestore(&wp->writes_lock, flags); in CLOSURE_CALLBACK()
687 queue_work(wq, &wp->index_update_work); in CLOSURE_CALLBACK()
690 static inline void bch2_write_queue(struct bch_write_op *op, struct write_point *wp) in bch2_write_queue() argument
692 op->wp = wp; in bch2_write_queue()
694 if (wp->state == WRITE_POINT_stopped) { in bch2_write_queue()
695 spin_lock_irq(&wp->writes_lock); in bch2_write_queue()
696 __wp_update_state(wp, WRITE_POINT_waiting_io); in bch2_write_queue()
697 spin_unlock_irq(&wp->writes_lock); in bch2_write_queue()
703 struct write_point *wp = in bch2_write_point_do_index_updates() local
708 spin_lock_irq(&wp->writes_lock); in bch2_write_point_do_index_updates()
709 op = list_pop_entry(&wp->writes, struct bch_write_op, wp_list); in bch2_write_point_do_index_updates()
710 wp_update_state(wp, op != NULL); in bch2_write_point_do_index_updates()
711 spin_unlock_irq(&wp->writes_lock); in bch2_write_point_do_index_updates()
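
The CLOSURE_CALLBACK, bch2_write_queue and bch2_write_point_do_index_updates hits (lines 673-711) describe a producer/consumer hand-off: the data-IO completion path appends the op to wp->writes under wp->writes_lock, moves the write point from waiting_io to waiting_work, and queues index_update_work; the worker then pops one op at a time under the same lock, with wp_update_state() dropping back to stopped once the list empties. A condensed userspace sketch of that hand-off follows, with a pthread mutex standing in for the irq-safe spinlock and a hand-rolled FIFO standing in for the kernel list_head; the state bookkeeping is omitted and none of these names are the bcachefs API.

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

struct write_op {
	struct write_op	*next;
	int		id;
};

struct write_point {
	pthread_mutex_t	writes_lock;
	struct write_op	*writes;	/* FIFO of ops waiting for index updates */
	struct write_op	**writes_tail;
};

/* Data-write completion: append the op under the lock, then kick the worker. */
static void write_done(struct write_point *wp, struct write_op *op)
{
	pthread_mutex_lock(&wp->writes_lock);
	op->next	 = NULL;
	*wp->writes_tail = op;
	wp->writes_tail	 = &op->next;
	pthread_mutex_unlock(&wp->writes_lock);

	/* the real code does queue_work(wq, &wp->index_update_work) here */
}

/* Worker: pop one op at a time under the lock, do the index update outside it. */
static void index_update_work(struct write_point *wp)
{
	for (;;) {
		pthread_mutex_lock(&wp->writes_lock);
		struct write_op *op = wp->writes;

		if (op) {
			wp->writes = op->next;
			if (!wp->writes)
				wp->writes_tail = &wp->writes;
		}
		pthread_mutex_unlock(&wp->writes_lock);

		if (!op)
			break;
		printf("index update for op %d\n", op->id);
	}
}

int main(void)
{
	struct write_point wp = {
		.writes_lock	= PTHREAD_MUTEX_INITIALIZER,
		.writes_tail	= &wp.writes,
	};
	struct write_op a = { .id = 1 }, b = { .id = 2 };

	write_done(&wp, &a);
	write_done(&wp, &b);
	index_update_work(&wp);
	return 0;
}
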
782 struct write_point *wp, in init_append_extent() argument
800 bch2_alloc_sectors_append_ptrs_inlined(op->c, wp, &e->k_i, crc.compressed_size, in init_append_extent()
810 struct write_point *wp, in bch2_write_bio_alloc() argument
818 min(wp->sectors_free << 9, src->bi_iter.bi_size); in bch2_write_bio_alloc()
884 static noinline int bch2_write_prep_encoded_data(struct bch_write_op *op, struct write_point *wp) in bch2_write_prep_encoded_data() argument
896 op->crc.compressed_size <= wp->sectors_free && in bch2_write_prep_encoded_data()
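
Lines 818 and 896 both size a write against the space left at the write point: wp->sectors_free counts 512-byte sectors, so the << 9 converts it to bytes before clamping against the source bio, and pre-encoded data is only reused if its compressed size fits in the remaining sectors. A small worked example of that clamp (the helper name is invented for illustration):

#include <stdio.h>

/* wp->sectors_free is in 512-byte sectors; bio sizes are in bytes. */
static unsigned long write_bytes(unsigned sectors_free, unsigned long src_bytes)
{
	unsigned long wp_bytes = (unsigned long) sectors_free << 9;

	return src_bytes < wp_bytes ? src_bytes : wp_bytes;
}

int main(void)
{
	/* 24 free sectors = 12 KiB; a 64 KiB source bio gets clamped to 12 KiB. */
	printf("%lu\n", write_bytes(24, 64 * 1024));	/* prints 12288 */
	return 0;
}
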
983 static int bch2_write_extent(struct bch_write_op *op, struct write_point *wp, in bch2_write_extent() argument
1000 ec_buf = bch2_writepoint_ec_buf(c, wp); in bch2_write_extent()
1003 ret = bch2_write_prep_encoded_data(op, wp); in bch2_write_extent()
1008 dst = bch2_write_bio_alloc(c, wp, src, in bch2_write_extent()
1014 init_append_extent(op, wp, op->version, op->crc); in bch2_write_extent()
1025 dst = bch2_write_bio_alloc(c, wp, src, in bch2_write_extent()
1034 dst = bch2_write_bio_alloc(c, wp, src, in bch2_write_extent()
1048 dst->bi_iter.bi_size < (wp->sectors_free << 9) && in bch2_write_extent()
1065 dst_len = min_t(unsigned, dst_len, wp->sectors_free << 9); in bch2_write_extent()
1148 init_append_extent(op, wp, version, crc); in bch2_write_extent()
1165 wp->sectors_free && in bch2_write_extent()
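
bch2_write_extent() peels the source bio into one or more extents, each clamped to what is left at the write point (dst_len is limited to wp->sectors_free << 9 at line 1065) and stopping once sectors_free is exhausted (line 1165), at which point the caller has to come back with a fresh allocation. The loop below is a schematic of that chunking with the checksum/compression work elided; sizes and helper names are illustrative only, and lengths are assumed 512-byte aligned:

#include <stdio.h>

/* Peel src_bytes into extents no larger than the write point's free space. */
static void write_extents(unsigned long src_bytes, unsigned sectors_free,
			  unsigned long max_extent_bytes)
{
	while (src_bytes && sectors_free) {
		unsigned long wp_bytes = (unsigned long) sectors_free << 9;
		unsigned long len = src_bytes;

		if (len > max_extent_bytes)
			len = max_extent_bytes;
		if (len > wp_bytes)
			len = wp_bytes;

		/* in the real code: init_append_extent(op, wp, version, crc) */
		printf("extent: %lu bytes\n", len);

		src_bytes	-= len;
		sectors_free	-= len >> 9;	/* assumes 512-byte aligned lengths */
	}

	if (src_bytes)
		printf("%lu bytes left over: caller must get a new write point\n",
		       src_bytes);
}

int main(void)
{
	/* 96 KiB write, 128 sectors (64 KiB) free, 32 KiB max per extent. */
	write_extents(96 * 1024, 128, 32 * 1024);
	return 0;
}
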
1497 struct write_point *wp = NULL; in __bch2_write() local
1543 &op->cl, &wp))); in __bch2_write()
1551 EBUG_ON(!wp); in __bch2_write()
1553 bch2_open_bucket_get(c, wp, &op->open_buckets); in __bch2_write()
1554 ret = bch2_write_extent(op, wp, &bio); in __bch2_write()
1556 bch2_alloc_sectors_done_inlined(c, wp); in __bch2_write()
1601 bch2_write_queue(op, wp); in __bch2_write()
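
Finally, the __bch2_write() hits show the top-level loop: obtain a write point (lines 1543/1551), take references on its open buckets (1553), emit as much of the write as fits (1554), release the allocation (1556), and loop for more space, handing the op to the write point via bch2_write_queue() on the asynchronous path (1601). A stripped-down, standalone rendering of that control flow, with the allocator and closure machinery replaced by trivial stand-ins (all names here are hypothetical):

#include <stdbool.h>
#include <stdio.h>

struct write_point { unsigned sectors_free; };

/* Stand-in for the allocator: hand out up to 64 free sectors per iteration. */
static bool alloc_sectors_start(struct write_point *wp)
{
	wp->sectors_free = 64;
	return true;
}

/* Stand-in for bch2_write_extent(): consume what fits, return sectors written. */
static unsigned write_extent(struct write_point *wp, unsigned sectors_remaining)
{
	unsigned n = sectors_remaining < wp->sectors_free
		? sectors_remaining : wp->sectors_free;

	wp->sectors_free -= n;
	return n;
}

int main(void)
{
	unsigned remaining = 200;	/* sectors still to write */
	struct write_point wp;

	while (remaining) {
		if (!alloc_sectors_start(&wp))	/* would block: queue op and return */
			break;

		remaining -= write_extent(&wp, remaining);
		/* the real code releases the write point here before looping */
		printf("%u sectors left\n", remaining);
	}
	return 0;
}
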