Lines matching references to the identifier c (struct cache_set *c) in drivers/md/bcache/journal.c
170 int bch_journal_read(struct cache_set *c, struct list_head *list) in bch_journal_read() argument
181 struct cache *ca = c->cache; in bch_journal_read()
285 c->journal.seq = list_entry(list->prev, in bch_journal_read()
293 void bch_journal_mark(struct cache_set *c, struct list_head *list) in bch_journal_mark() argument
298 struct journal *j = &c->journal; in bch_journal_mark()
327 if (!__bch_extent_invalid(c, k)) { in bch_journal_mark()
331 if (ptr_available(c, k, j)) in bch_journal_mark()
332 atomic_inc(&PTR_BUCKET(c, k, j)->pin); in bch_journal_mark()
334 bch_initial_mark_key(c, 0, k); in bch_journal_mark()
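
The lines above are the per-key work inside bch_journal_mark(): every extent key replayed from the journal that is still valid gets a pin on each bucket it points at, then is fed to the initial GC mark. A minimal sketch of that inner block, reconstructed from the listed lines; the outer loops over the replay list and over each jset's keys are elided, and the KEY_PTRS() iteration is an assumption based on the j index used at source lines 331-332:

        if (!__bch_extent_invalid(c, k)) {
                unsigned int j;

                /* Pin each live bucket so it cannot be reallocated
                 * before journal replay has reinserted this key. */
                for (j = 0; j < KEY_PTRS(k); j++)
                        if (ptr_available(c, k, j))
                                atomic_inc(&PTR_BUCKET(c, k, j)->pin);

                /* Account the key in the initial (pre-GC) marking pass. */
                bch_initial_mark_key(c, 0, k);
        }
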
416 static void btree_flush_write(struct cache_set *c) in btree_flush_write() argument
424 if (c->journal.btree_flushing) in btree_flush_write()
427 spin_lock(&c->journal.flush_write_lock); in btree_flush_write()
428 if (c->journal.btree_flushing) { in btree_flush_write()
429 spin_unlock(&c->journal.flush_write_lock); in btree_flush_write()
432 c->journal.btree_flushing = true; in btree_flush_write()
433 spin_unlock(&c->journal.flush_write_lock); in btree_flush_write()
436 spin_lock(&c->journal.lock); in btree_flush_write()
437 fifo_front_p = &fifo_front(&c->journal.pin); in btree_flush_write()
444 spin_unlock(&c->journal.lock); in btree_flush_write()
447 spin_unlock(&c->journal.lock); in btree_flush_write()
449 mask = c->journal.pin.mask; in btree_flush_write()
451 atomic_long_inc(&c->flush_write); in btree_flush_write()
454 mutex_lock(&c->bucket_lock); in btree_flush_write()
455 list_for_each_entry_safe_reverse(b, t, &c->btree_cache, list) { in btree_flush_write()
462 now_fifo_front_p = &fifo_front(&c->journal.pin); in btree_flush_write()
528 mutex_unlock(&c->bucket_lock); in btree_flush_write()
564 spin_lock(&c->journal.flush_write_lock); in btree_flush_write()
565 c->journal.btree_flushing = false; in btree_flush_write()
566 spin_unlock(&c->journal.flush_write_lock); in btree_flush_write()
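
Taken together, these references give the shape of btree_flush_write(): a btree_flushing flag under flush_write_lock keeps at most one flusher running, the oldest pinned journal entry is sampled under journal.lock, and the btree cache is then walked in reverse under bucket_lock looking for nodes that still pin that entry. A condensed sketch of that locking skeleton; candidate selection and the node writes are elided, and the early-out when nothing is pinned is inferred from the two unlock sites at source lines 444 and 447:

        /* Opportunistic unlocked check, then take flush_write_lock to
         * make sure only one btree_flush_write() runs at a time. */
        if (c->journal.btree_flushing)
                return;

        spin_lock(&c->journal.flush_write_lock);
        if (c->journal.btree_flushing) {
                spin_unlock(&c->journal.flush_write_lock);
                return;
        }
        c->journal.btree_flushing = true;
        spin_unlock(&c->journal.flush_write_lock);

        /* Sample the oldest still-pinned journal entry. */
        spin_lock(&c->journal.lock);
        fifo_front_p = &fifo_front(&c->journal.pin);
        if (atomic_read(fifo_front_p) <= 0) {
                /* Nothing is pinned any more; no flush needed. */
                spin_unlock(&c->journal.lock);
                goto out;
        }
        spin_unlock(&c->journal.lock);

        mask = c->journal.pin.mask;
        atomic_long_inc(&c->flush_write);

        /* Walk the btree cache from the coldest end, collecting nodes
         * whose journal reference resolves to the front pin; recheck
         * fifo_front(&c->journal.pin) as it can advance underneath us. */
        mutex_lock(&c->bucket_lock);
        list_for_each_entry_safe_reverse(b, t, &c->btree_cache, list) {
                /* ... select and mark candidate nodes (elided) ... */
        }
        mutex_unlock(&c->bucket_lock);

        /* ... write out the selected nodes (elided) ... */

out:
        spin_lock(&c->journal.flush_write_lock);
        c->journal.btree_flushing = false;
        spin_unlock(&c->journal.flush_write_lock);
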
630 static unsigned int free_journal_buckets(struct cache_set *c) in free_journal_buckets() argument
632 struct journal *j = &c->journal; in free_journal_buckets()
633 struct cache *ca = c->cache; in free_journal_buckets()
634 struct journal_device *ja = &c->cache->journal; in free_journal_buckets()
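
Only the declarations of free_journal_buckets() show up in this listing, since the rest of the function works on ja rather than c. For context, a sketch of how the free count can be computed from the journal bucket ring, assuming the cur_idx/discard_idx indices and do_reserve reservation carried by struct journal_device:

        unsigned int n;

        /* Ring distance from the next bucket to write (cur_idx) back
         * around to the next bucket waiting to be discarded. */
        if (ja->cur_idx >= ja->discard_idx)
                n = ca->sb.njournal_buckets + ja->discard_idx - ja->cur_idx;
        else
                n = ja->discard_idx - ja->cur_idx;

        /* Hold one bucket plus the configured reserve back. */
        if (n > (1 + j->do_reserve))
                return n - (1 + j->do_reserve);

        return 0;
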
649 static void journal_reclaim(struct cache_set *c) in journal_reclaim() argument
651 struct bkey *k = &c->journal.key; in journal_reclaim()
652 struct cache *ca = c->cache; in journal_reclaim()
657 atomic_long_inc(&c->reclaim); in journal_reclaim()
659 while (!atomic_read(&fifo_front(&c->journal.pin))) in journal_reclaim()
660 fifo_pop(&c->journal.pin, p); in journal_reclaim()
662 last_seq = last_seq(&c->journal); in journal_reclaim()
673 if (c->journal.blocks_free) in journal_reclaim()
676 if (!free_journal_buckets(c)) in journal_reclaim()
681 bucket_to_sector(c, ca->sb.d[ja->cur_idx]), in journal_reclaim()
683 atomic_long_inc(&c->reclaimed_journal_buckets); in journal_reclaim()
687 c->journal.blocks_free = ca->sb.bucket_size >> c->block_bits; in journal_reclaim()
690 if (!journal_full(&c->journal)) in journal_reclaim()
691 __closure_wake_up(&c->journal.wait); in journal_reclaim()
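
journal_reclaim() is the space-recovery side: it pops fully released entries off the front of journal.pin so last_seq can advance, and when the current journal bucket is used up it steps cur_idx to the next free bucket and refills blocks_free from the bucket size. A sketch of that flow built from the listed lines; the discard handling, the last_idx bookkeeping and the journal.key pointer update are elided:

        atomic_long_inc(&c->reclaim);

        /* Drop pin entries whose refcount has hit zero; everything in
         * front of the new head no longer needs to be kept on disk. */
        while (!atomic_read(&fifo_front(&c->journal.pin)))
                fifo_pop(&c->journal.pin, p);

        last_seq = last_seq(&c->journal);

        /* ... advance ja->last_idx past buckets older than last_seq and
         * issue discards for them (elided) ... */

        if (c->journal.blocks_free)
                goto out;                       /* current bucket has room */

        if (!free_journal_buckets(c))
                goto out;                       /* nowhere to advance to */

        /* Move on to the next journal bucket; journal.key is re-pointed
         * at bucket_to_sector(c, ca->sb.d[ja->cur_idx]) (update elided). */
        ja->cur_idx = (ja->cur_idx + 1) % ca->sb.njournal_buckets;
        atomic_long_inc(&c->reclaimed_journal_buckets);
        c->journal.blocks_free = ca->sb.bucket_size >> c->block_bits;

out:
        if (!journal_full(&c->journal))
                __closure_wake_up(&c->journal.wait);
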
722 cache_set_err_on(bio->bi_status, w->c, "journal io error"); in journal_write_endio()
723 closure_put(&w->c->journal.io); in journal_write_endio()
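
journal_write_endio() follows the usual bcache completion pattern: record any I/O error against the cache set, then drop the reference that the write path took on the journal.io closure for this bio. A sketch of the whole handler, assuming the bio's bi_private field carries the journal_write back-pointer:

static void journal_write_endio(struct bio *bio)
{
        struct journal_write *w = bio->bi_private;

        /* Mark the error on the cache set, then release the
         * in-flight reference on the journal.io closure. */
        cache_set_err_on(bio->bi_status, w->c, "journal io error");
        closure_put(&w->c->journal.io);
}
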
740 __releases(&c->journal.lock)
742 closure_type(c, struct cache_set, journal.io);
744 c->journal.io_in_flight = 0;
745 spin_unlock(&c->journal.lock);
749 __releases(c->journal.lock)
751 closure_type(c, struct cache_set, journal.io);
752 struct cache *ca = c->cache;
753 struct journal_write *w = c->journal.cur;
754 struct bkey *k = &c->journal.key;
766 } else if (journal_full(&c->journal)) {
767 journal_reclaim(c);
768 spin_unlock(&c->journal.lock);
770 btree_flush_write(c);
775 c->journal.blocks_free -= set_blocks(w->data, block_bytes(ca));
777 w->data->btree_level = c->root->level;
779 bkey_copy(&w->data->btree_root, &c->root->key);
780 bkey_copy(&w->data->uuid_bucket, &c->uuid_bucket);
785 w->data->last_seq = last_seq(&c->journal);
789 ca = c->cache;
814 atomic_dec_bug(&fifo_back(&c->journal.pin));
815 bch_journal_next(&c->journal);
816 journal_reclaim(c);
818 spin_unlock(&c->journal.lock);
821 closure_bio_submit(c, bio, cl);
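
These lines are the bulk of journal_write_unlocked(), the path that actually emits a journal entry: if the journal is full it reclaims, drops the lock and falls back to btree_flush_write() so pinned entries can be freed; otherwise it charges the entry against blocks_free, stamps the jset with the current btree root, uuid bucket and last_seq, rotates to the next journal_write via bch_journal_next(), and submits the bio after dropping journal.lock. A condensed sketch of that sequence; the need_write early return, the continue_at() rescheduling and the bio/checksum setup are reconstructed from context rather than from the listed lines (the journal_write callback at source lines 828-830 appears to retake journal.lock before re-entering this path):

        if (!w->need_write) {
                /* Nothing queued since the last write: just unlock via
                 * the journal_write_unlock destructor above. */
                closure_return_with_destructor(cl, journal_write_unlock);
                return;
        } else if (journal_full(&c->journal)) {
                /* No space on disk: reclaim, then flush dirty btree nodes
                 * so old journal entries can be unpinned, and retry. */
                journal_reclaim(c);
                spin_unlock(&c->journal.lock);
                btree_flush_write(c);
                continue_at(cl, journal_write, bch_journal_wq);
                return;
        }

        c->journal.blocks_free -= set_blocks(w->data, block_bytes(ca));

        /* Stamp the entry with the current on-disk state. */
        w->data->btree_level = c->root->level;
        bkey_copy(&w->data->btree_root, &c->root->key);
        bkey_copy(&w->data->uuid_bucket, &c->uuid_bucket);
        w->data->last_seq = last_seq(&c->journal);

        /* ... fill in magic/version/csum and set up the journal bio with
         * journal_write_endio as its completion (elided) ... */

        /* This entry is now on its way out: drop its pin, make the other
         * journal_write current, and try to reclaim space. */
        atomic_dec_bug(&fifo_back(&c->journal.pin));
        bch_journal_next(&c->journal);
        journal_reclaim(c);

        spin_unlock(&c->journal.lock);

        closure_bio_submit(c, bio, cl);
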
828 closure_type(c, struct cache_set, journal.io); in CLOSURE_CALLBACK()
830 spin_lock(&c->journal.lock); in CLOSURE_CALLBACK()
834 static void journal_try_write(struct cache_set *c) in journal_try_write() argument
835 __releases(c->journal.lock) in journal_try_write()
837 struct closure *cl = &c->journal.io; in journal_try_write()
838 struct journal_write *w = c->journal.cur; in journal_try_write()
842 if (!c->journal.io_in_flight) { in journal_try_write()
843 c->journal.io_in_flight = 1; in journal_try_write()
844 closure_call(cl, journal_write_unlocked, NULL, &c->cl); in journal_try_write()
846 spin_unlock(&c->journal.lock); in journal_try_write()
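
journal_try_write() is the small trigger around the write path: it flags the current entry as needing a write and starts journal_write_unlocked() through the journal.io closure, but only if no journal write is already in flight; in either case it is the function that gives up journal.lock. A sketch of the whole helper; the need_write flag is an assumption, only the io_in_flight guard and closure_call() appear above:

static void journal_try_write(struct cache_set *c)
        __releases(c->journal.lock)
{
        struct closure *cl = &c->journal.io;
        struct journal_write *w = c->journal.cur;

        w->need_write = true;

        if (!c->journal.io_in_flight) {
                /* Kick off the write; journal_write_unlocked()
                 * drops journal.lock itself. */
                c->journal.io_in_flight = 1;
                closure_call(cl, journal_write_unlocked, NULL, &c->cl);
        } else {
                /* A write is already running; it will pick up the
                 * need_write flag, so just release the lock. */
                spin_unlock(&c->journal.lock);
        }
}
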
850 static struct journal_write *journal_wait_for_write(struct cache_set *c, in journal_wait_for_write() argument
852 __acquires(&c->journal.lock) in journal_wait_for_write()
857 struct cache *ca = c->cache; in journal_wait_for_write()
861 spin_lock(&c->journal.lock); in journal_wait_for_write()
864 struct journal_write *w = c->journal.cur; in journal_wait_for_write()
870 c->journal.blocks_free * ca->sb.block_size, in journal_wait_for_write()
875 closure_wait(&c->journal.wait, &cl); in journal_wait_for_write()
877 if (!journal_full(&c->journal)) { in journal_wait_for_write()
879 trace_bcache_journal_entry_full(c); in journal_wait_for_write()
889 journal_try_write(c); /* unlocks */ in journal_wait_for_write()
892 trace_bcache_journal_full(c); in journal_wait_for_write()
894 journal_reclaim(c); in journal_wait_for_write()
895 spin_unlock(&c->journal.lock); in journal_wait_for_write()
897 btree_flush_write(c); in journal_wait_for_write()
901 spin_lock(&c->journal.lock); in journal_wait_for_write()
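
journal_wait_for_write() loops under journal.lock until the current journal_write can absorb nkeys more keys (nkeys names the second parameter, which the listing truncates). If the entry is merely full it is closed out with journal_try_write(); if the whole journal is full it reclaims and flushes btree nodes; either way the caller parks on journal.wait through an on-stack closure before retaking the lock. A condensed sketch of that loop; the size computation is reconstructed from context, only the blocks_free term appears in the listing:

        size_t sectors;
        bool wait = false;
        struct closure cl;
        struct journal_write *w;

        closure_init_stack(&cl);
        spin_lock(&c->journal.lock);

        while (1) {
                w = c->journal.cur;

                /* Would the entry, with nkeys more keys, still fit in the
                 * space left in the current journal bucket and jset? */
                sectors = __set_blocks(w->data, w->data->keys + nkeys,
                                       block_bytes(ca)) * ca->sb.block_size;
                if (sectors <= min_t(size_t,
                                     c->journal.blocks_free * ca->sb.block_size,
                                     PAGE_SECTORS << JSET_BITS))
                        return w;

                if (wait)
                        closure_wait(&c->journal.wait, &cl);

                if (!journal_full(&c->journal)) {
                        if (wait)
                                trace_bcache_journal_entry_full(c);
                        journal_try_write(c);           /* unlocks */
                } else {
                        if (wait)
                                trace_bcache_journal_full(c);
                        journal_reclaim(c);
                        spin_unlock(&c->journal.lock);
                        btree_flush_write(c);
                }

                closure_sync(&cl);                      /* wait for space */
                spin_lock(&c->journal.lock);
                wait = true;
        }
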
908 struct cache_set *c = container_of(to_delayed_work(work), in journal_write_work() local
911 spin_lock(&c->journal.lock); in journal_write_work()
912 if (c->journal.cur->dirty) in journal_write_work()
913 journal_try_write(c); in journal_write_work()
915 spin_unlock(&c->journal.lock); in journal_write_work()
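
journal_write_work() is the delayed-work half of the flush policy: when the timer armed by bch_journal() below fires, it writes out the current entry only if keys have been queued into it since the last write (cur->dirty); otherwise it just drops the lock. A sketch of the whole handler, assuming the work item is the journal.work field queued below:

static void journal_write_work(struct work_struct *work)
{
        struct cache_set *c = container_of(to_delayed_work(work),
                                           struct cache_set,
                                           journal.work.work);

        spin_lock(&c->journal.lock);
        if (c->journal.cur->dirty)
                journal_try_write(c);   /* releases journal.lock */
        else
                spin_unlock(&c->journal.lock);
}
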
924 atomic_t *bch_journal(struct cache_set *c, in bch_journal() argument
932 if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags))) in bch_journal()
935 if (!CACHE_SYNC(&c->cache->sb)) in bch_journal()
938 w = journal_wait_for_write(c, bch_keylist_nkeys(keys)); in bch_journal()
943 ret = &fifo_back(&c->journal.pin); in bch_journal()
948 journal_try_write(c); in bch_journal()
951 queue_delayed_work(bch_flush_wq, &c->journal.work, in bch_journal()
952 msecs_to_jiffies(c->journal_delay_ms)); in bch_journal()
953 spin_unlock(&c->journal.lock); in bch_journal()
955 spin_unlock(&c->journal.lock); in bch_journal()
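
bch_journal() ties it together for callers on the insert path: it refuses to journal if I/O is disabled or the cache is not synchronous, reserves room with journal_wait_for_write(), appends the keylist to the current jset, and returns the back entry of journal.pin so the caller can keep the entry pinned until its btree node is written. With a parent closure the write is kicked immediately; otherwise a flush is scheduled after journal_delay_ms. A condensed sketch; the key copy and the dirty-flag bookkeeping are reconstructed from context:

        struct journal_write *w;
        atomic_t *ret;

        if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags)))
                return NULL;            /* cache set already failed */

        if (!CACHE_SYNC(&c->cache->sb))
                return NULL;            /* journaling disabled */

        /* Returns with journal.lock held and room for the keys. */
        w = journal_wait_for_write(c, bch_keylist_nkeys(keys));

        /* ... append the caller's keys to the open jset (elided) ... */

        /* Pin the entry; the caller drops this once its node is written. */
        ret = &fifo_back(&c->journal.pin);
        atomic_inc(ret);

        if (parent) {
                /* Caller wants the entry on disk: flush now and let the
                 * parent closure wait on completion. */
                closure_wait(&w->wait, parent);
                journal_try_write(c);           /* unlocks */
        } else if (!w->dirty) {
                w->dirty = true;
                queue_delayed_work(bch_flush_wq, &c->journal.work,
                                   msecs_to_jiffies(c->journal_delay_ms));
                spin_unlock(&c->journal.lock);
        } else {
                spin_unlock(&c->journal.lock);
        }

        return ret;
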
962 void bch_journal_meta(struct cache_set *c, struct closure *cl) in bch_journal_meta() argument
969 ref = bch_journal(c, &keys, cl); in bch_journal_meta()
974 void bch_journal_free(struct cache_set *c) in bch_journal_free() argument
976 free_pages((unsigned long) c->journal.w[1].data, JSET_BITS); in bch_journal_free()
977 free_pages((unsigned long) c->journal.w[0].data, JSET_BITS); in bch_journal_free()
978 free_fifo(&c->journal.pin); in bch_journal_free()
981 int bch_journal_alloc(struct cache_set *c) in bch_journal_alloc() argument
983 struct journal *j = &c->journal; in bch_journal_alloc()
989 c->journal_delay_ms = 100; in bch_journal_alloc()
991 j->w[0].c = c; in bch_journal_alloc()
992 j->w[1].c = c; in bch_journal_alloc()
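
Setup and teardown mirror each other: bch_journal_alloc() initializes the locks and delayed work, sets the 100 ms default for journal_delay_ms, points both journal_write slots back at the cache set, and allocates the pin FIFO plus the two JSET_BITS-order page blocks that bch_journal_free() releases above. A sketch of the allocation path; the lock/work initialization and the allocation flags are assumptions, only the journal_delay_ms and w[].c assignments appear in the listing:

int bch_journal_alloc(struct cache_set *c)
{
        struct journal *j = &c->journal;

        spin_lock_init(&j->lock);
        spin_lock_init(&j->flush_write_lock);
        INIT_DELAYED_WORK(&j->work, journal_write_work);

        c->journal_delay_ms = 100;      /* default flush delay */

        /* Both journal_write slots need the back-pointer used by
         * journal_write_endio() and friends. */
        j->w[0].c = c;
        j->w[1].c = c;

        /* Pin FIFO plus one JSET_BITS-order buffer per slot; this is
         * exactly what bch_journal_free() tears down again. */
        if (!init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL) ||
            !(j->w[0].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)) ||
            !(j->w[1].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)))
                return -ENOMEM;

        return 0;
}
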