Lines matching references to "e" (struct wc_entry pointer) in drivers/md/dm-writecache.c
234 struct wc_entry *e; member
392 static struct wc_memory_entry *memory_entry(struct dm_writecache *wc, struct wc_entry *e) in memory_entry() argument
394 return &sb(wc)->entries[e->index]; in memory_entry()
397 static void *memory_data(struct dm_writecache *wc, struct wc_entry *e) in memory_data() argument
399 return (char *)wc->block_start + (e->index << wc->block_size_bits); in memory_data()
402 static sector_t cache_sector(struct dm_writecache *wc, struct wc_entry *e) in cache_sector() argument
405 ((sector_t)e->index << (wc->block_size_bits - SECTOR_SHIFT)); in cache_sector()
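The three accessors above derive everything from e->index: the metadata slot in the on-media entry array, the data block at block_start, and the sector on the cache device. A minimal standalone sketch of that arithmetic (SECTOR_SHIFT is the kernel's 512-byte constant; BLOCK_SIZE_BITS stands in for wc->block_size_bits):

```c
#include <stdint.h>
#include <stddef.h>

#define SECTOR_SHIFT	9	/* 512-byte sectors, as in the kernel */
#define BLOCK_SIZE_BITS	12	/* stand-in for wc->block_size_bits (4 KiB blocks) */

/* Simplified view of memory_data() and cache_sector(): both are pure
 * functions of the entry index, so no per-entry pointers need storing. */
static inline char *data_of(char *block_start, size_t index)
{
	return block_start + (index << BLOCK_SIZE_BITS);
}

static inline uint64_t sector_of(uint64_t start_sector, size_t index)
{
	return start_sector + ((uint64_t)index << (BLOCK_SIZE_BITS - SECTOR_SHIFT));
}
```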
408 static uint64_t read_original_sector(struct dm_writecache *wc, struct wc_entry *e) in read_original_sector() argument
411 return e->original_sector; in read_original_sector()
413 return le64_to_cpu(memory_entry(wc, e)->original_sector); in read_original_sector()
417 static uint64_t read_seq_count(struct dm_writecache *wc, struct wc_entry *e) in read_seq_count() argument
420 return e->seq_count; in read_seq_count()
422 return le64_to_cpu(memory_entry(wc, e)->seq_count); in read_seq_count()
426 static void clear_seq_count(struct dm_writecache *wc, struct wc_entry *e) in clear_seq_count() argument
429 e->seq_count = -1; in clear_seq_count()
431 pmem_assign(memory_entry(wc, e)->seq_count, cpu_to_le64(-1)); in clear_seq_count()
434 static void write_original_sector_seq_count(struct dm_writecache *wc, struct wc_entry *e, in write_original_sector_seq_count() argument
439 e->original_sector = original_sector; in write_original_sector_seq_count()
440 e->seq_count = seq_count; in write_original_sector_seq_count()
444 pmem_assign(*memory_entry(wc, e), me); in write_original_sector_seq_count()
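read_original_sector(), read_seq_count(), and their writers all branch on the operating mode: SSD mode caches the values in the in-core wc_entry, PMEM mode decodes the little-endian persistent slot every time. A toy model of that dual path (struct names here are stand-ins, not the driver's types):

```c
#include <stdbool.h>
#include <stdint.h>

/* Stand-ins: the persistent slot stores little-endian values, the
 * in-core entry caches native ones (populated in SSD mode only). */
struct mem_slot { uint64_t original_sector_le; uint64_t seq_count_le; };
struct core_ent { uint64_t original_sector;    uint64_t seq_count;    };

static uint64_t le64_to_host(uint64_t v) { return v; /* no-op on LE hosts */ }

/* Mirrors read_original_sector(): SSD mode trusts the in-core copy,
 * PMEM mode always decodes the persistent slot. */
static uint64_t read_orig(bool pmem, const struct core_ent *e,
			  const struct mem_slot *slot)
{
	return pmem ? le64_to_host(slot->original_sector_le)
		    : e->original_sector;
}
```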
609 struct wc_entry *e; in writecache_find_entry() local
616 e = container_of(node, struct wc_entry, rb_node); in writecache_find_entry()
617 if (read_original_sector(wc, e) == block) in writecache_find_entry()
620 node = (read_original_sector(wc, e) >= block ? in writecache_find_entry()
621 e->rb_node.rb_left : e->rb_node.rb_right); in writecache_find_entry()
625 if (read_original_sector(wc, e) >= block) in writecache_find_entry()
626 return e; in writecache_find_entry()
628 node = rb_next(&e->rb_node); in writecache_find_entry()
632 e = container_of(node, struct wc_entry, rb_node); in writecache_find_entry()
633 return e; in writecache_find_entry()
641 node = rb_prev(&e->rb_node); in writecache_find_entry()
643 node = rb_next(&e->rb_node); in writecache_find_entry()
645 return e; in writecache_find_entry()
648 return e; in writecache_find_entry()
649 e = e2; in writecache_find_entry()
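The lookup contract of writecache_find_entry() with WFE_RETURN_FOLLOWING is: return the entry whose original sector equals block, else the first entry above it, else NULL. A sketch of that contract over a sorted array instead of the driver's rb-tree (a classic lower-bound search):

```c
#include <stddef.h>
#include <stdint.h>

/* Exact match or first following entry, as writecache_find_entry()
 * provides under WFE_RETURN_FOLLOWING; NULL past the end. */
static const uint64_t *find_following(const uint64_t *sorted, size_t n,
				      uint64_t block)
{
	size_t lo = 0, hi = n;

	while (lo < hi) {
		size_t mid = lo + (hi - lo) / 2;

		if (sorted[mid] < block)
			lo = mid + 1;
		else
			hi = mid;
	}
	return lo < n ? &sorted[lo] : NULL;
}
```

The WFE_LOWEST_SEQ variant additionally steps among equal-keyed neighbours to pick the oldest copy, which the rb_prev()/rb_next() moves at lines 641-649 implement.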
655 struct wc_entry *e; in writecache_insert_entry() local
659 e = container_of(*node, struct wc_entry, rb_node); in writecache_insert_entry()
660 parent = &e->rb_node; in writecache_insert_entry()
661 if (read_original_sector(wc, e) > read_original_sector(wc, ins)) in writecache_insert_entry()
672 static void writecache_unlink(struct dm_writecache *wc, struct wc_entry *e) in writecache_unlink() argument
674 list_del(&e->lru); in writecache_unlink()
675 rb_erase(&e->rb_node, &wc->tree); in writecache_unlink()
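writecache_insert_entry() follows the canonical kernel rb-tree insertion idiom: walk to a leaf, remember the parent, then link and rebalance. A kernel-context sketch (assumes <linux/rbtree.h>; the key callback stands in for read_original_sector()):

```c
/* Canonical rb-tree insert, as in writecache_insert_entry(). */
static void insert_entry(struct rb_root *root, struct wc_entry *ins,
			 u64 (*key)(struct wc_entry *))
{
	struct rb_node **node = &root->rb_node, *parent = NULL;

	while (*node) {
		struct wc_entry *e = container_of(*node, struct wc_entry, rb_node);

		parent = *node;
		/* equal keys go right, so duplicates stay in insertion order */
		if (key(e) > key(ins))
			node = &parent->rb_left;
		else
			node = &parent->rb_right;
	}
	rb_link_node(&ins->rb_node, parent, node);
	rb_insert_color(&ins->rb_node, root);
}
```

writecache_unlink() is the inverse: list_del() from the LRU plus rb_erase() from the tree.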
678 static void writecache_add_to_freelist(struct dm_writecache *wc, struct wc_entry *e) in writecache_add_to_freelist() argument
684 wc->current_free = e; in writecache_add_to_freelist()
687 if (&e->rb_node < *node) in writecache_add_to_freelist()
692 rb_link_node(&e->rb_node, parent, node); in writecache_add_to_freelist()
693 rb_insert_color(&e->rb_node, &wc->freetree); in writecache_add_to_freelist()
695 list_add_tail(&e->lru, &wc->freelist); in writecache_add_to_freelist()
718 struct wc_entry *e; in writecache_pop_from_freelist() local
725 e = wc->current_free; in writecache_pop_from_freelist()
726 if (expected_sector != (sector_t)-1 && unlikely(cache_sector(wc, e) != expected_sector)) in writecache_pop_from_freelist()
728 next = rb_next(&e->rb_node); in writecache_pop_from_freelist()
729 rb_erase(&e->rb_node, &wc->freetree); in writecache_pop_from_freelist()
736 e = container_of(wc->freelist.next, struct wc_entry, lru); in writecache_pop_from_freelist()
737 if (expected_sector != (sector_t)-1 && unlikely(cache_sector(wc, e) != expected_sector)) in writecache_pop_from_freelist()
739 list_del(&e->lru); in writecache_pop_from_freelist()
745 return e; in writecache_pop_from_freelist()
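The freelist has two disciplines, which add_to_freelist() and pop_from_freelist() both branch on: a plain FIFO list for the common case, and a sorted structure (the freetree rb-tree in the source) when the caller may demand a specific cache sector to keep a write physically contiguous. A toy model of the pop with the expected-sector bail-out:

```c
#include <stddef.h>

struct free_ent { size_t index; struct free_ent *next; };

/* Simplified pop: if the caller asked for a particular block and the
 * cheapest candidate is not it, give up rather than hand out a block
 * that would break the contiguous write being assembled (cf. the
 * expected_sector checks at lines 726 and 737). */
static struct free_ent *pop_free(struct free_ent **head, size_t expected,
				 int check_expected)
{
	struct free_ent *e = *head;

	if (!e)
		return NULL;
	if (check_expected && e->index != expected)
		return NULL;
	*head = e->next;
	return e;
}
```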
748 static void writecache_free_entry(struct dm_writecache *wc, struct wc_entry *e) in writecache_free_entry() argument
750 writecache_unlink(wc, e); in writecache_free_entry()
751 writecache_add_to_freelist(wc, e); in writecache_free_entry()
752 clear_seq_count(wc, e); in writecache_free_entry()
753 writecache_flush_region(wc, memory_entry(wc, e), sizeof(struct wc_memory_entry)); in writecache_free_entry()
781 static void writecache_flush_entry(struct dm_writecache *wc, struct wc_entry *e) in writecache_flush_entry() argument
783 writecache_flush_region(wc, memory_entry(wc, e), sizeof(struct wc_memory_entry)); in writecache_flush_entry()
785 writecache_flush_region(wc, memory_data(wc, e), wc->block_size); in writecache_flush_entry()
788 static bool writecache_entry_is_committed(struct dm_writecache *wc, struct wc_entry *e) in writecache_entry_is_committed() argument
790 return read_seq_count(wc, e) < wc->seq_count; in writecache_entry_is_committed()
795 struct wc_entry *e, *e2; in writecache_flush() local
804 e = container_of(wc->lru.next, struct wc_entry, lru); in writecache_flush()
805 if (writecache_entry_is_committed(wc, e)) { in writecache_flush()
814 writecache_flush_entry(wc, e); in writecache_flush()
815 if (unlikely(e->lru.next == &wc->lru)) in writecache_flush()
817 e2 = container_of(e->lru.next, struct wc_entry, lru); in writecache_flush()
820 e = e2; in writecache_flush()
837 struct rb_node *rb_node = rb_prev(&e->rb_node); in writecache_flush()
841 if (read_original_sector(wc, e2) == read_original_sector(wc, e) && in writecache_flush()
847 if (unlikely(e->lru.prev == &wc->lru)) in writecache_flush()
849 e = container_of(e->lru.prev, struct wc_entry, lru); in writecache_flush()
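writecache_flush() implements a single-store commit point, and writecache_entry_is_committed() is its read side: an entry written under sequence number N becomes committed the moment the global seq_count is persisted as N+1. A toy model of that ordering (persistence steps elided and marked):

```c
#include <stdint.h>

struct toy_wc { uint64_t seq_count; };

/* Read side, as in writecache_entry_is_committed(). */
static int is_committed(uint64_t entry_seq, const struct toy_wc *wc)
{
	return entry_seq < wc->seq_count;
}

/* Write side, sketched from writecache_flush(). */
static void commit(struct toy_wc *wc)
{
	/* 1. persist data + metadata of every uncommitted entry
	 *    (writecache_flush_entry() in the source, elided here) */
	/* 2. the 8-byte commit point: */
	wc->seq_count++;
	/* 3. persist the new seq_count (elided); every entry written
	 *    under the old count now reads as committed at once */
}
```

After the bump, the rb_prev() walk at lines 837-849 frees older entries that cached the same original sector, since the just-committed copies supersede them.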
882 struct wc_entry *e; in writecache_discard() local
885 e = writecache_find_entry(wc, start, WFE_RETURN_FOLLOWING | WFE_LOWEST_SEQ); in writecache_discard()
886 if (unlikely(!e)) in writecache_discard()
889 while (read_original_sector(wc, e) < end) { in writecache_discard()
890 struct rb_node *node = rb_next(&e->rb_node); in writecache_discard()
892 if (likely(!e->write_in_progress)) { in writecache_discard()
900 if (!writecache_entry_is_committed(wc, e)) in writecache_discard()
902 writecache_free_entry(wc, e); in writecache_discard()
908 e = container_of(node, struct wc_entry, rb_node); in writecache_discard()
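The discard loop frees every entry whose original sector falls in [start, end), skipping entries currently being written back. A standalone sketch over a sorted array (the source walks the rb-tree via rb_next() instead; the uncommitted-block counter it also maintains is elided):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>

struct d_entry { uint64_t orig; bool write_in_progress; bool free; };

static void discard_range(struct d_entry *sorted, size_t n,
			  uint64_t start, uint64_t end)
{
	for (size_t i = 0; i < n; i++) {
		if (sorted[i].orig < start)
			continue;		/* before the range */
		if (sorted[i].orig >= end)
			break;			/* sorted: nothing further matches */
		if (!sorted[i].write_in_progress)
			sorted[i].free = true;	/* cf. writecache_free_entry() */
	}
}
```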
968 struct wc_entry *e = &wc->entries[b]; in writecache_alloc_entries() local
970 e->index = b; in writecache_alloc_entries()
971 e->write_in_progress = false; in writecache_alloc_entries()
1041 struct wc_entry *e = &wc->entries[b]; in writecache_resume() local
1045 e->original_sector = -1; in writecache_resume()
1046 e->seq_count = -1; in writecache_resume()
1049 r = copy_mc_to_kernel(&wme, memory_entry(wc, e), in writecache_resume()
1054 e->original_sector = -1; in writecache_resume()
1055 e->seq_count = -1; in writecache_resume()
1057 e->original_sector = le64_to_cpu(wme.original_sector); in writecache_resume()
1058 e->seq_count = le64_to_cpu(wme.seq_count); in writecache_resume()
1064 struct wc_entry *e = &wc->entries[b]; in writecache_resume() local
1066 if (!writecache_entry_is_committed(wc, e)) { in writecache_resume()
1067 if (read_seq_count(wc, e) != -1) { in writecache_resume()
1069 clear_seq_count(wc, e); in writecache_resume()
1072 writecache_add_to_freelist(wc, e); in writecache_resume()
1076 old = writecache_find_entry(wc, read_original_sector(wc, e), 0); in writecache_resume()
1078 writecache_insert_entry(wc, e); in writecache_resume()
1080 if (read_seq_count(wc, old) == read_seq_count(wc, e)) { in writecache_resume()
1083 (unsigned long long)b, (unsigned long long)read_original_sector(wc, e), in writecache_resume()
1084 (unsigned long long)read_seq_count(wc, e)); in writecache_resume()
1086 if (read_seq_count(wc, old) > read_seq_count(wc, e)) { in writecache_resume()
1090 writecache_insert_entry(wc, e); in writecache_resume()
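On resume, after the machine-check-safe copy of each persistent entry (copy_mc_to_kernel() at line 1049), the second pass rebuilds the tree and must resolve duplicates: two entries claiming the same original sector. The rule distilled from lines 1076-1090, as a standalone sketch:

```c
#include <stdint.h>

enum keep { KEEP_OLD, KEEP_NEW, CORRUPT };

/* Higher sequence count wins; equal counts cannot happen with intact
 * metadata and trigger the DMERR at lines 1080-1084 in the source. */
static enum keep resolve(uint64_t old_seq, uint64_t new_seq)
{
	if (old_seq == new_seq)
		return CORRUPT;
	return old_seq > new_seq ? KEEP_OLD : KEEP_NEW;
}
```

The losing entry is freed back to the freelist; the winner is (re)inserted with writecache_insert_entry().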
1339 struct wc_entry *e) in writecache_map_remap_origin() argument
1341 if (e) { in writecache_map_remap_origin()
1343 read_original_sector(wc, e) - bio->bi_iter.bi_sector; in writecache_map_remap_origin()
1352 struct wc_entry *e; in writecache_map_read() local
1356 e = writecache_find_entry(wc, bio->bi_iter.bi_sector, WFE_RETURN_FOLLOWING); in writecache_map_read()
1357 if (e && read_original_sector(wc, e) == bio->bi_iter.bi_sector) { in writecache_map_read()
1360 bio_copy_block(wc, bio, memory_data(wc, e)); in writecache_map_read()
1367 bio->bi_iter.bi_sector = cache_sector(wc, e); in writecache_map_read()
1368 if (!writecache_entry_is_committed(wc, e)) in writecache_map_read()
1373 writecache_map_remap_origin(wc, bio, e); in writecache_map_read()
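The read path makes a three-way routing decision, and writecache_map_remap_origin() handles the miss case by trimming the bio so it stops before the next cached block (the subtraction at line 1343). A toy model of the routing (flush/wait details simplified and noted):

```c
#include <stdbool.h>

enum target { READ_FROM_PMEM, READ_FROM_CACHE_DEV, READ_FROM_ORIGIN };

/* found: a cache entry covers exactly this sector; pmem_mode and
 * committed mirror WC_MODE_PMEM() and writecache_entry_is_committed(). */
static enum target route_read(bool found, bool pmem_mode, bool committed)
{
	if (!found)
		return READ_FROM_ORIGIN;	/* trimmed before the next entry */
	if (pmem_mode)
		return READ_FROM_PMEM;		/* copied via bio_copy_block() */
	/* SSD mode remaps to cache_sector(); if the entry is not yet
	 * committed, the source first waits for in-flight writes
	 * (line 1368) rather than doing anything modelled here. */
	(void)committed;
	return READ_FROM_CACHE_DEV;
}
```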
1382 struct wc_entry *e, bool search_used) in writecache_bio_copy_ssd() argument
1385 sector_t start_cache_sec = cache_sector(wc, e); in writecache_bio_copy_ssd()
1400 struct rb_node *next = rb_next(&e->rb_node); in writecache_bio_copy_ssd()
1405 if (f != e + 1) in writecache_bio_copy_ssd()
1408 read_original_sector(wc, e) + (wc->block_size >> SECTOR_SHIFT)) in writecache_bio_copy_ssd()
1414 e = f; in writecache_bio_copy_ssd()
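The coalescing checks around lines 1405-1408 decide whether a follower entry can join the current SSD write: f != e + 1 tests adjacency in the entry array (which, given e->index = b from writecache_alloc_entries(), means the cache blocks are physically adjacent), and the sector check tests that f caches the next original block. Distilled into a standalone predicate:

```c
#include <stdbool.h>
#include <stdint.h>

struct c_entry { uint64_t index; uint64_t orig_sector; };

/* block_sectors is wc->block_size >> SECTOR_SHIFT in the source. */
static bool can_coalesce(const struct c_entry *e, const struct c_entry *f,
			 uint64_t block_sectors)
{
	return f->index == e->index + 1 &&
	       f->orig_sector == e->orig_sector + block_sectors;
}
```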
1437 struct wc_entry *e; in writecache_map_write() local
1447 e = writecache_find_entry(wc, bio->bi_iter.bi_sector, 0); in writecache_map_write()
1448 if (e) { in writecache_map_write()
1449 if (!writecache_entry_is_committed(wc, e)) { in writecache_map_write()
1455 if (!WC_MODE_PMEM(wc) && !e->write_in_progress) { in writecache_map_write()
1466 e = writecache_pop_from_freelist(wc, (sector_t)-1); in writecache_map_write()
1467 if (unlikely(!e)) { in writecache_map_write()
1470 e = writecache_find_entry(wc, bio->bi_iter.bi_sector, WFE_RETURN_FOLLOWING); in writecache_map_write()
1471 writecache_map_remap_origin(wc, bio, e); in writecache_map_write()
1480 write_original_sector_seq_count(wc, e, bio->bi_iter.bi_sector, wc->seq_count); in writecache_map_write()
1481 writecache_insert_entry(wc, e); in writecache_map_write()
1486 bio_copy_block(wc, bio, memory_data(wc, e)); in writecache_map_write()
1489 writecache_bio_copy_ssd(wc, bio, e, search_used); in writecache_map_write()
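The write path's decision tree, distilled from lines 1447-1489: an uncommitted hit is overwritten in place (same sequence epoch, nothing persisted yet); otherwise a fresh entry is popped from the freelist; and if the freelist is empty the bio falls back to the origin device, trimmed before the next cached block just like a read miss. A toy model:

```c
#include <stdbool.h>

enum waction { OVERWRITE_IN_PLACE, ALLOCATE_NEW, FALL_BACK_TO_ORIGIN };

static enum waction route_write(bool hit, bool committed, bool have_free)
{
	if (hit && !committed)
		return OVERWRITE_IN_PLACE;	/* data not yet flushed */
	if (have_free)
		return ALLOCATE_NEW;	/* pop, set sector+seq, insert in tree */
	return FALL_BACK_TO_ORIGIN;	/* writecache_map_remap_origin() */
}
```

On ALLOCATE_NEW, write_original_sector_seq_count() stamps the entry before writecache_insert_entry() makes it visible in the tree (lines 1480-1481).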
1685 struct wc_entry *e; in __writecache_endio_pmem() local
1697 e = wb->wc_list[i]; in __writecache_endio_pmem()
1698 BUG_ON(!e->write_in_progress); in __writecache_endio_pmem()
1699 e->write_in_progress = false; in __writecache_endio_pmem()
1700 INIT_LIST_HEAD(&e->lru); in __writecache_endio_pmem()
1702 writecache_free_entry(wc, e); in __writecache_endio_pmem()
1723 struct wc_entry *e; in __writecache_endio_ssd() local
1732 e = c->e; in __writecache_endio_ssd()
1734 BUG_ON(!e->write_in_progress); in __writecache_endio_ssd()
1735 e->write_in_progress = false; in __writecache_endio_ssd()
1736 INIT_LIST_HEAD(&e->lru); in __writecache_endio_ssd()
1738 writecache_free_entry(wc, e); in __writecache_endio_ssd()
1742 e++; in __writecache_endio_ssd()
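Both endio handlers share the same completion bookkeeping: assert the entry was actually claimed by a writeback, drop the write_in_progress flag, and return it to the freelist unless the device has recorded an error. A toy model of that per-entry step:

```c
#include <stdbool.h>

struct wb_entry { bool write_in_progress; bool device_error; bool free; };

static void complete_writeback(struct wb_entry *e)
{
	/* BUG_ON(!e->write_in_progress) in the source: completion must
	 * only ever see entries a writeback actually claimed. */
	e->write_in_progress = false;
	if (!e->device_error)
		e->free = true;		/* cf. writecache_free_entry() */
}
```

The SSD variant walks a run of e->wc_list_contiguous entries with e++ (line 1742), which is valid precisely because the run was built from array-adjacent entries.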
1796 static bool wc_add_block(struct writeback_struct *wb, struct wc_entry *e) in wc_add_block() argument
1800 void *address = memory_data(wc, e); in wc_add_block()
1831 struct wc_entry *e, *f; in __writecache_writeback_pmem() local
1838 e = container_of(wbl->list.prev, struct wc_entry, lru); in __writecache_writeback_pmem()
1839 list_del(&e->lru); in __writecache_writeback_pmem()
1841 max_pages = e->wc_list_contiguous; in __writecache_writeback_pmem()
1848 bio->bi_iter.bi_sector = read_original_sector(wc, e); in __writecache_writeback_pmem()
1860 BUG_ON(!wc_add_block(wb, e)); in __writecache_writeback_pmem()
1862 wb->wc_list[0] = e; in __writecache_writeback_pmem()
1868 read_original_sector(wc, e) + (wc->block_size >> SECTOR_SHIFT)) in __writecache_writeback_pmem()
1875 e = f; in __writecache_writeback_pmem()
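In PMEM mode the writeback bio is assembled page by page: wc_add_block() appends each cache block's persistent-memory page to the bio. A kernel-context sketch of that pattern (assumes <linux/bio.h> and that a block fits in one page, which dm-writecache's block-size limits guarantee; the source's cache-flush step is elided):

```c
/* Append one cache block to the write-back bio; returns false when
 * the bio is full, matching the BUG_ON(!wc_add_block(...)) usage for
 * the guaranteed first block at line 1860. */
static bool add_block(struct bio *bio, void *block_addr, unsigned block_size)
{
	return bio_add_page(bio, virt_to_page(block_addr), block_size,
			    offset_in_page(block_addr)) == block_size;
}
```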
1895 struct wc_entry *e, *f; in __writecache_writeback_ssd() local
1903 e = container_of(wbl->list.prev, struct wc_entry, lru); in __writecache_writeback_ssd()
1904 list_del(&e->lru); in __writecache_writeback_ssd()
1906 n_sectors = e->wc_list_contiguous << (wc->block_size_bits - SECTOR_SHIFT); in __writecache_writeback_ssd()
1909 from.sector = cache_sector(wc, e); in __writecache_writeback_ssd()
1912 to.sector = read_original_sector(wc, e); in __writecache_writeback_ssd()
1917 c->e = e; in __writecache_writeback_ssd()
1918 c->n_entries = e->wc_list_contiguous; in __writecache_writeback_ssd()
1923 BUG_ON(f != e + 1); in __writecache_writeback_ssd()
1925 e = f; in __writecache_writeback_ssd()
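The SSD writeback avoids bios entirely: a run of k physically contiguous cache blocks becomes a single copy operation (dm-kcopyd in the source) from the cache region back to the original sectors. The region arithmetic, as a standalone sketch:

```c
#include <stdint.h>

struct region { uint64_t sector; uint64_t count; };

/* k is e->wc_list_contiguous; block_sectors is
 * wc->block_size >> SECTOR_SHIFT (cf. line 1906). */
static void make_copy_regions(uint64_t cache_sec, uint64_t orig_sec,
			      unsigned k, unsigned block_sectors,
			      struct region *from, struct region *to)
{
	from->sector = cache_sec;	/* cache_sector(wc, e) */
	from->count  = (uint64_t)k * block_sectors;
	to->sector   = orig_sec;	/* read_original_sector(wc, e) */
	to->count    = from->count;
}
```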
1946 struct wc_entry *f, *g, *e = NULL; in writecache_writeback() local
2008 if (unlikely(!e)) { in writecache_writeback()
2010 e = container_of(rb_first(&wc->tree), struct wc_entry, rb_node); in writecache_writeback()
2012 e = g; in writecache_writeback()
2014 e = container_of(wc->lru.prev, struct wc_entry, lru); in writecache_writeback()
2015 BUG_ON(e->write_in_progress); in writecache_writeback()
2016 if (unlikely(!writecache_entry_is_committed(wc, e))) in writecache_writeback()
2019 node = rb_prev(&e->rb_node); in writecache_writeback()
2023 read_original_sector(wc, e))) { in writecache_writeback()
2025 list_move(&e->lru, &skipped); in writecache_writeback()
2031 list_move(&e->lru, &wbl.list); in writecache_writeback()
2033 e->write_in_progress = true; in writecache_writeback()
2034 e->wc_list_contiguous = 1; in writecache_writeback()
2036 f = e; in writecache_writeback()
2071 e->wc_list_contiguous++; in writecache_writeback()
2072 if (unlikely(e->wc_list_contiguous == BIO_MAX_VECS)) { in writecache_writeback()
2117 struct wc_entry e; in calculate_memory_size() local
2137 e.index = n_blocks; in calculate_memory_size()
2138 if (e.index != n_blocks) in calculate_memory_size()
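The pair of lines at 2137-2138 is a truncation check: the in-core index field is narrower than 64 bits (a bitfield in the source), so assigning the candidate block count and testing that it round-trips detects a cache too large to index. The idiom in isolation (the 32-bit width here is an assumption for the sketch, not the driver's actual field width):

```c
#include <stdint.h>
#include <errno.h>

struct idx_probe { uint32_t index; };	/* assumed width for illustration */

static int check_n_blocks(uint64_t n_blocks)
{
	struct idx_probe e;

	e.index = (uint32_t)n_blocks;
	if (e.index != n_blocks)
		return -E2BIG;	/* hypothetical error; the source caps sizing */
	return 0;
}
```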