Lines matching refs: has (cross-reference hits for the struct hv_hotadd_state *has pointer in drivers/hv/hv_balloon.c, the Hyper-V dynamic memory balloon driver)
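Context for the matches below: has always points at a struct hv_hotadd_state, the per-region hot-add bookkeeping in hv_balloon.c. A sketch of the two structures involved, following the mainline layout (the field set matches the accesses in this listing; the comments are editorial):

    /* One hot-add (HA) region offered by the Hyper-V host. */
    struct hv_hotadd_state {
            struct list_head list;           /* entry in dm_device.ha_region_list */
            unsigned long start_pfn;         /* first pfn of the region */
            unsigned long covered_start_pfn; /* start of the host-backed range */
            unsigned long covered_end_pfn;   /* end of the host-backed range */
            unsigned long ha_end_pfn;        /* end of what was hot-added so far */
            unsigned long end_pfn;           /* current end of the region */
            struct list_head gap_list;       /* unbacked holes in the covered range */
    };

    /* An unbacked hole inside a region's covered range. */
    struct hv_hotadd_gap {
            struct list_head list;
            unsigned long start_pfn;
            unsigned long end_pfn;
    };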

591 static inline bool has_pfn_is_backed(struct hv_hotadd_state *has,  in has_pfn_is_backed()  argument
597 if ((pfn < has->covered_start_pfn) || (pfn >= has->covered_end_pfn)) in has_pfn_is_backed()
601 list_for_each_entry(gap, &has->gap_list, list) { in has_pfn_is_backed()
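Lines 591-601 all come from the backing test: a pfn is backed iff it lies inside the covered range and misses every gap. A sketch of the full function as it reads in mainline (modulo version drift):

    static inline bool has_pfn_is_backed(struct hv_hotadd_state *has,
                                         unsigned long pfn)
    {
            struct hv_hotadd_gap *gap;

            /* Outside the host-backed range: not backed. */
            if ((pfn < has->covered_start_pfn) || (pfn >= has->covered_end_pfn))
                    return false;

            /* Inside the range, but possibly in an unbacked gap. */
            list_for_each_entry(gap, &has->gap_list, list) {
                    if ((pfn >= gap->start_pfn) && (pfn < gap->end_pfn))
                            return false;
            }

            return true;
    }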
613 struct hv_hotadd_state *has; in hv_page_offline_check() local
622 list_for_each_entry(has, &dm_device.ha_region_list, list) { in hv_page_offline_check()
623 while ((pfn >= has->start_pfn) && in hv_page_offline_check()
624 (pfn < has->end_pfn) && in hv_page_offline_check()
627 if (has_pfn_is_backed(has, pfn)) in hv_page_offline_check()
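hv_page_offline_check() (lines 613-627) scans the HA region list for every pfn in the range being offlined and counts the backed ones; the caller uses that count to adjust its onlined-pages accounting. A condensed sketch, with locking left to the caller as upstream does:

    static unsigned long hv_page_offline_check(unsigned long start_pfn,
                                               unsigned long nr_pages)
    {
            unsigned long pfn = start_pfn, count = 0;
            struct hv_hotadd_state *has;
            bool found;

            while (pfn < start_pfn + nr_pages) {
                    found = false;
                    list_for_each_entry(has, &dm_device.ha_region_list, list) {
                            /* Count consecutive pfns covered by this region. */
                            while ((pfn >= has->start_pfn) &&
                                   (pfn < has->end_pfn) &&
                                   (pfn < start_pfn + nr_pages)) {
                                    found = true;
                                    if (has_pfn_is_backed(has, pfn))
                                            count++;
                                    pfn++;
                            }
                    }
                    /* Not in any HA region (e.g. boot memory): skip it. */
                    if (!found)
                            pfn++;
            }
            return count;
    }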
688 static void hv_page_online_one(struct hv_hotadd_state *has, struct page *pg) in hv_page_online_one() argument
690 if (!has_pfn_is_backed(has, page_to_pfn(pg))) { in hv_page_online_one()
705 static void hv_bring_pgs_online(struct hv_hotadd_state *has, in hv_bring_pgs_online() argument
712 hv_page_online_one(has, pfn_to_page(start_pfn + i)); in hv_bring_pgs_online()
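The pair at lines 688-712 does the actual onlining: hv_page_online_one() onlines a single backed page and marks unbacked ones PageOffline, while hv_bring_pgs_online() loops it over a pfn range. A trimmed sketch of both, following the mainline logic:

    static void hv_page_online_one(struct hv_hotadd_state *has, struct page *pg)
    {
            if (!has_pfn_is_backed(has, page_to_pfn(pg))) {
                    /* Unbacked frame: keep it out of the page allocator. */
                    if (!PageOffline(pg))
                            __SetPageOffline(pg);
                    return;
            }
            if (PageOffline(pg))
                    __ClearPageOffline(pg);

            /* Backed frame: hand it to the buddy allocator. */
            generic_online_page(pg, 0);
            dm_device.num_pages_onlined++;   /* caller holds ha_lock */
    }

    static void hv_bring_pgs_online(struct hv_hotadd_state *has,
                                    unsigned long start_pfn, unsigned long size)
    {
            unsigned long i;

            for (i = 0; i < size; i++)
                    hv_page_online_one(has, pfn_to_page(start_pfn + i));
    }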
717 struct hv_hotadd_state *has) in hv_mem_hot_add() argument
730 has->ha_end_pfn += HA_CHUNK; in hv_mem_hot_add()
740 has->covered_end_pfn += processed_pfn; in hv_mem_hot_add()
762 has->ha_end_pfn -= HA_CHUNK; in hv_mem_hot_add()
763 has->covered_end_pfn -= processed_pfn; in hv_mem_hot_add()
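Lines 730, 740, 762 and 763 are the two halves of an optimistic-update pattern in hv_mem_hot_add(): the region bookkeeping is advanced under ha_lock before each HA_CHUNK is hot-added, and rolled back if add_memory() fails. A condensed sketch of the per-chunk loop (completion handling, NUMA node lookup and newer add_memory() flags vary by kernel version and are trimmed):

    for (i = 0; i < (size / HA_CHUNK); i++) {
            start_pfn = start + (i * HA_CHUNK);

            /* Optimistically extend the hot-added and covered ranges. */
            spin_lock_irqsave(&dm_device.ha_lock, flags);
            has->ha_end_pfn += HA_CHUNK;
            processed_pfn = min(total_pfn, (unsigned long)HA_CHUNK);
            total_pfn -= processed_pfn;
            has->covered_end_pfn += processed_pfn;
            spin_unlock_irqrestore(&dm_device.ha_lock, flags);

            ret = add_memory(nid, PFN_PHYS(start_pfn),
                             HA_CHUNK << PAGE_SHIFT, MHP_MERGE_RESOURCE);
            if (ret) {
                    /* Hot add failed: undo the optimistic update. */
                    spin_lock_irqsave(&dm_device.ha_lock, flags);
                    has->ha_end_pfn -= HA_CHUNK;
                    has->covered_end_pfn -= processed_pfn;
                    spin_unlock_irqrestore(&dm_device.ha_lock, flags);
                    break;
            }
    }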
783 struct hv_hotadd_state *has; in hv_online_page() local
788 list_for_each_entry(has, &dm_device.ha_region_list, list) { in hv_online_page()
790 if ((pfn < has->start_pfn) || in hv_online_page()
791 (pfn + (1UL << order) > has->end_pfn)) in hv_online_page()
794 hv_bring_pgs_online(has, pfn, 1UL << order); in hv_online_page()
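hv_online_page() (lines 783-794) is the driver's memory-hotplug online callback: it finds the HA region that owns the page block and defers to hv_bring_pgs_online(). A sketch following the mainline shape:

    static void hv_online_page(struct page *pg, unsigned int order)
    {
            struct hv_hotadd_state *has;
            unsigned long pfn = page_to_pfn(pg);
            unsigned long flags;

            spin_lock_irqsave(&dm_device.ha_lock, flags);
            list_for_each_entry(has, &dm_device.ha_region_list, list) {
                    /* The block belongs to a different HA region. */
                    if ((pfn < has->start_pfn) ||
                        (pfn + (1UL << order) > has->end_pfn))
                            continue;

                    hv_bring_pgs_online(has, pfn, 1UL << order);
                    break;
            }
            spin_unlock_irqrestore(&dm_device.ha_lock, flags);
    }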
802 struct hv_hotadd_state *has; in pfn_covered() local
809 list_for_each_entry(has, &dm_device.ha_region_list, list) { in pfn_covered()
814 if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn) in pfn_covered()
821 if (has->covered_end_pfn != start_pfn) { in pfn_covered()
829 gap->start_pfn = has->covered_end_pfn; in pfn_covered()
831 list_add_tail(&gap->list, &has->gap_list); in pfn_covered()
833 has->covered_end_pfn = start_pfn; in pfn_covered()
840 if ((start_pfn + pfn_cnt) > has->end_pfn) { in pfn_covered()
841 residual = (start_pfn + pfn_cnt - has->end_pfn); in pfn_covered()
849 has->end_pfn += new_inc; in pfn_covered()
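pfn_covered() (lines 802-849) checks whether a host request falls inside an existing HA region and, if so, makes two adjustments. First, if the request does not start where coverage left off, the hole is recorded as a gap so has_pfn_is_backed() reports it unbacked (lines 821-833); second, a request running past the region grows end_pfn in HA_CHUNK multiples (lines 840-849). A condensed sketch of just those two steps:

    /* 1. Record the hole between old coverage and the new request. */
    if (has->covered_end_pfn != start_pfn) {
            gap = kzalloc(sizeof(*gap), GFP_ATOMIC);
            if (!gap)
                    return -ENOMEM;          /* upstream breaks out with ret */
            INIT_LIST_HEAD(&gap->list);
            gap->start_pfn = has->covered_end_pfn;
            gap->end_pfn = start_pfn;
            list_add_tail(&gap->list, &has->gap_list);
            has->covered_end_pfn = start_pfn;
    }

    /* 2. Extend the region by whole HA_CHUNKs to fit the request. */
    if ((start_pfn + pfn_cnt) > has->end_pfn) {
            residual = start_pfn + pfn_cnt - has->end_pfn;
            new_inc = DIV_ROUND_UP(residual, HA_CHUNK) * HA_CHUNK;
            has->end_pfn += new_inc;
    }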
866 struct hv_hotadd_state *has; in handle_pg_range() local
875 list_for_each_entry(has, &dm_device.ha_region_list, list) { in handle_pg_range()
880 if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn) in handle_pg_range()
883 old_covered_state = has->covered_end_pfn; in handle_pg_range()
885 if (start_pfn < has->ha_end_pfn) { in handle_pg_range()
891 pgs_ol = has->ha_end_pfn - start_pfn; in handle_pg_range()
895 has->covered_end_pfn += pgs_ol; in handle_pg_range()
905 if (start_pfn > has->start_pfn && in handle_pg_range()
907 hv_bring_pgs_online(has, start_pfn, pgs_ol); in handle_pg_range()
911 if ((has->ha_end_pfn < has->end_pfn) && (pfn_cnt > 0)) { in handle_pg_range()
919 size = (has->end_pfn - has->ha_end_pfn); in handle_pg_range()
928 hv_mem_hot_add(has->ha_end_pfn, size, pfn_cnt, has); in handle_pg_range()
935 res = has->covered_end_pfn - old_covered_state; in handle_pg_range()
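handle_pg_range() (lines 866-935) combines the mechanisms above. For the region containing start_pfn it first covers, and where the backing section is already online also onlines, any pages that were hot-added earlier (lines 885-907); it then hot-adds whatever remains in HA_CHUNK multiples (lines 911-928) and reports progress as the growth of covered_end_pfn. A heavily condensed control-flow sketch; the real function also juggles ha_lock around the hot add:

    list_for_each_entry(has, &dm_device.ha_region_list, list) {
            if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)
                    continue;                /* not this region */

            old_covered_state = has->covered_end_pfn;

            if (start_pfn < has->ha_end_pfn) {
                    /* Already hot-added: mark covered, online if the
                     * backing section is online (the check at line 905). */
                    pgs_ol = min(has->ha_end_pfn - start_pfn, pfn_cnt);
                    has->covered_end_pfn += pgs_ol;
                    pfn_cnt -= pgs_ol;
                    if (start_pfn > has->start_pfn &&
                        online_section_nr(pfn_to_section_nr(start_pfn)))
                            hv_bring_pgs_online(has, start_pfn, pgs_ol);
            }

            if ((has->ha_end_pfn < has->end_pfn) && (pfn_cnt > 0)) {
                    /* Hot-add the remainder, rounded up to HA_CHUNKs and
                     * capped at the space left in the region. */
                    size = min(has->end_pfn - has->ha_end_pfn,
                               roundup(pfn_cnt, HA_CHUNK));
                    hv_mem_hot_add(has->ha_end_pfn, size, pfn_cnt, has);
            }

            return has->covered_end_pfn - old_covered_state;
    }
    return 0;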
2048 struct hv_hotadd_state *has, *tmp; in balloon_remove() local
2077 list_for_each_entry_safe(has, tmp, &dm->ha_region_list, list) { in balloon_remove()
2078 list_for_each_entry_safe(gap, tmp_gap, &has->gap_list, list) { in balloon_remove()
2082 list_del(&has->list); in balloon_remove()
2083 kfree(has); in balloon_remove()
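Finally, balloon_remove() (lines 2048-2083) tears the whole structure down: every gap hanging off a region is freed first, then the region itself, mirroring the listing almost line for line:

    spin_lock_irqsave(&dm_device.ha_lock, flags);
    list_for_each_entry_safe(has, tmp, &dm->ha_region_list, list) {
            list_for_each_entry_safe(gap, tmp_gap, &has->gap_list, list) {
                    list_del(&gap->list);
                    kfree(gap);
            }
            list_del(&has->list);
            kfree(has);
    }
    spin_unlock_irqrestore(&dm_device.ha_lock, flags);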