/linux-6.3-rc2/drivers/ufs/core/

ufshpb.c
   195  rgn->reads++;  in ufshpb_iterate_rgn()
   650  if (rgn)  in ufshpb_set_unmap_cmd()
   855  rgn->read_timeout =  in ufshpb_add_lru_info()
   886  victim_rgn = rgn;  in ufshpb_victim_lru_info()
   921  int rgn_idx = rgn ? rgn->rgn_idx : 0;  in ufshpb_issue_umap_req()
   964  rgn->rgn_idx);  in ufshpb_evict_region()
  1201  (rgn->rgn_state != HPB_RGN_ACTIVE || is_rgn_dirty(rgn))) {  in ufshpb_rsp_req_region_update()
  1499  rgn->reads = 0;  in ufshpb_normalization_work_handler()
  1508  if (rgn->rgn_state != HPB_RGN_ACTIVE || rgn->reads)  in ufshpb_normalization_work_handler()
  1591  if (!rgn->srgn_tbl)  in ufshpb_alloc_subregion_tbl()
  [all …]
ufshpb.h
   141  #define for_each_sub_region(rgn, i, srgn) \  (argument)
   143  ((i) < (rgn)->srgn_cnt) && ((srgn) = &(rgn)->srgn_tbl[i]); \
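Both ufshpb.h hits belong to the sub-region iterator macro; line 142 is elided in the listing. Below is a minimal sketch of the for-loop shape such an iterator usually takes; only lines 141 and 143 come from the matches, the initializer and increment are assumptions.

/* Lines 141/143 as listed; the surrounding for-loop skeleton
 * (initializer, increment) is assumed, since line 142 is elided. */
#define for_each_sub_region(rgn, i, srgn)				\
	for ((i) = 0;							\
	     ((i) < (rgn)->srgn_cnt) && ((srgn) = &(rgn)->srgn_tbl[i]); \
	     (i)++)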
/linux-6.3-rc2/tools/testing/memblock/tests/

basic_api.c
    43  struct memblock_region *rgn;  in memblock_add_simple_check() (local)
    57  ASSERT_EQ(rgn->base, r.base);  in memblock_add_simple_check()
    58  ASSERT_EQ(rgn->size, r.size);  in memblock_add_simple_check()
    76  struct memblock_region *rgn;  in memblock_add_node_simple_check() (local)
    90  ASSERT_EQ(rgn->base, r.base);  in memblock_add_node_simple_check()
    91  ASSERT_EQ(rgn->size, r.size);  in memblock_add_node_simple_check()
    93  ASSERT_EQ(rgn->nid, 1);  in memblock_add_node_simple_check()
  1295  ASSERT_EQ(rgn->base, 0);  in memblock_remove_only_region_check()
  1296  ASSERT_EQ(rgn->size, 0);  in memblock_remove_only_region_check()
  1723  ASSERT_EQ(rgn->base, 0);  in memblock_free_only_region_check()
  [all …]
alloc_helpers_api.c
    35  ASSERT_EQ(rgn->size, size);  in alloc_from_simple_generic_check()
    36  ASSERT_EQ(rgn->base, min_addr);  in alloc_from_simple_generic_check()
    79  ASSERT_EQ(rgn->size, size);  in alloc_from_misaligned_generic_check()
   124  ASSERT_EQ(rgn->size, size);  in alloc_from_top_down_high_addr_check()
   172  ASSERT_EQ(rgn->size, total_size);  in alloc_from_top_down_no_space_above_check()
   207  ASSERT_EQ(rgn->base, start_addr);  in alloc_from_top_down_min_addr_cap_check()
   208  ASSERT_EQ(rgn->size, MEM_SIZE);  in alloc_from_top_down_min_addr_cap_check()
   250  ASSERT_EQ(rgn->size, size);  in alloc_from_bottom_up_high_addr_check()
   297  ASSERT_EQ(rgn->size, r1_size);  in alloc_from_bottom_up_no_space_above_check()
   329  ASSERT_EQ(rgn->base, start_addr);  in alloc_from_bottom_up_min_addr_cap_check()
  [all …]
alloc_api.c
    41  ASSERT_EQ(rgn->size, size);  in alloc_top_down_simple_check()
   144  ASSERT_EQ(rgn->size, total_size);  in alloc_top_down_before_check()
   192  ASSERT_EQ(rgn->size, total_size);  in alloc_top_down_after_check()
   432  ASSERT_EQ(rgn->size, MEM_SIZE);  in alloc_limited_space_generic_check()
   460  ASSERT_EQ(rgn->size, 0);  in alloc_no_memory_generic_check()
   461  ASSERT_EQ(rgn->base, 0);  in alloc_no_memory_generic_check()
   493  ASSERT_EQ(rgn->size, 0);  in alloc_too_large_generic_check()
   494  ASSERT_EQ(rgn->base, 0);  in alloc_too_large_generic_check()
   520  ASSERT_EQ(rgn->size, SZ_2);  in alloc_bottom_up_simple_check()
   666  ASSERT_EQ(rgn->base, r1.base);  in alloc_bottom_up_after_check()
  [all …]
alloc_nid_api.c
    82  rgn_end = rgn->base + rgn->size;  in alloc_nid_top_down_simple_check()
    87  ASSERT_EQ(rgn->size, size);  in alloc_nid_top_down_simple_check()
   135  rgn_end = rgn->base + rgn->size;  in alloc_nid_top_down_end_misaligned_check()
   140  ASSERT_EQ(rgn->size, size);  in alloc_nid_top_down_end_misaligned_check()
   185  rgn_end = rgn->base + rgn->size;  in alloc_nid_exact_address_generic_check()
   190  ASSERT_EQ(rgn->size, size);  in alloc_nid_exact_address_generic_check()
   240  ASSERT_EQ(rgn->size, size);  in alloc_nid_top_down_narrow_range_check()
   671  ASSERT_EQ(rgn->size, size);  in alloc_nid_top_down_cap_max_check()
   708  ASSERT_EQ(rgn->size, size);  in alloc_nid_top_down_cap_min_check()
   751  rgn_end = rgn->base + rgn->size;  in alloc_nid_bottom_up_simple_check()
  [all …]
common.h
   121  static inline phys_addr_t __maybe_unused region_end(struct memblock_region *rgn)  in region_end() (argument)
   123  return rgn->base + rgn->size;  in region_end()
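The two common.h hits make up the whole test helper, so it reads back almost verbatim; only the braces around the body are assumed in this sketch.

/* End address of a test region: base + size (lines 121 and 123 as
 * listed; the braces are assumed). */
static inline phys_addr_t __maybe_unused
region_end(struct memblock_region *rgn)
{
	return rgn->base + rgn->size;
}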
/linux-6.3-rc2/arch/powerpc/platforms/powernv/

opal-fadump.c
   130  base = be64_to_cpu(fdm->rgn[i].src);  in opal_fadump_get_config()
   131  size = be64_to_cpu(fdm->rgn[i].size);  in opal_fadump_get_config()
   164  i, be64_to_cpu(fdm->rgn[i].src),  in opal_fadump_get_config()
   165  be64_to_cpu(fdm->rgn[i].size));  in opal_fadump_get_config()
   200  opal_fdm->rgn[i].dest = cpu_to_be64(addr);  in opal_fadump_init_mem_struct()
   282  be64_to_cpu(opal_fdm->rgn[i].src),  in opal_fadump_register()
   283  be64_to_cpu(opal_fdm->rgn[i].dest),  in opal_fadump_register()
   284  be64_to_cpu(opal_fdm->rgn[i].size));  in opal_fadump_register()
   581  dumped_bytes = be64_to_cpu(fdm_ptr->rgn[i].size);  in opal_fadump_region_show()
   584  be64_to_cpu(fdm_ptr->rgn[i].src),  in opal_fadump_region_show()
  [all …]
opal-fadump.h
    42  struct opal_mpipl_region rgn[FADUMP_MAX_MEM_REGS];  (member)
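The opal-fadump.c matches read src, dest and size through be64_to_cpu()/cpu_to_be64(), which pins down what each rgn[] element carries. A hedged sketch of the per-region layout implied by those accesses; field order and comments are assumptions, not taken from the listing.

/* Layout implied by the big-endian field accesses listed above;
 * field order and comments are assumptions. */
struct opal_mpipl_region {
	__be64 src;	/* start of the memory range to capture        */
	__be64 dest;	/* where firmware preserves the copy at crash  */
	__be64 size;	/* length of the range in bytes                */
};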
/linux-6.3-rc2/mm/

memblock.c
   557  memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));  in memblock_insert_region()
   558  rgn->base = base;  in memblock_insert_region()
   559  rgn->size = size;  in memblock_insert_region()
   560  rgn->flags = flags;  in memblock_insert_region()
   780  rgn->base = base;  in memblock_isolate_range()
   785  rgn->flags);  in memblock_isolate_range()
   791  rgn->base = end;  in memblock_isolate_range()
   796  rgn->flags);  in memblock_isolate_range()
  1914  base = rgn->base;  in memblock_dump()
  1915  size = rgn->size;  in memblock_dump()
  [all …]
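Lines 557-560 show the core of the insert path: shift the tail of the region array up one slot, then fill in the new entry. A self-contained sketch of that step follows; the count and total-size bookkeeping is an assumption, as it is not visible in the matches, and the function name is invented.

/* Sketch only: the memmove and the three assignments come from lines
 * 557-560 above; the cnt/total_size updates are assumptions. */
static void insert_region_sketch(struct memblock_type *type, int idx,
				 phys_addr_t base, phys_addr_t size,
				 enum memblock_flags flags)
{
	struct memblock_region *rgn = &type->regions[idx];

	/* Open a slot at idx by shifting the tail of the array up. */
	memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
	rgn->base = base;
	rgn->size = size;
	rgn->flags = flags;
	type->cnt++;
	type->total_size += size;
}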
/linux-6.3-rc2/drivers/mtd/nand/onenand/

onenand_bbt.c
    65  int rgn;  in create_bbt() (local)
   113  rgn = flexonenand_region(mtd, from);  in create_bbt()
   114  from += mtd->eraseregions[rgn].erasesize;  in create_bbt()
/linux-6.3-rc2/drivers/of/

fdt.c
   979  struct memblock_region rgn[MAX_USABLE_RANGES] = {0};  in early_init_dt_check_for_usable_mem_range() (local)
   995  rgn[i].base = dt_mem_next_cell(dt_root_addr_cells, &prop);  in early_init_dt_check_for_usable_mem_range()
   996  rgn[i].size = dt_mem_next_cell(dt_root_size_cells, &prop);  in early_init_dt_check_for_usable_mem_range()
   999  i, &rgn[i].base, &rgn[i].size);  in early_init_dt_check_for_usable_mem_range()
  1002  memblock_cap_memory_range(rgn[0].base, rgn[0].size);  in early_init_dt_check_for_usable_mem_range()
  1003  for (i = 1; i < MAX_USABLE_RANGES && rgn[i].size; i++)  in early_init_dt_check_for_usable_mem_range()
  1004  memblock_add(rgn[i].base, rgn[i].size);  in early_init_dt_check_for_usable_mem_range()
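Lines 1002-1004 show how the parsed ranges are applied: the first usable range caps the memblock memory map, and any further non-empty ranges are added back. A small sketch of just that step; the wrapper name and its parameters are chosen for illustration only.

/* Illustration of lines 1002-1004; the wrapper name and parameters
 * are invented. */
static void apply_usable_ranges_sketch(struct memblock_region *rgn,
				       int max_ranges)
{
	int i;

	/* The first range caps the memory map ... */
	memblock_cap_memory_range(rgn[0].base, rgn[0].size);
	/* ... remaining non-empty ranges are added back. */
	for (i = 1; i < max_ranges && rgn[i].size; i++)
		memblock_add(rgn[i].base, rgn[i].size);
}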
/linux-6.3-rc2/drivers/iommu/

mtk_iommu.c
   511  const struct mtk_iommu_iova_region *rgn = plat_data->iova_region;  in mtk_iommu_get_iova_region_id() (local)
   520  for (i = 0; i < plat_data->iova_region_nr; i++, rgn++) {  in mtk_iommu_get_iova_region_id()
   522  if (dma_rgn->dma_start == rgn->iova_base &&  in mtk_iommu_get_iova_region_id()
   523  dma_end == rgn->iova_base + rgn->size - 1)  in mtk_iommu_get_iova_region_id()
   526  if (dma_rgn->dma_start >= rgn->iova_base &&  in mtk_iommu_get_iova_region_id()
   527  dma_end < rgn->iova_base + rgn->size)  in mtk_iommu_get_iova_region_id()
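The mtk_iommu.c matches outline the region lookup: an exact match of the device's DMA window against an IOVA region wins outright, otherwise a region that merely contains the window is still acceptable. A standalone sketch of that matching rule; the struct and function names are illustrative and the fallback handling is an assumption.

/* Illustrative only: names are invented; the two comparisons mirror
 * lines 522-523 and 526-527 of the listing. */
struct iova_region_sketch {
	unsigned long long iova_base;
	unsigned long long size;
};

static int find_iova_region_sketch(const struct iova_region_sketch *rgn,
				   int nr, unsigned long long dma_start,
				   unsigned long long dma_end)
{
	int i, candidate = -1;

	for (i = 0; i < nr; i++, rgn++) {
		/* An exact match of the DMA window wins immediately. */
		if (dma_start == rgn->iova_base &&
		    dma_end == rgn->iova_base + rgn->size - 1)
			return i;
		/* Otherwise remember a region that contains the window. */
		if (candidate < 0 && dma_start >= rgn->iova_base &&
		    dma_end < rgn->iova_base + rgn->size)
			candidate = i;
	}
	return candidate;	/* -1 if no region fits (assumed fallback) */
}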