/linux-6.3-rc2/arch/powerpc/mm/nohash/

  kaslr_booke.c
     23: struct regions {                                       [struct]
     38: struct regions __initdata regions;                     [argument]
    113: if (regions.reserved_mem < 0)                          [in overlaps_reserved_region()]
    141: if (base >= regions.pa_end)                            [in overlaps_reserved_region()]
    160: regions.dtb_end))                                      [in overlaps_region()]
    164: regions.initrd_end))                                   [in overlaps_region()]
    168: regions.crash_end))                                    [in overlaps_region()]
    320: if (regions.reserved_mem >= 0)                         [in kaslr_choose_location()]
    325: regions.pa_start = memstart_addr;                      [in kaslr_choose_location()]
    327: regions.dtb_start = __pa(dt_ptr);                      [in kaslr_choose_location()]
    [all …]

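The overlaps_region() and overlaps_reserved_region() matches above all come down to a half-open interval intersection test against the device tree, initrd, and crashkernel ranges. As a minimal sketch (illustrative names, not the kernel's own helpers):

    #include <stdbool.h>
    #include <stdint.h>

    /*
     * Does the candidate range [base, base + size) intersect the reserved
     * range [start, end)? With half-open intervals this is exactly two
     * comparisons, which is what the kaslr_booke.c checks amount to.
     */
    static bool ranges_overlap(uint64_t base, uint64_t size,
                               uint64_t start, uint64_t end)
    {
        return base < end && start < base + size;
    }
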
/linux-6.3-rc2/drivers/mtd/chips/

  jedec_probe.c
    306: .regions = {
    318: .regions = {
    333: .regions = {
    348: .regions = {
    363: .regions = {
    378: .regions = {
    394: .regions = {
    411: .regions = {
    428: .regions = {
    443: .regions = {
    [all …]

/linux-6.3-rc2/mm/damon/

  vaddr-test.h
     69: struct damon_addr_range regions[3] = {0,};             [in damon_test_three_regions_in_vmas(), local]
     84: __damon_va_three_regions(&mm, regions);                [in damon_test_three_regions_in_vmas()]
     87: KUNIT_EXPECT_EQ(test, 25ul, regions[0].end);           [in damon_test_three_regions_in_vmas()]
     89: KUNIT_EXPECT_EQ(test, 220ul, regions[1].end);          [in damon_test_three_regions_in_vmas()]
     91: KUNIT_EXPECT_EQ(test, 330ul, regions[2].end);          [in damon_test_three_regions_in_vmas()]
    130: unsigned long *regions, int nr_regions,                [in damon_do_test_apply_three_regions(), argument]
    140: r = damon_new_region(regions[i * 2], regions[i * 2 + 1]);  [in damon_do_test_apply_three_regions()]
    173: damon_do_test_apply_three_regions(test, regions, ARRAY_SIZE(regions),  [in damon_test_apply_three_regions1()]
    195: damon_do_test_apply_three_regions(test, regions, ARRAY_SIZE(regions),  [in damon_test_apply_three_regions2()]
    219: damon_do_test_apply_three_regions(test, regions, ARRAY_SIZE(regions),  [in damon_test_apply_three_regions3()]
    [all …]

  vaddr.c
    117: struct damon_addr_range regions[3])                    [in __damon_va_three_regions()]
    158: regions[0].start = ALIGN(start, DAMON_MIN_REGION);     [in __damon_va_three_regions()]
    159: regions[0].end = ALIGN(first_gap.start, DAMON_MIN_REGION);  [in __damon_va_three_regions()]
    163: regions[2].end = ALIGN(prev->vm_end, DAMON_MIN_REGION);  [in __damon_va_three_regions()]
    174: struct damon_addr_range regions[3])                    [in damon_va_three_regions()]
    184: rc = __damon_va_three_regions(mm, regions);            [in damon_va_three_regions()]
    238: struct damon_addr_range regions[3];                    [in __damon_va_init_regions(), local]
    242: if (damon_va_three_regions(t, regions)) {              [in __damon_va_init_regions()]
    253: sz += regions[i].end - regions[i].start;               [in __damon_va_init_regions()]
    261: r = damon_new_region(regions[i].start, regions[i].end);  [in __damon_va_init_regions()]
    [all …]

/linux-6.3-rc2/drivers/gpu/drm/nouveau/nvkm/nvfw/

  acr.c
    130: hdr->regions.no_regions);                              [in flcn_acr_desc_dump()]
    135: hdr->regions.region_props[i].start_addr);              [in flcn_acr_desc_dump()]
    137: hdr->regions.region_props[i].end_addr);                [in flcn_acr_desc_dump()]
    139: hdr->regions.region_props[i].region_id);               [in flcn_acr_desc_dump()]
    141: hdr->regions.region_props[i].read_mask);               [in flcn_acr_desc_dump()]
    143: hdr->regions.region_props[i].write_mask);              [in flcn_acr_desc_dump()]
    173: hdr->regions.no_regions);                              [in flcn_acr_desc_v1_dump()]
    178: hdr->regions.region_props[i].start_addr);              [in flcn_acr_desc_v1_dump()]
    180: hdr->regions.region_props[i].end_addr);                [in flcn_acr_desc_v1_dump()]
    182: hdr->regions.region_props[i].region_id);               [in flcn_acr_desc_v1_dump()]
    [all …]

/linux-6.3-rc2/drivers/vfio/platform/

  vfio_platform_common.c
    146: if (!vdev->regions)                                    [in vfio_platform_regions_init()]
    153: vdev->regions[i].addr = res->start;                    [in vfio_platform_regions_init()]
    154: vdev->regions[i].size = resource_size(res);            [in vfio_platform_regions_init()]
    155: vdev->regions[i].flags = 0;                            [in vfio_platform_regions_init()]
    162: vdev->regions[i].flags |=                              [in vfio_platform_regions_init()]
    170: !(vdev->regions[i].size & ~PAGE_MASK))                 [in vfio_platform_regions_init()]
    171: vdev->regions[i].flags |=                              [in vfio_platform_regions_init()]
    187: kfree(vdev->regions);                                  [in vfio_platform_regions_init()]
    196: iounmap(vdev->regions[i].ioaddr);                      [in vfio_platform_regions_cleanup()]
    199: kfree(vdev->regions);                                  [in vfio_platform_regions_cleanup()]
    [all …]

/linux-6.3-rc2/tools/testing/memblock/tests/

  alloc_exact_nid_api.c
     30: struct memblock_region *new_rgn = &memblock.reserved.regions[0];   [in alloc_exact_nid_top_down_numa_simple_check()]
     31: struct memblock_region *req_node = &memblock.memory.regions[nid_req];  [in alloc_exact_nid_top_down_numa_simple_check()]
     82: struct memblock_region *new_rgn = &memblock.reserved.regions[1];   [in alloc_exact_nid_top_down_numa_part_reserved_check()]
    143: struct memblock_region *new_rgn = &memblock.reserved.regions[0];   [in alloc_exact_nid_top_down_numa_split_range_low_check()]
    200: struct memblock_region *new_rgn = &memblock.reserved.regions[0];   [in alloc_exact_nid_top_down_numa_no_overlap_split_check()]
    202: struct memblock_region *node2 = &memblock.memory.regions[6];       [in alloc_exact_nid_top_down_numa_no_overlap_split_check()]
    258: struct memblock_region *new_rgn = &memblock.reserved.regions[0];   [in alloc_exact_nid_top_down_numa_no_overlap_low_check()]
    260: struct memblock_region *min_node = &memblock.memory.regions[2];    [in alloc_exact_nid_top_down_numa_no_overlap_low_check()]
    261: struct memblock_region *max_node = &memblock.memory.regions[5];    [in alloc_exact_nid_top_down_numa_no_overlap_low_check()]
    299: struct memblock_region *new_rgn = &memblock.reserved.regions[0];   [in alloc_exact_nid_bottom_up_numa_simple_check()]
    [all …]

  alloc_nid_api.c
     66: struct memblock_region *rgn = &memblock.reserved.regions[0];       [in alloc_nid_top_down_simple_check()]
    118: struct memblock_region *rgn = &memblock.reserved.regions[0];       [in alloc_nid_top_down_end_misaligned_check()]
    169: struct memblock_region *rgn = &memblock.reserved.regions[0];       [in alloc_nid_exact_address_generic_check()]
    221: struct memblock_region *rgn = &memblock.reserved.regions[0];       [in alloc_nid_top_down_narrow_range_check()]
    311: struct memblock_region *rgn = &memblock.reserved.regions[0];       [in alloc_nid_min_reserved_generic_check()]
    363: struct memblock_region *rgn = &memblock.reserved.regions[0];       [in alloc_nid_max_reserved_generic_check()]
    416: struct memblock_region *rgn1 = &memblock.reserved.regions[1];      [in alloc_nid_top_down_reserved_with_space_check()]
    417: struct memblock_region *rgn2 = &memblock.reserved.regions[0];      [in alloc_nid_top_down_reserved_with_space_check()]
    481: struct memblock_region *rgn = &memblock.reserved.regions[0];       [in alloc_nid_reserved_full_merge_generic_check()]
    652: struct memblock_region *rgn = &memblock.reserved.regions[0];       [in alloc_nid_top_down_cap_max_check()]
    [all …]

  basic_api.c
     17: ASSERT_NE(memblock.memory.regions, NULL);              [in memblock_initialization_check()]
     45: rgn = &memblock.memory.regions[0];                     [in memblock_add_simple_check()]
     78: rgn = &memblock.memory.regions[0];                     [in memblock_add_node_simple_check()]
    121: rgn1 = &memblock.memory.regions[0];                    [in memblock_add_disjoint_check()]
    122: rgn2 = &memblock.memory.regions[1];                    [in memblock_add_disjoint_check()]
    175: rgn = &memblock.memory.regions[0];                     [in memblock_add_overlap_top_check()]
    227: rgn = &memblock.memory.regions[0];                     [in memblock_add_overlap_bottom_check()]
    276: rgn = &memblock.memory.regions[0];                     [in memblock_add_within_check()]
    347: rgn = &memblock.memory.regions[0];                     [in memblock_add_between_check()]
    401: rgn = &memblock.memory.regions[0];                     [in memblock_add_near_max_check()]
    [all …]

  alloc_api.c
     26: struct memblock_region *rgn = &memblock.reserved.regions[0];       [in alloc_top_down_simple_check()]
     73: struct memblock_region *rgn1 = &memblock.reserved.regions[1];      [in alloc_top_down_disjoint_check()]
     74: struct memblock_region *rgn2 = &memblock.reserved.regions[0];      [in alloc_top_down_disjoint_check()]
    125: struct memblock_region *rgn = &memblock.reserved.regions[0];       [in alloc_top_down_before_check()]
    168: struct memblock_region *rgn = &memblock.reserved.regions[0];       [in alloc_top_down_after_check()]
    217: struct memblock_region *rgn = &memblock.reserved.regions[0];       [in alloc_top_down_second_fit_check()]
    266: struct memblock_region *rgn = &memblock.reserved.regions[0];       [in alloc_in_between_generic_check()]
    416: struct memblock_region *rgn = &memblock.reserved.regions[0];       [in alloc_limited_space_generic_check()]
    450: struct memblock_region *rgn = &memblock.reserved.regions[0];       [in alloc_no_memory_generic_check()]
    484: struct memblock_region *rgn = &memblock.reserved.regions[0];       [in alloc_too_large_generic_check()]
    [all …]

  alloc_helpers_api.c
     20: struct memblock_region *rgn = &memblock.reserved.regions[0];       [in alloc_from_simple_generic_check()]
     63: struct memblock_region *rgn = &memblock.reserved.regions[0];       [in alloc_from_misaligned_generic_check()]
    110: struct memblock_region *rgn = &memblock.reserved.regions[0];       [in alloc_from_top_down_high_addr_check()]
    153: struct memblock_region *rgn = &memblock.reserved.regions[0];       [in alloc_from_top_down_no_space_above_check()]
    190: struct memblock_region *rgn = &memblock.reserved.regions[0];       [in alloc_from_top_down_min_addr_cap_check()]
    236: struct memblock_region *rgn = &memblock.reserved.regions[0];       [in alloc_from_bottom_up_high_addr_check()]
    278: struct memblock_region *rgn = &memblock.reserved.regions[0];       [in alloc_from_bottom_up_no_space_above_check()]
    314: struct memblock_region *rgn = &memblock.reserved.regions[0];       [in alloc_from_bottom_up_min_addr_cap_check()]

/linux-6.3-rc2/mm/

  memblock.c
     193: type->regions[i].size))                               [in memblock_overlaps_region()]
     346: memmove(&type->regions[r], &type->regions[r + 1],     [in memblock_remove_region()]
     354: type->regions[0].base = 0;                            [in memblock_remove_region()]
     355: type->regions[0].size = 0;                            [in memblock_remove_region()]
     356: type->regions[0].flags = 0;                           [in memblock_remove_region()]
     476: old_array = type->regions;                            [in memblock_double_array()]
     477: type->regions = new_array;                            [in memblock_double_array()]
    1243: r = &type->regions[*idx];                             [in __next_mem_pfn_range()]
    1685: return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);  [in memblock_end_of_DRAM()]
    1825: *end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size);  [in memblock_search_pfn_nid()]
    [all …]

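The memblock_remove_region() and memblock_double_array() matches show the underlying design: each region type is a plain sorted array, and deleting an entry is a memmove() of the tail. A simplified sketch of that deletion, with stand-in types rather than the kernel's full struct definitions:

    #include <string.h>

    struct region {
        unsigned long base;
        unsigned long size;
        unsigned long flags;
    };

    struct region_type {
        unsigned long cnt;
        struct region *regions;
    };

    /* Drop entry r by shifting the array tail down one slot, mirroring
     * the memmove() matched at memblock.c line 346. */
    static void remove_region(struct region_type *type, unsigned long r)
    {
        memmove(&type->regions[r], &type->regions[r + 1],
                (type->cnt - r - 1) * sizeof(type->regions[r]));
        type->cnt--;

        /* Keep one well-defined empty entry when the array empties,
         * as the matches at lines 354-356 suggest. */
        if (!type->cnt) {
            type->cnt = 1;
            type->regions[0].base = 0;
            type->regions[0].size = 0;
            type->regions[0].flags = 0;
        }
    }
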
/linux-6.3-rc2/drivers/virt/nitro_enclaves/

  ne_misc_dev_test.c
    117: phys_contig_mem_regions.regions = kunit_kcalloc(test, MAX_PHYS_REGIONS,      [in ne_misc_dev_test_merge_phys_contig_memory_regions()]
    118: sizeof(*phys_contig_mem_regions.regions),                                    [in ne_misc_dev_test_merge_phys_contig_memory_regions()]
    120: KUNIT_ASSERT_TRUE(test, phys_contig_mem_regions.regions);                    [in ne_misc_dev_test_merge_phys_contig_memory_regions()]
    135: KUNIT_EXPECT_EQ(test, phys_contig_mem_regions.regions[num - 1].start,        [in ne_misc_dev_test_merge_phys_contig_memory_regions()]
    137: KUNIT_EXPECT_EQ(test, range_len(&phys_contig_mem_regions.regions[num - 1]),  [in ne_misc_dev_test_merge_phys_contig_memory_regions()]
    141: kunit_kfree(test, phys_contig_mem_regions.regions);                          [in ne_misc_dev_test_merge_phys_contig_memory_regions()]

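The KUnit test above exercises merging of physically contiguous ranges: a new range that starts exactly where the previous one ends extends that entry instead of appending a new one. A hedged sketch of that merge step with simplified types (not the nitro_enclaves structures):

    struct phys_range {
        unsigned long long start;
        unsigned long long len;
    };

    struct range_list {
        struct phys_range *regions;
        unsigned long num;    /* entries in use; capacity assumed sufficient */
    };

    /* Append [start, start + len), merging with the last entry when the
     * new range begins exactly at its end, which is the property the
     * test's start/range_len expectations check. */
    static void add_range(struct range_list *list, unsigned long long start,
                          unsigned long long len)
    {
        if (list->num) {
            struct phys_range *last = &list->regions[list->num - 1];

            if (last->start + last->len == start) {
                last->len += len;
                return;
            }
        }
        list->regions[list->num].start = start;
        list->regions[list->num].len = len;
        list->num++;
    }
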
/linux-6.3-rc2/drivers/vfio/fsl-mc/

  vfio_fsl_mc.c
     32: if (!vdev->regions)                                    [in vfio_fsl_mc_open_device()]
     36: struct resource *res = &mc_dev->regions[i];            [in vfio_fsl_mc_open_device()]
     39: vdev->regions[i].addr = res->start;                    [in vfio_fsl_mc_open_device()]
     40: vdev->regions[i].size = resource_size(res);            [in vfio_fsl_mc_open_device()]
     41: vdev->regions[i].type = mc_dev->regions[i].flags & IORESOURCE_BITS;  [in vfio_fsl_mc_open_device()]
     47: !(vdev->regions[i].size & ~PAGE_MASK))                 [in vfio_fsl_mc_open_device()]
     48: vdev->regions[i].flags |=                              [in vfio_fsl_mc_open_device()]
     64: iounmap(vdev->regions[i].ioaddr);                      [in vfio_fsl_mc_regions_cleanup()]
     65: kfree(vdev->regions);                                  [in vfio_fsl_mc_regions_cleanup()]
    255: region = &vdev->regions[index];                        [in vfio_fsl_mc_read()]
    [all …]

/linux-6.3-rc2/drivers/net/dsa/sja1105/

  sja1105_devlink.c
     85: priv->regions = kcalloc(num_regions, sizeof(struct devlink_region *),  [in sja1105_setup_devlink_regions()]
     87: if (!priv->regions)                                    [in sja1105_setup_devlink_regions()]
     97: dsa_devlink_region_destroy(priv->regions[i]);          [in sja1105_setup_devlink_regions()]
     99: kfree(priv->regions);                                  [in sja1105_setup_devlink_regions()]
    103: priv->regions[i] = region;                             [in sja1105_setup_devlink_regions()]
    115: dsa_devlink_region_destroy(priv->regions[i]);          [in sja1105_teardown_devlink_regions()]
    117: kfree(priv->regions);                                  [in sja1105_teardown_devlink_regions()]

/linux-6.3-rc2/Documentation/admin-guide/device-mapper/

  dm-clone.rst
     68: dm-clone divides the source and destination devices in fixed sized regions.
    112: of regions being copied, the default being 1 region.
    119: batches of this many regions.
    170: hydration_threshold <#regions> Maximum number of regions being copied from
    176: batches of this many regions.
    185: <region size> <#hydrated regions>/<#total regions> <#hydrating regions>
    194: #hydrated regions Number of regions that have finished hydrating
    195: #total regions Total number of regions to hydrate
    196: #hydrating regions Number of regions currently hydrating
    219: `hydration_threshold <#regions>`
    [all …]

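The line 185 match quotes the dm-clone status triplet, <region size> <#hydrated regions>/<#total regions> <#hydrating regions>. As a hedged illustration, a userspace helper could pull those fields apart like this (the struct and function are hypothetical, not part of dm-clone; units are whatever the target reports):

    #include <stdio.h>

    /* Hypothetical holder for the status fields quoted above. */
    struct clone_status {
        unsigned long long region_size;
        unsigned long long hydrated;
        unsigned long long total;
        unsigned long long hydrating;
    };

    /* Parse "<region size> <#hydrated>/<#total> <#hydrating>" from a
     * status line; returns 0 on success, -1 on a malformed string. */
    static int parse_clone_status(const char *line, struct clone_status *st)
    {
        if (sscanf(line, "%llu %llu/%llu %llu", &st->region_size,
                   &st->hydrated, &st->total, &st->hydrating) != 4)
            return -1;
        return 0;
    }
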
/linux-6.3-rc2/drivers/soc/qcom/

  smem.c
     287: struct smem_region regions[];                         [member]
     430: header = smem->regions[0].virt_base;                  [in qcom_smem_alloc_global()]
     517: header = smem->regions[0].virt_base;                  [in qcom_smem_get_global()]
     525: region = &smem->regions[i];                           [in qcom_smem_get_global()]
     716: if (ret > __smem->regions[0].size)                    [in qcom_smem_get_free_space()]
     762: area = &__smem->regions[i];                           [in qcom_smem_virt_to_phys()]
     780: header = smem->regions[0].virt_base;                  [in qcom_smem_get_sbl_version()]
     999: smem->regions[0].size = size;                         [in qcom_smem_map_global()]
    1002: if (!smem->regions[0].virt_base)                      [in qcom_smem_map_global()]
    1087: smem->regions[i].size);                               [in qcom_smem_probe()]
    [all …]

/linux-6.3-rc2/drivers/perf/

  marvell_cn10k_tad_pmu.c
     33: struct tad_region *regions;                            [member]
     54: new += readq(tad_pmu->regions[i].base +                [in tad_pmu_event_counter_read()]
     72: writeq_relaxed(0, tad_pmu->regions[i].base +           [in tad_pmu_event_counter_stop()]
     93: writeq_relaxed(0, tad_pmu->regions[i].base +           [in tad_pmu_event_counter_start()]
    259: struct tad_region *regions;                            [in tad_pmu_probe(), local]
    300: regions = devm_kcalloc(&pdev->dev, tad_cnt,            [in tad_pmu_probe()]
    301: sizeof(*regions), GFP_KERNEL);                         [in tad_pmu_probe()]
    302: if (!regions)                                          [in tad_pmu_probe()]
    307: regions[i].base = devm_ioremap(&pdev->dev,             [in tad_pmu_probe()]
    310: if (!regions[i].base) {                                [in tad_pmu_probe()]
    [all …]

/linux-6.3-rc2/Documentation/admin-guide/mm/damon/

  lru_sort.rst
     33: memory regions that showing no access for a time that longer than a
    179: Minimum number of monitoring regions.
    190: Maximum number of monitoring regions.
    225: Number of hot memory regions that tried to be LRU-sorted.
    230: Total bytes of hot memory regions that tried to be LRU-sorted.
    235: Number of hot memory regions that successfully be LRU-sorted.
    240: Total bytes of hot memory regions that successfully be LRU-sorted.
    250: Number of cold memory regions that tried to be LRU-sorted.
    255: Total bytes of cold memory regions that tried to be LRU-sorted.
    260: Number of cold memory regions that successfully be LRU-sorted.
    [all …]

/linux-6.3-rc2/drivers/gpu/drm/nouveau/nvkm/subdev/acr/

  gp102.c
    203: desc->regions.no_regions = 2;                                      [in gp102_acr_load_setup()]
    204: desc->regions.region_props[0].start_addr = acr->wpr_start >> 8;    [in gp102_acr_load_setup()]
    205: desc->regions.region_props[0].end_addr = acr->wpr_end >> 8;        [in gp102_acr_load_setup()]
    206: desc->regions.region_props[0].region_id = 1;                       [in gp102_acr_load_setup()]
    207: desc->regions.region_props[0].read_mask = 0xf;                     [in gp102_acr_load_setup()]
    208: desc->regions.region_props[0].write_mask = 0xc;                    [in gp102_acr_load_setup()]
    209: desc->regions.region_props[0].client_mask = 0x2;                   [in gp102_acr_load_setup()]
    210: desc->regions.region_props[0].shadow_mem_start_addr = acr->shadow_start >> 8;  [in gp102_acr_load_setup()]

/linux-6.3-rc2/Documentation/networking/devlink/

  devlink-region.rst
      7: ``devlink`` regions enable access to driver defined address regions using
     10: Each device can create and register its own supported address regions. The
     22: address regions that are otherwise inaccessible to the user.
     54: # Show all of the exposed regions with region sizes:
     81: As regions are likely very device or driver specific, no generic regions are
     83: specific regions a driver supports.

/linux-6.3-rc2/drivers/virt/acrn/

  mm.c
    20: struct vm_memory_region_batch *regions;                 [in modify_region(), local]
    23: regions = kzalloc(sizeof(*regions), GFP_KERNEL);        [in modify_region()]
    24: if (!regions)                                           [in modify_region()]
    27: regions->vmid = vm->vmid;                               [in modify_region()]
    28: regions->regions_num = 1;                               [in modify_region()]
    29: regions->regions_gpa = virt_to_phys(region);            [in modify_region()]
    31: ret = hcall_set_memory_regions(virt_to_phys(regions));  [in modify_region()]
    36: kfree(regions);                                         [in modify_region()]

/linux-6.3-rc2/drivers/gpu/drm/i915/gem/selftests/

  i915_gem_dmabuf.c
     95: struct intel_memory_region *lmem = i915->mm.regions[INTEL_REGION_LMEM_0];  [in igt_dmabuf_import_same_driver_lmem()]
    217: struct intel_memory_region **regions,                  [in igt_dmabuf_import_same_driver(), argument]
    231: regions, num_regions);                                 [in igt_dmabuf_import_same_driver()]
    275: if (obj->mm.region != i915->mm.regions[INTEL_REGION_SMEM]) {  [in igt_dmabuf_import_same_driver()]
    323: struct intel_memory_region *smem = i915->mm.regions[INTEL_REGION_SMEM];  [in igt_dmabuf_import_same_driver_smem()]
    331: struct intel_memory_region *regions[2];                [in igt_dmabuf_import_same_driver_lmem_smem(), local]
    333: if (!i915->mm.regions[INTEL_REGION_LMEM_0])            [in igt_dmabuf_import_same_driver_lmem_smem()]
    336: regions[0] = i915->mm.regions[INTEL_REGION_LMEM_0];    [in igt_dmabuf_import_same_driver_lmem_smem()]
    337: regions[1] = i915->mm.regions[INTEL_REGION_SMEM];      [in igt_dmabuf_import_same_driver_lmem_smem()]
    338: return igt_dmabuf_import_same_driver(i915, regions, 2);  [in igt_dmabuf_import_same_driver_lmem_smem()]

/linux-6.3-rc2/drivers/net/wireless/ath/ath10k/

  coredump.c
    1297: .regions = qca6174_hw10_mem_regions,
    1306: .regions = qca6174_hw10_mem_regions,
    1315: .regions = qca6174_hw10_mem_regions,
    1324: .regions = qca6174_hw21_mem_regions,
    1333: .regions = qca6174_hw30_mem_regions,
    1342: .regions = qca6174_hw30_mem_regions,
    1360: .regions = qca6174_hw30_mem_regions,
    1369: .regions = qca988x_hw20_mem_regions,
    1378: .regions = qca9984_hw10_mem_regions,
    1387: .regions = qca9984_hw10_mem_regions,
    [all …]

/linux-6.3-rc2/Documentation/mm/damon/

  design.rst
     51: address regions is just wasteful. However, because DAMON can deal with some
     52: level of noise using the adaptive regions adjustment mechanism, tracking every
     58: distinct regions that cover every mapped area of the address space. The two
     59: gaps between the three regions are the two biggest unmapped areas in the given
     69: (small mmap()-ed regions and munmap()-ed regions)
     98: ``update interval``, ``minimum number of regions``, and ``maximum number of
     99: regions``.
    140: to set the minimum and the maximum number of regions for the trade-off.
    149: Even somehow the initial monitoring target regions are well constructed to
    158: splits each region into two or three regions if the total number of regions
    [all …]

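The design.rst matches sketch DAMON's adaptive scheme: start from three large regions covering the mapped address space, then repeatedly split each region into two or three subregions as long as the total stays within the configured maximum. A minimal sketch of just the splitting arithmetic that description implies (simplified types; DAMON's real code also aligns cut points to DAMON_MIN_REGION):

    #include <stdlib.h>

    struct region {
        unsigned long start;
        unsigned long end;    /* half-open: [start, end) */
    };

    /*
     * Split r into up to nr_pieces contiguous subregions at random cut
     * points, per the "two or three regions" rule in the excerpt.
     * Returns the number of subregions written to out.
     */
    static int split_region(const struct region *r, int nr_pieces,
                            struct region *out)
    {
        unsigned long start = r->start;
        int i;

        for (i = 0; i < nr_pieces - 1; i++) {
            unsigned long left = r->end - start;

            if (left < 2)    /* too small to cut again */
                break;
            out[i].start = start;
            out[i].end = start + 1 + (unsigned long)rand() % (left - 1);
            start = out[i].end;
        }
        out[i].start = start;
        out[i].end = r->end;
        return i + 1;
    }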