/hafnium-2.8-rc0/inc/hf/
mm.h
  117  bool mm_identity_prepare(struct mm_ptable *t, paddr_t begin, paddr_t end,
  119  void *mm_identity_commit(struct mm_ptable *t, paddr_t begin, paddr_t end,
  122  bool mm_vm_identity_map(struct mm_ptable *t, paddr_t begin, paddr_t end,
  124  bool mm_vm_identity_prepare(struct mm_ptable *t, paddr_t begin, paddr_t end,
  126  void mm_vm_identity_commit(struct mm_ptable *t, paddr_t begin, paddr_t end,
  128  bool mm_vm_unmap(struct mm_ptable *t, paddr_t begin, paddr_t end,
  133  bool mm_vm_get_mode(struct mm_ptable *t, ipaddr_t begin, ipaddr_t end,
  135  bool mm_get_mode(struct mm_ptable *t, vaddr_t begin, vaddr_t end,
  142  paddr_t end, uint32_t mode, struct mpool *ppool);
  143  bool mm_unmap(struct mm_stage1_locked stage1_locked, paddr_t begin, paddr_t end,

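Taken together, the mm_identity_prepare()/mm_identity_commit() pair (and the mm_vm_* variants) split mapping into a fallible allocation step and an infallible commit step; the vm.c hits further down show the same prepare-then-commit call sequence. Below is a minimal caller sketch, assuming the trailing parameters truncated in the hits above are the usual mode flags and an mpool; it is an illustration, not the exact Hafnium API.

#include <stdbool.h>
#include <stdint.h>

#include "hf/addr.h"
#include "hf/mm.h"
#include "hf/mpool.h"

/*
 * Sketch only: the `mode`/`ppool` tail of the parameter lists is assumed,
 * since the hits above truncate the declarations after `end`.
 */
static bool map_range_identity(struct mm_ptable *t, paddr_t begin,
                               paddr_t end, uint32_t mode,
                               struct mpool *ppool)
{
        /* Phase 1: allocate any page-table nodes needed; may fail, maps nothing. */
        if (!mm_identity_prepare(t, begin, end, mode, ppool)) {
                return false;
        }

        /* Phase 2: write the entries; the void * result is ignored here. */
        mm_identity_commit(t, begin, end, mode, ppool);

        return true;
}

The split lets a caller reserve all the page-table memory an update needs before any entry is changed, so the commit phase cannot fail partway through.
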
vm.h
  272  bool vm_identity_map(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
  274  bool vm_identity_prepare(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
  276  void vm_identity_commit(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
  278  bool vm_unmap(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
  286  bool vm_mem_get_mode(struct vm_locked vm_locked, ipaddr_t begin, ipaddr_t end,

addr.h
   68  static inline size_t pa_difference(paddr_t start, paddr_t end)  in pa_difference() argument
   70  return pa_addr(end) - pa_addr(start);  in pa_difference()

boot_params.h
   24  paddr_t end;  member

fdt_handler.h
   30  bool fdt_find_initrd(const struct fdt *fdt, paddr_t *begin, paddr_t *end);

/hafnium-2.8-rc0/src/
mm.c
  168  va_init(end));  in mm_invalidate_tlb()
  171  ipa_init(end));  in mm_invalidate_tlb()
  374  if (end > level_end) {  in mm_map_level()
  375  end = level_end;  in mm_map_level()
  379  while (begin < end) {  in mm_map_level()
  446  while (begin < end) {  in mm_map_root()
  480  end = ptable_end;  in mm_ptable_identity_map()
  769  end = level_end;  in mm_ptable_get_attrs_level()
  773  while (begin < end) {  in mm_ptable_get_attrs_level()
  820  end = mm_round_up_to_page(end);  in mm_get_attrs()
  [all …]

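The mm_map_level() and mm_ptable_get_attrs_level() hits share one walk idiom: clamp the requested end to the end of the region the current table level covers, then iterate entries while begin < end. A simplified, non-authoritative sketch of that shape (names and the block-size handling are assumptions; the real entry processing lives in mm.c):

#include "hf/addr.h"

/*
 * Illustration of the clamp-and-walk idiom, not the Hafnium code.
 * `level_end` is the first address not covered by the table being walked;
 * `block_size` is the (power-of-two) span of one entry at this level.
 */
static void walk_level_sketch(uintpaddr_t begin, uintpaddr_t end,
                              uintpaddr_t level_end, uintpaddr_t block_size)
{
        /* Only handle the part of [begin, end) this table actually covers. */
        if (end > level_end) {
                end = level_end;
        }

        while (begin < end) {
                /* ...map or inspect the entry covering `begin` here... */

                /* Advance to the start of the next entry at this level. */
                begin = (begin + block_size) & ~(block_size - 1);
        }
}
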
fdt_handler.c
   37  bool fdt_find_initrd(const struct fdt *fdt, paddr_t *begin, paddr_t *end)  in fdt_find_initrd() argument
   59  *end = pa_init(initrd_end);  in fdt_find_initrd()
  155  mem_ranges[mem_range_index].end =  in fdt_find_memory_ranges()
  222  paddr_t end = pa_add(begin, fdt_size(fdt));  in fdt_unmap() local
  224  if (!mm_unmap(stage1_locked, begin, end, ppool)) {  in fdt_unmap()
  255  *size = pa_difference(mem_range.begin, mem_range.end);  in fdt_get_memory_size()

fdt_handler_test.cc
  110  EXPECT_THAT(pa_addr(params.mem_ranges[0].end), Eq(0x20000000));  in TEST()
  112  EXPECT_THAT(pa_addr(params.mem_ranges[1].end), Eq(0x30010000));  in TEST()
  114  EXPECT_THAT(pa_addr(params.mem_ranges[2].end), Eq(0x30030000));  in TEST()

load.c
   71  paddr_t end, const struct manifest_vm *manifest_vm,  in load_kernel() argument
   85  if (pa_difference(begin, end) < size) {  in load_kernel()
  338  params->mem_ranges[i].end,  in load_primary()
  353  params->device_mem_ranges[i].end,  in load_primary()
  403  paddr_t end, size_t fdt_max_size,  in load_secondary_fdt() argument
  851  *found_end = mem_ranges[i].end;  in carve_out_mem_range()
  854  mem_ranges[i].end = *found_begin;  in carve_out_mem_range()
  886  .end = after[i].begin;  in update_reserved_ranges()
  889  if (pa_addr(after[i].end) < pa_addr(before[i].end)) {  in update_reserved_ranges()
  897  .begin = after[i].end;  in update_reserved_ranges()
  [all …]

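The carve_out_mem_range() hits show memory being taken from the top of an existing range: the carved piece ends where the range used to end, and the range is shrunk so it now ends where the carved piece begins. A hedged sketch of that bookkeeping, reusing the begin/end members of struct mem_range from boot_params.h above (the size check and parameter names are assumptions read off the hits, not the full load.c logic):

#include <stdbool.h>
#include <stddef.h>

#include "hf/addr.h"
#include "hf/boot_params.h"

/*
 * Illustrative only: carve `size` bytes off the top of `*range`,
 * returning the carved piece in [*found_begin, *found_end).
 */
static bool carve_from_top_sketch(struct mem_range *range, size_t size,
                                  paddr_t *found_begin, paddr_t *found_end)
{
        if (pa_difference(range->begin, range->end) < size) {
                return false;
        }

        /* The carved piece is the last `size` bytes of the range... */
        *found_end = range->end;
        *found_begin = pa_init(pa_addr(range->end) - size);

        /* ...and the range now stops where the carved piece starts. */
        range->end = *found_begin;

        return true;
}
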
vm.c
  282  if (!vm_identity_prepare(vm_locked, begin, end, mode, ppool)) {  in vm_identity_map()
  286  vm_identity_commit(vm_locked, begin, end, mode, ppool, ipa);  in vm_identity_map()
  305  return mm_identity_prepare(&vm_locked.vm->ptable, begin, end,  in vm_identity_prepare()
  308  return mm_vm_identity_prepare(&vm_locked.vm->ptable, begin, end, mode,  in vm_identity_prepare()
  321  mm_identity_commit(&vm_locked.vm->ptable, begin, end, mode,  in vm_identity_commit()
  333  mm_vm_identity_commit(&vm_locked.vm->ptable, begin, end, mode,  in vm_identity_commit()
  336  plat_iommu_identity_map(vm_locked, begin, end, mode);  in vm_identity_commit()
  345  bool vm_unmap(struct vm_locked vm_locked, paddr_t begin, paddr_t end,  in vm_unmap() argument
  350  return vm_identity_map(vm_locked, begin, end, mode, ppool, NULL);  in vm_unmap()
  433  va_from_pa(pa_from_ipa(end)), mode);  in vm_mem_get_mode()
  [all …]

fdt_patch.c
   44  static bool add_mem_reservation(void *fdt, paddr_t begin, paddr_t end)  in add_mem_reservation() argument
   46  size_t len = pa_difference(begin, end);  in add_mem_reservation()
  131  rsv &= add_mem_reservation(fdt, range.begin, range.end);  in fdt_patch()

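add_mem_reservation() derives the reservation length from the begin/end pair with pa_difference(); only that call is visible in the hit, so the sketch below assumes the reservation itself is recorded with libfdt's fdt_add_mem_rsv():

#include <stdbool.h>
#include <stddef.h>

#include <libfdt.h>

#include "hf/addr.h"

/*
 * Sketch under the assumption that the reservation is added via libfdt's
 * fdt_add_mem_rsv(); only the pa_difference() call is shown in the hit.
 */
static bool add_mem_reservation_sketch(void *fdt, paddr_t begin, paddr_t end)
{
        size_t len = pa_difference(begin, end);

        return fdt_add_mem_rsv(fdt, pa_addr(begin), len) == 0;
}
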
init.c
  100  pa_addr(params.mem_ranges[i].end) - 1);  in one_time_init()

mpool_test.cc
   36  sort(allocs.begin(), allocs.end());  in check_allocs()
   37  sort(chunks.begin(), chunks.end(),  in check_allocs()

ffa_memory.c
  468  ipaddr_t end = ipa_add(begin, size);  in constituents_get_mode() local
  473  !is_aligned(ipa_addr(end), PAGE_SIZE)) {  in constituents_get_mode()
  481  if (!vm_mem_get_mode(vm, begin, end, &current_mode)) {  in constituents_get_mode()
  786  static bool clear_memory(paddr_t begin, paddr_t end, struct mpool *ppool,  in clear_memory() argument
  797  void *ptr = mm_identity_map(stage1_locked, begin, end,  in clear_memory()
  801  size_t size = pa_difference(begin, end);  in clear_memory()
  809  mm_unmap(stage1_locked, begin, end, ppool);  in clear_memory()
  852  paddr_t end = pa_add(begin, size);  in ffa_clear_memory_constituents() local
  854  if (!clear_memory(begin, end, &local_page_pool,  in ffa_clear_memory_constituents()

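clear_memory() follows a map/operate/unmap shape: identity-map the physical range at stage-1, zero pa_difference(begin, end) bytes through the returned pointer, then unmap. A simplified sketch of that shape (the stage-1 locking, error paths, and the cache maintenance the real code performs are pared down):

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#include "hf/addr.h"
#include "hf/mm.h"
#include "hf/mpool.h"

/*
 * Sketch of the map/zero/unmap sequence visible in the clear_memory()
 * hits; the real function also cleans the data cache before unmapping.
 */
static bool clear_memory_sketch(struct mm_stage1_locked stage1_locked,
                                paddr_t begin, paddr_t end,
                                uint32_t mode, struct mpool *ppool)
{
        void *ptr = mm_identity_map(stage1_locked, begin, end, mode, ppool);
        size_t size = pa_difference(begin, end);

        if (ptr == NULL) {
                return false;
        }

        memset(ptr, 0, size);

        return mm_unmap(stage1_locked, begin, end, ppool);
}
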
/hafnium-2.8-rc0/src/boot_flow/
spmc.c
   40  paddr_t *end)  in plat_boot_flow_get_initrd_range() argument
   45  *end = pa_init(0);  in plat_boot_flow_get_initrd_range()

linux.c
   41  paddr_t *end)  in plat_boot_flow_get_initrd_range() argument
   43  return fdt_find_initrd(fdt, begin, end);  in plat_boot_flow_get_initrd_range()

/hafnium-2.8-rc0/src/iommu/
absent.c
   30  paddr_t end, uint32_t mode)  in plat_iommu_identity_map() argument
   34  (void)end;  in plat_iommu_identity_map()

/hafnium-2.8-rc0/test/hftest/
mm.c
   75  paddr_t end = pa_add(start, size);  in hftest_mm_identity_map() local
   77  if (mm_identity_map(stage1_locked, start, end, mode, &ppool) != base) {  in hftest_mm_identity_map()

/hafnium-2.8-rc0/src/arch/aarch64/
mm.c
  301  uintvaddr_t end = va_addr(va_end);  in arch_mm_invalidate_stage1_range() local
  312  if ((end - begin) > (MAX_TLBI_OPS * PAGE_SIZE)) {  in arch_mm_invalidate_stage1_range()
  320  end >>= 12;  in arch_mm_invalidate_stage1_range()
  322  for (it = begin; it < end;  in arch_mm_invalidate_stage1_range()
  356  uintpaddr_t end = ipa_addr(va_end);  in arch_mm_invalidate_stage2_range() local
  381  if ((end - begin) > (MAX_TLBI_OPS * PAGE_SIZE)) {  in arch_mm_invalidate_stage2_range()
  389  end >>= 12;  in arch_mm_invalidate_stage2_range()
  396  for (it = begin; it < end;  in arch_mm_invalidate_stage2_range()
  439  uintptr_t end = (uintptr_t)base + size;  in arch_mm_flush_dcache() local
  441  while (line_begin < end) {  in arch_mm_flush_dcache()

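arch_mm_invalidate_stage1_range() bounds the cost of a range invalidation: a range spanning more than MAX_TLBI_OPS pages falls back to invalidating the whole TLB, otherwise the bounds are shifted down by 12 bits (4 KiB granules) and one TLBI is issued per page. A hedged sketch of that decision, with the TLBI/DSB sequences reduced to comments and the threshold value assumed:

#include "hf/addr.h"

/* Stand-ins for Hafnium's MAX_TLBI_OPS and PAGE_SIZE; values assumed. */
#define MAX_TLBI_OPS_SKETCH 512
#define PAGE_SIZE_SKETCH 4096

/*
 * Illustration of the bounded range-invalidation pattern, not the real
 * arch/aarch64 implementation.
 */
static void invalidate_stage1_range_sketch(uintvaddr_t begin, uintvaddr_t end)
{
        uintvaddr_t it;

        if ((end - begin) > (MAX_TLBI_OPS_SKETCH * PAGE_SIZE_SKETCH)) {
                /* Too many pages: invalidate the whole stage-1 TLB instead. */
                /* invalidate-all TLBI (e.g. tlbi vmalle2is); dsb; isb */
                return;
        }

        /* TLBI-by-VA takes a page-granular operand, hence the shift by 12. */
        begin >>= 12;
        end >>= 12;

        for (it = begin; it < end; ++it) {
                /* TLBI by VA (e.g. vae2is) for the page at `it` << 12 */
        }

        /* dsb; isb -- ensure the invalidations have completed */
}
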
/hafnium-2.8-rc0/src/arch/aarch64/hftest/
el0_entry.S
    9  .macro ffa_mem_perm_set start:req end:req perm:req
   13  adrp x30, \end
   14  add x30, x30, :lo12:\end

/hafnium-2.8-rc0/inc/hf/plat/
boot_flow.h
   21  paddr_t *end);

iommu.h
   47  paddr_t end, uint32_t mode);

/hafnium-2.8-rc0/build/image/
image.ld
   48  * Collect together read-only data including relocations at the end
  101  * placed at the end of the image and will not be mapped automatically
  122  * Collect together the read-write data including .bss at the end which

/hafnium-2.8-rc0/build/docker/
Dockerfile
   16  # Install dependencies. Clear APT cache at the end to save space.

/hafnium-2.8-rc0/build/toolchain/
host.gni
   90  …a_ldflags} {{ldflags}} -o $outfile -Wl,--start-group @$rspfile {{solibs}} -Wl,--end-group {{libs}}"