
Searched refs:offset (Results 1 – 25 of 34) sorted by relevance

/hyp/vm/vgic/src/
vdevice.c
169 assert(offset >= base_offset); in vgic_read_irqbits()
358 offset); in gicd_vdevice_read()
373 offset); in gicd_vdevice_read()
378 offset); in gicd_vdevice_read()
427 offset, val); in gicd_vdevice_write()
620 (offset <= OFS_GICD_ICFGR(63U))) { in gicd_vdevice_write()
710 if ((offset & (size - 1U)) != 0UL) { in gicd_access_allowed()
879 offset - offsetof(gicr_t, sgi)); in gicr_vdevice_read()
1087 (offset <= in gicr_vdevice_write()
1127 if ((offset & (size - 1U)) != 0UL) { in gicr_access_allowed()
[all …]
/hyp/vm/vrtc_pl031/src/
vrtc_pl031.c
66 if (offset == offsetof(vrtc_pl031_t, RTCDR)) { in vrtc_pl031_reg_read()
71 } else if (offset == offsetof(vrtc_pl031_t, RTCLR)) { in vrtc_pl031_reg_read()
73 } else if (offset == offsetof(vrtc_pl031_t, RTCCR)) { in vrtc_pl031_reg_read()
77 (offset <= offsetof(vrtc_pl031_t, RTCPeriphID3))) { in vrtc_pl031_reg_read()
79 uint8_t id = (uint8_t)((offset - in vrtc_pl031_reg_read()
85 (offset <= offsetof(vrtc_pl031_t, RTCPCellID3))) { in vrtc_pl031_reg_read()
87 uint8_t id = (uint8_t)((offset - in vrtc_pl031_reg_read()
100 if (offset == offsetof(vrtc_pl031_t, RTCLR)) { in vrtc_pl031_reg_write()
139 size_t offset = (size_t)(ipa - vrtc->ipa); in vrtc_pl031_handle_vdevice_access_fixed_addr() local
142 vrtc_pl031_reg_write(vrtc, offset, value); in vrtc_pl031_handle_vdevice_access_fixed_addr()
[all …]
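
The vrtc_pl031.c hits above decode each trapped register access by comparing the faulting offset against offsetof() of the corresponding field in the register-layout struct. A minimal sketch of that decode pattern, using a hypothetical cut-down PL031-style layout (field names borrowed from the hits; the real vrtc_pl031_t has more registers and different handling of unimplemented offsets):

#include <stddef.h>
#include <stdint.h>

/* Hypothetical, cut-down register layout for illustration only. */
typedef struct {
        uint32_t RTCDR; /* data register, offset 0x0 */
        uint32_t RTCMR; /* match register, offset 0x4 */
        uint32_t RTCLR; /* load register, offset 0x8 */
        uint32_t RTCCR; /* control register, offset 0xc */
} rtc_regs_t;

/* Decode a trapped read by matching the access offset against the
 * offsetof() of each emulated register. */
static uint32_t
rtc_reg_read(const rtc_regs_t *rtc, size_t offset)
{
        uint32_t val;

        if (offset == offsetof(rtc_regs_t, RTCDR)) {
                val = rtc->RTCDR;
        } else if (offset == offsetof(rtc_regs_t, RTCLR)) {
                val = rtc->RTCLR;
        } else if (offset == offsetof(rtc_regs_t, RTCCR)) {
                val = rtc->RTCCR;
        } else {
                val = 0U; /* unhandled offsets read as zero in this sketch */
        }

        return val;
}
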
/hyp/vm/vetm/src/
vetm.c
46 vetm_access_allowed(size_t size, size_t offset) in vetm_access_allowed() argument
51 if ((offset & (size - 1U)) != 0UL) { in vetm_access_allowed()
54 ret = (offset <= (ETM_SIZE_PERCPU - size)); in vetm_access_allowed()
56 ret = (offset <= (ETM_SIZE_PERCPU - size)); in vetm_access_allowed()
93 if (offset == offsetof(etm_t, trcprgctlr)) { in vetm_vdevice_write()
95 } else if (offset == offsetof(etm_t, trcvictlr)) { in vetm_vdevice_write()
113 etm_get_reg(pcpu, offset, val, access_size); in vetm_vdevice_read()
139 size_t offset = in vetm_handle_vdevice_access_fixed_addr() local
143 vetm_access_allowed(access_size, offset)) { in vetm_handle_vdevice_access_fixed_addr()
145 ret = vetm_vdevice_write(vcpu, pcpu, offset, in vetm_handle_vdevice_access_fixed_addr()
[all …]
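
vetm_access_allowed() above, like the equivalent checks in the vgic and virtio-mmio vdevices, gates every trapped MMIO access on two offset tests: the offset must be naturally aligned to the access size, and the access must not run past the end of the emulated region. A minimal sketch of that pattern, assuming the access size is a nonzero power of two no larger than the region, with REGION_SIZE standing in for the device-specific constant (ETM_SIZE_PERCPU in the hits above):

#include <stdbool.h>
#include <stddef.h>

#define REGION_SIZE 0x1000U /* placeholder for e.g. ETM_SIZE_PERCPU */

static bool
access_allowed(size_t size, size_t offset)
{
        bool ret;

        if ((offset & (size - 1U)) != 0UL) {
                /* Offset not aligned to the (power-of-two) access size. */
                ret = false;
        } else {
                /* Last byte of the access must stay inside the region. */
                ret = (offset <= (REGION_SIZE - size));
        }

        return ret;
}
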
/hyp/core/util/aarch64/src/
memset.S
85 .equ LOCAL(offset), LOCAL(offset) + 16
117 .equ LOCAL(offset), LOCAL(offset) + 16
197 prfm pstl1keep, [x0, LOCAL(offset)]
198 .equ LOCAL(offset), LOCAL(offset) + (1 << CPU_L1D_LINE_BITS)
208 prfm pstl1keep, [x0, LOCAL(offset)]
209 .equ LOCAL(offset), LOCAL(offset) + (1 << CPU_L1D_LINE_BITS)
212 .equ LOCAL(offset), 0
214 stp x1, x1, [x0, LOCAL(offset)]
215 .equ LOCAL(offset), LOCAL(offset) + 0x10
228 .equ LOCAL(offset), 0x10 - LOCAL(chunk)
[all …]
memcpy.S
84 .equ LOCAL(offset), 1 << CPU_L1D_LINE_BITS
86 prfm pldl1strm, [x1, LOCAL(offset)]
87 prfm pstl1keep, [x0, LOCAL(offset)]
88 .equ LOCAL(offset), LOCAL(offset) + (1 << CPU_L1D_LINE_BITS)
97 prfm pldl1strm, [x1, LOCAL(offset)]
98 .equ LOCAL(offset), LOCAL(offset) + (1 << CPU_L1D_LINE_BITS)
129 .equ LOCAL(offset), 0
131 prfm pstl1keep, [x0, LOCAL(offset) + 0x10]
133 .equ LOCAL(offset), LOCAL(offset) + (1 << CPU_L1D_LINE_BITS)
161 .equ LOCAL(offset), 0
[all …]
/hyp/interfaces/memextent/include/
memextent.h
25 memextent_configure_derive(memextent_t *me, memextent_t *parent, size_t offset,
39 memextent_donate_child(memextent_t *me, size_t offset, size_t size,
47 memextent_donate_sibling(memextent_t *from, memextent_t *to, size_t offset,
71 size_t offset, size_t size,
92 vmaddr_t vm_base, size_t offset, size_t size);
105 memextent_zero_range(memextent_t *me, size_t offset, size_t size);
109 memextent_cache_clean_range(memextent_t *me, size_t offset, size_t size);
113 memextent_cache_flush_range(memextent_t *me, size_t offset, size_t size);
132 vmaddr_t vm_base, size_t offset, size_t size,
152 memextent_derive(memextent_t *parent, paddr_t offset, size_t size,
/hyp/mem/memextent/src/
memextent.c
169 ((parent->phys_base + offset) >= in memextent_configure_derive()
171 ((offset + size) > parent->size)) { in memextent_configure_derive()
207 paddr_t phys_base = parent->phys_base + offset; in memextent_configure_derive()
312 paddr_t phys = me->phys_base + offset; in memextent_donate_child()
348 paddr_t phys = from->phys_base + offset; in memextent_donate_sibling()
437 if ((offset + (size - 1U)) >= extent->size) { in memextent_map_partial()
452 offset, size, in memextent_map_partial()
500 if ((offset + (size - 1U)) >= extent->size) { in memextent_unmap_partial()
572 paddr_t phys = extent->phys_base + offset; in memextent_clean_range()
603 return memextent_clean_range(me, offset, size, in memextent_cache_clean_range()
[all …]
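
Several of the memextent.c hits above (memextent_map_partial(), memextent_unmap_partial()) validate a partial range with the test (offset + (size - 1U)) >= extent->size, which accepts a range ending exactly at the extent boundary and rejects anything longer. A standalone sketch of that bounds check, assuming a nonzero size whose addition cannot overflow (the real code validates those conditions separately):

#include <stdbool.h>
#include <stddef.h>

/* True when [offset, offset + size) fits inside an extent of extent_size
 * bytes. Written with (size - 1U) so that a range ending exactly at the
 * extent boundary is still accepted. Assumes size != 0 and that
 * offset + size does not overflow. */
static bool
range_in_extent(size_t offset, size_t size, size_t extent_size)
{
        return (offset + (size - 1U)) < extent_size;
}
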
hypercalls.c
23 memextent_modify_flags_t flags, size_t offset, in hypercall_memextent_modify() argument
49 err = memextent_zero_range(memextent, offset, size); in hypercall_memextent_modify()
52 err = memextent_cache_clean_range(memextent, offset, size); in hypercall_memextent_modify()
55 err = memextent_cache_flush_range(memextent, offset, size); in hypercall_memextent_modify()
114 size_t offset, size_t size, in hypercall_memextent_configure_derive() argument
148 err = memextent_configure_derive(target_me, parent, offset, in hypercall_memextent_configure_derive()
166 size_t offset, size_t size, bool reverse) in hypercall_memextent_donate_child() argument
210 err = memextent_donate_child(child.r, offset, size, reverse); in hypercall_memextent_donate_child()
240 err = memextent_donate_sibling(m1.r, m2.r, offset, size); in hypercall_memextent_donate_sibling()
251 cap_id_t to, size_t offset, size_t size) in hypercall_memextent_donate() argument
[all …]
memextent_basic.c
76 assert(!util_add_overflows(map->vbase, offset)); in memextent_do_map()
84 s, map->vbase + offset, size, me->phys_base + offset, in memextent_do_map()
325 size_t offset, size_t size) in memextent_do_unmap() argument
336 me->phys_base + offset); in memextent_do_unmap()
356 size_t offset = base - args->me->phys_base; in memextent_map_range() local
383 size_t offset = base - args->me->phys_base; in memextent_unmap_range() local
720 size_t offset = 0U; in memextent_restore_parent_mappings() local
721 while (offset < me->size) { in memextent_restore_parent_mappings()
722 paddr_t phys = me->phys_base + offset; in memextent_restore_parent_mappings()
723 size_t size = me->size - offset; in memextent_restore_parent_mappings()
[all …]
memextent_tests.c
255 size_t offset = 0; in tests_memextent_test1() local
260 me_ret = memextent_derive(me2, offset, size3, memtype, access, in tests_memextent_test1()
304 me_ret = memextent_derive(me_d, offset, size3, memtype, access, in tests_memextent_test1()
313 me_ret = memextent_derive(me_d2, offset, size3, memtype, access, in tests_memextent_test1()
471 size_t offset = 4096; in tests_memextent_test2() local
473 paddr_t vm_base2 = vm_base + offset; in tests_memextent_test2()
477 me_ret = memextent_derive(me, offset, size3, memtype, access, in tests_memextent_test2()
/hyp/core/base/templates/
hypcontainers.h.tmpl
12 #for $name, $type, $offset in $d.layout
16 _Static_assert(offsetof(${outer_type}, ${name}) == $offset,
17 "Generated offset for ${name} in ${outer_type} is incorrect");
18 return ((${outer_type} *)((uintptr_t)(ptr) - $offset));
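
The hypcontainers template above generates a container-of style accessor: it pins the member's offset with a _Static_assert and then subtracts that offset from the member pointer to recover the enclosing object. A hand-written sketch of what one generated instance might look like, using hypothetical outer_t/inner_t types in place of the template's ${outer_type}/${name}:

#include <stddef.h>
#include <stdint.h>

typedef struct {
        int value;
} inner_t;

typedef struct {
        uint64_t header;
        inner_t  member;
} outer_t;

/* The template emits an assert like this with the offset computed at
 * generation time; here the expected value is simply sizeof(uint64_t). */
_Static_assert(offsetof(outer_t, member) == sizeof(uint64_t),
               "Generated offset for member in outer_t is incorrect");

/* Recover the containing outer_t from a pointer to its member. */
static outer_t *
outer_from_member(inner_t *ptr)
{
        return (outer_t *)((uintptr_t)ptr - offsetof(outer_t, member));
}
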
hypconstants.h.tmpl
12 #set offset = base + ofs
14 \#define ${prefix}_${upper_name} $offset
16 \#define ${prefix}_${upper_name}(n) ($offset + ($type.base_type.size * (n)))
22 $print_offsets($p, $offset, $d)
/hyp/vm/rootvm_package/src/
package.c
94 size_t offset = phys_base - PLATFORM_ROOTVM_LMA_BASE; in rootvm_package_load_elf() local
159 offset += size; in rootvm_package_load_elf()
278 size_t offset = 0U; in rootvm_package_handle_rootvm_init() local
292 pkg_hdr->list[i].offset); in rootvm_package_handle_rootvm_init()
294 if (pkg_hdr->list[i].offset > map_size) { in rootvm_package_handle_rootvm_init()
299 map_size - pkg_hdr->list[i].offset; in rootvm_package_handle_rootvm_init()
306 runtime_ipa = ipa + offset; in rootvm_package_handle_rootvm_init()
313 app_ipa = ipa + offset; in rootvm_package_handle_rootvm_init()
325 offset = load_next - PLATFORM_ROOTVM_LMA_BASE; in rootvm_package_handle_rootvm_init()
342 vmaddr_t env_data_ipa = ipa + offset; in rootvm_package_handle_rootvm_init()
[all …]
/hyp/platform/etm/include/
etm.h
6 etm_set_reg(cpu_index_t cpu, size_t offset, register_t val, size_t access_size);
9 etm_get_reg(cpu_index_t cpu, size_t offset, register_t *val,
/hyp/vm/virtio_mmio/src/
vdevice.c
26 virtio_mmio_access_allowed(size_t size, size_t offset) in virtio_mmio_access_allowed() argument
31 if ((offset & (size - 1U)) != 0UL) { in virtio_mmio_access_allowed()
38 ret = ((offset >= (size_t)OFS_VIRTIO_MMIO_REGS_DEVICE_CONFIG) && in virtio_mmio_access_allowed()
39 (offset <= in virtio_mmio_access_allowed()
247 virtio_mmio_vdevice_write(virtio_mmio_t *virtio_mmio, size_t offset, in virtio_mmio_vdevice_write() argument
252 switch (offset) { in virtio_mmio_vdevice_write()
329 (const virtio_mmio_t *)virtio_mmio, offset, val, in virtio_mmio_vdevice_write()
338 virtio_mmio_handle_vdevice_access(vdevice_t *vdevice, size_t offset, in virtio_mmio_handle_vdevice_access() argument
359 if (!virtio_mmio_access_allowed(access_size, offset)) { in virtio_mmio_handle_vdevice_access()
364 ret = virtio_mmio_vdevice_write(virtio_mmio, offset, (uint32_t)*value, in virtio_mmio_handle_vdevice_access()
/hyp/mem/memextent_sparse/src/
memextent_sparse.c
380 size_t offset = 0U; in apply_mappings() local
381 while (offset < size) { in apply_mappings()
421 *fail_offset = offset; in apply_mappings()
454 offset += curr_size; in apply_mappings()
502 size_t offset = 0U; in do_mapping_transfer() local
503 while (offset < size) { in do_mapping_transfer()
593 offset += curr_size; in do_mapping_transfer()
718 assert(offset != NULL); in lookup_phys_range()
804 size_t offset = 0U; in memextent_activate_derive_sparse() local
1127 size_t offset = 0U; in memextent_unmap_all_sparse() local
[all …]
memextent_tests.c
62 create_memextent(memextent_t *parent, size_t offset, size_t size, bool sparse) in create_memextent() argument
83 if (memextent_configure_derive(ret.r, parent, offset, size, in create_memextent()
88 if (memextent_configure(ret.r, offset, size, attrs) != OK) { in create_memextent()
133 map_memextent(memextent_t *me, addrspace_t *as, vmaddr_t vbase, size_t offset, in map_memextent() argument
142 return memextent_map_partial(me, as, vbase, offset, size, map_attrs); in map_memextent()
/hyp/mem/useraccess/aarch64/src/
useraccess.c
196 size_t offset = 0U; in useraccess_copy_from_to_guest_ipa() local
204 while (offset < size) { in useraccess_copy_from_to_guest_ipa()
217 &addrspace->vm_pgtable, ipa + offset, &mapped_base, in useraccess_copy_from_to_guest_ipa()
234 size_t mapping_offset = (ipa + offset) & (mapped_size - 1U); in useraccess_copy_from_to_guest_ipa()
241 uint8_t *hyp_va = (uint8_t *)hvaddr + offset; in useraccess_copy_from_to_guest_ipa()
242 size_t hyp_size = size - offset; in useraccess_copy_from_to_guest_ipa()
270 offset += copied_size; in useraccess_copy_from_to_guest_ipa()
274 return (size_result_t){ .e = ret, .r = offset }; in useraccess_copy_from_to_guest_ipa()
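
useraccess_copy_from_to_guest_ipa() above follows a chunked-progress pattern: each iteration looks up how much of the remaining guest range is contiguously mapped, copies that much, and advances offset by the amount actually copied. A simplified standalone sketch of the loop shape, with copy_chunk() as a hypothetical stand-in for the per-iteration page-table lookup and clamping done by the real code:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical stand-in for the per-iteration lookup: copies at most 16
 * bytes per call to model making only partial progress each pass. */
static size_t
copy_chunk(uint8_t *dst, const uint8_t *src, size_t remaining)
{
        size_t chunk = (remaining < 16U) ? remaining : 16U;

        memcpy(dst, src, chunk);
        return chunk;
}

/* Loop shape used by the hit above: advance offset by however much each
 * pass managed to copy, and return the total once the range is covered. */
static size_t
copy_range(uint8_t *dst, const uint8_t *src, size_t size)
{
        size_t offset = 0U;

        while (offset < size) {
                offset += copy_chunk(dst + offset, src + offset,
                                     size - offset);
        }

        return offset;
}
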
/hyp/interfaces/util/include/
util.h
73 #define util_offset_in_range(offset, type, member) \ argument
74 (((offset) >= offsetof(type, member)) && \
75 ((offset) < \
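
The util.h hit above is truncated mid-definition, so the macro's upper bound is not visible here. A hypothetical macro of the same general shape (given a different name so as not to claim this is the exact util_offset_in_range body) tests whether an offset lands within the bytes occupied by a given struct member:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Assumed shape only: true when offset falls within member's bytes. */
#define offset_in_member(offset, type, member)                        \
        (((offset) >= offsetof(type, member)) &&                      \
         ((offset) < (offsetof(type, member) +                        \
                      sizeof(((type *)NULL)->member))))

/* Example use against a hypothetical register block. */
typedef struct {
        uint32_t ctrl;
        uint32_t data[4];
} example_regs_t;

static bool
offset_hits_data(size_t offset)
{
        return offset_in_member(offset, example_regs_t, data);
}
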
/hyp/platform/etm/src/
etm.c
203 etm_set_reg(cpu_index_t cpu, size_t offset, register_t val, size_t access_size) in etm_set_reg() argument
209 assert(offset < (sizeof(*mapped_etms[cpu]) - access_size)); in etm_set_reg()
213 _Atomic uint32_t *reg = (_Atomic uint32_t *)(base + offset); in etm_set_reg()
216 _Atomic uint64_t *reg = (_Atomic uint64_t *)(base + offset); in etm_set_reg()
224 etm_get_reg(cpu_index_t cpu, size_t offset, register_t *val, size_t access_size) in etm_get_reg() argument
236 _Atomic uint32_t *reg = (_Atomic uint32_t *)(base + offset); in etm_get_reg()
240 _Atomic uint64_t *reg = (_Atomic uint64_t *)(base + offset); in etm_get_reg()
/hyp/mem/hyp_aspace/armv8/src/
hyp_aspace.c
353 for (size_t offset = 0U; offset < size; in hyp_aspace_allocate() local
354 offset += HYP_ASPACE_ALLOCATE_SIZE) { in hyp_aspace_allocate()
355 ret.e = pgtable_hyp_preallocate(hyp_partition, virt + offset, in hyp_aspace_allocate()
358 virt_range_t vr = { .base = (virt + offset), in hyp_aspace_allocate()
600 size_t offset = 0U; in hyp_aspace_is_mapped() local
601 while (offset < size) { in hyp_aspace_is_mapped()
602 uintptr_t curr = virt + offset; in hyp_aspace_is_mapped()
639 if (util_add_overflows(offset, mapped_size)) { in hyp_aspace_is_mapped()
642 offset += mapped_size; in hyp_aspace_is_mapped()
/hyp/mem/addrspace/src/
hypercalls.c
121 addrspace_map_flags_t map_flags, size_t offset, in hypercall_addrspace_map() argument
152 ret = memextent_map_partial(memextent, addrspace, vbase, offset, in hypercall_addrspace_map()
173 size_t offset, size_t size) in hypercall_addrspace_unmap() argument
203 offset, size); in hypercall_addrspace_unmap()
225 size_t offset, size_t size) in hypercall_addrspace_update_access() argument
256 vbase, offset, size, in hypercall_addrspace_update_access()
365 ret.offset = phys_start - memextent->phys_base; in hypercall_addrspace_lookup()
/hyp/vm/arm_pv_time/src/
arm_pv_time.c
70 size_t offset = offsetof(addrspace_info_area_layout_t, in smccc_pv_time_st() local
72 assert((offset + sizeof(pv_time_data_t)) <= in smccc_pv_time_st()
74 ret = current->addrspace->info_area.ipa + offset; in smccc_pv_time_st()
/hyp/misc/gpt/src/
gpt.c
428 size_t offset = (size_t)i << entry_shifts; in try_clean() local
430 offset)) { in try_clean()
868 size_t offset = 0U; in gpt_do_write() local
869 while ((ret.e == OK) && (offset < size)) { in gpt_do_write()
871 base + offset, size - offset, x, y, match); in gpt_do_write()
873 offset += ret.r; in gpt_do_write()
881 ret.r = offset; in gpt_do_write()
957 size_t offset = 0U; in gpt_read() local
958 while ((ret.e == OK) && (offset < size)) { in gpt_read()
960 size - offset, op, data); in gpt_read()
[all …]
/hyp/misc/gpt/tests/
host_tests.c
154 size_t offset) in trigger_gpt_value_add_offset_event() argument
158 gpt_tests_add_offset(type, value, offset); in trigger_gpt_value_add_offset_event()
