/kernel/lib/libc/
strtol.c
     44: strtol(const char *nptr, char **endptr, int base)  [in strtol() argument]
     68: if ((base == 0 || base == 16) &&  [in strtol()]
     72: base = 16;  [in strtol()]
     74: if (base == 0)  [in strtol()]
     75: base = c == '0' ? 8 : 10;  [in strtol()]
     95: cutlim = cutoff % base;  [in strtol()]
     96: cutoff /= base;  [in strtol()]
     99: cutlim -= base;  [in strtol()]
    111: if (c >= base)  [in strtol()]
    122: acc *= base;  [in strtol()]
    [all …]
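The cutoff/cutlim hits at lines 95-99 are the classic BSD overflow guard: divide the type's limit by the base once, before the accumulation loop, so each digit can be overflow-checked with a comparison instead of a wider multiply. A minimal sketch of the idea (not the kernel's exact code; sign parsing, endptr, and uppercase digits are omitted):

    #include <limits.h>

    long parse_digits(const char* s, int base, int neg) {
        // Largest magnitude representable for the requested sign.
        unsigned long max_mag = neg ? (unsigned long)-(LONG_MIN + 1) + 1
                                    : (unsigned long)LONG_MAX;
        unsigned long cutoff = max_mag / (unsigned long)base;
        int cutlim = (int)(max_mag % (unsigned long)base);

        unsigned long acc = 0;
        for (; *s != '\0'; s++) {
            int c = *s;
            int v = (c >= '0' && c <= '9') ? c - '0'
                  : (c >= 'a' && c <= 'z') ? c - 'a' + 10 : base;
            if (v >= base)  // mirrors the line-111 hit: reject non-digits
                break;
            // Overflow iff acc > cutoff, or acc == cutoff with a digit past
            // cutlim; saturate instead of wrapping.
            if (acc > cutoff || (acc == cutoff && v > cutlim))
                return neg ? LONG_MIN : LONG_MAX;
            acc = acc * (unsigned long)base + (unsigned long)v;
        }
        // Unsigned negate, then convert: well defined on two's complement.
        return neg ? (long)(0 - acc) : (long)acc;
    }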
strtoll.c
     45: strtoll(const char *nptr, char **endptr, int base)  [in strtoll() argument]
     69: if ((base == 0 || base == 16) &&  [in strtoll()]
     73: base = 16;  [in strtoll()]
     75: if (base == 0)  [in strtoll()]
     76: base = c == '0' ? 8 : 10;  [in strtoll()]
     97: cutlim = cutoff % base;  [in strtoll()]
     98: cutoff /= base;  [in strtoll()]
    101: cutlim -= base;  [in strtoll()]
    113: if (c >= base)  [in strtoll()]
    124: acc *= base;  [in strtoll()]
    [all …]
atoi.c
    105: unsigned long strtoul(const char *nptr, char **endptr, int base)  [in strtoul() argument]
    110: if (base < 0 || base == 1 || base > 36) {  [in strtoul()]
    126: if ((base == 0 || base == 16) && nptr[0] == '0' && nptr[1] == 'x') {  [in strtoul()]
    127: base = 16;  [in strtoul()]
    129: } else if (base == 0 && nptr[0] == '0') {  [in strtoul()]
    130: base = 8;  [in strtoul()]
    132: } else if (base == 0) {  [in strtoul()]
    133: base = 10;  [in strtoul()]
    149: if (v < 0 || v >= base) {  [in strtoul()]
    156: new_ret = ret * base;  [in strtoul()]
    [all …]
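The line-156 hit (`new_ret = ret * base;`) suggests the other common guard: multiply first, then detect wraparound after the fact. The check itself is elided above, so the following is an assumption about how that pattern is typically completed, not this file's actual code:

    #include <errno.h>

    // Returns 0 on success, ERANGE if appending digit v would wrap.
    // Precondition: base >= 2, per the line-110 validation.
    int accumulate_digit(unsigned long* ret, unsigned v, unsigned base) {
        unsigned long new_ret = *ret * base;
        if (new_ret / base != *ret)  // multiplication wrapped
            return ERANGE;
        if (new_ret + v < new_ret)   // addition wrapped
            return ERANGE;
        *ret = new_ret + v;
        return 0;
    }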
/kernel/platform/pc/
memory.cpp
     32: uint64_t base;  [member]
     47: uint64_t base;  [member]
     55: reserved_mmio_entries[reserved_mmio_count].base = base;  [in mark_mmio_region_to_reserve()]
     74: uint64_t base;  [member]
    122: uint64_t base = ROUNDUP(range->base, PAGE_SIZE);  [in mem_arena_init() local]
    145: arena.base = base;  [in mem_arena_init()]
    212: void* base;  [member]
    308: if (a1->base < a2->base)  [in addr_range_cmp()]
    310: else if (a1->base == a2->base)  [in addr_range_cmp()]
    402: entry->base = range.base;  [in pc_mem_init()]
    [all …]
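The line-122 hit rounds a raw memory range up to a page boundary before it becomes an arena. A small sketch of that alignment step, assuming ROUNDUP is the usual power-of-two round-up macro; the struct, the tail trim, and the failure handling here are illustrative, not the file's code:

    #include <stdint.h>

    #define PAGE_SIZE 4096ull
    #define ROUNDUP(a, b) (((a) + ((b) - 1)) & ~((b) - 1))

    struct Range { uint64_t base, size; };

    // Returns false if alignment consumes the whole range.
    bool AlignArena(const Range& raw, Range* out) {
        uint64_t base = ROUNDUP(raw.base, PAGE_SIZE);
        uint64_t lost = base - raw.base;  // bytes skipped at the front
        if (lost >= raw.size)
            return false;
        out->base = base;
        out->size = (raw.size - lost) & ~(PAGE_SIZE - 1);  // whole pages only
        return out->size != 0;
    }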
smbios.cpp
     37: *base = p;  [in FindEntryPoint()]
     41: *base = p;  [in FindEntryPoint()]
     51: *base = p;  [in FindEntryPoint()]
     56: *base = p;  [in FindEntryPoint()]
     67: paddr_t base = ep->struct_table_phys;  [in MapStructs2_1() local]
     68: paddr_t end = base + ep->struct_table_length;  [in MapStructs2_1()]
     69: const size_t subpage_offset = base & (PAGE_SIZE - 1);  [in MapStructs2_1()]
     70: base -= subpage_offset;  [in MapStructs2_1()]
     71: size_t len = ROUNDUP(end - base, PAGE_SIZE);  [in MapStructs2_1()]
     75: zx_status_t status = VmObjectPhysical::Create(base, len, &vmo);  [in MapStructs2_1()]
    [all …]
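Lines 67-75 map a physical table that need not start on a page boundary: remember the sub-page offset, align the base down, round the length up to whole pages, and add the offset back after mapping. A restatement of just that arithmetic (types and the page size are simplified):

    #include <stddef.h>
    #include <stdint.h>

    constexpr uint64_t kPageSize = 4096;

    struct PhysWindow {
        uint64_t aligned_base;  // page-aligned address for the physical VMO
        size_t map_len;         // whole pages covering the table
        size_t subpage_offset;  // add to the mapped VA to reach the table
    };

    PhysWindow ComputeWindow(uint64_t table_phys, size_t table_len) {
        PhysWindow w;
        w.subpage_offset = table_phys & (kPageSize - 1);
        w.aligned_base = table_phys - w.subpage_offset;
        uint64_t end = table_phys + table_len;
        w.map_len = (size_t)((end - w.aligned_base + kPageSize - 1) &
                             ~(kPageSize - 1));
        return w;
    }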
/kernel/vm/
pmm_arena.cpp
     39: auto status = boot_reserve_range_search(base(), size(), page_array_size, &range);  [in Init()]
     45: DEBUG_ASSERT(range.pa >= base() && range.len <= page_array_size);  [in Init()]
     57: size_t array_start_index = (PAGE_ALIGN(range.pa) - info_.base) / PAGE_SIZE;  [in Init()]
     71: p.paddr_priv = base() + i * PAGE_SIZE;  [in Init()]
     90: size_t index = (pa - base()) / PAGE_SIZE;  [in FindSpecific()]
    102: paddr_t rounded_base = ROUNDUP(base(), 1UL << alignment_log2);  [in FindFreeContiguous()]
    103: if (rounded_base < base() || rounded_base > base() + size() - 1) {  [in FindFreeContiguous()]
    107: paddr_t aligned_offset = (rounded_base - base()) / PAGE_SIZE;  [in FindFreeContiguous()]
    110: LTRACEF("arena base %#" PRIxPTR " size %zu\n", base(), size());  [in FindFreeContiguous()]
    179: base() + i * PAGE_SIZE);  [in Dump()]
    [all …]
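Lines 90 and 102-107 rely on the arena's pages forming one dense array: a physical address converts to an index by subtracting the arena base, and an aligned search starts at the base rounded up to the requested power of two, rejected if the rounding wraps or leaves the arena. A sketch of both calculations under simplified types:

    #include <stddef.h>
    #include <stdint.h>

    using paddr_t = uintptr_t;
    constexpr size_t kPageSize = 4096;

    size_t PageIndex(paddr_t pa, paddr_t arena_base) {
        return (pa - arena_base) / kPageSize;  // mirrors the line-90 hit
    }

    // Mirrors lines 102-103: round up, then check for wrap or escape.
    bool AlignedSearchStart(paddr_t base, size_t size, uint8_t alignment_log2,
                            paddr_t* out) {
        const paddr_t align = paddr_t(1) << alignment_log2;
        paddr_t rounded = (base + align - 1) & ~(align - 1);
        if (rounded < base || rounded > base + size - 1)
            return false;
        *out = rounded;
        return true;
    }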
vm_address_region.cpp
     650: } else if (base >= itr->base() + itr->size()) {  [in UpperBoundInternalLocked()]
     672: aspace_->vdso_code_mapping_->base() >= base &&  [in UnmapInternalLocked()]
     673: aspace_->vdso_code_mapping_->base() - base < size) {  [in UnmapInternalLocked()]
     687: itr->base() < base || itr_end > end_addr)) {  [in UnmapInternalLocked()]
     701: const vaddr_t unmap_base = fbl::max(curr->base(), base);  [in UnmapInternalLocked()]
     798: if (!begin.IsValid() || begin->base() + begin->size() <= base) {  [in Protect()]
     832: const vaddr_t protect_base = fbl::max(itr->base(), base);  [in Protect()]
    1028: if (sub_overflow(after_iter->base(), size, &base) ||  [in CompactRandomizedRegionAllocatorLocked()]
    1029: sub_overflow(base, PAGE_SIZE * gap_pages, &base)) {  [in CompactRandomizedRegionAllocatorLocked()]
    1040: if (add_overflow(before_iter->base(), before_iter->size(), &base) ||  [in CompactRandomizedRegionAllocatorLocked()]
    [all …]
vm_mapping.cpp
     80: if (!IS_PAGE_ALIGNED(base)) {  [in Protect()]
    140: if (base_ == base && size_ == size) {  [in ProtectLocked()]
    148: if (base_ == base) {  [in ProtectLocked()]
    168: if (base_ + size_ == base + size) {  [in ProtectLocked()]
    222: if (!IS_PAGE_ALIGNED(base)) {  [in Unmap()]
    243: if (base == base_ && size == size_) {  [in Unmap()]
    247: return UnmapLocked(base, size);  [in Unmap()]
    254: DEBUG_ASSERT(base >= base_ && base - base_ < size_);  [in UnmapLocked()]
    272: if (base_ == base || base + size == base_ + size_) {  [in UnmapLocked()]
    296: const vaddr_t new_base = base + size;  [in UnmapLocked()]
    [all …]
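The Unmap hits at lines 243, 272, and 296 are the standard four-way case split for removing a subrange from a mapping: the whole mapping goes away, it shrinks from the front, it shrinks from the back, or it splits in two. A compact restatement of the classification (types simplified; the real code also updates page tables and bookkeeping):

    #include <stddef.h>
    #include <stdint.h>

    using vaddr_t = uintptr_t;

    enum class UnmapKind { Whole, TrimHead, TrimTail, Split };

    UnmapKind Classify(vaddr_t base_, size_t size_, vaddr_t base, size_t size) {
        if (base == base_ && size == size_)
            return UnmapKind::Whole;     // line 243: destroy the mapping
        if (base == base_)
            return UnmapKind::TrimHead;  // mapping shrinks from the front
        if (base + size == base_ + size_)
            return UnmapKind::TrimTail;  // mapping shrinks from the back
        // Line 296: the remainder above the hole starts at base + size.
        return UnmapKind::Split;
    }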
pmm_arena.h
     30: paddr_t base() const { return info_.base; }  [in base() function]
     50: return (page->paddr() >= base() && page->paddr() < (base() + size()));  [in page_belongs_to_arena()]
     54: return (address >= info_.base && address <= info_.base + info_.size - 1);  [in address_in_arena()]
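Note the asymmetry between the two range checks above: page_belongs_to_arena() compares against base() + size(), while address_in_arena() compares against the inclusive last byte, which cannot wrap even when the arena ends at the very top of the physical address space. The safer idiom in isolation:

    #include <stddef.h>
    #include <stdint.h>

    using paddr_t = uintptr_t;

    bool AddressInRange(paddr_t addr, paddr_t base, size_t size) {
        // base + size may wrap to 0 for a range ending at the address-space
        // top; base + size - 1 is the last valid byte and never wraps for a
        // non-empty, well-formed range.
        return addr >= base && addr <= base + size - 1;
    }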
kstack.cpp
     72: unsafe ? "unsafe" : "safe", kstack_vmar->base());  [in allocate_vmar()]
     88: unsafe ? "unsafe" : "safe", kstack_mapping->base());  [in allocate_vmar()]
    106: DEBUG_ASSERT(stack->base == 0);  [in vm_allocate_kstack()]
    122: stack->base = mapping->base();  [in vm_allocate_kstack()]
    123: stack->top = mapping->base() + DEFAULT_STACK_SIZE;  [in vm_allocate_kstack()]
    135: stack->unsafe_base = mapping->base();  [in vm_allocate_kstack()]
    145: stack->base = 0;  [in vm_free_kstack()]
vm.cpp
     74: return vm_mapping->Protect(vm_mapping->base(), vm_mapping->size(), arch_mmu_flags);  [in ProtectRegion()]
    126: vaddr_t base;  [in vm_init() member]
    132: .base = (vaddr_t)__code_start,  [in vm_init()]
    138: .base = (vaddr_t)__rodata_start,  [in vm_init()]
    144: .base = (vaddr_t)__data_start,  [in vm_init()]
    150: .base = (vaddr_t)__bss_start,  [in vm_init()]
    158: ASSERT(IS_PAGE_ALIGNED(region->base));  [in vm_init()]
    161: region->base, region->base + region->size, region->arch_mmu_flags, region->name);  [in vm_init()]
    163: zx_status_t status = aspace->ReserveSpace(region->name, region->size, region->base);  [in vm_init()]
    165: status = ProtectRegion(aspace, region->base, region->arch_mmu_flags);  [in vm_init()]
vm_object_physical.cpp
     25: VmObjectPhysical::VmObjectPhysical(paddr_t base, uint64_t size)  [in VmObjectPhysical() argument]
     26: : size_(size), base_(base) {  [in VmObjectPhysical()]
     37: zx_status_t VmObjectPhysical::Create(paddr_t base, uint64_t size, fbl::RefPtr<VmObject>* obj) {  [in Create() argument]
     38: if (!IS_PAGE_ALIGNED(base) || !IS_PAGE_ALIGNED(size) || size == 0) {  [in Create()]
     44: if (add_overflow(base, size - 1, &safe_base)) {  [in Create()]
     49: auto vmo = fbl::AdoptRef<VmObject>(new (&ac) VmObjectPhysical(base, size));  [in Create()]
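Create() (lines 38-44) validates its range before constructing anything: base and size must both be page aligned, size non-zero, and the inclusive end must not wrap. A sketch using the compiler builtin, which I'm assuming behaves like the add_overflow() helper in the hit:

    #include <stddef.h>
    #include <stdint.h>

    using paddr_t = uintptr_t;
    constexpr size_t kPageSize = 4096;

    bool ValidatePhysRange(paddr_t base, uint64_t size) {
        if ((base & (kPageSize - 1)) != 0 || (size & (kPageSize - 1)) != 0 ||
            size == 0)
            return false;  // rejected as invalid args in the source
        paddr_t last_byte;
        if (__builtin_add_overflow(base, size - 1, &last_byte))
            return false;  // range wraps the address space
        return true;
    }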
vm_aspace.cpp
     82: return (vaddr >= aspace.base() && vaddr <= aspace.base() + aspace.size() - 1);  [in is_inside()]
     87: if (!is_inside(aspace, r.base())) {  [in is_inside()]
     96: if (r.base() + r.size() - 1 < r.base()) {  [in is_inside()]
    101: if (r.base() + r.size() - 1 > aspace.base() + aspace.size() - 1) {  [in is_inside()]
    115: size_t offset = vaddr - aspace.base();  [in trim_to_aspace()]
    139: DEBUG_ASSERT(base + size - 1 >= base);  [in VmAspace()]
    173: vaddr_t base;  [in Create() local]
    177: base = USER_ASPACE_BASE;  [in Create()]
    181: base = KERNEL_ASPACE_BASE;  [in Create()]
    185: base = 0;  [in Create()]
    [all …]
/kernel/object/include/object/
resource.h
     22: zx_status_t validate_ranged_resource(zx_handle_t handle, uint32_t kind, uint64_t base, size_t len);
     26: static inline zx_status_t validate_resource_ioport(zx_handle_t handle, uint64_t base, size_t len) {  [in validate_resource_ioport() argument]
     27: return validate_ranged_resource(handle, ZX_RSRC_KIND_IOPORT, base, len);  [in validate_resource_ioport()]
     32: static inline zx_status_t validate_resource_mmio(zx_handle_t handle, uint64_t base, size_t len) {  [in validate_resource_mmio() argument]
     33: return validate_ranged_resource(handle, ZX_RSRC_KIND_MMIO, base, len);  [in validate_resource_mmio()]
/kernel/arch/arm64/include/arch/
defines.h
     34: #define BM(base, count, val) (((val) & ((1UL << (count)) - 1)) << (base))  [argument]
     36: #define BM(base, count, val) (((val) & ((0x1 << (count)) - 1)) << (base))  [argument]
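The two BM() definitions differ only in the mask's type: `1UL` keeps the shift well defined for fields in 64-bit registers, while the `0x1` variant computes in int width. A usage sketch of the 64-bit form; the field positions and values here are illustrative, not from the kernel:

    #include <cstdint>
    #include <cstdio>

    #define BM(base, count, val) (((val) & ((1UL << (count)) - 1)) << (base))

    int main() {
        // Place the 4-bit field value 0xA at bit 8 and a 2-bit field value
        // 0x3 at bit 0 of a register image.
        uint64_t reg = BM(8, 4, 0xAUL) | BM(0, 2, 0x3UL);
        printf("%#llx\n", (unsigned long long)reg);  // prints 0xa03
        return 0;
    }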
/kernel/vm/include/vm/
vm_address_region.h
    105: vaddr_t base() const { return base_; }  [in base() function]
    123: vaddr_t GetKey() const { return base(); }  [in GetKey()]
    156: bool is_in_range(vaddr_t base, size_t size) const {  [in is_in_range() argument]
    157: const size_t offset = base - base_;  [in is_in_range()]
    229: virtual zx_status_t Unmap(vaddr_t base, size_t size);
    234: zx_status_t UnmapAllowPartial(vaddr_t base, size_t size);
    305: ChildList::iterator UpperBoundInternalLocked(vaddr_t base);
    319: bool IsRangeAvailableLocked(vaddr_t base, size_t size);
    379: zx_status_t Unmap(vaddr_t base, size_t size) override {  [in Unmap() argument]
    440: zx_status_t Unmap(vaddr_t base, size_t size);
    [all …]
/kernel/dev/iommu/intel/
device_context.cpp
     88: uint64_t base = 1ull << 20;  [in InitCommon() local]
     89: uint64_t size = aspace_size() - base;  [in InitCommon()]
    241: paddr_t base = region->base;  [in SecondLevelMapDiscontiguous() local]
    245: size_t allocated = base - region->base;  [in SecondLevelMapDiscontiguous()]
    268: base += chunk_size;  [in SecondLevelMapDiscontiguous()]
    275: *virt_paddr = region->base;  [in SecondLevelMapDiscontiguous()]
    332: *virt_paddr = region->base;  [in SecondLevelMapContiguous()]
    345: DEBUG_ASSERT(IS_PAGE_ALIGNED(base));  [in SecondLevelMapIdentity()]
    366: status = second_level_pt_.MapPagesContiguous(base, base, map_len, flags, &mapped);  [in SecondLevelMapIdentity()]
    399: if (region->base < virt_paddr || region->base + region->size > virt_paddr + size) {  [in SecondLevelUnmap()]
    [all …]
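Lines 241-268 walk a region in fixed-size chunks, tracking progress as the distance from the region base so a failure can report how much was already mapped. A generic sketch of that loop; map_chunk is a hypothetical stand-in for the page-table mapping call, not the driver's API:

    #include <stddef.h>
    #include <stdint.h>
    #include <functional>

    using paddr_t = uintptr_t;

    bool MapRegionChunked(paddr_t region_base, size_t region_size, size_t chunk,
                          const std::function<bool(paddr_t, size_t)>& map_chunk) {
        paddr_t base = region_base;
        while (base - region_base < region_size) {
            size_t allocated = base - region_base;  // progress, as at line 245
            size_t len = region_size - allocated;
            if (len > chunk)
                len = chunk;
            if (!map_chunk(base, len))
                return false;  // caller unmaps the already-mapped prefix
            base += chunk;     // line 268: advance by one chunk
        }
        return true;
    }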
/kernel/lib/libc/include/
stdlib.h
     26: long strtol(const char *nptr, char **endptr, int base);
     27: long long strtoll(const char *nptr, char **endptr, int base);
     40: void *bsearch(const void *key, const void *base, size_t num_elems, size_t size,
     42: unsigned long int strtoul(const char *nptr, char **endptr, int base);
/kernel/object/
resource_dispatcher.cpp
     40: uint64_t base,  [in Create() argument]
     70: if (base || size) {  [in Create()]
     75: status = rallocs[kind].GetRegion({.base = base, .size = size}, region_uptr);  [in Create()]
     95: if (Intersects(base, size, rsrc.get_base(), rsrc.get_size())) {  [in Create()]
     97: base, size, rsrc.get_base(), rsrc.get_size());  [in Create()]
    128: LTRACEF("%s [%u, %#lx, %zu] resource created.\n", kLogTag, kind, base, size);  [in Create()]
    133: uint64_t base,  [in ResourceDispatcher() argument]
    139: : kind_(kind), base_(base), size_(size), flags_(flags),  [in ResourceDispatcher()]
    181: uint64_t base,  [in InitializeAllocator() argument]
    211: status = rallocs[kind].AddRegion({.base = base, .size = size});  [in InitializeAllocator()]
    [all …]
resource.cpp
     52: uintptr_t base,  [in validate_ranged_resource() argument]
     79: …LTRACEF("req [base %#lx size %#lx] and resource [base %#lx size %#lx]\n", base, size, rbase, rsize…  [in validate_ranged_resource()]
     85: if (!GetIntersect(base, size, rbase, rsize, &ibase, &isize) ||  [in validate_ranged_resource()]
     87: ibase != base) {  [in validate_ranged_resource()]
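Lines 85-87 test containment by intersection: the request passes only if intersecting it with the resource's range gives back the request itself. GetIntersect below is a plausible reimplementation matching the name in the hit, not necessarily the kernel's helper, and overflow handling is omitted:

    #include <stddef.h>
    #include <stdint.h>

    static bool GetIntersect(uint64_t b1, size_t s1, uint64_t b2, size_t s2,
                             uint64_t* ibase, size_t* isize) {
        const uint64_t lo = (b1 > b2) ? b1 : b2;
        const uint64_t hi = (b1 + s1 < b2 + s2) ? b1 + s1 : b2 + s2;
        if (lo >= hi)
            return false;  // ranges are disjoint
        *ibase = lo;
        *isize = (size_t)(hi - lo);
        return true;
    }

    bool RequestInsideResource(uint64_t base, size_t size,
                               uint64_t rbase, size_t rsize) {
        uint64_t ibase;
        size_t isize;
        return GetIntersect(base, size, rbase, rsize, &ibase, &isize) &&
               ibase == base && isize == size;  // intersection == request
    }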
/kernel/dev/pcie/
pcie_bridge.cpp
    131: uint32_t base, limit;  [in ParseBusWindowsLocked() local]
    134: base = cfg_->Read(PciConfig::kIoBase);  [in ParseBusWindowsLocked()]
    137: supports_32bit_pio_ = ((base & 0xF) == 0x1) && ((base & 0xF) == (limit & 0xF));  [in ParseBusWindowsLocked()]
    138: io_base_ = (base & ~0xF) << 8;  [in ParseBusWindowsLocked()]
    152: base = cfg_->Read(PciConfig::kPrefetchableMemoryBase);  [in ParseBusWindowsLocked()]
    155: bool supports_64bit_pf_mem = ((base & 0xF) == 0x1) && ((base & 0xF) == (limit & 0xF));  [in ParseBusWindowsLocked()]
    156: pf_mem_base_ = (base & ~0xF) << 16;  [in ParseBusWindowsLocked()]
    222: ret = upstream->pio_regions().GetRegion({ .base = io_base_, .size = size }, pio_window_);  [in AllocateBridgeWindowsLocked()]
    235: ret = upstream->mmio_lo_regions().GetRegion({ .base = mem_base_, .size = size },  [in AllocateBridgeWindowsLocked()]
    252: ret = upstream->pf_mmio_regions().GetRegion({ .base = pf_mem_base_, .size = size },  [in AllocateBridgeWindowsLocked()]
    [all …]
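Lines 134-138 decode the bridge's I/O window per the PCI-to-PCI bridge spec: the low nibble of the I/O Base/Limit registers carries the addressing capability (0x1 means 32-bit I/O), and the high nibble carries address bits [15:12]. A sketch of the decode; the limit expression below follows the spec's 4 KiB window granularity and is my assumption, since the source's own limit line is elided above:

    #include <stdint.h>

    struct IoWindow {
        bool supports_32bit_pio;
        uint32_t base;   // inclusive start of the forwarded window
        uint32_t limit;  // inclusive end of the forwarded window
    };

    IoWindow DecodeIoWindow(uint32_t base_reg, uint32_t limit_reg) {
        IoWindow w;
        // Capability nibbles must agree between Base and Limit (line 137).
        w.supports_32bit_pio = ((base_reg & 0xF) == 0x1) &&
                               ((base_reg & 0xF) == (limit_reg & 0xF));
        w.base = (base_reg & ~0xFu) << 8;               // bits [15:12], line 138
        w.limit = ((limit_reg & ~0xFu) << 8) | 0xFFFu;  // 4 KiB granularity
        return w;
    }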
/kernel/arch/x86/include/arch/x86/
descriptor.h
     62: void set_global_desc_64(seg_sel_t sel, uint64_t base, uint32_t limit,
     72: static inline void gdt_load(uintptr_t base) {  [in gdt_load() argument]
     82: struct gdtr gdtr = { .limit = 0xffff, .address = base };  [in gdt_load()]
/kernel/arch/x86/
descriptor.cpp
     80: void set_global_desc_64(seg_sel_t sel, uint64_t base, uint32_t limit,  [in set_global_desc_64() argument]
    111: entry.base_15_0 = base & 0x0000ffff;  [in set_global_desc_64()]
    112: entry.base_23_16 = (base & 0x00ff0000) >> 16;  [in set_global_desc_64()]
    113: entry.base_31_24 = (base & 0xff000000) >> 24;  [in set_global_desc_64()]
    114: entry.base_63_32 = (uint32_t)(base >> 32);  [in set_global_desc_64()]
    167: memcpy((void*)mapping->base(), _temp_gdt, gdt_real_size);  [in gdt_setup()]
    168: gdt = mapping->base();  [in gdt_setup()]
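Lines 111-114 show why x86 GDT code is awkward: a segment base is scattered across three legacy byte/word fields plus an upper dword used by 16-byte system descriptors in long mode. A standalone restatement of the split, with a simplified struct in place of the real descriptor layout:

    #include <stdint.h>

    struct GdtBaseFields {
        uint16_t base_15_0;
        uint8_t  base_23_16;
        uint8_t  base_31_24;
        uint32_t base_63_32;  // only present in 16-byte system descriptors
    };

    GdtBaseFields SplitGdtBase(uint64_t base) {
        GdtBaseFields f;
        f.base_15_0  = (uint16_t)(base & 0x0000ffff);
        f.base_23_16 = (uint8_t)((base & 0x00ff0000) >> 16);
        f.base_31_24 = (uint8_t)((base & 0xff000000) >> 24);
        f.base_63_32 = (uint32_t)(base >> 32);
        return f;
    }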
/kernel/lib/vdso/
rodso.cpp
     78: vmar->vmar()->base() + vmar_offset, len, status);  [in MapSegment()]
     80: DEBUG_ASSERT(mapping->base() == vmar->vmar()->base() + vmar_offset);  [in MapSegment()]
     83: mapping->base(), mapping->base() + len);  [in MapSegment()]
/kernel/lib/unittest/include/lib/unittest/
user_memory.h
     30: void* out() { return reinterpret_cast<void*>(mapping_->base()); }  [in out()]
     31: const void* in() { return reinterpret_cast<void*>(mapping_->base()); }  [in in()]