| /kernel/lib/libc/string/arch/x86/ |
| selector.cpp |
  33  intptr_t offset;  in x86_memcpy_select() local
  35  offset = reinterpret_cast<intptr_t>(memcpy_erms) - jmp_from_address;  in x86_memcpy_select()
  37  offset = reinterpret_cast<intptr_t>(memcpy_quad) - jmp_from_address;  in x86_memcpy_select()
  39  DEBUG_ASSERT(offset >= -128 && offset <= 127);  in x86_memcpy_select()
  41  patch->dest_addr[1] = static_cast<uint8_t>(offset);  in x86_memcpy_select()
  55  intptr_t offset;  in x86_memset_select() local
  57  offset = reinterpret_cast<intptr_t>(memset_erms) - jmp_from_address;  in x86_memset_select()
  59  offset = reinterpret_cast<intptr_t>(memset_quad) - jmp_from_address;  in x86_memset_select()
  61  DEBUG_ASSERT(offset >= -128 && offset <= 127);  in x86_memset_select()
  63  patch->dest_addr[1] = static_cast<uint8_t>(offset);  in x86_memset_select()
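These matches suggest the selector chooses between memcpy_erms/memset_erms and memcpy_quad/memset_quad at boot and hot-patches a short jump to the winner. Below is a minimal sketch of the displacement math under that reading; the helper, its parameters, and the opcode-byte write are assumptions (the matches above only rewrite the displacement byte).

    #include <cassert>
    #include <cstdint>

    // Retarget a 2-byte x86 short jump (opcode 0xEB + signed 8-bit displacement).
    void patch_short_jmp(uint8_t dest_addr[2], uintptr_t jmp_from_address, uintptr_t target) {
        // The displacement is measured from the address the selector calls
        // jmp_from_address (conventionally the byte after the jump instruction).
        intptr_t offset = static_cast<intptr_t>(target) -
                          static_cast<intptr_t>(jmp_from_address);
        assert(offset >= -128 && offset <= 127);  // must fit in a signed byte
        dest_addr[0] = 0xEB;                      // short jmp opcode (assumed here)
        dest_addr[1] = static_cast<uint8_t>(offset);
    }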
|
| tests.cpp |
  121  for (size_t offset = 0; offset < 8; ++offset) {  in memset_func_test() local
  126  set(dst + offset, static_cast<int>(kBufLen - offset), kBufLen - offset);  in memset_func_test()
  127  for (size_t i = 0; i < offset; ++i) {  in memset_func_test()
  130  for (size_t i = offset; i < kBufLen; ++i) {  in memset_func_test()
  131  ASSERT_EQ(static_cast<char>(kBufLen - offset), dst[i], "buffer mismatch");  in memset_func_test()
|
| /kernel/vm/ |
| vm_page_list.cpp |
  46   VmPageListNode::VmPageListNode(uint64_t offset)  in VmPageListNode() argument
  47   : obj_offset_(offset) {  in VmPageListNode()
  244  VmPageSpliceList res(offset, length);  in TakePages()
  245  const uint64_t end = offset + length;  in TakePages()
  249  while (offset_to_node_index(offset) != 0 && offset < end) {  in TakePages()
  251  if (RemovePage(offset, &page)) {  in TakePages()
  254  offset += PAGE_SIZE;  in TakePages()
  268  while (offset < end) {  in TakePages()
  270  if (RemovePage(offset, &page)) {  in TakePages()
  273  offset += PAGE_SIZE;  in TakePages()
  [all …]
|
| vm_object_paged.cpp |
  359  if (off >= offset && off < offset + new_len) {  in AllocatedPagesInRange()
  381  if (offset >= size_) {  in AddPageLocked()
  408  if (offset >= size_) {  in GetPageLocked()
  583  DEBUG_ASSERT(end > offset);  in CommitRange()
  584  offset = ROUNDDOWN(offset, PAGE_SIZE);  in CommitRange()
  616  RangeChangeUpdateLocked(offset, end - offset);  in CommitRange()
  664  DEBUG_ASSERT(end > offset);  in DecommitRange()
  690  return PinLocked(offset, len);  in Pin()
  739  UnpinLocked(offset, len);  in Unpin()
  874  parent_offset_ = offset;  in SetParentOffsetLocked()
  [all …]
|
| pinned_vm_object.cpp |
  17  zx_status_t PinnedVmObject::Create(fbl::RefPtr<VmObject> vmo, size_t offset, size_t size,  in Create() argument
  20  DEBUG_ASSERT(IS_PAGE_ALIGNED(offset) && IS_PAGE_ALIGNED(size));  in Create()
  24  zx_status_t status = vmo->CommitRange(offset, size);  in Create()
  29  status = vmo->Pin(offset, size);  in Create()
  37  out_pinned_vmo->offset_ = offset;  in Create()
|
| vm_object_physical.cpp |
  73   zx_status_t VmObjectPhysical::GetPageLocked(uint64_t offset, uint pf_flags, list_node* free_list,  in GetPageLocked() argument
  81   if (offset >= size_) {  in GetPageLocked()
  85   uint64_t pa = base_ + ROUNDDOWN(offset, PAGE_SIZE);  in GetPageLocked()
  95   zx_status_t VmObjectPhysical::Lookup(uint64_t offset, uint64_t len,  in Lookup() argument
  104  if (unlikely(!InRange(offset, len, size_))) {  in Lookup()
  108  uint64_t cur_offset = ROUNDDOWN(offset, PAGE_SIZE);  in Lookup()
  109  uint64_t end = offset + len;  in Lookup()
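For a physically contiguous VMO, resolving a byte offset to its backing page is plain arithmetic, as the GetPageLocked() match shows. A minimal sketch of that math, with ROUNDDOWN and the page size defined locally; the function name and the 0 sentinel are assumptions.

    #include <cstdint>

    constexpr uint64_t kPageSize = 4096;

    constexpr uint64_t round_down(uint64_t x, uint64_t align) {
        return x & ~(align - 1);  // align must be a power of two
    }

    // Physical address of the page backing byte `offset` of a contiguous region
    // starting at `base` and spanning `size` bytes; 0 if the offset is out of range.
    uint64_t physical_page_for(uint64_t base, uint64_t offset, uint64_t size) {
        if (offset >= size) {
            return 0;
        }
        return base + round_down(offset, kPageSize);
    }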
|
| /kernel/vm/include/vm/ |
| vm_object_paged.h |
  64   zx_status_t CommitRange(uint64_t offset, uint64_t len) override;
  65   zx_status_t DecommitRange(uint64_t offset, uint64_t len) override;
  67   zx_status_t Pin(uint64_t offset, uint64_t len) override;
  68   void Unpin(uint64_t offset, uint64_t len) override;
  70   zx_status_t Read(void* ptr, uint64_t offset, size_t len) override;
  72   zx_status_t Lookup(uint64_t offset, uint64_t len,
  126  zx_status_t AddPage(vm_page_t* p, uint64_t offset);
  127  zx_status_t AddPageLocked(vm_page_t* p, uint64_t offset) TA_REQ(lock_);
  132  zx_status_t PinLocked(uint64_t offset, uint64_t len) TA_REQ(lock_);
  133  void UnpinLocked(uint64_t offset, uint64_t len) TA_REQ(lock_);
  [all …]
|
| vm_object.h |
  63   virtual size_t AllocatedPagesInRange(uint64_t offset, uint64_t len) const {  in AllocatedPagesInRange() argument
  72   virtual zx_status_t CommitRange(uint64_t offset, uint64_t len) {  in CommitRange() argument
  77   virtual zx_status_t DecommitRange(uint64_t offset, uint64_t len) {  in DecommitRange() argument
  83   virtual zx_status_t Pin(uint64_t offset, uint64_t len) {  in Pin() argument
  90   virtual void Unpin(uint64_t offset, uint64_t len) {  in Unpin() argument
  95   virtual zx_status_t Read(void* ptr, uint64_t offset, size_t len) {  in Read() argument
  98   virtual zx_status_t Write(const void* ptr, uint64_t offset, size_t len) {  in Write() argument
  103  virtual zx_status_t Lookup(uint64_t offset, uint64_t len,  in Lookup() argument
  150  virtual zx_status_t SyncCache(const uint64_t offset, const uint64_t len) {  in SyncCache() argument
  174  zx_status_t GetPage(uint64_t offset, uint pf_flags, list_node* free_list,  in GetPage() argument
  [all …]
|
| vm_page_list.h |
  21   explicit VmPageListNode(uint64_t offset);
  29   uint64_t offset() const { return obj_offset_; }  in offset() function
  122  VmPageSpliceList(uint64_t offset, uint64_t length);
  147  zx_status_t status = pl.ForEveryPage(per_page_func, pl.offset(),  in ForEveryPage()
  148  pl.offset() + pl.kPageFanOut * PAGE_SIZE);  in ForEveryPage()
  163  zx_status_t status = pl.ForEveryPage(per_page_func, pl.offset(),  in ForEveryPage()
  164  pl.offset() + pl.kPageFanOut * PAGE_SIZE);  in ForEveryPage()
  221  zx_status_t AddPage(vm_page*, uint64_t offset);
  222  vm_page* GetPage(uint64_t offset);
  225  bool RemovePage(uint64_t offset, vm_page** page);
  [all …]
|
| pinned_vm_object.h |
  14  static zx_status_t Create(fbl::RefPtr<VmObject> vmo, size_t offset, size_t size,
  23  size_t offset() const { return offset_; }  in offset() function
|
| /kernel/include/kernel/ |
| range_check.h |
  17  static inline bool InRange(O offset, L len, O trim_to_len) {  in InRange() argument
  22  if (offset + len < offset) {  in InRange()
  27  if (offset > trim_to_len) {  in InRange()
  32  if (offset + len > trim_to_len) {  in InRange()
  45  static inline bool TrimRange(O offset, L len, O trim_to_len, L* len_out) {  in TrimRange() argument
  53  if (offset + len < offset) {  in TrimRange()
  58  if (offset > trim_to_len) {  in TrimRange()
  63  if (offset + len > trim_to_len) {  in TrimRange()
  64  *len_out = static_cast<L>(trim_to_len - offset);  in TrimRange()
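The "offset + len < offset" test above is the overflow guard: on unsigned types the sum wraps, so a wrapped sum compares less than the original offset and the range is rejected before the bounds checks. A minimal self-contained sketch of the same check (the helper name and the driver in main() are hypothetical):

    #include <cstdint>
    #include <cstdio>

    template <typename O, typename L>
    static inline bool InRangeSketch(O offset, L len, O limit) {
        if (offset + len < offset) return false;   // unsigned wrap-around
        if (offset > limit) return false;          // starts past the end
        if (offset + len > limit) return false;    // runs past the end
        return true;
    }

    int main() {
        printf("%d\n", InRangeSketch<uint64_t, uint64_t>(4096, 8192, 1u << 20));  // 1: fits
        printf("%d\n", InRangeSketch<uint64_t, uint64_t>(~0ull, 16, 1u << 20));   // 0: sum wraps
    }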
|
| /kernel/dev/hw_rng/ |
| debug.cpp |
  34  size_t offset = 0;  in cmd_rng() local
  36  while (offset < argv[1].u) {  in cmd_rng()
  40  todo = fbl::min(sizeof(bytes), argv[1].u - offset);  in cmd_rng()
  44  hexdump8_ex(bytes, done, offset);  in cmd_rng()
  45  offset += done;  in cmd_rng()
  49  offset, offset == 1 ? "" : "s");  in cmd_rng()
|
| /kernel/platform/pc/ |
| pcie_quirks.cpp |
  46   uint16_t offset;  in pcie_tolud_quirk() member
  49   {.match = 0x808629c0, .mask = 0xFFFFFFFF, .offset = 0x0},  in pcie_tolud_quirk()
  51   {.match = 0x80861237, .mask = 0xFFFFFFFF, .offset = 0x0},  in pcie_tolud_quirk()
  53   {.match = 0x80860100, .mask = 0xFFFFFF00, .offset = 0xBC},  in pcie_tolud_quirk()
  55   {.match = 0x80861900, .mask = 0xFFFFFF00, .offset = 0xBC},  in pcie_tolud_quirk()
  64   {.match = 0x80865900, .mask = 0xFFFFFF00, .offset = 0xBC},  in pcie_tolud_quirk()
  115  uint16_t offset = TOLUD_CHIPSET_LUT[i].offset;  in pcie_tolud_quirk() local
  116  if (offset) {  in pcie_tolud_quirk()
  118  auto tolud_reg = PciReg32(offset);  in pcie_tolud_quirk()
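The quirk's lookup table maps masked vendor:device IDs to the config-space offset of the chipset's TOLUD (top of low usable DRAM) register, with offset 0 meaning "no quirk". A hedged sketch of the lookup pattern; the entries are copied from the matches above, while the struct name and lookup helper are hypothetical:

    #include <cstdint>

    struct ToludEntry {
        uint32_t match;   // vendor:device ID after masking
        uint32_t mask;
        uint16_t offset;  // TOLUD register offset, 0 = no quirk needed
    };

    constexpr ToludEntry kToludLut[] = {
        {0x808629c0, 0xFFFFFFFF, 0x0},
        {0x80861237, 0xFFFFFFFF, 0x0},
        {0x80860100, 0xFFFFFF00, 0xBC},
        {0x80861900, 0xFFFFFF00, 0xBC},
        {0x80865900, 0xFFFFFF00, 0xBC},
    };

    // Returns the TOLUD register offset for a vendor:device ID, or 0 if unknown.
    uint16_t tolud_offset_for(uint32_t vid_did) {
        for (const auto& e : kToludLut) {
            if ((vid_did & e.mask) == e.match) {
                return e.offset;
            }
        }
        return 0;
    }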
|
| /kernel/dev/iommu/dummy/ |
| dummy_iommu.cpp |
  43   uint64_t offset, size_t size, uint32_t perms,  in Map() argument
  48   if (!IS_PAGE_ALIGNED(offset) || size == 0) {  in Map()
  57   if (offset + size < offset || offset + size > vmo->size()) {  in Map()
  61   auto lookup_fn = [](void* ctx, size_t offset, size_t index, paddr_t pa) {  in Map() argument
  68   zx_status_t status = vmo->Lookup(offset, fbl::min<size_t>(PAGE_SIZE, size), lookup_fn, &paddr);  in Map()
  87   uint64_t offset, size_t size, uint32_t perms,  in MapContiguous() argument
  92   if (!IS_PAGE_ALIGNED(offset) || size == 0) {  in MapContiguous()
  102  if (add_overflow(offset, size, &end) || end > vmo->size()) {  in MapContiguous()
  110  auto lookup_fn = [](void* ctx, size_t offset, size_t index, paddr_t pa) {  in MapContiguous() argument
  117  zx_status_t status = vmo->Lookup(offset, PAGE_SIZE, lookup_fn, &paddr);  in MapContiguous()
|
| /kernel/object/ |
| vm_object_dispatcher.cpp |
  70   uint64_t offset) {  in Read() argument
  73   return vmo_->ReadUser(user_data, offset, length);  in Read()
  78   uint64_t offset) {  in Write() argument
  81   return vmo_->WriteUser(user_data, offset, length);  in Write()
  143  auto status = vmo_->CommitRange(offset, size);  in RangeOp()
  151  auto status = vmo_->DecommitRange(offset, size);  in RangeOp()
  163  return vmo_->SyncCache(offset, size);  in RangeOp()
  171  return vmo_->InvalidateCache(offset, size);  in RangeOp()
  176  return vmo_->CleanCache(offset, size);  in RangeOp()
  181  return vmo_->CleanInvalidateCache(offset, size);  in RangeOp()
  [all …]
|
| /kernel/arch/arm64/include/arch/arm64/ |
| mp.h |
  60  static inline uint32_t arm64_read_percpu_u32(size_t offset) {  in arm64_read_percpu_u32() argument
  68  : [offset] "Ir"(offset));  in arm64_read_percpu_u32()
  72  static inline void arm64_write_percpu_u32(size_t offset, uint32_t val) {  in arm64_write_percpu_u32() argument
  74  ::[val] "r"(val), [offset] "Ir"(offset)  in arm64_write_percpu_u32()
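The accessors above take a byte offset into the per-CPU structure and read or write it with inline assembly relative to the per-CPU base held in a dedicated system register. A simplified, assembly-free model of that "base pointer + byte offset" access pattern; all names here are hypothetical and the single static slot stands in for the real per-CPU base:

    #include <cstddef>
    #include <cstdint>

    struct PercpuSketch {
        uint32_t cpu_num;
        uint32_t counter;
    };

    // Stand-in for the architectural per-CPU base register.
    static PercpuSketch g_percpu[1];
    static inline uint8_t* percpu_base() { return reinterpret_cast<uint8_t*>(&g_percpu[0]); }

    static inline uint32_t read_percpu_u32(size_t offset) {
        return *reinterpret_cast<uint32_t*>(percpu_base() + offset);
    }

    static inline void write_percpu_u32(size_t offset, uint32_t val) {
        *reinterpret_cast<uint32_t*>(percpu_base() + offset) = val;
    }

    // Usage: read_percpu_u32(offsetof(PercpuSketch, counter));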
|
| /kernel/dev/pcie/include/dev/ |
| pci_config.h |
  18  constexpr explicit PciReg8(uint16_t offset)  in PciReg8() argument
  19  : offset_(offset) {}  in PciReg8()
  22  constexpr uint16_t offset() const { return offset_; }  in offset() function
  30  constexpr explicit PciReg16(uint16_t offset)  in PciReg16() argument
  31  : offset_(offset) {};  in PciReg16()
  34  constexpr uint16_t offset() const { return offset_; }  in offset() function
  42  constexpr explicit PciReg32(uint16_t offset)  in PciReg32() argument
  43  : offset_(offset) {};  in PciReg32()
  46  constexpr uint16_t offset() const { return offset_; }  in offset() function
  86  …static constexpr uint8_t kStdCfgEnd = static_cast<uint8_t>(kMaxLatency.offset() + sizeof(uint8_t));
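The PciReg8/16/32 wrappers make a config-space offset a typed constant, so register definitions and derived constants such as kStdCfgEnd come out of constexpr arithmetic, and accesses compute base + offset as seen in pci_config.cpp below. A small sketch of that usage, reduced to the 32-bit case; the kVendorDeviceId/kBar0 constants use the standard PCI layout (0x00 and 0x10), and read32 is a hypothetical helper:

    #include <cstdint>

    class PciReg32 {
    public:
        constexpr explicit PciReg32(uint16_t offset) : offset_(offset) {}
        constexpr uint16_t offset() const { return offset_; }
    private:
        uint16_t offset_;
    };

    // Standard PCI configuration registers expressed as typed offsets.
    constexpr PciReg32 kVendorDeviceId(0x00);
    constexpr PciReg32 kBar0(0x10);

    // MMIO-style read at base + register offset (hypothetical helper).
    uint32_t read32(const volatile uint8_t* base, PciReg32 reg) {
        return *reinterpret_cast<const volatile uint32_t*>(base + reg.offset());
    }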
|
| /kernel/lib/pci/ |
| pio.cpp |
  50  uint8_t offset, uint32_t* val, size_t width) {  in PioCfgRead() argument
  51  return PioCfgRead(PciBdfRawAddr(bus, dev, func, offset), val, width);  in PioCfgRead()
  76  uint8_t offset, uint32_t val, size_t width) {  in PioCfgWrite() argument
  77  return PioCfgWrite(PciBdfRawAddr(bus, dev, func, offset), val, width);  in PioCfgWrite()
  85  zx_status_t PioCfgRead(uint8_t bus, uint8_t dev, uint8_t func, uint8_t offset,
  94  zx_status_t PioCfgWrite(uint8_t bus, uint8_t dev, uint8_t func, uint8_t offset,
|
| /kernel/arch/x86/ |
| idt.cpp |
  39   static inline void idt_set_offset(struct idt_entry* entry, uintptr_t offset) {  in idt_set_offset() argument
  40   uint32_t low_16 = offset & 0xffff;  in idt_set_offset()
  41   uint32_t mid_16 = (offset >> 16) & 0xffff;  in idt_set_offset()
  44   uint32_t high_32 = (uint32_t)(offset >> 32);  in idt_set_offset()
  100  uintptr_t offset = _isr_table[i] + clac_shift;  in idt_setup() local
  110  idt_set_vector(idt, (uint8_t)i, sel, offset, dpl, typ);  in idt_setup()
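An x86-64 interrupt gate stores the handler address in three pieces: bits 0-15, 16-31, and 32-63. A minimal sketch of the bit slicing that idt_set_offset() performs; the struct layout here is simplified and hypothetical, only the slicing mirrors the matches above:

    #include <cstdint>

    struct IdtEntrySketch {
        uint16_t offset_low;   // handler address bits 0-15
        uint16_t offset_mid;   // handler address bits 16-31
        uint32_t offset_high;  // handler address bits 32-63
    };

    void set_offset(IdtEntrySketch* entry, uintptr_t offset) {
        entry->offset_low  = static_cast<uint16_t>(offset & 0xffff);
        entry->offset_mid  = static_cast<uint16_t>((offset >> 16) & 0xffff);
        entry->offset_high = static_cast<uint32_t>(offset >> 32);
    }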
|
| /kernel/arch/x86/hypervisor/ |
| pvclock.cpp |
  149  auto offset = guest_ptr.as<PvClockOffset>();  in pvclock_populate_offset() local
  150  ZX_DEBUG_ASSERT(offset != nullptr);  in pvclock_populate_offset()
  151  memset(offset, 0, sizeof(*offset));  in pvclock_populate_offset()
  154  offset->sec = time / ZX_SEC(1);  in pvclock_populate_offset()
  155  offset->nsec = time % ZX_SEC(1);  in pvclock_populate_offset()
  156  offset->tsc = tsc;  in pvclock_populate_offset()
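Here "offset" is the guest-visible pvclock offset record: a nanosecond wall-clock reading split into whole seconds and leftover nanoseconds, paired with the TSC value it corresponds to. A hedged sketch of that arithmetic; PvClockOffsetSketch and kNsPerSec stand in for the kernel's PvClockOffset and ZX_SEC(1):

    #include <cstdint>

    constexpr uint64_t kNsPerSec = 1'000'000'000;

    struct PvClockOffsetSketch {
        uint64_t sec;
        uint64_t nsec;
        uint64_t tsc;
    };

    void populate_offset(PvClockOffsetSketch* offset, uint64_t time_ns, uint64_t tsc) {
        offset->sec  = time_ns / kNsPerSec;  // whole seconds
        offset->nsec = time_ns % kNsPerSec;  // remaining nanoseconds
        offset->tsc  = tsc;                  // TSC reading the pair corresponds to
    }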
|
| /kernel/syscalls/ |
| vmo.cpp |
  75   uint64_t offset, size_t len) {  in sys_vmo_read() argument
  77   handle, _data.get(), offset, len);  in sys_vmo_read()
  107  return vmo->Read(_data, len, offset);  in sys_vmo_read()
  112  uint64_t offset, size_t len) {  in sys_vmo_write() argument
  114  handle, _data.get(), offset, len);  in sys_vmo_write()
  144  return vmo->Write(_data, len, offset);  in sys_vmo_write()
  190  zx_status_t sys_vmo_op_range(zx_handle_t handle, uint32_t op, uint64_t offset, uint64_t size,  in sys_vmo_op_range() argument
  194  handle, op, offset, size, _buffer.get(), buffer_size);  in sys_vmo_op_range()
  207  return vmo->RangeOp(op, offset, size, _buffer, buffer_size, rights);  in sys_vmo_op_range()
  232  uint64_t offset, uint64_t size,  in sys_vmo_clone() argument
  [all …]
|
| /kernel/dev/pcie/ |
| pci_config.cpp |
  82   zx_status_t status = Pci::PioCfgRead(static_cast<uint32_t>(base_ + addr.offset()), &val, 8u);  in Read()
  88   zx_status_t status = Pci::PioCfgRead(static_cast<uint32_t>(base_ + addr.offset()), &val, 16u);  in Read()
  94   zx_status_t status = Pci::PioCfgRead(static_cast<uint32_t>(base_ + addr.offset()), &val, 32u);  in Read()
  99   zx_status_t status = Pci::PioCfgWrite(static_cast<uint32_t>(base_ + addr.offset()), val, 8u);  in Write()
  103  zx_status_t status = Pci::PioCfgWrite(static_cast<uint32_t>(base_ + addr.offset()), val, 16u);  in Write()
  128  auto reg = reinterpret_cast<const volatile uint8_t*>(base_ + addr.offset());  in Read()
  133  auto reg = reinterpret_cast<const volatile uint16_t*>(base_ + addr.offset());  in Read()
  138  auto reg = reinterpret_cast<const volatile uint32_t*>(base_ + addr.offset());  in Read()
  143  auto reg = reinterpret_cast<volatile uint8_t*>(base_ + addr.offset());  in Write()
  148  auto reg = reinterpret_cast<volatile uint16_t*>(base_ + addr.offset());  in Write()
  [all …]
|
| /kernel/object/include/object/ |
| vm_object_dispatcher.h |
  39  uint64_t offset);
  41  uint64_t offset);
  44  zx_status_t RangeOp(uint32_t op, uint64_t offset, uint64_t size, user_inout_ptr<void> buffer,
  47  uint32_t options, uint64_t offset, uint64_t size, bool copy_name,
|
| /kernel/arch/arm64/ |
| periphmap.cpp |
  60  uint64_t offset = paddr - range.base_phys;  in periph_paddr_to_vaddr() local
  61  if (offset < range.length) {  in periph_paddr_to_vaddr()
  62  return range.base_virt + offset;  in periph_paddr_to_vaddr()
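The translation here is the usual offset-within-a-mapped-range calculation: the physical address's offset into a pre-mapped peripheral range is added to that range's virtual base. A minimal sketch, assuming a hypothetical range type and a 0 sentinel for "not covered":

    #include <cstdint>

    struct PeriphRangeSketch {
        uint64_t base_phys;
        uint64_t base_virt;
        uint64_t length;
    };

    uint64_t paddr_to_vaddr(const PeriphRangeSketch& range, uint64_t paddr) {
        if (paddr < range.base_phys) {
            return 0;  // below this range
        }
        uint64_t offset = paddr - range.base_phys;
        if (offset < range.length) {
            return range.base_virt + offset;
        }
        return 0;      // past the end of this range
    }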
|
| /kernel/lib/debuglog/ |
| debuglog.cpp |
  149  size_t offset = (log->head & DLOG_MASK);  in dlog_write() local
  151  size_t fifospace = DLOG_SIZE - offset;  in dlog_write()
  155  memcpy(log->data + offset, &hdr, sizeof(hdr));  in dlog_write()
  159  memcpy(log->data + offset, &hdr, fifospace);  in dlog_write()
  164  memcpy(log->data + offset, &hdr, sizeof(hdr));  in dlog_write()
  165  offset += sizeof(hdr);  in dlog_write()
  167  memcpy(log->data + offset, ptr, fifospace);  in dlog_write()
  227  size_t offset = (rtail & DLOG_MASK);  in dlog_read()  local
  231  size_t fifospace = DLOG_SIZE - offset;  in dlog_read()
  234  memcpy(ptr, log->data + offset, actual);  in dlog_read()
  [all …]
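The write offset is the head index masked into a power-of-two ring, and a record that does not fit before the end of the buffer is copied in two pieces. A hedged, simplified sketch of that wrap handling (the real dlog_write() also splits the record header separately); the constants and log structure below are stand-ins for DLOG_SIZE, DLOG_MASK, and the kernel's dlog state:

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    constexpr size_t kLogSize = 4096;         // must be a power of two
    constexpr size_t kLogMask = kLogSize - 1;

    struct LogSketch {
        uint64_t head = 0;                    // total bytes ever written
        uint8_t data[kLogSize];
    };

    void ring_write(LogSketch* log, const void* ptr, size_t len) {
        size_t offset = static_cast<size_t>(log->head) & kLogMask;
        size_t fifospace = kLogSize - offset;  // bytes before the wrap point
        if (len <= fifospace) {
            memcpy(log->data + offset, ptr, len);
        } else {
            memcpy(log->data + offset, ptr, fifospace);
            memcpy(log->data, static_cast<const uint8_t*>(ptr) + fifospace, len - fifospace);
        }
        log->head += len;
    }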
|