Lines Matching refs:offset (every line that references the offset symbol in VmObjectPaged)
266 zx_status_t VmObjectPaged::CloneCOW(bool resizable, uint64_t offset, uint64_t size, in CloneCOW() argument
268 LTRACEF("vmo %p offset %#" PRIx64 " size %#" PRIx64 "\n", this, offset, size); in CloneCOW()
300 status = vmo->SetParentOffsetLocked(offset); in CloneCOW()
336 auto f = [depth](const auto p, uint64_t offset) { in Dump() argument
340 printf("offset %#" PRIx64 " page %p paddr %#" PRIxPTR "\n", offset, p, p->paddr()); in Dump()
347 size_t VmObjectPaged::AllocatedPagesInRange(uint64_t offset, uint64_t len) const { in AllocatedPagesInRange() argument
351 if (!TrimRange(offset, len, size_, &new_len)) { in AllocatedPagesInRange()
358 [&count, offset, new_len](const auto p, uint64_t off) { in AllocatedPagesInRange()
359 if (off >= offset && off < offset + new_len) { in AllocatedPagesInRange()
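The AllocatedPagesInRange() lines above count committed pages by visiting every page and testing its offset against the trimmed window. A minimal standalone sketch of that counting visitor follows; the std::map stands in for the kernel's page list and is an assumption for illustration only.

#include <cstddef>
#include <cstdint>
#include <map>

// Count the pages whose offsets fall inside the trimmed [offset, offset + new_len)
// window. The map key is the page's byte offset within the object; the mapped
// value is a placeholder for a vm_page_t pointer.
size_t AllocatedPagesInRange(const std::map<uint64_t, const void*>& pages,
                             uint64_t offset, uint64_t new_len) {
    size_t count = 0;
    for (const auto& [off, page] : pages) {
        (void)page;  // only the offset matters for the count
        if (off >= offset && off < offset + new_len) {
            count++;
        }
    }
    return count;
}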
367 zx_status_t VmObjectPaged::AddPage(vm_page_t* p, uint64_t offset) { in AddPage() argument
370 return AddPageLocked(p, offset); in AddPage()
373 zx_status_t VmObjectPaged::AddPageLocked(vm_page_t* p, uint64_t offset) { in AddPageLocked() argument
377 LTRACEF("vmo %p, offset %#" PRIx64 ", page %p (%#" PRIxPTR ")\n", this, offset, p, p->paddr()); in AddPageLocked()
381 if (offset >= size_) { in AddPageLocked()
385 zx_status_t err = page_list_.AddPage(p, offset); in AddPageLocked()
391 RangeChangeUpdateLocked(offset, PAGE_SIZE); in AddPageLocked()
403 zx_status_t VmObjectPaged::GetPageLocked(uint64_t offset, uint pf_flags, list_node* free_list, in GetPageLocked() argument
408 if (offset >= size_) { in GetPageLocked()
416 p = page_list_.GetPage(offset); in GetPageLocked()
428 LTRACEF("vmo %p, offset %#" PRIx64 ", pf_flags %#x (%s)\n", this, offset, pf_flags, in GetPageLocked()
434 bool overflowed = add_overflow(parent_offset_, offset, &parent_offset); in GetPageLocked()
485 status = AddPageLocked(p_clone, offset); in GetPageLocked()
546 zx_status_t status = AddPageLocked(p, offset); in GetPageLocked()
550 RangeChangeUpdateLocked(offset, PAGE_SIZE); in GetPageLocked()
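The GetPageLocked() lines above show the fault path: check the object's own page list first, then fall back to the parent at parent_offset_ + offset with an explicit overflow check, and insert a freshly added page when one is needed. Below is a rough sketch of that lookup-then-fall-back shape under simplified types; the Page struct, the std::map page list, and __builtin_add_overflow are stand-ins, not the kernel's vm_page_t, page list, or add_overflow.

#include <cstdint>
#include <map>

struct Page {};  // placeholder for vm_page_t

struct FakePagedVmo {
    uint64_t size_ = 0;
    uint64_t parent_offset_ = 0;
    FakePagedVmo* parent_ = nullptr;
    std::map<uint64_t, Page*> page_list_;

    Page* GetPageLocked(uint64_t offset) {
        if (offset >= size_) {
            return nullptr;  // out of range (ZX_ERR_OUT_OF_RANGE in the real code)
        }
        // First see whether this object already has a page at that offset.
        if (auto it = page_list_.find(offset); it != page_list_.end()) {
            return it->second;
        }
        if (parent_ != nullptr) {
            // Translate the request into the parent's offset space, guarding the
            // addition against overflow as the add_overflow check above does.
            uint64_t parent_offset;
            if (__builtin_add_overflow(parent_offset_, offset, &parent_offset)) {
                return nullptr;
            }
            // In the real COW path a write fault copies the parent's page and
            // inserts the copy locally via AddPageLocked(); a read can share it.
            return parent_->GetPageLocked(parent_offset);
        }
        // With no parent the real code allocates a fresh page, inserts it with
        // AddPageLocked(p, offset), and notifies mappings via RangeChangeUpdateLocked().
        return nullptr;
    }
};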
564 zx_status_t VmObjectPaged::CommitRange(uint64_t offset, uint64_t len) { in CommitRange() argument
566 LTRACEF("offset %#" PRIx64 ", len %#" PRIx64 "\n", offset, len); in CommitRange()
572 if (!TrimRange(offset, len, size_, &new_len)) { in CommitRange()
582 uint64_t end = ROUNDUP_PAGE_SIZE(offset + new_len); in CommitRange()
583 DEBUG_ASSERT(end > offset); in CommitRange()
584 offset = ROUNDDOWN(offset, PAGE_SIZE); in CommitRange()
588 uint64_t expected_next_off = offset; in CommitRange()
616 RangeChangeUpdateLocked(offset, end - offset); in CommitRange()
619 for (uint64_t o = offset; o < end; o += PAGE_SIZE) { in CommitRange()
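CommitRange() (and DecommitRange() below it) first trims the requested byte range against the VMO size, then rounds the start down and the end up to page boundaries before walking pages. The following standalone sketch shows that trim-and-round pattern; PAGE_SIZE, TrimRange(), and the rounding helpers here are simplified stand-ins for the kernel's versions, not their real signatures.

#include <cstdint>
#include <cstdio>
#include <optional>

constexpr uint64_t kPageSize = 4096;

// Clamp [offset, offset + len) against the object size; returns the trimmed
// length, or nothing when the starting offset is already out of range.
std::optional<uint64_t> TrimRange(uint64_t offset, uint64_t len, uint64_t vmo_size) {
    if (offset > vmo_size) {
        return std::nullopt;
    }
    return (len > vmo_size - offset) ? vmo_size - offset : len;
}

uint64_t RoundDownPage(uint64_t x) { return x & ~(kPageSize - 1); }
uint64_t RoundUpPage(uint64_t x) { return (x + kPageSize - 1) & ~(kPageSize - 1); }

int main() {
    const uint64_t vmo_size = 16384;
    uint64_t offset = 5000;
    const uint64_t len = 3000;

    const auto new_len = TrimRange(offset, len, vmo_size);
    if (!new_len) {
        return 1;
    }
    // Round the trimmed byte range out to whole pages before walking it,
    // as the lines above do: the end rounds up, the start rounds down.
    const uint64_t end = RoundUpPage(offset + *new_len);
    offset = RoundDownPage(offset);
    for (uint64_t o = offset; o < end; o += kPageSize) {
        printf("would commit the page at offset %#llx\n", (unsigned long long)o);
    }
    return 0;
}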
640 zx_status_t VmObjectPaged::DecommitRange(uint64_t offset, uint64_t len) { in DecommitRange() argument
642 LTRACEF("offset %#" PRIx64 ", len %#" PRIx64 "\n", offset, len); in DecommitRange()
652 if (!TrimRange(offset, len, size_, &new_len)) { in DecommitRange()
662 uint64_t start = ROUNDDOWN(offset, PAGE_SIZE); in DecommitRange()
663 uint64_t end = ROUNDUP_PAGE_SIZE(offset + new_len); in DecommitRange()
664 DEBUG_ASSERT(end > offset); in DecommitRange()
686 zx_status_t VmObjectPaged::Pin(uint64_t offset, uint64_t len) { in Pin() argument
690 return PinLocked(offset, len); in Pin()
693 zx_status_t VmObjectPaged::PinLocked(uint64_t offset, uint64_t len) { in PinLocked() argument
697 if (unlikely(!InRange(offset, len, size_))) { in PinLocked()
705 const uint64_t start_page_offset = ROUNDDOWN(offset, PAGE_SIZE); in PinLocked()
706 const uint64_t end_page_offset = ROUNDUP(offset + len, PAGE_SIZE); in PinLocked()
737 void VmObjectPaged::Unpin(uint64_t offset, uint64_t len) { in Unpin() argument
739 UnpinLocked(offset, len); in Unpin()
742 void VmObjectPaged::UnpinLocked(uint64_t offset, uint64_t len) { in UnpinLocked() argument
747 ASSERT(InRange(offset, len, size_)); in UnpinLocked()
753 const uint64_t start_page_offset = ROUNDDOWN(offset, PAGE_SIZE); in UnpinLocked()
754 const uint64_t end_page_offset = ROUNDUP(offset + len, PAGE_SIZE); in UnpinLocked()
775 bool VmObjectPaged::AnyPagesPinnedLocked(uint64_t offset, size_t len) { in AnyPagesPinnedLocked() argument
778 DEBUG_ASSERT(IS_PAGE_ALIGNED(offset)); in AnyPagesPinnedLocked()
781 const uint64_t start_page_offset = offset; in AnyPagesPinnedLocked()
782 const uint64_t end_page_offset = offset + len; in AnyPagesPinnedLocked()
858 zx_status_t VmObjectPaged::SetParentOffsetLocked(uint64_t offset) { in SetParentOffsetLocked() argument
862 if (!IS_PAGE_ALIGNED(offset)) { in SetParentOffsetLocked()
870 if (add_overflow(offset, size_, &end)) { in SetParentOffsetLocked()
874 parent_offset_ = offset; in SetParentOffsetLocked()
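SetParentOffsetLocked() accepts a new parent offset only if it is page aligned and offset + size_ does not wrap around uint64_t. A minimal sketch of those two checks follows, using the compiler's __builtin_add_overflow in place of the kernel's add_overflow wrapper.

#include <cstdint>

constexpr uint64_t kPageSize = 4096;

// Returns true and records the new parent offset only if both checks pass.
bool SetParentOffsetChecked(uint64_t offset, uint64_t vmo_size, uint64_t* parent_offset_out) {
    if (offset % kPageSize != 0) {
        return false;  // ZX_ERR_INVALID_ARGS in the real code: offset must be page aligned
    }
    uint64_t end;
    if (__builtin_add_overflow(offset, vmo_size, &end)) {
        return false;  // offset + size_ would wrap around uint64_t
    }
    *parent_offset_out = offset;
    return true;
}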
882 zx_status_t VmObjectPaged::ReadWriteInternal(uint64_t offset, size_t len, bool write, T copyfunc) { in ReadWriteInternal() argument
894 if (add_overflow(offset, len, &end_offset) || end_offset > size_) { in ReadWriteInternal()
899 uint64_t src_offset = offset; in ReadWriteInternal()
931 zx_status_t VmObjectPaged::Read(void* _ptr, uint64_t offset, size_t len) { in Read() argument
941 auto read_routine = [ptr](const void* src, size_t offset, size_t len) -> zx_status_t { in Read() argument
942 memcpy(ptr + offset, src, len); in Read()
946 return ReadWriteInternal(offset, len, false, read_routine); in Read()
949 zx_status_t VmObjectPaged::Write(const void* _ptr, uint64_t offset, size_t len) { in Write() argument
959 auto write_routine = [ptr](void* dst, size_t offset, size_t len) -> zx_status_t { in Write() argument
960 memcpy(dst, ptr + offset, len); in Write()
964 return ReadWriteInternal(offset, len, true, write_routine); in Write()
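Read(), Write(), and ReadWriteInternal() above share one routine that walks the range page by page and delegates the actual copy to a direction-specific lambda taking (page pointer, offset into the caller's buffer, length). The userspace-only sketch below reproduces that callback shape; the std::map-backed pages and the plain int error codes are assumptions, not the kernel's page_list_ or zx_status_t plumbing.

#include <algorithm>
#include <cstdint>
#include <cstring>
#include <map>
#include <vector>

constexpr size_t kPageSize = 4096;

struct FakeVmo {
    uint64_t size = 0;
    std::map<uint64_t, std::vector<uint8_t>> pages;  // page-aligned offset -> backing bytes

    template <typename CopyFunc>
    int ReadWriteInternal(uint64_t offset, size_t len, bool write, CopyFunc copyfunc) {
        (void)write;  // the real code uses this to pick page-fault flags
        uint64_t end;
        if (__builtin_add_overflow(offset, len, &end) || end > size) {
            return -1;  // mirrors the add_overflow / size_ check above
        }
        size_t dest_offset = 0;        // offset into the caller's buffer
        uint64_t src_offset = offset;  // offset into the VMO
        while (len > 0) {
            const size_t page_offset = src_offset % kPageSize;
            const size_t to_copy = std::min(kPageSize - page_offset, len);
            auto& page = pages[src_offset - page_offset];
            page.resize(kPageSize);
            // Hand a page-relative pointer to the direction-specific lambda.
            int rc = copyfunc(page.data() + page_offset, dest_offset, to_copy);
            if (rc != 0) {
                return rc;
            }
            src_offset += to_copy;
            dest_offset += to_copy;
            len -= to_copy;
        }
        return 0;
    }

    int Read(void* _ptr, uint64_t offset, size_t len) {
        auto* ptr = static_cast<uint8_t*>(_ptr);
        auto read_routine = [ptr](const void* src, size_t off, size_t n) -> int {
            memcpy(ptr + off, src, n);
            return 0;
        };
        return ReadWriteInternal(offset, len, false, read_routine);
    }

    int Write(const void* _ptr, uint64_t offset, size_t len) {
        auto* ptr = static_cast<const uint8_t*>(_ptr);
        auto write_routine = [ptr](void* dst, size_t off, size_t n) -> int {
            memcpy(dst, ptr + off, n);
            return 0;
        };
        return ReadWriteInternal(offset, len, true, write_routine);
    }
};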
967 zx_status_t VmObjectPaged::Lookup(uint64_t offset, uint64_t len, in Lookup() argument
977 if (unlikely(!InRange(offset, len, size_))) { in Lookup()
981 const uint64_t start_page_offset = ROUNDDOWN(offset, PAGE_SIZE); in Lookup()
982 const uint64_t end_page_offset = ROUNDUP(offset + len, PAGE_SIZE); in Lookup()
1045 zx_status_t VmObjectPaged::ReadUser(user_out_ptr<void> ptr, uint64_t offset, size_t len) { in ReadUser() argument
1049 auto read_routine = [ptr](const void* src, size_t offset, size_t len) -> zx_status_t { in ReadUser() argument
1050 return ptr.byte_offset(offset).copy_array_to_user(src, len); in ReadUser()
1053 return ReadWriteInternal(offset, len, false, read_routine); in ReadUser()
1056 zx_status_t VmObjectPaged::WriteUser(user_in_ptr<const void> ptr, uint64_t offset, size_t len) { in WriteUser() argument
1060 auto write_routine = [ptr](void* dst, size_t offset, size_t len) -> zx_status_t { in WriteUser() argument
1061 return ptr.byte_offset(offset).copy_array_from_user(dst, len); in WriteUser()
1064 return ReadWriteInternal(offset, len, true, write_routine); in WriteUser()
1067 zx_status_t VmObjectPaged::InvalidateCache(const uint64_t offset, const uint64_t len) { in InvalidateCache() argument
1068 return CacheOp(offset, len, CacheOpType::Invalidate); in InvalidateCache()
1071 zx_status_t VmObjectPaged::CleanCache(const uint64_t offset, const uint64_t len) { in CleanCache() argument
1072 return CacheOp(offset, len, CacheOpType::Clean); in CleanCache()
1075 zx_status_t VmObjectPaged::CleanInvalidateCache(const uint64_t offset, const uint64_t len) { in CleanInvalidateCache() argument
1076 return CacheOp(offset, len, CacheOpType::CleanInvalidate); in CleanInvalidateCache()
1079 zx_status_t VmObjectPaged::SyncCache(const uint64_t offset, const uint64_t len) { in SyncCache() argument
1080 return CacheOp(offset, len, CacheOpType::Sync); in SyncCache()
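The four cache entry points above differ only in the CacheOpType they forward to a shared CacheOp(offset, len, op). A tiny sketch of that wrapper pattern, with int standing in for zx_status_t and an empty CacheOp body as a placeholder:

#include <cstdint>

enum class CacheOpType { Invalidate, Clean, CleanInvalidate, Sync };

struct CacheOps {
    // Placeholder: the kernel's CacheOp applies the requested cache maintenance
    // operation over the pages covering [offset, offset + len).
    int CacheOp(uint64_t offset, uint64_t len, CacheOpType type) {
        (void)offset;
        (void)len;
        (void)type;
        return 0;
    }
    int InvalidateCache(uint64_t offset, uint64_t len) {
        return CacheOp(offset, len, CacheOpType::Invalidate);
    }
    int CleanCache(uint64_t offset, uint64_t len) {
        return CacheOp(offset, len, CacheOpType::Clean);
    }
    int CleanInvalidateCache(uint64_t offset, uint64_t len) {
        return CacheOp(offset, len, CacheOpType::CleanInvalidate);
    }
    int SyncCache(uint64_t offset, uint64_t len) {
        return CacheOp(offset, len, CacheOpType::Sync);
    }
};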
1183 void VmObjectPaged::RangeChangeUpdateFromParentLocked(const uint64_t offset, const uint64_t len) { in RangeChangeUpdateFromParentLocked() argument
1187 offset, len, parent_offset_, size_); in RangeChangeUpdateFromParentLocked()
1193 if (!GetIntersect(parent_offset_, size_, offset, len, in RangeChangeUpdateFromParentLocked()
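RangeChangeUpdateFromParentLocked() intersects the parent's changed range [offset, offset + len) with this clone's window [parent_offset_, parent_offset_ + size_) and translates the overlap into child-relative offsets. The sketch below shows that intersect-and-translate step; this GetIntersect signature is a plausible reconstruction rather than the kernel's exact helper, and it assumes the input ranges do not overflow uint64_t.

#include <algorithm>
#include <cstdint>

// Intersect [base1, base1 + len1) with [base2, base2 + len2); assumes neither
// range wraps around uint64_t.
bool GetIntersect(uint64_t base1, uint64_t len1, uint64_t base2, uint64_t len2,
                  uint64_t* out_base, uint64_t* out_len) {
    const uint64_t start = std::max(base1, base2);
    const uint64_t end = std::min(base1 + len1, base2 + len2);
    if (start >= end) {
        return false;  // the ranges do not overlap
    }
    *out_base = start;
    *out_len = end - start;
    return true;
}

// Returns true and fills a child-relative [child_offset, child_len) when the
// parent's change touches this clone's window at all.
bool TranslateParentChange(uint64_t parent_offset_, uint64_t size_,
                           uint64_t offset, uint64_t len,
                           uint64_t* child_offset, uint64_t* child_len) {
    uint64_t o, l;
    if (!GetIntersect(parent_offset_, size_, offset, len, &o, &l)) {
        return false;
    }
    *child_offset = o - parent_offset_;  // shift into the child's offset space
    *child_len = l;
    return true;
}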