/mm/
swapfile.c
    691  offset++;  in cluster_reclaim_range()
    710  for (offset = start; offset < end; offset++)  in cluster_reclaim_range()
    728  for (offset = start; offset < end; offset++) {  in cluster_scan_range()
    788  for (end -= nr_pages; offset <= end; offset += nr_pages) {  in alloc_swap_scan_cluster()
    855  offset++;  in swap_reclaim_full_clusters()
   1146  offset++;  in swap_range_free()
   1191  offset = this_cpu_read(percpu_swap_cluster.offset[order]);  in swap_alloc_fast()
   1829  for (offset = start_offset; offset < end_offset; offset += nr) {  in free_swap_and_cache_nr()
   1846  nr = ALIGN(offset + 1, nr) - offset;  in free_swap_and_cache_nr()
   2646  for (offset = 0; offset < end; offset += SWAPFILE_CLUSTER) {  in wait_for_allocation()
   [all …]
|
swap_cgroup.c
     25  pgoff_t offset)  in __swap_cgroup_id_lookup() argument
     37  pgoff_t offset,  in __swap_cgroup_id_xchg() argument
     69  pgoff_t offset, end;  in swap_cgroup_record() local
     72  offset = swp_offset(ent);  in swap_cgroup_record()
     73  end = offset + nr_ents;  in swap_cgroup_record()
     79  } while (++offset != end);  in swap_cgroup_record()
     95  pgoff_t offset, end;  in swap_cgroup_clear() local
     99  offset = swp_offset(ent);  in swap_cgroup_clear()
    100  end = offset + nr_ents;  in swap_cgroup_clear()
    104  old = __swap_cgroup_id_xchg(map, offset, 0);  in swap_cgroup_clear()
   [all …]
|
fadvise.c
     72  endbyte = (u64)offset + (u64)len;  in generic_fadvise()
     98  start_index = offset >> PAGE_SHIFT;  in generic_fadvise()
    114  __filemap_fdatawrite_range(mapping, offset, endbyte,  in generic_fadvise()
    122  start_index = (offset+(PAGE_SIZE-1)) >> PAGE_SHIFT;  in generic_fadvise()
    183  return file->f_op->fadvise(file, offset, len, advice);  in vfs_fadvise()
    185  return generic_fadvise(file, offset, len, advice);  in vfs_fadvise()
    191  int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)  in ksys_fadvise64_64() argument
    198  return vfs_fadvise(fd_file(f), offset, len, advice);  in ksys_fadvise64_64()
    203  return ksys_fadvise64_64(fd, offset, len, advice);  in SYSCALL_DEFINE4()
    210  return ksys_fadvise64_64(fd, offset, len, advice);  in SYSCALL_DEFINE4()
   [all …]
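
The two index conversions at lines 98 and 122 above round in opposite directions: writeback may begin on a partially covered page, but invalidation must not touch one. A standalone userspace sketch of that arithmetic; the PAGE_SHIFT/PAGE_SIZE values are local stand-ins, not the kernel's:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
        unsigned long offset = 5000;    /* byte offset into the file */

        /* Round down: first page overlapping the range at all
         * (line 98, used when kicking off writeback). */
        unsigned long wb_start = offset >> PAGE_SHIFT;

        /* Round up: first page wholly inside the range (line 122,
         * used before invalidating, so a page still holding bytes
         * outside the range is never dropped). */
        unsigned long inv_start = (offset + (PAGE_SIZE - 1)) >> PAGE_SHIFT;

        printf("writeback from page %lu, invalidate from page %lu\n",
               wb_start, inv_start);   /* prints 1 and 2 */
        return 0;
}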
|
usercopy.c
     87  bool to_user, unsigned long offset,  in usercopy_abort() argument
     95  offset, len);  in usercopy_abort()
    166  unsigned long offset;  in check_heap_object() local
    170  offset = offset_in_page(ptr);  in check_heap_object()
    171  if (n > PAGE_SIZE - offset)  in check_heap_object()
    172  usercopy_abort("kmap", NULL, to_user, offset, n);  in check_heap_object()
    183  offset = addr - area->va_start;  in check_heap_object()
    184  usercopy_abort("vmalloc", NULL, to_user, offset, n);  in check_heap_object()
    198  offset = ptr - folio_address(folio);  in check_heap_object()
    199  if (n > folio_size(folio) - offset)  in check_heap_object()
   [all …]
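
The test at line 171 is written as n > PAGE_SIZE - offset rather than offset + n > PAGE_SIZE: offset is already known to lie inside the page, so the subtraction cannot underflow, while the addition could wrap for a huge n. A minimal sketch of that overflow-safe bounds check, with PAGE_SIZE defined locally for illustration:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

/* True when copying n bytes starting at 'offset' within one page would
 * run past the end of the page. Because offset < PAGE_SIZE is already
 * guaranteed (offset_in_page), "PAGE_SIZE - offset" cannot underflow,
 * and no "offset + n" overflow can occur. */
static bool copy_overruns_page(unsigned long offset, unsigned long n)
{
        return n > PAGE_SIZE - offset;
}

int main(void)
{
        printf("%d\n", copy_overruns_page(4000, 96));  /* 0: fits exactly */
        printf("%d\n", copy_overruns_page(4000, 97));  /* 1: one byte over */
        printf("%d\n", copy_overruns_page(8, ~0UL));   /* 1: huge n, no wrap */
        return 0;
}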
|
page_frag_cache.c
     98  unsigned int size, offset;  in __page_frag_alloc_align() local
    116  nc->offset = 0;  in __page_frag_alloc_align()
    120  offset = __ALIGN_KERNEL_MASK(nc->offset, ~align_mask);  in __page_frag_alloc_align()
    121  if (unlikely(offset + fragsz > size)) {  in __page_frag_alloc_align()
    151  offset = 0;  in __page_frag_alloc_align()
    155  nc->offset = offset + fragsz;  in __page_frag_alloc_align()
    157  return encoded_page_decode_virt(encoded_page) + offset;  in __page_frag_alloc_align()
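
Lines 120-155 above are a bump allocator: align the running offset, hand out fragsz bytes, and start over when the fragment would overrun the backing page. A hedged userspace sketch of the same stepping, assuming the kernel's convention that align_mask is ~(align - 1); the refill path is simplified to wrapping in place, where the real code takes a fresh page:

#include <stddef.h>
#include <stdio.h>

#define CACHE_SIZE 4096UL

struct frag_cache {
        unsigned long offset;           /* next free byte in the buffer */
        char buf[CACHE_SIZE];
};

/* align_mask follows the kernel convention: ~(align - 1), with ~0UL
 * meaning "no alignment requested". */
static void *frag_alloc(struct frag_cache *nc, size_t fragsz,
                        unsigned long align_mask)
{
        /* Equivalent of __ALIGN_KERNEL_MASK(nc->offset, ~align_mask):
         * round the current offset up to the requested alignment. */
        unsigned long offset = (nc->offset + ~align_mask) & align_mask;

        if (offset + fragsz > CACHE_SIZE)
                offset = 0;             /* kernel would refill a fresh page */
        nc->offset = offset + fragsz;   /* bump for the next caller */
        return nc->buf + offset;
}

int main(void)
{
        struct frag_cache nc = { 0 };

        frag_alloc(&nc, 100, ~0UL);             /* lands at offset 0 */
        void *p = frag_alloc(&nc, 100, ~63UL);  /* 64-byte aligned */
        printf("second fragment at offset %td\n", (char *)p - nc.buf);
        return 0;                               /* prints 128 */
}

With nc->offset at 100, the 64-byte-aligned request lands at offset 128, which is exactly what the __ALIGN_KERNEL_MASK expression at line 120 computes.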
|
truncate.c
    195  unsigned int offset, length;  in truncate_inode_partial_folio() local
    199  offset = start - pos;  in truncate_inode_partial_folio()
    201  offset = 0;  in truncate_inode_partial_folio()
    203  length = size - offset;  in truncate_inode_partial_folio()
    205  length = end + 1 - pos - offset;  in truncate_inode_partial_folio()
    219  folio_zero_range(folio, offset, length);  in truncate_inode_partial_folio()
    222  folio_invalidate(folio, offset, length);  in truncate_inode_partial_folio()
    235  if (offset + length == size)  in truncate_inode_partial_folio()
    849  unsigned int offset, end;  in pagecache_isize_extended() local
    851  offset = from - folio_pos(folio);  in pagecache_isize_extended()
   [all …]
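
Lines 195-205 clamp the truncation range [start, end] (end inclusive) to one folio, producing the in-folio (offset, length) pair that lines 219-222 then zero and invalidate. A standalone sketch of that clamping, with positions chosen purely for illustration:

#include <stdio.h>

int main(void)
{
        unsigned long long pos = 8192;   /* folio's position in the file */
        unsigned long size = 16384;      /* folio size in bytes */
        unsigned long long start = 10000, end = 20000; /* truncate range */

        unsigned int offset, length;

        if (start > pos)
                offset = start - pos;    /* range begins inside the folio */
        else
                offset = 0;              /* folio starts inside the range */

        if (pos + size <= end + 1)
                length = size - offset;  /* range runs past the folio end */
        else
                length = end + 1 - pos - offset; /* range ends in the folio */

        printf("zero %u bytes at folio offset %u\n", length, offset);
        return 0;                        /* 10001 bytes at offset 1808 */
}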
|
page_reporting.c
    148  struct scatterlist *sgl, unsigned int *offset)  in page_reporting_cycle() argument
    200  if (*offset) {  in page_reporting_cycle()
    207  --(*offset);  in page_reporting_cycle()
    208  sg_set_page(&sgl[*offset], page, page_len, 0);  in page_reporting_cycle()
    228  *offset = PAGE_REPORTING_CAPACITY;  in page_reporting_cycle()
    263  unsigned int order, mt, leftover, offset = PAGE_REPORTING_CAPACITY;  in page_reporting_process_zone() local
    286  sgl, &offset);  in page_reporting_process_zone()
    293  leftover = PAGE_REPORTING_CAPACITY - offset;  in page_reporting_process_zone()
    295  sgl = &sgl[offset];  in page_reporting_process_zone()
|
swap_state.c
    507  unsigned long offset,  in __swapin_nr_pages() argument
    526  if (offset != prev_offset + 1 && offset != prev_offset - 1)  in __swapin_nr_pages()
    546  static unsigned long swapin_nr_pages(unsigned long offset)  in swapin_nr_pages() argument
    561  WRITE_ONCE(prev_offset, offset);  in swapin_nr_pages()
    590  unsigned long offset = entry_offset;  in swap_cluster_readahead() local
    598  mask = swapin_nr_pages(offset) - 1;  in swap_cluster_readahead()
    603  start_offset = offset & ~mask;  in swap_cluster_readahead()
    604  end_offset = offset | mask;  in swap_cluster_readahead()
    611  for (offset = start_offset; offset <= end_offset ; offset++) {  in swap_cluster_readahead()
    614  swp_entry(swp_type(entry), offset),  in swap_cluster_readahead()
   [all …]
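
Lines 598-604 place a power-of-two readahead window around the faulting swap offset using nothing but mask arithmetic, and line 611 then walks it. Sketched standalone:

#include <stdio.h>

int main(void)
{
        unsigned long offset = 1234;  /* faulting swap offset */
        unsigned long pages  = 8;     /* swapin_nr_pages() result, power of 2 */
        unsigned long mask   = pages - 1;

        unsigned long start = offset & ~mask;  /* round down to the window */
        unsigned long end   = offset | mask;   /* last offset in the window */

        for (unsigned long off = start; off <= end; off++)
                printf("would read swap offset %lu\n", off);
        return 0;
}

For offset 1234 and an 8-page window this reads offsets 1232 through 1239; the faulting offset always falls inside the window.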
|
ioremap.c
     18  unsigned long offset, vaddr;  in generic_ioremap_prot() local
     32  offset = phys_addr & (~PAGE_MASK);  in generic_ioremap_prot()
     33  phys_addr -= offset;  in generic_ioremap_prot()
     34  size = PAGE_ALIGN(size + offset);  in generic_ioremap_prot()
     48  return (void __iomem *)(vaddr + offset);  in generic_ioremap_prot()
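
Lines 32-48 show the standard sub-page trick for ioremap: strip the byte offset, map whole pages, then add the offset back into the returned pointer. A userspace sketch of just the arithmetic; the PAGE_* macros are local stand-ins:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
        unsigned long phys_addr = 0xfe001234;  /* unaligned MMIO address */
        unsigned long size = 0x100;

        unsigned long offset = phys_addr & ~PAGE_MASK;  /* 0x234 */
        phys_addr -= offset;                            /* 0xfe001000 */
        size = PAGE_ALIGN(size + offset);               /* one full page */

        printf("map %#lx..+%#lx, return vaddr + %#lx\n",
               phys_addr, size, offset);
        return 0;
}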
|
shrinker.c
    191  static inline int calc_shrinker_id(int index, int offset)  in calc_shrinker_id() argument
    193  return index * SHRINKER_UNIT_BITS + offset;  in calc_shrinker_id()
    287  int nid, index, offset;  in reparent_shrinker_deferred() local
    305  for (offset = 0; offset < SHRINKER_UNIT_BITS; offset++) {  in reparent_shrinker_deferred()
    306  nr = atomic_long_read(&child_unit->nr_deferred[offset]);  in reparent_shrinker_deferred()
    307  atomic_long_add(nr, &parent_unit->nr_deferred[offset]);  in reparent_shrinker_deferred()
    474  int offset, index = 0;  in shrink_slab_memcg() local
    527  for_each_set_bit(offset, unit->map, SHRINKER_UNIT_BITS) {  in shrink_slab_memcg()
    534  int shrinker_id = calc_shrinker_id(index, offset);  in shrink_slab_memcg()
    539  clear_bit(offset, unit->map);  in shrink_slab_memcg()
   [all …]
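
calc_shrinker_id() at line 193 flattens a (unit index, bit offset) pair into a single shrinker id; division and modulo by SHRINKER_UNIT_BITS recover the pair. A sketch, with SHRINKER_UNIT_BITS given an illustrative value:

#include <assert.h>

#define SHRINKER_UNIT_BITS 64  /* illustrative; one bitmap word per unit */

static int calc_shrinker_id(int index, int offset)
{
        return index * SHRINKER_UNIT_BITS + offset;
}

int main(void)
{
        int id = calc_shrinker_id(2, 5);       /* unit 2, bit 5 -> 133 */

        assert(id / SHRINKER_UNIT_BITS == 2);  /* recover the unit index */
        assert(id % SHRINKER_UNIT_BITS == 5);  /* recover the bit offset */
        return 0;
}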
|
early_ioremap.c
    102  unsigned long offset;  in __early_ioremap() local
    131  offset = offset_in_page(phys_addr);  in __early_ioremap()
    156  __func__, &phys_addr, size, slot, offset, slot_virt[slot]);  in __early_ioremap()
    158  prev_map[slot] = (void __iomem *)(offset + slot_virt[slot]);  in __early_ioremap()
    165  unsigned long offset;  in early_iounmap() local
    194  offset = offset_in_page(virt_addr);  in early_iounmap()
    195  nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;  in early_iounmap()
|
dmapool.c
    305  unsigned int next_boundary = pool->boundary, offset = 0;  in pool_initialise_page() local
    309  while (offset + pool->size <= pool->allocation) {  in pool_initialise_page()
    310  if (offset + pool->size > next_boundary) {  in pool_initialise_page()
    311  offset = next_boundary;  in pool_initialise_page()
    316  block = page->vaddr + offset;  in pool_initialise_page()
    317  block->dma = page->dma + offset;  in pool_initialise_page()
    326  offset += pool->size;  in pool_initialise_page()
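
The loop at lines 309-326 carves a DMA page into fixed-size blocks without letting any block straddle a boundary: a block that would cross is pushed up to the next boundary instead. A standalone sketch with example sizes:

#include <stdio.h>

int main(void)
{
        unsigned int allocation = 4096; /* bytes available in the page */
        unsigned int size = 384;        /* block size */
        unsigned int boundary = 1024;   /* blocks must not cross this */
        unsigned int next_boundary = boundary, offset = 0;

        while (offset + size <= allocation) {
                if (offset + size > next_boundary) {
                        /* skip ahead so the block stays inside one
                         * boundary-sized region */
                        offset = next_boundary;
                        next_boundary += boundary;
                        continue;
                }
                printf("block at offset %u\n", offset);
                offset += size;
        }
        return 0;
}

With these numbers the blocks land at 0, 384, 1024, 1408, 2048, and so on: the block that would have started at 768 and crossed the 1024 boundary is moved up to 1024.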
|
process_vm_access.c
     28  unsigned offset,  in process_vm_rw_pages() argument
     36  size_t copy = PAGE_SIZE - offset;  in process_vm_rw_pages()
     43  copied = copy_page_from_iter(page, offset, copy, iter);  in process_vm_rw_pages()
     45  copied = copy_page_to_iter(page, offset, copy, iter);  in process_vm_rw_pages()
     50  offset = 0;  in process_vm_rw_pages()
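
Lines 36-50 are the canonical page-walk idiom: only the first page honors the caller's starting offset, and every later page is copied from its top. As a plain loop:

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
        unsigned long offset = 300;  /* start offset within the first page */
        size_t len = 10000;          /* total bytes to copy */

        while (len) {
                size_t copy = PAGE_SIZE - offset;

                if (copy > len)
                        copy = len;
                printf("copy %zu bytes from page offset %lu\n", copy, offset);
                len -= copy;
                offset = 0;  /* subsequent pages start at the top */
        }
        return 0;            /* 3796, then 4096, then 2108 bytes */
}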
|
shmem.c
   2327  pgoff_t offset;  in shmem_swapin_folio() local
   2814  unsigned long offset;  in shmem_get_unmapped_area() local
   2891  if (offset && offset + len < 2 * hpage_size)  in shmem_get_unmapped_area()
   3469  if (!offset)  in shmem_file_read_iter()
   3570  .offset = offset,  in splice_zeropage_into_pipe()
   3699  if (offset < 0)  in shmem_file_llseek()
   3704  offset = mapping_seek_hole_data(mapping, offset, inode->i_size, whence);  in shmem_file_llseek()
   3705  if (offset >= 0)  in shmem_file_llseek()
   3706  offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);  in shmem_file_llseek()
   3708  return offset;  in shmem_file_llseek()
   [all …]
|
zsmalloc.c
   1287  int i, nr_zpdesc, offset;  in obj_malloc() local
   1299  offset = obj * class->size;  in obj_malloc()
   1300  nr_zpdesc = offset >> PAGE_SHIFT;  in obj_malloc()
   1301  m_offset = offset_in_page(offset);  in obj_malloc()
   1536  unsigned int offset;  in find_alloced_obj() local
   1541  offset = get_first_obj_offset(zpdesc);  in find_alloced_obj()
   1542  offset += class->size * index;  in find_alloced_obj()
   1544  while (offset < PAGE_SIZE) {  in find_alloced_obj()
   1548  offset += class->size;  in find_alloced_obj()
   1736  unsigned int offset;  in zs_page_migrate() local
   [all …]
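
Lines 1299-1301 decompose a linear object offset (obj * class->size) into a page index and an in-page offset, because a zsmalloc object may start anywhere within a chain of pages. A standalone illustration with an assumed class size:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
        unsigned long class_size = 720;  /* bytes per object (example) */
        unsigned long obj = 23;          /* object index in the zspage */

        unsigned long offset   = obj * class_size;          /* 16560 */
        unsigned long page_idx = offset >> PAGE_SHIFT;      /* page 4 */
        unsigned long in_page  = offset & (PAGE_SIZE - 1);  /* offset_in_page */

        printf("object %lu lives in page %lu at offset %lu\n",
               obj, page_idx, in_page);  /* page 4, offset 176 */
        return 0;
}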
|
filemap.c
    824  pgoff_t offset = old->index;  in replace_page_cache_folio() local
    833  new->index = offset;  in replace_page_cache_folio()
   2748  fsize - offset);  in filemap_read()
   2919  offset %= PAGE_SIZE;  in splice_folio_into_pipe()
   2928  .offset = offset,  in splice_folio_into_pipe()
   2935  offset = 0;  in splice_folio_into_pipe()
   3078  offset += bsz;  in folio_seek_hole_data()
   3079  } while (offset < folio_size(folio));  in folio_seek_hole_data()
   4166  offset = pos & (chunk - 1);  in generic_perform_write()
   4167  bytes = min(chunk - offset, bytes);  in generic_perform_write()
   [all …]
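
At lines 4166-4167, each write iteration is clipped so it stays inside one power-of-two chunk: offset = pos & (chunk - 1) finds the position within the chunk, and bytes = min(chunk - offset, bytes) caps the copy. Sketch:

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
        unsigned long chunk = 4096;      /* chunk size, a power of two */
        unsigned long pos = 5000;        /* current file position */
        unsigned long remaining = 10000; /* bytes left in the write */

        while (remaining) {
                unsigned long offset = pos & (chunk - 1);
                unsigned long bytes = MIN(chunk - offset, remaining);

                printf("write %lu bytes at pos %lu (chunk offset %lu)\n",
                       bytes, pos, offset);
                pos += bytes;
                remaining -= bytes;
        }
        return 0;   /* 3192, then 4096, then 2712 bytes */
}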
|
zswap.c
   1068  pgoff_t offset = swp_offset(swpentry);  in zswap_writeback_entry() local
   1109  if (entry != xa_load(tree, offset)) {  in zswap_writeback_entry()
   1119  xa_erase(tree, offset);  in zswap_writeback_entry()
   1627  pgoff_t offset = swp_offset(swp);  in zswap_store() local
   1632  tree = swap_zswap_tree(swp_entry(type, offset + index));  in zswap_store()
   1633  entry = xa_erase(tree, offset + index);  in zswap_store()
   1665  pgoff_t offset = swp_offset(swp);  in zswap_load() local
   1685  entry = xa_load(tree, offset);  in zswap_load()
   1714  xa_erase(tree, offset);  in zswap_load()
   1724  pgoff_t offset = swp_offset(swp);  in zswap_invalidate() local
   [all …]
|
readahead.c
    693  ssize_t ksys_readahead(int fd, loff_t offset, size_t count)  in ksys_readahead() argument
    722  return vfs_fadvise(fd_file(f), offset, count, POSIX_FADV_WILLNEED);  in ksys_readahead()
    725  SYSCALL_DEFINE3(readahead, int, fd, loff_t, offset, size_t, count)  in SYSCALL_DEFINE3() argument
    727  return ksys_readahead(fd, offset, count);  in SYSCALL_DEFINE3()
    731  COMPAT_SYSCALL_DEFINE4(readahead, int, fd, compat_arg_u64_dual(offset), size_t, count)  in COMPAT_SYSCALL_DEFINE4() argument
    733  return ksys_readahead(fd, compat_arg_u64_glue(offset), count);  in COMPAT_SYSCALL_DEFINE4()
|
hmm.c
    715  size_t offset = idx * map->dma_entry_size;  in hmm_dma_map_pfn() local
    731  return state->addr + offset;  in hmm_dma_map_pfn()
    760  ret = dma_iova_link(dev, state, paddr, offset,  in hmm_dma_map_pfn()
    766  ret = dma_iova_sync(dev, state, offset, map->dma_entry_size);  in hmm_dma_map_pfn()
    768  dma_iova_unlink(dev, state, offset, map->dma_entry_size,  in hmm_dma_map_pfn()
    773  dma_addr = state->addr + offset;  in hmm_dma_map_pfn()
|
secretmem.c
     54  pgoff_t offset = vmf->pgoff;  in secretmem_fault() local
     67  folio = filemap_lock_folio(mapping, offset);  in secretmem_fault()
     83  err = filemap_add_folio(mapping, folio, offset, gfp);  in secretmem_fault()
|
util.c
    609  unsigned long flag, unsigned long offset)  in vm_mmap() argument
    611  if (unlikely(offset + PAGE_ALIGN(len) < offset))  in vm_mmap()
    613  if (unlikely(offset_in_page(offset)))  in vm_mmap()
    616  return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);  in vm_mmap()
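
vm_mmap() validates the byte offset before converting it to a page number: line 611 rejects an offset whose sum with the page-aligned length wraps around, and line 613 rejects an offset that is not page aligned. A sketch of both checks, with the error return simplified to -1 and local PAGE_* stand-ins:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

static long check_mmap_offset(unsigned long len, unsigned long offset)
{
        if (offset + PAGE_ALIGN(len) < offset)  /* wrapped past ULONG_MAX */
                return -1;
        if (offset & ~PAGE_MASK)                /* offset_in_page(offset) */
                return -1;
        return (long)(offset >> PAGE_SHIFT);    /* page offset for mmap */
}

int main(void)
{
        printf("%ld\n", check_mmap_offset(8192, 0x3000));  /*  3: ok */
        printf("%ld\n", check_mmap_offset(8192, 0x3001));  /* -1: unaligned */
        printf("%ld\n", check_mmap_offset(8192, ~0UL & PAGE_MASK));
        return 0;                                          /* -1: wraps */
}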
|
swap.h
    114  pgoff_t offset = swp_offset(entry);  in non_swapcache_batch() local
    123  if ((si->swap_map[offset + i] & SWAP_HAS_CACHE))  in non_swapcache_batch()
|
madvise.c
    290  loff_t offset;  in madvise_willneed() local
    322  offset = (loff_t)(start - vma->vm_start)  in madvise_willneed()
    325  vfs_fadvise(file, offset, end - start, POSIX_FADV_WILLNEED);  in madvise_willneed()
   1013  loff_t offset;  in madvise_remove() local
   1035  offset = (loff_t)(start - vma->vm_start)  in madvise_remove()
   1051  offset, end - start);  in madvise_remove()
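
Lines 322 and 1035 convert a user address range into a file offset by subtracting the VMA start. The listing truncates the continuation of those statements; under the usual mmap convention the VMA's vm_pgoff, shifted to bytes, is added as well, and the sketch below assumes exactly that (treat the vm_pgoff term as an assumption, not a quote):

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
        unsigned long vm_start = 0x7f0000000000UL; /* VMA start address */
        unsigned long vm_pgoff = 16;               /* file page the VMA maps */
        unsigned long start = 0x7f0000003000UL;    /* madvise range start */

        /* offset into the VMA, plus the VMA's own offset into the file
         * (the assumed vm_pgoff term) */
        unsigned long long offset = (start - vm_start)
                + ((unsigned long long)vm_pgoff << PAGE_SHIFT);

        printf("file offset %#llx\n", offset);     /* 0x13000 */
        return 0;
}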
|
/mm/kasan/
report_generic.c
    265  unsigned long offset;  in print_decoded_frame_descr() local
    270  &offset))  in print_decoded_frame_descr()
    288  pr_err(" [%lu, %lu) '%s'", offset, offset + size, token);  in print_decoded_frame_descr()
    294  unsigned long *offset,  in get_address_stack_frame_info() argument
    331  *offset = (unsigned long)addr - (unsigned long)frame;  in get_address_stack_frame_info()
    340  unsigned long offset;  in kasan_print_address_stack_frame() local
    350  if (!get_address_stack_frame_info(addr, &offset, &frame_descr,  in kasan_print_address_stack_frame()
    354  pr_err(" and is located at offset %lu in frame:\n", offset);  in kasan_print_address_stack_frame()
|
/mm/kmsan/
hooks.c
    339  void kmsan_handle_dma(struct page *page, size_t offset, size_t size,  in kmsan_handle_dma() argument
    346  addr = (u64)page_address(page) + offset;  in kmsan_handle_dma()
    369  kmsan_handle_dma(sg_page(item), item->offset, item->length,  in kmsan_handle_dma_sg()
|