Searched refs:pos (Results 1 – 23 of 23) sorted by relevance

/mm/kmsan/
core.c
261 size_t pos = 0; in kmsan_internal_check_memory() local
266 while (pos < size) { in kmsan_internal_check_memory()
267 chunk_size = min(size - pos, in kmsan_internal_check_memory()
268 PAGE_SIZE - ((addr64 + pos) % PAGE_SIZE)); in kmsan_internal_check_memory()
278 cur_off_start, pos - 1, user_addr, in kmsan_internal_check_memory()
283 pos += chunk_size; in kmsan_internal_check_memory()
294 cur_off_start, pos + i - 1, in kmsan_internal_check_memory()
312 cur_off_start, pos + i - 1, in kmsan_internal_check_memory()
316 cur_off_start = pos + i; in kmsan_internal_check_memory()
319 pos += chunk_size; in kmsan_internal_check_memory()
[all …]
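The kmsan_internal_check_memory() hits above all turn on one idiom: pos walks the range in chunks clipped so that no chunk crosses a page boundary. A minimal userspace sketch of that chunking, assuming a fixed 4096-byte PAGE_SIZE and a hypothetical check_chunk() in place of the real shadow check:

/*
 * Sketch only: mirrors the page-bounded chunking visible in the hits
 * above, not the KMSAN code itself. PAGE_SIZE and check_chunk() are
 * stand-ins.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

static void check_chunk(uintptr_t addr, size_t len)
{
        printf("check %#lx, %zu bytes\n", (unsigned long)addr, len);
}

static void check_memory(uintptr_t addr, size_t size)
{
        size_t pos = 0;

        while (pos < size) {
                /* Never let a chunk cross into the next page. */
                size_t chunk_size = size - pos;
                size_t to_page_end = PAGE_SIZE - ((addr + pos) % PAGE_SIZE);

                if (chunk_size > to_page_end)
                        chunk_size = to_page_end;

                check_chunk(addr + pos, chunk_size);
                pos += chunk_size;
        }
}

int main(void)
{
        check_memory(0x1000ff0, 8200);  /* range straddles three pages */
        return 0;
}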
report.c
70 int pos = 0, len = strlen(descr); in pretty_descr() local
77 report_local_descr[pos] = descr[i]; in pretty_descr()
78 if (pos + 1 == DESCR_SIZE) in pretty_descr()
80 pos++; in pretty_descr()
82 report_local_descr[pos] = 0; in pretty_descr()
/mm/kasan/
report_tags.c
33 u64 pos; in kasan_complete_mode_report_info() local
44 pos = atomic64_read(&stack_ring.pos); in kasan_complete_mode_report_info()
55 for (u64 i = pos - 1; i != pos - 1 - stack_ring.size; i--) { in kasan_complete_mode_report_info()
tags.c
102 u64 pos; in save_stack_info() local
116 pos = atomic64_fetch_add(1, &stack_ring.pos); in save_stack_info()
117 entry = &stack_ring.entries[pos % stack_ring.size]; in save_stack_info()
kasan.h
308 atomic64_t pos; member
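Read together, the report_tags.c, tags.c and kasan.h hits describe one mechanism: stack_ring.pos is a monotonically increasing atomic64 counter, writers claim a slot with atomic64_fetch_add() and store at pos % size, and the reporter snapshots pos and walks the most recent size slots backwards. A self-contained userspace sketch of that indexing, with a toy ring of integers and invented names rather than the kernel's types:

/*
 * Sketch only: a toy version of the stack-ring indexing shown above.
 * The writer claims a slot with an atomic fetch-add on a monotonically
 * increasing 64-bit pos and stores at pos % size; the reader snapshots
 * pos and walks the last size slots backwards.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 8

struct ring {
        atomic_uint_fast64_t pos;       /* only reduced modulo RING_SIZE on use */
        uint64_t entries[RING_SIZE];
};

static void ring_save(struct ring *r, uint64_t value)
{
        uint64_t pos = atomic_fetch_add(&r->pos, 1);

        r->entries[pos % RING_SIZE] = value;
}

static void ring_dump_recent(struct ring *r)
{
        uint64_t pos = atomic_load(&r->pos);

        /* Newest first; stop after RING_SIZE slots or at the very start. */
        for (uint64_t i = pos; i != pos - RING_SIZE && i != 0; i--)
                printf("%llu\n", (unsigned long long)r->entries[(i - 1) % RING_SIZE]);
}

int main(void)
{
        static struct ring r;   /* zero-initialized: pos starts at 0 */

        for (uint64_t v = 1; v <= 12; v++)
                ring_save(&r, v);
        ring_dump_recent(&r);   /* prints 12 down to 5 */
        return 0;
}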
/mm/
page_idle.c
121 loff_t pos, size_t count) in page_idle_bitmap_read() argument
128 if (pos % BITMAP_CHUNK_SIZE || count % BITMAP_CHUNK_SIZE) in page_idle_bitmap_read()
131 pfn = pos * BITS_PER_BYTE; in page_idle_bitmap_read()
166 loff_t pos, size_t count) in page_idle_bitmap_write() argument
173 if (pos % BITMAP_CHUNK_SIZE || count % BITMAP_CHUNK_SIZE) in page_idle_bitmap_write()
176 pfn = pos * BITS_PER_BYTE; in page_idle_bitmap_write()
page_io.c
176 unsigned int pos, last_pos; in is_folio_zero_filled() local
192 for (pos = 0; pos < last_pos; pos++) { in is_folio_zero_filled()
193 if (data[pos]) { in is_folio_zero_filled()
379 loff_t pos = swap_dev_pos(folio->swap); in swap_writepage_fs() local
386 sio->iocb.ki_pos + sio->len != pos) { in swap_writepage_fs()
395 sio->iocb.ki_pos = pos; in swap_writepage_fs()
542 loff_t pos = swap_dev_pos(folio->swap); in swap_read_folio_fs() local
548 sio->iocb.ki_pos + sio->len != pos) { in swap_read_folio_fs()
556 sio->iocb.ki_pos = pos; in swap_read_folio_fs()
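The swap_writepage_fs() and swap_read_folio_fs() hits show the same coalescing test twice: keep appending to the pending request while iocb.ki_pos + len lands exactly at the new pos, otherwise submit what has accumulated and start a new request. A userspace sketch of that check, using a hypothetical pending_io/submit() pair in place of the real swap_iocb plumbing:

/*
 * Sketch only: the contiguity check behind the hits above, with made-up
 * types. Pages at adjacent file offsets are merged into one request.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct pending_io {
        long long pos;          /* starting file offset, like iocb.ki_pos */
        size_t len;             /* bytes queued so far */
        bool in_use;
};

static void submit(struct pending_io *io)
{
        printf("submit %zu bytes at offset %lld\n", io->len, io->pos);
        io->in_use = false;
}

static void queue_page(struct pending_io *io, long long pos, size_t len)
{
        /* A gap or backwards jump breaks the batch: flush it first. */
        if (io->in_use && io->pos + (long long)io->len != pos)
                submit(io);

        if (!io->in_use) {
                io->pos = pos;
                io->len = 0;
                io->in_use = true;
        }
        io->len += len;
}

int main(void)
{
        struct pending_io io = { .in_use = false };

        queue_page(&io, 0, 4096);
        queue_page(&io, 4096, 4096);    /* contiguous: merged */
        queue_page(&io, 16384, 4096);   /* gap: previous batch submitted */
        if (io.in_use)
                submit(&io);
        return 0;
}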
list_lru.c
518 struct mem_cgroup *pos, *parent; in memcg_list_lru_alloc() local
535 pos = memcg; in memcg_list_lru_alloc()
536 parent = parent_mem_cgroup(pos); in memcg_list_lru_alloc()
538 pos = parent; in memcg_list_lru_alloc()
539 parent = parent_mem_cgroup(pos); in memcg_list_lru_alloc()
547 xas_set(&xas, pos->kmemcg_id); in memcg_list_lru_alloc()
550 if (!xas_load(&xas) && !css_is_dying(&pos->css)) { in memcg_list_lru_alloc()
557 } while (pos != memcg && !css_is_dying(&pos->css)); in memcg_list_lru_alloc()
filemap.c
2445 pos = 0; in filemap_range_uptodate()
2447 pos -= folio_pos(folio); in filemap_range_uptodate()
2793 loff_t pos = iocb->ki_pos; in kiocb_write_and_wait() local
3127 if (start < pos) { in mapping_seek_hole_data()
3130 start = pos; in mapping_seek_hole_data()
3134 pos = round_up((u64)pos + 1, seek_size); in mapping_seek_hole_data()
3137 if (start < pos) in mapping_seek_hole_data()
4133 pos += written; in generic_file_direct_write()
4139 iocb->ki_pos = pos; in generic_file_direct_write()
4150 loff_t pos = iocb->ki_pos; in generic_perform_write() local
[all …]
truncate.c
193 loff_t pos = folio_pos(folio); in truncate_inode_partial_folio() local
198 if (pos < start) in truncate_inode_partial_folio()
199 offset = start - pos; in truncate_inode_partial_folio()
202 if (pos + size <= (u64)end) in truncate_inode_partial_folio()
205 length = end + 1 - pos - offset; in truncate_inode_partial_folio()
vmstat.c
1504 static void *frag_start(struct seq_file *m, loff_t *pos) in frag_start() argument
1507 loff_t node = *pos; in frag_start()
1517 static void *frag_next(struct seq_file *m, void *arg, loff_t *pos) in frag_next() argument
1521 (*pos)++; in frag_next()
1882 static void *vmstat_start(struct seq_file *m, loff_t *pos) in vmstat_start() argument
1887 if (*pos >= NR_VMSTAT_ITEMS) in vmstat_start()
1924 return (unsigned long *)m->private + *pos; in vmstat_start()
1927 static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos) in vmstat_next() argument
1929 (*pos)++; in vmstat_next()
1930 if (*pos >= NR_VMSTAT_ITEMS) in vmstat_next()
[all …]
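frag_start()/frag_next() and vmstat_start()/vmstat_next() above, like the pairs in slab_common.c, swapfile.c, kmemleak.c and kfence/core.c further down, all use *pos as the seq_file read cursor: ->start() bounds-checks it and ->next() increments it, returning the next element or NULL at the end. A hedged toy-module sketch of that idiom; the /proc entry name, the item array and every identifier here are invented for illustration and are not taken from the files above:

// SPDX-License-Identifier: GPL-2.0
/*
 * Sketch only: a toy module showing the seq_file cursor idiom the
 * vmstat/slab/swap/kmemleak/kfence hits share. All names are invented.
 */
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static const char *demo_items[] = { "alpha", "beta", "gamma" };
#define DEMO_NR ((loff_t)(sizeof(demo_items) / sizeof(demo_items[0])))

/* ->start(): *pos is where the previous read stopped; bounds-check it. */
static void *demo_start(struct seq_file *m, loff_t *pos)
{
        return *pos < DEMO_NR ? (void *)&demo_items[*pos] : NULL;
}

/* ->next(): advance the cursor and hand back the next item, or NULL. */
static void *demo_next(struct seq_file *m, void *v, loff_t *pos)
{
        ++*pos;
        return *pos < DEMO_NR ? (void *)&demo_items[*pos] : NULL;
}

static void demo_stop(struct seq_file *m, void *v)
{
}

static int demo_show(struct seq_file *m, void *v)
{
        seq_printf(m, "%s\n", *(const char **)v);
        return 0;
}

static const struct seq_operations demo_seq_ops = {
        .start  = demo_start,
        .next   = demo_next,
        .stop   = demo_stop,
        .show   = demo_show,
};

static int __init demo_init(void)
{
        if (!proc_create_seq("pos_demo", 0444, NULL, &demo_seq_ops))
                return -ENOMEM;
        return 0;
}

static void __exit demo_exit(void)
{
        remove_proc_entry("pos_demo", NULL);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");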
slab_common.c
1086 static void *slab_start(struct seq_file *m, loff_t *pos) in slab_start() argument
1089 return seq_list_start(&slab_caches, *pos); in slab_start()
1092 static void *slab_next(struct seq_file *m, void *p, loff_t *pos) in slab_next() argument
1094 return seq_list_next(p, &slab_caches, pos); in slab_next()
1482 struct llist_node *page_list, *pos, *n; in drain_page_cache() local
1493 llist_for_each_safe(pos, n, page_list) { in drain_page_cache()
1494 free_page((unsigned long)pos); in drain_page_cache()
slub.c
2557 *pos += 1; in next_freelist_entry()
2559 *pos = 0; in next_freelist_entry()
6634 long start, end, pos; in add_location() local
6654 if (pos == end) in add_location()
6657 l = &t->loc[pos]; in add_location()
6685 end = pos; in add_location()
6687 end = pos; in add_location()
6690 end = pos; in add_location()
6692 start = pos; in add_location()
6701 l = t->loc + pos; in add_location()
[all …]
shmem.c
750 LIST_HEAD(list), *pos, *next; in shmem_unused_huge_shrink()
761 list_for_each_safe(pos, next, &sbinfo->shrinklist) { in shmem_unused_huge_shrink()
762 info = list_entry(pos, struct shmem_inode_info, shrinklist); in shmem_unused_huge_shrink()
781 list_for_each_safe(pos, next, &list) { in shmem_unused_huge_shrink()
786 info = list_entry(pos, struct shmem_inode_info, shrinklist); in shmem_unused_huge_shrink()
3335 loff_t pos, unsigned len, in shmem_write_begin() argument
3340 pgoff_t index = pos >> PAGE_SHIFT; in shmem_write_begin()
3369 loff_t pos, unsigned len, unsigned copied, in shmem_write_end() argument
3374 if (pos + copied > inode->i_size) in shmem_write_end()
3375 i_size_write(inode, pos + copied); in shmem_write_end()
[all …]
kmemleak.c
1934 static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos) in kmemleak_seq_start() argument
1937 loff_t n = *pos; in kmemleak_seq_start()
1960 static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos) in kmemleak_seq_next() argument
1966 ++(*pos); in kmemleak_seq_next()
vmscan.c
3188 struct ctrl_pos *pos) in read_ctrl_pos() argument
3194 pos->gain = gain; in read_ctrl_pos()
3195 pos->refaulted = pos->total = 0; in read_ctrl_pos()
3198 pos->refaulted += lrugen->avg_refaulted[type][i] + in read_ctrl_pos()
3200 pos->total += lrugen->avg_total[type][i] + in read_ctrl_pos()
4982 struct hlist_nulls_node *pos; in shrink_many() local
5030 if (!is_a_nulls(pos)) in shrink_many()
5034 if (gen != get_nulls_value(pos)) in shrink_many()
5333 loff_t nr_to_skip = *pos; in lru_gen_seq_start()
5366 ++*pos; in lru_gen_seq_next()
[all …]
shrinker_debug.c
106 size_t size, loff_t *pos) in shrinker_debugfs_scan_write() argument
memcontrol.c
1007 struct mem_cgroup *pos; in mem_cgroup_iter() local
1036 pos = READ_ONCE(iter->position); in mem_cgroup_iter()
1038 pos = prev; in mem_cgroup_iter()
1040 css = pos ? &pos->css : NULL; in mem_cgroup_iter()
1060 if (cmpxchg(&iter->position, pos, next) != pos) { in mem_cgroup_iter()
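In the mem_cgroup_iter() hits, pos is the cursor cached in iter->position, and the walker publishes its successor with cmpxchg() so that concurrent walkers cannot clobber each other's progress. A simplified userspace sketch of that compare-and-swap cursor update, with invented node and iter_state types standing in for the memcg ones:

/*
 * Sketch only: publish a new cursor position only if nobody raced us.
 */
#include <stdatomic.h>
#include <stdio.h>

struct node {
        int id;
        struct node *next;
};

struct iter_state {
        _Atomic(struct node *) position;
};

/* Move the shared cursor from pos to next; NULL if another walker won. */
static struct node *iter_advance(struct iter_state *it, struct node *pos,
                                 struct node *next)
{
        struct node *expected = pos;

        if (!atomic_compare_exchange_strong(&it->position, &expected, next))
                return NULL;
        return next;
}

int main(void)
{
        struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
        struct iter_state it = { .position = &a };
        struct node *n;

        n = iter_advance(&it, &a, a.next);
        printf("advanced to %d\n", n ? n->id : -1);

        /* A stale caller still holding &a loses the race and gets NULL. */
        n = iter_advance(&it, &a, a.next);
        printf("second attempt: %d\n", n ? n->id : -1);
        return 0;
}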
mm_init.c
2705 #define adj_init_size(start, end, size, pos, adj) \ in mem_init_print_info() argument
2707 if (&start[0] <= &pos[0] && &pos[0] < &end[0] && size > adj) \ in mem_init_print_info()
swapfile.c
2850 static void *swap_start(struct seq_file *swap, loff_t *pos) in swap_start() argument
2854 loff_t l = *pos; in swap_start()
2871 static void *swap_next(struct seq_file *swap, void *v, loff_t *pos) in swap_next() argument
2881 ++(*pos); in swap_next()
percpu.c
2252 struct pcpu_chunk *pos; in free_percpu() local
2254 list_for_each_entry(pos, &pcpu_chunk_lists[pcpu_free_slot], list) in free_percpu()
2255 if (pos != chunk) { in free_percpu()
page_alloc.c
6124 void *pos; in free_reserved_area() local
6129 for (pos = start; pos < end; pos += PAGE_SIZE, pages++) { in free_reserved_area()
6130 struct page *page = virt_to_page(pos); in free_reserved_area()
/mm/kfence/
core.c
736 static void *start_object(struct seq_file *seq, loff_t *pos) in start_object() argument
738 if (*pos < CONFIG_KFENCE_NUM_OBJECTS) in start_object()
739 return (void *)((long)*pos + 1); in start_object()
747 static void *next_object(struct seq_file *seq, void *v, loff_t *pos) in next_object() argument
749 ++*pos; in next_object()
750 if (*pos < CONFIG_KFENCE_NUM_OBJECTS) in next_object()
751 return (void *)((long)*pos + 1); in next_object()

Completed in 122 milliseconds