| /kernel/events/ |
| A D | uprobes.c |
|    142 return vma->vm_file && (vma->vm_flags & flags) == VM_MAYEXEC; in valid_vma()
|    147 return vma->vm_start + offset - ((loff_t)vma->vm_pgoff << PAGE_SHIFT); in offset_to_vaddr()
|    152 return ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start); in vaddr_to_offset()
|    284 vma->vm_file && in valid_ref_ctr_vma()
|   1294 if (!vma || !valid_vma(vma, is_register) || in register_for_each_vma()
|   1477 uprobe->offset >= offset + vma->vm_end - vma->vm_start) in unapply_uprobe()
|   1609 build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list); in uprobe_mmap()
|   1715 if (IS_ERR(vma)) { in xol_add_vma()
|   2369 if (vma_has_uprobes(vma, vma->vm_start, vma->vm_end)) in mmf_recalc_uprobes()
|   2418 if (!vma) in find_active_uprobe_speculative()
|   [all …]
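The offset_to_vaddr()/vaddr_to_offset() lines above are the standard arithmetic for converting between a file offset and a user virtual address inside a file-backed VMA. A minimal sketch of the same two conversions as stand-alone helpers (the function names here are illustrative, not the uprobes API):

```c
#include <linux/mm.h>

/* File offset -> user virtual address. Only meaningful if the offset
 * falls inside the part of the file that this VMA actually maps. */
static unsigned long file_off_to_vaddr(struct vm_area_struct *vma, loff_t offset)
{
	return vma->vm_start + offset - ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
}

/* User virtual address -> offset within the mapped file. */
static loff_t vaddr_to_file_off(struct vm_area_struct *vma, unsigned long vaddr)
{
	return ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start);
}
```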
|
| A D | core.c |
|   6710 if (vma->vm_pgoff) in perf_mmap_open()
|   6959 vma_size = vma->vm_end - vma->vm_start; in perf_mmap()
|   9283 struct vm_area_struct *vma = mmap_event->vma; in perf_event_mmap_match() local
|   9359 struct vm_area_struct *vma = mmap_event->vma; in perf_event_mmap_event() local
|   9414 if (vma->vm_ops && vma->vm_ops->name) in perf_event_mmap_event()
|   9415 name = (char *) vma->vm_ops->name(vma); in perf_event_mmap_event()
|   9492 unsigned long vma_size = vma->vm_end - vma->vm_start; in perf_addr_filter_vma_adjust()
|   9521 if (!vma->vm_file) in __perf_addr_filters_adjust()
|   9570 .vma = vma, in perf_event_mmap()
|   9582 .len = vma->vm_end - vma->vm_start, in perf_event_mmap()
|   [all …]
|
| /kernel/bpf/ |
| A D | sysfs_btf.c |
|     20 struct vm_area_struct *vma) in btf_sysfs_vmlinux_mmap() argument
|     23 size_t vm_size = vma->vm_end - vma->vm_start; in btf_sysfs_vmlinux_mmap()
|     30 if (vma->vm_pgoff) in btf_sysfs_vmlinux_mmap()
|     33 if (vma->vm_flags & (VM_WRITE | VM_EXEC | VM_MAYSHARE)) in btf_sysfs_vmlinux_mmap()
|     42 vm_flags_mod(vma, VM_DONTDUMP, VM_MAYEXEC | VM_MAYWRITE); in btf_sysfs_vmlinux_mmap()
|     43 return remap_pfn_range(vma, vma->vm_start, pfn, vm_size, vma->vm_page_prot); in btf_sysfs_vmlinux_mmap()
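btf_sysfs_vmlinux_mmap() follows the common shape of a read-only mapping: reject a non-zero vm_pgoff and any write/exec/shared-write request, adjust vm_flags, then remap_pfn_range() the whole range. A hedged sketch of that shape, assuming a hypothetical physically contiguous buffer `blob` of `blob_size` bytes (e.g. from a lowmem allocation, so virt_to_phys() is valid):

```c
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/mm.h>

static void *blob;		/* assumption: physically contiguous, lowmem */
static size_t blob_size;

static int blob_mmap(struct file *file, struct vm_area_struct *vma)
{
	size_t vm_size = vma->vm_end - vma->vm_start;
	unsigned long pfn = virt_to_phys(blob) >> PAGE_SHIFT;

	if (vma->vm_pgoff)			/* no partial/offset mappings */
		return -EINVAL;
	if (vm_size > PAGE_ALIGN(blob_size))	/* never map past the buffer */
		return -EINVAL;
	if (vma->vm_flags & (VM_WRITE | VM_EXEC | VM_MAYSHARE))
		return -EACCES;

	/* Keep it out of core dumps; clearing VM_MAYWRITE/VM_MAYEXEC also
	 * forbids a later mprotect() to writable or executable. */
	vm_flags_mod(vma, VM_DONTDUMP, VM_MAYEXEC | VM_MAYWRITE);
	return remap_pfn_range(vma, vma->vm_start, pfn, vm_size,
			       vma->vm_page_prot);
}
```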
|
| A D | arena.c |
|    223 struct vm_area_struct *vma; member
|    236 vma->vm_private_data = vml; in remember_vma()
|    237 vml->vma = vma; in remember_vma()
|    260 vma->vm_private_data = NULL; in arena_vm_close()
|    369 if (WARN_ON_ONCE(vma->vm_end - vma->vm_start > SZ_4G || vma->vm_pgoff)) in arena_map_mmap()
|    372 if (remember_vma(arena, vma)) in arena_map_mmap()
|    375 arena->user_vm_start = vma->vm_start; in arena_map_mmap()
|    376 arena->user_vm_end = vma->vm_end; in arena_map_mmap()
|    382 vm_flags_set(vma, VM_DONTEXPAND); in arena_map_mmap()
|    383 vma->vm_ops = &arena_vm_ops; in arena_map_mmap()
|   [all …]
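The arena entry shows the bookkeeping pattern: the mmap handler records the VMA in vm_private_data, marks it VM_DONTEXPAND, and installs vm_ops so the close callback can undo the bookkeeping. A reduced sketch of that pattern under hypothetical names (the real arena code keeps a list of VMAs and also handles ->open refcounting, which is omitted here):

```c
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/slab.h>

/* Hypothetical per-mapping bookkeeping, analogous to arena.c storing a
 * link structure in vm_private_data. */
struct map_link {
	struct vm_area_struct *vma;
};

static void demo_vm_close(struct vm_area_struct *vma)
{
	struct map_link *link = vma->vm_private_data;

	vma->vm_private_data = NULL;
	kfree(link);
}

static const struct vm_operations_struct demo_vm_ops = {
	.close = demo_vm_close,
};

static int demo_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct map_link *link;

	if (vma->vm_pgoff)		/* only full, offset-0 mappings */
		return -EINVAL;

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;
	link->vma = vma;
	vma->vm_private_data = link;

	vm_flags_set(vma, VM_DONTEXPAND);	/* mremap() may not grow it */
	vma->vm_ops = &demo_vm_ops;
	return 0;
}
```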
|
| A D | task_iter.c |
|    413 struct vm_area_struct *vma; member
|    442 curr_vma = info->vma; in task_vma_seq_get_next()
|    554 info->vma = curr_vma; in task_vma_seq_get_next()
|    572 info->vma = NULL; in task_vma_seq_get_next()
|    580 struct vm_area_struct *vma; in task_vma_seq_start() local
|    583 if (vma && *pos == 0) in task_vma_seq_start()
|    586 return vma; in task_vma_seq_start()
|    620 ctx.vma = info->vma; in DEFINE_BPF_ITER_FUNC()
|    755 struct vm_area_struct *vma; in BPF_CALL_5() local
|    775 vma = find_vma(mm, start); in BPF_CALL_5()
|   [all …]
|
| A D | ringbuf.c |
|    262 static int ringbuf_map_mmap_kern(struct bpf_map *map, struct vm_area_struct *vma) in ringbuf_map_mmap_kern() argument
|    268 if (vma->vm_flags & VM_WRITE) { in ringbuf_map_mmap_kern()
|    270 if (vma->vm_pgoff != 0 || vma->vm_end - vma->vm_start != PAGE_SIZE) in ringbuf_map_mmap_kern()
|    274 return remap_vmalloc_range(vma, rb_map->rb, in ringbuf_map_mmap_kern()
|    275 vma->vm_pgoff + RINGBUF_PGOFF); in ringbuf_map_mmap_kern()
|    278 static int ringbuf_map_mmap_user(struct bpf_map *map, struct vm_area_struct *vma) in ringbuf_map_mmap_user() argument
|    284 if (vma->vm_flags & VM_WRITE) { in ringbuf_map_mmap_user()
|    285 if (vma->vm_pgoff == 0) in ringbuf_map_mmap_user()
|    293 return remap_vmalloc_range(vma, rb_map->rb, vma->vm_pgoff + RINGBUF_PGOFF); in ringbuf_map_mmap_user()
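Both ringbuf mmap paths end in remap_vmalloc_range(), which maps a vmalloc'ed buffer into the calling process starting at a given page offset. A minimal sketch of that call for a hypothetical buffer `buf` (assumed to have been allocated with vmalloc_user(), which remap_vmalloc_range() requires):

```c
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

static void *buf;	/* assumption: allocated with vmalloc_user() */

static int buf_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* remap_vmalloc_range() itself verifies that the requested window
	 * (vm_pgoff .. vm_pgoff + vma_pages(vma)) fits inside the vmalloc
	 * area and that the area was allocated for user mapping. */
	return remap_vmalloc_range(vma, buf, vma->vm_pgoff);
}
```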
|
| A D | stackmap.c |
|    127 static int fetch_build_id(struct vm_area_struct *vma, unsigned char *build_id, bool may_fault) in fetch_build_id() argument
|    129 return may_fault ? build_id_parse(vma, build_id, NULL) in fetch_build_id()
|    130 : build_id_parse_nofault(vma, build_id, NULL); in fetch_build_id()
|    149 struct vm_area_struct *vma, *prev_vma = NULL; in stack_map_get_build_id_offset() local
|    170 vma = prev_vma; in stack_map_get_build_id_offset()
|    174 vma = find_vma(current->mm, ip); in stack_map_get_build_id_offset()
|    175 if (!vma || fetch_build_id(vma, id_offs[i].build_id, may_fault)) { in stack_map_get_build_id_offset()
|    182 id_offs[i].offset = (vma->vm_pgoff << PAGE_SHIFT) + ip - vma->vm_start; in stack_map_get_build_id_offset()
|    184 prev_vma = vma; in stack_map_get_build_id_offset()
|
| A D | arraymap.c |
|    555 static int array_map_mmap(struct bpf_map *map, struct vm_area_struct *vma) in array_map_mmap() argument
|    563 if (vma->vm_pgoff * PAGE_SIZE + (vma->vm_end - vma->vm_start) > in array_map_mmap()
|    567 return remap_vmalloc_range(vma, array_map_vmalloc_addr(array), in array_map_mmap()
|    568 vma->vm_pgoff + pgoff); in array_map_mmap()
|
| A D | syscall.c |
|   1041 if (vma->vm_flags & VM_MAYWRITE) in bpf_map_mmap_open()
|   1050 if (vma->vm_flags & VM_MAYWRITE) in bpf_map_mmap_close()
|   1067 if (!(vma->vm_flags & VM_SHARED)) in bpf_map_mmap()
|   1072 if (vma->vm_flags & VM_WRITE) { in bpf_map_mmap()
|   1094 vma->vm_ops = &bpf_map_default_vmops; in bpf_map_mmap()
|   1095 vma->vm_private_data = map; in bpf_map_mmap()
|   1096 vm_flags_clear(vma, VM_MAYEXEC); in bpf_map_mmap()
|   1104 if (!(vma->vm_flags & VM_WRITE)) in bpf_map_mmap()
|   1105 vm_flags_clear(vma, VM_MAYWRITE); in bpf_map_mmap()
|   1107 err = map->ops->map_mmap(map, vma); in bpf_map_mmap()
|   [all …]
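bpf_map_mmap() shows the idiom of requiring a shared mapping and then narrowing what the mapping may ever become: VM_MAYEXEC is always cleared, and VM_MAYWRITE is cleared when the initial mapping is not writable, so a later mprotect(PROT_WRITE) cannot re-enable writes. A reduced sketch under hypothetical names, with the actual page installation elided:

```c
#include <linux/fs.h>
#include <linux/mm.h>

static int frozen_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;		/* private CoW copies make no sense here */

	/* The pages may never become executable. */
	vm_flags_clear(vma, VM_MAYEXEC);

	/* A mapping created read-only stays read-only: without VM_MAYWRITE,
	 * a later mprotect(PROT_WRITE) on this VMA will fail. */
	if (!(vma->vm_flags & VM_WRITE))
		vm_flags_clear(vma, VM_MAYWRITE);

	return 0;	/* sketch only: installing pages is elided */
}
```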
|
| /kernel/dma/ |
| A D | ops_helpers.c |
|     35 int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, in dma_common_mmap() argument
|     40 unsigned long user_count = vma_pages(vma); in dma_common_mmap()
|     42 unsigned long off = vma->vm_pgoff; in dma_common_mmap()
|     46 vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs); in dma_common_mmap()
|     48 if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret)) in dma_common_mmap()
|     54 return remap_pfn_range(vma, vma->vm_start, in dma_common_mmap()
|     55 page_to_pfn(page) + vma->vm_pgoff, in dma_common_mmap()
|     56 user_count << PAGE_SHIFT, vma->vm_page_prot); in dma_common_mmap()
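dma_common_mmap() is a backend helper used by dma_map_ops ->mmap implementations; from a driver's perspective this path is normally reached through dma_mmap_coherent()/dma_mmap_attrs(), which validate vm_pgoff and the requested length against the buffer before remapping. A driver-side sketch, assuming the device and a buffer obtained earlier with dma_alloc_coherent() (all names hypothetical):

```c
#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/mm.h>

/* Assumption: set up elsewhere with dma_alloc_coherent(demo_dev, ...). */
static struct device *demo_dev;
static void *demo_cpu_addr;
static dma_addr_t demo_dma_addr;
static size_t demo_size;

static int demo_dma_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* dma_mmap_coherent() checks vm_pgoff and the mapping length against
	 * demo_size and applies the correct pgprot for this device. */
	return dma_mmap_coherent(demo_dev, vma, demo_cpu_addr,
				 demo_dma_addr, demo_size);
}
```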
|
| A D | coherent.c |
|    235 struct vm_area_struct *vma, void *vaddr, size_t size, int *ret) in __dma_mmap_from_coherent() argument
|    239 unsigned long off = vma->vm_pgoff; in __dma_mmap_from_coherent()
|    241 unsigned long user_count = vma_pages(vma); in __dma_mmap_from_coherent()
|    247 *ret = remap_pfn_range(vma, vma->vm_start, pfn, in __dma_mmap_from_coherent()
|    249 vma->vm_page_prot); in __dma_mmap_from_coherent()
|    271 int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma, in dma_mmap_from_dev_coherent() argument
|    276 return __dma_mmap_from_coherent(mem, vma, vaddr, size, ret); in dma_mmap_from_dev_coherent()
|    301 int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr, in dma_mmap_from_global_coherent() argument
|    307 return __dma_mmap_from_coherent(dma_coherent_default_memory, vma, in dma_mmap_from_global_coherent()
|
| A D | direct.c |
|    540 int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma, in dma_direct_mmap() argument
|    544 unsigned long user_count = vma_pages(vma); in dma_direct_mmap()
|    549 vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs); in dma_direct_mmap()
|    551 vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot); in dma_direct_mmap()
|    553 if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret)) in dma_direct_mmap()
|    555 if (dma_mmap_from_global_coherent(vma, cpu_addr, size, &ret)) in dma_direct_mmap()
|    558 if (vma->vm_pgoff >= count || user_count > count - vma->vm_pgoff) in dma_direct_mmap()
|    560 return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff, in dma_direct_mmap()
|    561 user_count << PAGE_SHIFT, vma->vm_page_prot); in dma_direct_mmap()
|
| A D | mapping.c |
|    567 int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, in dma_mmap_attrs() argument
|    574 return dma_direct_mmap(dev, vma, cpu_addr, dma_addr, size, in dma_mmap_attrs()
|    577 return iommu_dma_mmap(dev, vma, cpu_addr, dma_addr, size, in dma_mmap_attrs()
|    581 return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs); in dma_mmap_attrs()
|    746 int dma_mmap_pages(struct device *dev, struct vm_area_struct *vma, in dma_mmap_pages() argument
|    751 if (vma->vm_pgoff >= count || vma_pages(vma) > count - vma->vm_pgoff) in dma_mmap_pages()
|    753 return remap_pfn_range(vma, vma->vm_start, in dma_mmap_pages()
|    754 page_to_pfn(page) + vma->vm_pgoff, in dma_mmap_pages()
|    755 vma_pages(vma) << PAGE_SHIFT, vma->vm_page_prot); in dma_mmap_pages()
|    853 return iommu_dma_mmap_noncontiguous(dev, vma, size, sgt); in dma_mmap_noncontiguous()
|   [all …]
|
| A D | dummy.c |
|      7 static int dma_dummy_mmap(struct device *dev, struct vm_area_struct *vma, in dma_dummy_mmap() argument
|
| A D | direct.h |
|     17 int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
|
| /kernel/ |
| A D | relay.c |
|     36 struct rchan_buf *buf = vmf->vma->vm_private_data; in relay_buf_fault()
|     83 static int relay_mmap_buf(struct rchan_buf *buf, struct vm_area_struct *vma) in relay_mmap_buf() argument
|     85 unsigned long length = vma->vm_end - vma->vm_start; in relay_mmap_buf()
|     93 vma->vm_ops = &relay_file_mmap_ops; in relay_mmap_buf()
|     94 vm_flags_set(vma, VM_DONTEXPAND); in relay_mmap_buf()
|     95 vma->vm_private_data = buf; in relay_mmap_buf()
|    758 static int relay_file_mmap(struct file *filp, struct vm_area_struct *vma) in relay_file_mmap() argument
|    761 return relay_mmap_buf(buf, vma); in relay_file_mmap()
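relay maps its buffers lazily: relay_mmap_buf() only stores the buffer in vm_private_data, sets VM_DONTEXPAND and installs vm_ops, and pages are supplied one at a time from the .fault handler. A sketch of that on-demand pattern for a hypothetical vmalloc'ed buffer (names are illustrative, not the relay API):

```c
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

struct demo_buf {
	void *start;		/* vmalloc'ed backing store */
	size_t nr_pages;
};

static vm_fault_t demo_fault(struct vm_fault *vmf)
{
	struct demo_buf *buf = vmf->vma->vm_private_data;
	struct page *page;

	if (!buf || vmf->pgoff >= buf->nr_pages)
		return VM_FAULT_SIGBUS;

	page = vmalloc_to_page(buf->start + (vmf->pgoff << PAGE_SHIFT));
	if (!page)
		return VM_FAULT_SIGBUS;

	get_page(page);		/* reference handed over to the page tables */
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct demo_vm_ops = {
	.fault = demo_fault,
};

static int demo_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct demo_buf *buf = file->private_data;

	if (vma_pages(vma) > buf->nr_pages)
		return -EINVAL;

	vma->vm_ops = &demo_vm_ops;
	vm_flags_set(vma, VM_DONTEXPAND);
	vma->vm_private_data = buf;
	return 0;
}
```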
|
| A D | kcov.c |
|    487 static int kcov_mmap(struct file *filep, struct vm_area_struct *vma) in kcov_mmap() argument
|    490 struct kcov *kcov = vma->vm_file->private_data; in kcov_mmap()
|    497 if (kcov->area == NULL || vma->vm_pgoff != 0 || in kcov_mmap()
|    498 vma->vm_end - vma->vm_start != size) { in kcov_mmap()
|    503 vm_flags_set(vma, VM_DONTEXPAND); in kcov_mmap()
|    506 res = vm_insert_page(vma, vma->vm_start + off, page); in kcov_mmap()
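kcov takes the opposite approach to relay above: after checking that vm_pgoff is zero and the mapping size matches exactly, it inserts every backing page up front with vm_insert_page(). A reduced sketch of that eager pattern, assuming `area` was allocated with vmalloc_user() so each page can be looked up with vmalloc_to_page():

```c
#include <linux/mm.h>
#include <linux/vmalloc.h>

static int demo_map_all(struct vm_area_struct *vma, void *area, size_t size)
{
	unsigned long off;
	int res;

	/* Demand an exact, offset-0 mapping of the whole buffer. */
	if (vma->vm_pgoff || vma->vm_end - vma->vm_start != size)
		return -EINVAL;

	vm_flags_set(vma, VM_DONTEXPAND);
	for (off = 0; off < size; off += PAGE_SIZE) {
		struct page *page = vmalloc_to_page(area + off);

		res = vm_insert_page(vma, vma->vm_start + off, page);
		if (res)
			return res;
	}
	return 0;
}
```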
|
| A D | acct.c |
|    595 struct vm_area_struct *vma; in acct_collect() local
|    598 for_each_vma(vmi, vma) in acct_collect()
|    599 vsize += vma->vm_end - vma->vm_start; in acct_collect()
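acct_collect() totals the task's address-space size by walking every VMA with the maple-tree iterator. The same walk in isolation looks like this (the caller is assumed to hold mmap_read_lock(mm)):

```c
#include <linux/mm.h>

/* Sum of all mapped bytes in @mm; caller holds mmap_read_lock(mm). */
static unsigned long total_vm_bytes(struct mm_struct *mm)
{
	unsigned long vsize = 0;
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	for_each_vma(vmi, vma)
		vsize += vma->vm_end - vma->vm_start;

	return vsize;
}
```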
|
| A D | fork.c |
|   1227 struct vm_area_struct *vma; in replace_mm_exe_file() local
|   1236 for_each_vma(vmi, vma) { in replace_mm_exe_file()
|   1237 if (!vma->vm_file) in replace_mm_exe_file()
|   1239 if (path_equal(&vma->vm_file->f_path, in replace_mm_exe_file()
|
| A D | sys.c |
|   2189 struct vm_area_struct *vma; in prctl_set_mm() local
|   2222 vma = find_vma(mm, addr); in prctl_set_mm()
|   2292 if (!vma) { in prctl_set_mm()
|
| /kernel/sched/ |
| A D | fair.c |
|   3257 pids = vma->numab_state->pids_active[0] | vma->numab_state->pids_active[1]; in vma_is_accessed()
|   3372 vma = vma_next(&vmi); in task_numa_work()
|   3373 if (!vma) { in task_numa_work()
|   3377 vma = vma_next(&vmi); in task_numa_work()
|   3380 for (; vma; vma = vma_next(&vmi)) { in task_numa_work()
|   3381 if (!vma_migratable(vma) || !vma_policy_mof(vma) || in task_numa_work()
|   3382 is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_MIXEDMAP)) { in task_numa_work()
|   3393 if (!vma->vm_mm || in task_numa_work()
|   3394 (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ))) { in task_numa_work()
|   3427 vma->numab_state->pids_active_reset = vma->numab_state->next_scan + in task_numa_work()
|   [all …]
|
| /kernel/time/ |
| A D | namespace.c |
|    195 struct page *find_timens_vvar_page(struct vm_area_struct *vma) in find_timens_vvar_page() argument
|    197 if (likely(vma->vm_mm == current->mm)) in find_timens_vvar_page()
|
| /kernel/trace/ |
| A D | ring_buffer.c |
|   7118 struct vm_area_struct *vma) in __rb_map_vma() argument
|   7127 if (vma->vm_flags & VM_WRITE || vma->vm_flags & VM_EXEC || in __rb_map_vma()
|   7128 !(vma->vm_flags & VM_MAYSHARE)) in __rb_map_vma()
|   7141 vm_flags_mod(vma, VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP, in __rb_map_vma()
|   7153 nr_vma_pages = vma_pages(vma); in __rb_map_vma()
|   7175 vma->vm_start + (PAGE_SIZE * p); in __rb_map_vma()
|   7204 err = vm_insert_pages(vma, vma->vm_start, pages, &nr_pages); in __rb_map_vma()
|   7210 struct vm_area_struct *vma) in __rb_map_vma() argument
|   7217 struct vm_area_struct *vma) in ring_buffer_map() argument
|   7231 err = __rb_map_vma(cpu_buffer, vma); in ring_buffer_map()
|   [all …]
|
| A D | trace_output.c |
|    407 const struct vm_area_struct *vma; in seq_print_user_ip() local
|    410 vma = find_vma(mm, ip); in seq_print_user_ip()
|    411 if (vma) { in seq_print_user_ip()
|    412 file = vma->vm_file; in seq_print_user_ip()
|    413 vmstart = vma->vm_start; in seq_print_user_ip()
|
| /kernel/futex/ |
| A D | core.c |
|    338 struct vm_area_struct *vma = vma_lookup(mm, addr); in __futex_key_to_node() local
|    342 if (!vma) in __futex_key_to_node()
|    345 mpol = vma_policy(vma); in __futex_key_to_node()
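__futex_key_to_node() uses vma_lookup(), which, unlike find_vma(), returns NULL unless the address actually lies inside the returned VMA. Such lookups must run under the mmap read lock; a small illustrative sketch (hypothetical helper, not the futex code):

```c
#include <linux/mm.h>

/* True if @addr falls inside a file-backed mapping of @mm. Purely
 * illustrative of the mmap_read_lock() + vma_lookup() pattern. */
static bool addr_is_file_backed(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;
	bool ret = false;

	mmap_read_lock(mm);
	vma = vma_lookup(mm, addr);	/* NULL unless addr is inside a VMA */
	if (vma && vma->vm_file)
		ret = true;
	mmap_read_unlock(mm);
	return ret;
}
```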
|