Lines matching refs: vma (kernel/events/core.c); a userspace usage sketch of the perf_mmap() path follows the listing.
6702 static void perf_mmap_open(struct vm_area_struct *vma) in perf_mmap_open() argument
6704 struct perf_event *event = vma->vm_file->private_data; in perf_mmap_open()
6710 if (vma->vm_pgoff) in perf_mmap_open()
6714 mapped(event, vma->vm_mm); in perf_mmap_open()
6727 static void perf_mmap_close(struct vm_area_struct *vma) in perf_mmap_close() argument
6729 struct perf_event *event = vma->vm_file->private_data; in perf_mmap_close()
6739 unmapped(event, vma->vm_mm); in perf_mmap_close()
6745 if (rb_has_aux(rb) && vma->vm_pgoff == rb->aux_pgoff && in perf_mmap_close()
6757 atomic64_sub(rb->aux_mmap_locked, &vma->vm_mm->pinned_vm); in perf_mmap_close()
6832 atomic64_sub(mmap_locked, &vma->vm_mm->pinned_vm); in perf_mmap_close()
6845 static int perf_mmap_may_split(struct vm_area_struct *vma, unsigned long addr) in perf_mmap_may_split() argument
6861 static int map_range(struct perf_buffer *rb, struct vm_area_struct *vma) in map_range() argument
6863 unsigned long nr_pages = vma_pages(vma); in map_range()
6906 unsigned long va = vma->vm_start + PAGE_SIZE * pagenum; in map_range()
6907 struct page *page = perf_mmap_to_page(rb, vma->vm_pgoff + pagenum); in map_range()
6915 err = remap_pfn_range(vma, va, page_to_pfn(page), PAGE_SIZE, in map_range()
6916 vm_get_page_prot(vma->vm_flags & ~VM_SHARED)); in map_range()
6924 zap_page_range_single(vma, vma->vm_start, nr_pages * PAGE_SIZE, NULL); in map_range()
6930 static int perf_mmap(struct file *file, struct vm_area_struct *vma) in perf_mmap() argument
6952 if (!(vma->vm_flags & VM_SHARED)) in perf_mmap()
6959 vma_size = vma->vm_end - vma->vm_start; in perf_mmap()
6983 if (vma->vm_pgoff == 0) { in perf_mmap()
7039 if (aux_offset != vma->vm_pgoff << PAGE_SHIFT) in perf_mmap()
7043 if (rb_has_aux(rb) && rb->aux_pgoff != vma->vm_pgoff) in perf_mmap()
7094 locked = atomic64_read(&vma->vm_mm->pinned_vm) + extra; in perf_mmap()
7104 if (vma->vm_flags & VM_WRITE) in perf_mmap()
7128 ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages, in perf_mmap()
7139 atomic64_add(extra, &vma->vm_mm->pinned_vm); in perf_mmap()
7158 vm_flags_set(vma, VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP); in perf_mmap()
7159 vma->vm_ops = &perf_mmap_vmops; in perf_mmap()
7163 mapped(event, vma->vm_mm); in perf_mmap()
7171 ret = map_range(rb, vma); in perf_mmap()
7173 perf_mmap_close(vma); in perf_mmap()
9257 struct vm_area_struct *vma; member
9283 struct vm_area_struct *vma = mmap_event->vma; in perf_event_mmap_match() local
9284 int executable = vma->vm_flags & VM_EXEC; in perf_event_mmap_match()
9359 struct vm_area_struct *vma = mmap_event->vma; in perf_event_mmap_event() local
9360 struct file *file = vma->vm_file; in perf_event_mmap_event()
9369 if (vma->vm_flags & VM_READ) in perf_event_mmap_event()
9371 if (vma->vm_flags & VM_WRITE) in perf_event_mmap_event()
9373 if (vma->vm_flags & VM_EXEC) in perf_event_mmap_event()
9376 if (vma->vm_flags & VM_MAYSHARE) in perf_event_mmap_event()
9381 if (vma->vm_flags & VM_LOCKED) in perf_event_mmap_event()
9383 if (is_vm_hugetlb_page(vma)) in perf_event_mmap_event()
9405 inode = file_inode(vma->vm_file); in perf_event_mmap_event()
9414 if (vma->vm_ops && vma->vm_ops->name) in perf_event_mmap_event()
9415 name = (char *) vma->vm_ops->name(vma); in perf_event_mmap_event()
9417 name = (char *)arch_vma_name(vma); in perf_event_mmap_event()
9419 if (vma_is_initial_heap(vma)) in perf_event_mmap_event()
9421 else if (vma_is_initial_stack(vma)) in perf_event_mmap_event()
9450 if (!(vma->vm_flags & VM_EXEC)) in perf_event_mmap_event()
9456 build_id_parse_nofault(vma, mmap_event->build_id, &mmap_event->build_id_size); in perf_event_mmap_event()
9489 struct vm_area_struct *vma, in perf_addr_filter_vma_adjust() argument
9492 unsigned long vma_size = vma->vm_end - vma->vm_start; in perf_addr_filter_vma_adjust()
9493 unsigned long off = vma->vm_pgoff << PAGE_SHIFT; in perf_addr_filter_vma_adjust()
9494 struct file *file = vma->vm_file; in perf_addr_filter_vma_adjust()
9500 fr->start = vma->vm_start; in perf_addr_filter_vma_adjust()
9503 fr->start = vma->vm_start + filter->offset - off; in perf_addr_filter_vma_adjust()
9504 fr->size = min(vma->vm_end - fr->start, filter->size); in perf_addr_filter_vma_adjust()
9513 struct vm_area_struct *vma = data; in __perf_addr_filters_adjust() local
9521 if (!vma->vm_file) in __perf_addr_filters_adjust()
9526 if (perf_addr_filter_vma_adjust(filter, vma, in __perf_addr_filters_adjust()
9544 static void perf_addr_filters_adjust(struct vm_area_struct *vma) in perf_addr_filters_adjust() argument
9552 if (!(vma->vm_flags & VM_EXEC)) in perf_addr_filters_adjust()
9558 perf_iterate_ctx(ctx, __perf_addr_filters_adjust, vma, true); in perf_addr_filters_adjust()
9562 void perf_event_mmap(struct vm_area_struct *vma) in perf_event_mmap() argument
9570 .vma = vma, in perf_event_mmap()
9581 .start = vma->vm_start, in perf_event_mmap()
9582 .len = vma->vm_end - vma->vm_start, in perf_event_mmap()
9583 .pgoff = (u64)vma->vm_pgoff << PAGE_SHIFT, in perf_event_mmap()
9593 perf_addr_filters_adjust(vma); in perf_event_mmap()
11383 struct vm_area_struct *vma; in perf_addr_filter_apply() local
11386 for_each_vma(vmi, vma) { in perf_addr_filter_apply()
11387 if (!vma->vm_file) in perf_addr_filter_apply()
11390 if (perf_addr_filter_vma_adjust(filter, vma, fr)) in perf_addr_filter_apply()
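The matches above fall into two paths: the perf_mmap()/map_range() path that services a userspace mmap() of a perf event fd (VM_SHARED check, vm_pgoff == 0 main-buffer branch, pinned_vm accounting), and the perf_event_mmap()/perf_event_mmap_event() path that emits mmap records and readjusts address filters when a new VMA appears. For reference only, below is a minimal userspace sketch of standard perf_event_open(2) usage, not code from the file above, that exercises the first path by mapping the ring buffer with MAP_SHARED at file offset 0, which is exactly what the VM_SHARED and vm_pgoff == 0 checks in perf_mmap() validate:

#define _GNU_SOURCE
#include <linux/perf_event.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>

/* Thin wrapper: glibc provides no perf_event_open(2) stub. */
static long perf_event_open(struct perf_event_attr *attr, pid_t pid, int cpu,
			    int group_fd, unsigned long flags)
{
	return syscall(SYS_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	size_t page = (size_t)sysconf(_SC_PAGESIZE);
	/* 1 metadata page + 2^n data pages, the layout perf_mmap() expects. */
	size_t len = (1 + 8) * page;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_CPU_CLOCK;
	attr.sample_period = 100000;
	attr.sample_type = PERF_SAMPLE_IP;
	attr.mmap = 1;		/* request mmap records, built by perf_event_mmap_event() */
	attr.disabled = 1;

	int fd = perf_event_open(&attr, 0 /* current task */, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	/*
	 * MAP_SHARED is mandatory (the VM_SHARED check in perf_mmap()),
	 * and file offset 0 selects the main buffer (the vm_pgoff == 0
	 * branch); an AUX area, if used, is mapped at aux_offset instead.
	 */
	void *base = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (base == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}

	struct perf_event_mmap_page *meta = base;
	printf("data_offset=%llu data_size=%llu\n",
	       (unsigned long long)meta->data_offset,
	       (unsigned long long)meta->data_size);

	munmap(base, len);	/* drives perf_mmap_close() */
	close(fd);
	return 0;
}

Unmapping the region is what reaches perf_mmap_close() and the atomic64_sub() calls on vm_mm->pinned_vm shown at source lines 6757 and 6832 above.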