| /linux/include/linux/ |
| A D | mmap_lock.h | 12 .mmap_lock = __RWSEM_INITIALIZER((name).mmap_lock), 65 rwsem_assert_held(&mm->mmap_lock); in mmap_assert_locked() 70 rwsem_assert_held_write(&mm->mmap_lock); in mmap_assert_write_locked() 100 init_rwsem(&mm->mmap_lock); in mmap_init_lock() 106 down_write(&mm->mmap_lock); in mmap_write_lock() 131 up_write(&mm->mmap_lock); in mmap_write_unlock() 138 downgrade_write(&mm->mmap_lock); in mmap_write_downgrade() 144 down_read(&mm->mmap_lock); in mmap_read_lock() 153 ret = down_read_killable(&mm->mmap_lock); in mmap_read_lock_killable() 171 up_read(&mm->mmap_lock); in mmap_read_unlock() [all …]
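
The wrappers listed above (mmap_read_lock(), mmap_write_lock(), and friends) are the intended way to take mm->mmap_lock rather than calling the rwsem primitives directly. A minimal sketch of the common read-side pattern, using a hypothetical helper that is not part of the listing:

    #include <linux/mm.h>
    #include <linux/mmap_lock.h>

    /* Hypothetical helper: report the start of the VMA covering addr.
     * find_vma() may only be called, and its result only dereferenced,
     * while mm->mmap_lock is held. */
    static unsigned long example_vma_start(struct mm_struct *mm, unsigned long addr)
    {
            struct vm_area_struct *vma;
            unsigned long start = 0;

            mmap_read_lock(mm);             /* down_read(&mm->mmap_lock) */
            vma = find_vma(mm, addr);
            if (vma && vma->vm_start <= addr)
                    start = vma->vm_start;
            mmap_read_unlock(mm);           /* up_read(&mm->mmap_lock) */

            return start;
    }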
|
| A D | mm_types.h | 876 struct rw_semaphore mmap_lock; member
|
| /linux/include/trace/events/ |
| A D | mmap_lock.h | 3 #define TRACE_SYSTEM mmap_lock 16 DECLARE_EVENT_CLASS(mmap_lock, 43 DEFINE_EVENT_FN(mmap_lock, name, \
|
| /linux/drivers/media/common/videobuf2/ |
| A D | videobuf2-core.c | 900 mutex_lock(&q->mmap_lock); in vb2_core_reqbufs() 933 mutex_lock(&q->mmap_lock); in vb2_core_reqbufs() 1005 mutex_lock(&q->mmap_lock); in vb2_core_reqbufs() 1030 mutex_lock(&q->mmap_lock); in vb2_core_reqbufs() 1066 mutex_lock(&q->mmap_lock); in vb2_core_create_bufs() 1130 mutex_lock(&q->mmap_lock); in vb2_core_create_bufs() 1740 mutex_lock(&q->mmap_lock); in vb2_core_remove_bufs() 2519 mutex_lock(&q->mmap_lock); in vb2_mmap() 2574 mutex_lock(&q->mmap_lock); in vb2_get_unmapped_area() 2652 mutex_init(&q->mmap_lock); in vb2_core_queue_init() [all …]
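
Here mmap_lock is a per-queue mutex, unrelated to the mm rwsem of the same name: it keeps buffer allocation and freeing serialized against vb2_mmap(). A self-contained sketch of that pattern with hypothetical names (not the videobuf2 API itself):

    #include <linux/mutex.h>
    #include <linux/slab.h>

    /* Hypothetical queue: buffers may only be freed while no mmap() of
     * them is in flight, so both paths take the same mutex (initialized
     * with mutex_init() in the queue setup path). */
    struct example_queue {
            struct mutex mmap_lock;
            void *bufs;
            unsigned int num_buffers;
    };

    static void example_free_buffers(struct example_queue *q)
    {
            mutex_lock(&q->mmap_lock);
            kfree(q->bufs);
            q->bufs = NULL;
            q->num_buffers = 0;
            mutex_unlock(&q->mmap_lock);
    }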
|
| /linux/kernel/bpf/ |
| A D | mmap_unlock_work.h | 60 rwsem_release(&mm->mmap_lock.dep_map, _RET_IP_); in bpf_mmap_unlock_mm()
|
| /linux/mm/ |
| A D | init-mm.c | 33 .mm_mt = MTREE_INIT_EXT(mm_mt, MM_MT_FLAGS, init_mm.mmap_lock),
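
init_mm (like efi_mm and tboot_mm below) initializes its VMA maple tree with MTREE_INIT_EXT(), which records an external lock, the mm's mmap_lock, for lockdep instead of relying on the tree's internal spinlock. A sketch of the same construct with a hypothetical tree and rwsem standing in for the mm and its mmap_lock:

    #include <linux/maple_tree.h>
    #include <linux/rwsem.h>

    /* Hypothetical: a statically initialized maple tree whose locking is
     * delegated to an external rw_semaphore via MT_FLAGS_LOCK_EXTERN. */
    static DECLARE_RWSEM(example_lock);
    static struct maple_tree example_mt =
            MTREE_INIT_EXT(example_mt,
                           MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN,
                           example_lock);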
|
| A D | Makefile | 58 debug.o gup.o mmap_lock.o $(mmu-y)
|
| A D | mmu_notifier.c | 980 might_lock(&mm->mmap_lock); in mmu_interval_notifier_insert()
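
might_lock() is a lockdep-only annotation: mmu_interval_notifier_insert() declares up front that it may take mmap_lock, so misordered callers are flagged even on runs where the lock is never actually acquired. A small sketch of the idiom with hypothetical names:

    #include <linux/lockdep.h>
    #include <linux/mm_types.h>
    #include <linux/mmap_lock.h>

    /* Hypothetical: annotate the possible acquisition before the branch so
     * lockdep complains even when need_walk happens to be false. */
    static int example_maybe_walk(struct mm_struct *mm, bool need_walk)
    {
            might_lock(&mm->mmap_lock);

            if (!need_walk)
                    return 0;

            mmap_read_lock(mm);
            /* ... walk the address space ... */
            mmap_read_unlock(mm);
            return 0;
    }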
|
| A D | vma.c | 1885 down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_lock); in vm_lock_anon_vma() 1915 down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_lock); in vm_lock_mapping()
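
vm_lock_anon_vma()/vm_lock_mapping() take many rwsems of the same lock class while mmap_lock is already held for write; the nest_lock argument tells lockdep that one outer lock serializes all of those acquisitions, so they do not look like a self-deadlock. A sketch of the annotation with hypothetical names:

    #include <linux/rwsem.h>

    /* Hypothetical: the caller already holds 'outer' for write
     * (mmap_lock-style); each child rwsem is annotated as nested inside it
     * and is released later with plain up_write(). */
    static void example_lock_children(struct rw_semaphore *outer,
                                      struct rw_semaphore **children, int n)
    {
            int i;

            for (i = 0; i < n; i++)
                    down_write_nest_lock(children[i], outer);
    }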
|
| /linux/tools/perf/util/bpf_skel/ |
| A D | lock_contention.bpf.c | 116 struct rw_semaphore mmap_lock; member 297 if (bpf_core_field_exists(mm_new->mmap_lock)) { in check_lock_type() 298 if (&mm_new->mmap_lock == (void *)lock) in check_lock_type()
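
The BPF skeleton cannot assume the running kernel has mm->mmap_lock (the field was called mmap_sem before v5.8), so it probes for it with a CO-RE field-existence check against a locally re-declared struct. A minimal sketch of that pattern, with hypothetical type and function names:

    #include "vmlinux.h"
    #include <bpf/bpf_core_read.h>

    /* Local CO-RE "flavor" of mm_struct carrying only the field we probe;
     * the ___post_5_8 suffix is stripped during relocation. */
    struct mm_struct___post_5_8 {
            struct rw_semaphore mmap_lock;
    } __attribute__((preserve_access_index));

    static bool lock_is_mmap_lock(struct mm_struct *mm, void *lock)
    {
            struct mm_struct___post_5_8 *mm_new = (void *)mm;

            if (bpf_core_field_exists(mm_new->mmap_lock))
                    return (void *)&mm_new->mmap_lock == lock;
            return false;   /* older kernels: the field is mmap_sem */
    }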
|
| /linux/drivers/infiniband/hw/cxgb4/ |
| A D | iw_cxgb4.h | 525 spinlock_t mmap_lock; member 558 spin_lock(&ucontext->mmap_lock); in remove_mmap() 564 spin_unlock(&ucontext->mmap_lock); in remove_mmap() 570 spin_unlock(&ucontext->mmap_lock); in remove_mmap() 603 spin_lock(&ucontext->mmap_lock); in insert_mmap() 607 spin_unlock(&ucontext->mmap_lock); in insert_mmap()
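
In cxgb4, mmap_lock is an ordinary spinlock guarding a per-ucontext list of mmap keys: the create paths below insert entries and the ->mmap() handler removes them by key. A self-contained sketch of that pattern with hypothetical names (not the driver's actual structures):

    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    /* Hypothetical per-context registry of mmap cookies, protected by a
     * spinlock because lookups happen from the ->mmap() path. */
    struct example_mm_entry {
            struct list_head entry;
            u64 key;
            unsigned int len;
    };

    struct example_ucontext {
            spinlock_t mmap_lock;
            struct list_head mmaps;
    };

    static void example_insert_mmap(struct example_ucontext *uctx,
                                    struct example_mm_entry *e)
    {
            spin_lock(&uctx->mmap_lock);
            list_add_tail(&e->entry, &uctx->mmaps);
            spin_unlock(&uctx->mmap_lock);
    }

    static struct example_mm_entry *
    example_remove_mmap(struct example_ucontext *uctx, u64 key)
    {
            struct example_mm_entry *e;

            spin_lock(&uctx->mmap_lock);
            list_for_each_entry(e, &uctx->mmaps, entry) {
                    if (e->key == key) {
                            list_del_init(&e->entry);
                            spin_unlock(&uctx->mmap_lock);
                            return e;
                    }
            }
            spin_unlock(&uctx->mmap_lock);
            return NULL;
    }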
|
| A D | provider.c | 89 spin_lock_init(&context->mmap_lock); in c4iw_alloc_ucontext() 103 spin_lock(&context->mmap_lock); in c4iw_alloc_ucontext() 106 spin_unlock(&context->mmap_lock); in c4iw_alloc_ucontext()
|
| A D | cq.c | 1110 spin_lock(&ucontext->mmap_lock); in c4iw_create_cq() 1120 spin_unlock(&ucontext->mmap_lock); in c4iw_create_cq()
|
| A D | qp.c | 2262 spin_lock(&ucontext->mmap_lock); in c4iw_create_qp() 2279 spin_unlock(&ucontext->mmap_lock); in c4iw_create_qp() 2773 spin_lock(&ucontext->mmap_lock); in c4iw_create_srq() 2778 spin_unlock(&ucontext->mmap_lock); in c4iw_create_srq()
|
| /linux/tools/perf/util/bpf_skel/vmlinux/ |
| A D | vmlinux.h | 97 struct rw_semaphore mmap_lock; member
|
| /linux/Documentation/mm/ |
| A D | transhuge.rst | 94 mmap_lock in read (or write) mode to be sure a huge pmd cannot be 96 takes the mmap_lock in write mode in addition to the anon_vma lock). If
|
| /linux/Documentation/admin-guide/mm/ |
| A D | numa_memory_policy.rst | 381 task's mm's mmap_lock for read during the query. The set_mempolicy() and 382 mbind() APIs [see below] always acquire the mmap_lock for write when 388 we hold them mmap_lock for read. Again, because replacing the task or vma 389 policy requires that the mmap_lock be held for write, the policy can't be 393 shared memory policy while another task, with a distinct mmap_lock, is
|
| A D | multigen_lru.rst | 41 theoretically worsen lock contention (mmap_lock). If it is
|
| /linux/include/media/ |
| A D | videobuf2-core.h | 646 struct mutex mmap_lock; member
|
| /linux/arch/x86/kernel/ |
| A D | tboot.c | 98 .mm_mt = MTREE_INIT_EXT(mm_mt, MM_MT_FLAGS, tboot_mm.mmap_lock),
|
| /linux/drivers/gpu/drm/etnaviv/ |
| A D | etnaviv_gem.c | 648 might_lock_read(&current->mm->mmap_lock); in etnaviv_gem_userptr_get_pages()
|
| /linux/Documentation/kernel-hacking/ |
| A D | false-sharing.rst | 48 false sharing. One of these is a rw_semaphore 'mmap_lock' inside
|
| /linux/arch/x86/mm/ |
| A D | fault.c | 1500 prefetchw(&current->mm->mmap_lock); in DEFINE_IDTENTRY_RAW_ERRORCODE()
|
| /linux/drivers/firmware/efi/ |
| A D | efi.c | 68 .mm_mt = MTREE_INIT_EXT(mm_mt, MM_MT_FLAGS, efi_mm.mmap_lock),
|
| /linux/Documentation/filesystems/ |
| A D | locking.rst | 653 ops mmap_lock PageLocked(page) 678 The mmap_lock may not be held when this method is called.
|