Lines matching refs:flags
(Each entry: source line number, matching code, enclosing function. A trailing "argument" or "local" marks whether flags is a function parameter or a local variable on that line.)
126 static unsigned int calculate_alignment(slab_flags_t flags, in calculate_alignment() argument
136 if (flags & SLAB_HWCACHE_ALIGN) { in calculate_alignment()
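The calculate_alignment() hits above show the one flag it consults: SLAB_HWCACHE_ALIGN requests cache-line alignment, shrunk for small objects so several still fit per line. A minimal userspace sketch of that policy (the constant, flag value, and helper name are invented; the real function also enforces the architecture minimum and rounds up to sizeof(void *)):

#include <stdio.h>

#define CACHE_LINE 64            /* assumed L1 line size                */
#define MY_HWCACHE_ALIGN 0x1u    /* stand-in for SLAB_HWCACHE_ALIGN     */

static unsigned int calc_align(unsigned int flags, unsigned int align,
                               unsigned int size)
{
        if (flags & MY_HWCACHE_ALIGN) {
                unsigned int ralign = CACHE_LINE;

                /* Halve the alignment while two objects still fit. */
                while (size <= ralign / 2)
                        ralign /= 2;
                if (ralign > align)
                        align = ralign;
        }
        return align;
}

int main(void)
{
        /* 24-byte objects land on a 32-byte boundary, not a full line. */
        printf("align: %u\n", calc_align(MY_HWCACHE_ALIGN, 8, 24)); /* 32 */
        return 0;
}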
155 if (slab_nomerge || (s->flags & SLAB_NEVER_MERGE)) in slab_unmergeable()
176 slab_flags_t flags, const char *name, void (*ctor)(void *)) in find_mergeable() argument
186 flags = kmem_cache_flags(flags, name); in find_mergeable()
188 if (flags & SLAB_NEVER_MERGE) in find_mergeable()
192 align = calculate_alignment(flags, align, size); in find_mergeable()
202 if ((flags & SLAB_MERGE_SAME) != (s->flags & SLAB_MERGE_SAME)) in find_mergeable()
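The slab_unmergeable()/find_mergeable() hits outline the merge test: a cache is only a merge candidate if neither the global slab_nomerge switch nor a never-merge flag blocks it, and both caches agree exactly on the "merge relevant" flag subset. A hedged sketch of that predicate, with invented flag values:

#include <stdbool.h>
#include <stdio.h>

#define F_NEVER_MERGE 0x1u   /* stand-in for SLAB_NEVER_MERGE bits */
#define F_MERGE_SAME  0x6u   /* bits that must match exactly       */

struct cache { unsigned int flags; unsigned int size; };

static bool mergeable(const struct cache *s, unsigned int flags,
                      bool nomerge)
{
        if (nomerge || (s->flags & F_NEVER_MERGE) ||
            (flags & F_NEVER_MERGE))
                return false;
        /* Both sides must select the same merge-relevant bits. */
        return (flags & F_MERGE_SAME) == (s->flags & F_MERGE_SAME);
}

int main(void)
{
        struct cache s = { .flags = 0x2u, .size = 64 };

        printf("%d\n", mergeable(&s, 0x2u, false)); /* 1: bits agree  */
        printf("%d\n", mergeable(&s, 0x4u, false)); /* 0: bits differ */
        return 0;
}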
222 slab_flags_t flags) in create_cache() argument
231 !(flags & SLAB_TYPESAFE_BY_RCU) || in create_cache()
239 err = do_kmem_cache_create(s, name, object_size, args, flags); in create_cache()
283 slab_flags_t flags) in __kmem_cache_create_args() argument
297 if (flags & SLAB_DEBUG_FLAGS) in __kmem_cache_create_args()
299 if (flags & SLAB_STORE_USER) in __kmem_cache_create_args()
302 flags &= ~SLAB_DEBUG_FLAGS; in __kmem_cache_create_args()
312 if (flags & ~SLAB_FLAGS_PERMITTED) { in __kmem_cache_create_args()
325 s = __kmem_cache_alias(name, object_size, args->align, flags, in __kmem_cache_create_args()
336 args->align = calculate_alignment(flags, args->align, object_size); in __kmem_cache_create_args()
337 s = create_cache(cache_name, object_size, args, flags); in __kmem_cache_create_args()
347 if (flags & SLAB_PANIC) in __kmem_cache_create_args()
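The __kmem_cache_create_args() hits trace the validation pipeline: debug-only bits are silently dropped when debugging support is absent, anything outside the permitted mask fails the call, and SLAB_PANIC escalates a failure into a hard stop. A compressed sketch of that pattern, with all mask values illustrative rather than the kernel's:

#include <errno.h>
#include <stdio.h>

typedef unsigned int slab_flags_t;

#define F_DEBUG_BITS 0x00f0u  /* assumed SLAB_DEBUG_FLAGS analogue      */
#define F_PERMITTED  0x1fffu  /* assumed SLAB_FLAGS_PERMITTED analogue  */
#define F_PANIC      0x1000u  /* assumed SLAB_PANIC analogue            */

static int create_args(const char *name, slab_flags_t flags,
                       int debug_enabled)
{
        if (!debug_enabled)
                flags &= ~F_DEBUG_BITS;        /* silently ignored */

        if (flags & ~F_PERMITTED) {
                fprintf(stderr, "%s: invalid flags %#x\n",
                        name, flags & ~F_PERMITTED);
                return -EINVAL;
        }
        /* ... alias lookup and cache creation would follow here ... */
        return 0;
}

int main(void)
{
        slab_flags_t flags = 0xf0000u | F_PANIC;  /* one bogus bit */
        int err = create_args("demo", flags, 0);

        /* SLAB_PANIC turns a creation failure into a hard stop. */
        if (err && (flags & F_PANIC))
                fprintf(stderr, "would panic: err=%d\n", err);
        return 0;
}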
382 kmem_buckets *kmem_buckets_create(const char *name, slab_flags_t flags, in kmem_buckets_create() argument
408 flags |= SLAB_NO_MERGE; in kmem_buckets_create()
440 0, flags, cache_useroffset, in kmem_buckets_create()
495 (s->flags & SLAB_TYPESAFE_BY_RCU)) { in kmem_cache_destroy()
540 if (s->flags & SLAB_TYPESAFE_BY_RCU) in kmem_cache_destroy()
650 unsigned int size, slab_flags_t flags, in create_boot_cache() argument
662 if (flags & SLAB_KMALLOC) in create_boot_cache()
664 kmem_args.align = calculate_alignment(flags, align, size); in create_boot_cache()
671 err = do_kmem_cache_create(s, name, size, &kmem_args, flags); in create_boot_cache()
682 slab_flags_t flags) in create_kmalloc_cache() argument
689 create_boot_cache(s, name, size, flags | SLAB_KMALLOC, 0, size); in create_kmalloc_cache()
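The create_kmalloc_cache() hit shows a tag-and-test idiom: SLAB_KMALLOC is OR-ed into whatever flags the caller chose, and create_boot_cache() tests that bit to treat kmalloc caches specially. A trivial sketch under invented flag values:

#include <stdio.h>

typedef unsigned int slab_flags_t;
#define F_KMALLOC 0x100u   /* stand-in for SLAB_KMALLOC */

static void boot_cache(const char *name, slab_flags_t flags)
{
        if (flags & F_KMALLOC)
                printf("%s: marked as a kmalloc cache\n", name);
}

static void kmalloc_cache(const char *name, slab_flags_t flags)
{
        /* The marker travels with the caller's own flags. */
        boot_cache(name, flags | F_KMALLOC);
}

int main(void)
{
        kmalloc_cache("kmalloc-64", 0);
        return 0;
}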
901 slab_flags_t flags = 0; in new_kmalloc_cache() local
907 flags |= SLAB_RECLAIM_ACCOUNT; in new_kmalloc_cache()
913 flags |= SLAB_ACCOUNT; in new_kmalloc_cache()
915 flags |= SLAB_CACHE_DMA; in new_kmalloc_cache()
920 flags |= SLAB_NO_MERGE; in new_kmalloc_cache()
928 flags |= SLAB_NO_MERGE; in new_kmalloc_cache()
938 aligned_size, flags); in new_kmalloc_cache()
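The new_kmalloc_cache() hits show creation flags being accumulated from the kmalloc variant: reclaimable caches get SLAB_RECLAIM_ACCOUNT, cgroup-accounted ones SLAB_ACCOUNT, DMA ones SLAB_CACHE_DMA, and some configurations additionally force SLAB_NO_MERGE. A sketch of that accumulation; enum names, flag values, and the no_merge parameter are stand-ins:

#include <stdbool.h>
#include <stdio.h>

typedef unsigned int slab_flags_t;

#define F_RECLAIM_ACCOUNT 0x01u
#define F_ACCOUNT         0x02u
#define F_CACHE_DMA       0x04u
#define F_NO_MERGE        0x08u

enum ktype { K_NORMAL, K_RECLAIM, K_CGROUP, K_DMA };

static slab_flags_t kmalloc_flags(enum ktype type, bool no_merge)
{
        slab_flags_t flags = 0;

        switch (type) {
        case K_RECLAIM:
                flags |= F_RECLAIM_ACCOUNT;
                break;
        case K_CGROUP:
                flags |= F_ACCOUNT;
                break;
        case K_DMA:
                flags |= F_CACHE_DMA;
                break;
        case K_NORMAL:
                break;
        }
        /* Some configurations keep caches out of slab merging entirely. */
        if (no_merge)
                flags |= F_NO_MERGE;
        return flags;
}

int main(void)
{
        printf("dma: %#x\n", kmalloc_flags(K_DMA, true)); /* 0xc */
        return 0;
}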
1016 gfp_t kmalloc_fix_flags(gfp_t flags) in kmalloc_fix_flags() argument
1018 gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK; in kmalloc_fix_flags()
1020 flags &= ~GFP_SLAB_BUG_MASK; in kmalloc_fix_flags()
1022 invalid_mask, &invalid_mask, flags, &flags); in kmalloc_fix_flags()
1025 return flags; in kmalloc_fix_flags()
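kmalloc_fix_flags() is a repair path: GFP bits that must never reach the slab allocator are masked off and the caller is warned rather than crashed. A userspace sketch of the same strip-and-warn shape, with an invented mask value:

#include <stdio.h>

typedef unsigned int gfp_t;
#define GFP_BUG_MASK 0xf000u    /* assumed invalid-bit mask */

static gfp_t fix_flags(gfp_t flags)
{
        gfp_t invalid = flags & GFP_BUG_MASK;

        flags &= ~GFP_BUG_MASK;
        if (invalid)
                fprintf(stderr, "unexpected gfp %#x stripped, using %#x\n",
                        invalid, flags);
        return flags;
}

int main(void)
{
        printf("fixed: %#x\n", fix_flags(0xf0ffu)); /* 0xff */
        return 0;
}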
1151 if (s->flags & SLAB_RECLAIM_ACCOUNT) in dump_unreclaimable_slab()
1437 krc_this_cpu_lock(unsigned long *flags) in krc_this_cpu_lock() argument
1441 local_irq_save(*flags); // For safely calling this_cpu_ptr(). in krc_this_cpu_lock()
1449 krc_this_cpu_unlock(struct kfree_rcu_cpu *krcp, unsigned long flags) in krc_this_cpu_unlock() argument
1451 raw_spin_unlock_irqrestore(&krcp->lock, flags); in krc_this_cpu_unlock()
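From here on, flags changes meaning: it is the saved interrupt state, not slab flags. krc_this_cpu_lock() saves and disables interrupts (so this_cpu_ptr() is safe, per the comment above) before taking the per-CPU raw spinlock; the unlock side restores the saved state. Userspace has no IRQs, so this sketch emulates the save/restore handshake with a pthread mutex and a fake interrupt-state global:

#include <pthread.h>
#include <stdio.h>

struct krcp_sim {
        pthread_mutex_t lock;
        int count;
};

static struct krcp_sim krcp = { PTHREAD_MUTEX_INITIALIZER, 0 };
static int irqs_on = 1;   /* fake per-CPU interrupt state */

static struct krcp_sim *krc_lock(unsigned long *flags)
{
        *flags = (unsigned long)irqs_on; /* local_irq_save() analogue  */
        irqs_on = 0;
        pthread_mutex_lock(&krcp.lock);  /* raw_spin_lock() analogue   */
        return &krcp;
}

static void krc_unlock(struct krcp_sim *k, unsigned long flags)
{
        pthread_mutex_unlock(&k->lock);  /* unlock_irqrestore analogue */
        irqs_on = (int)flags;
}

int main(void)
{
        unsigned long flags;
        struct krcp_sim *k = krc_lock(&flags);

        k->count++;
        krc_unlock(k, flags);
        printf("count=%d irqs_on=%d\n", k->count, irqs_on);
        return 0;
}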
1481 unsigned long flags; in drain_page_cache() local
1488 raw_spin_lock_irqsave(&krcp->lock, flags); in drain_page_cache()
1491 raw_spin_unlock_irqrestore(&krcp->lock, flags); in drain_page_cache()
1505 unsigned long flags; in kvfree_rcu_bulk() local
1528 raw_spin_lock_irqsave(&krcp->lock, flags); in kvfree_rcu_bulk()
1531 raw_spin_unlock_irqrestore(&krcp->lock, flags); in kvfree_rcu_bulk()
1566 unsigned long flags; in kfree_rcu_work() local
1579 raw_spin_lock_irqsave(&krcp->lock, flags); in kfree_rcu_work()
1588 raw_spin_unlock_irqrestore(&krcp->lock, flags); in kfree_rcu_work()
1661 unsigned long flags; in schedule_delayed_monitor_work() local
1663 raw_spin_lock_irqsave(&krcp->lock, flags); in schedule_delayed_monitor_work()
1665 raw_spin_unlock_irqrestore(&krcp->lock, flags); in schedule_delayed_monitor_work()
1674 unsigned long flags; in kvfree_rcu_drain_ready() local
1677 raw_spin_lock_irqsave(&krcp->lock, flags); in kvfree_rcu_drain_ready()
1695 raw_spin_unlock_irqrestore(&krcp->lock, flags); in kvfree_rcu_drain_ready()
1712 unsigned long flags; in kvfree_rcu_queue_batch() local
1716 raw_spin_lock_irqsave(&krcp->lock, flags); in kvfree_rcu_queue_batch()
1759 raw_spin_unlock_irqrestore(&krcp->lock, flags); in kvfree_rcu_queue_batch()
1792 unsigned long flags; in fill_page_cache_func() local
1807 raw_spin_lock_irqsave(&krcp->lock, flags); in fill_page_cache_func()
1809 raw_spin_unlock_irqrestore(&krcp->lock, flags); in fill_page_cache_func()
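The hits from drain_page_cache() through fill_page_cache_func() all repeat the idiom sketched above: each path that touches a struct kfree_rcu_cpu declares a local flags word, takes krcp->lock with raw_spin_lock_irqsave(), and releases it with raw_spin_unlock_irqrestore().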
1829 unsigned long *flags, void *ptr, bool can_alloc) in add_ptr_to_bulk_krc_lock() argument
1834 *krcp = krc_this_cpu_lock(flags); in add_ptr_to_bulk_krc_lock()
1846 krc_this_cpu_unlock(*krcp, *flags); in add_ptr_to_bulk_krc_lock()
1861 raw_spin_lock_irqsave(&(*krcp)->lock, *flags); in add_ptr_to_bulk_krc_lock()
1938 unsigned long flags; in kvfree_call_rcu() local
1963 success = add_ptr_to_bulk_krc_lock(&krcp, &flags, ptr, !head); in kvfree_call_rcu()
1994 krc_this_cpu_unlock(krcp, flags); in kvfree_call_rcu()
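The final hits show a hand-off of that same flags word: kvfree_call_rcu() owns it, add_ptr_to_bulk_krc_lock() takes it by pointer because it acquires, and may temporarily drop and retake, the per-CPU lock on the caller's behalf (the unlock/relock around page allocation at 1846/1861), and the caller finally unlocks with the very same word. A compressed userspace sketch of that calling convention; names and the locking emulation are assumptions:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct krcp_sim { pthread_mutex_t lock; int nr_ptrs; };
static struct krcp_sim cpu0 = { PTHREAD_MUTEX_INITIALIZER, 0 };

/* Locks on behalf of the caller; flags is taken by pointer so the
 * caller can later unlock with whatever state was saved here. */
static bool add_ptr_locked(struct krcp_sim **krcp, unsigned long *flags,
                           void *ptr)
{
        *flags = 0;                      /* irq-save analogue */
        pthread_mutex_lock(&cpu0.lock);
        *krcp = &cpu0;

        if (!ptr)
                return false;            /* caller still unlocks */
        (*krcp)->nr_ptrs++;
        return true;
}

static void cpu_unlock(struct krcp_sim *krcp, unsigned long flags)
{
        pthread_mutex_unlock(&krcp->lock);
        (void)flags;                     /* irq-restore analogue */
}

int main(void)
{
        struct krcp_sim *krcp;
        unsigned long flags;
        int obj = 42;

        bool queued = add_ptr_locked(&krcp, &flags, &obj);
        /* ... the fast path would schedule drain work here ... */
        cpu_unlock(krcp, flags);
        printf("queued=%d nr=%d\n", queued, krcp->nr_ptrs);
        return 0;
}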