/mm/kmsan/
kmsan_test.c
  265: static int signed_sum3(int a, int b, int c)  [in signed_sum3(), argument]
  267:         return a + b + c;  [in signed_sum3()]
  277:         volatile char b = 3, c;  [in test_uninit_multiple_params(), local]
  281:         USE(signed_sum3(a, b, c));  [in test_uninit_multiple_params()]
  624:         volatile char a[4], b[4];  [in test_unpoison_memory(), local]
  638:         kmsan_unpoison_memory((char *)&b[0], 1);  [in test_unpoison_memory()]
  639:         kmsan_check_memory((char *)&b[1], 3);  [in test_unpoison_memory()]
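The test_unpoison_memory() hits above exercise KMSAN's manual annotation helpers, kmsan_unpoison_memory() and kmsan_check_memory() from <linux/kmsan-checks.h>: one byte of an uninitialized stack buffer is marked initialized, and checking the remaining bytes should still produce a report. A minimal sketch of that pattern, assuming a CONFIG_KMSAN build (the wrapper function name is invented):

    #include <linux/kmsan-checks.h>

    static void unpoison_pattern_sketch(void)
    {
            volatile char b[4];                      /* uninitialized: all four bytes poisoned */

            kmsan_unpoison_memory((char *)&b[0], 1); /* mark only b[0] as initialized */
            kmsan_check_memory((char *)&b[1], 3);    /* b[1..3] still poisoned: KMSAN reports */
    }

Both helpers compile to no-ops when CONFIG_KMSAN is off, so annotations like this can stay in shared code.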
core.c
  194: void kmsan_internal_set_shadow_origin(void *addr, size_t size, int b,  [in kmsan_internal_set_shadow_origin(), argument]
  215:         __memset(shadow_start, b, size);  [in kmsan_internal_set_shadow_origin()]
kmsan.h
  158: void kmsan_internal_set_shadow_origin(void *address, size_t size, int b,
/mm/
debug.c
   27: #define EM(a, b)  b,  [macro argument]
   28: #define EMe(a, b) b   [macro argument]
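EM() and EMe() are the x-macro idiom used with the trace-event headers: a single shared list of (value, name) pairs is expanded several times under different EM()/EMe() definitions, and debug.c redefines them to keep only the second element, yielding an array of flag names. A sketch of the idiom with an invented flag list (debug.c expands lists from the include/trace/events headers):

    #include <stdio.h>

    /* Invented example list; EMe marks the final entry so expansions
     * that cannot tolerate a trailing comma stay valid. */
    #define EXAMPLE_FLAGS             \
            EM(0x1, "locked")         \
            EM(0x2, "dirty")          \
            EMe(0x4, "writeback")

    /* Keep the second element of each pair (the name string). */
    #define EM(a, b)        b,
    #define EMe(a, b)       b

    static const char *const flag_names[] = { EXAMPLE_FLAGS };
    /* expands to: { "locked", "dirty", "writeback" } */

    int main(void)
    {
            for (unsigned int i = 0; i < sizeof(flag_names) / sizeof(flag_names[0]); i++)
                    printf("%s\n", flag_names[i]);
            return 0;
    }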
slab_common.c
  389:         kmem_buckets *b;  [in kmem_buckets_create(), local]
  404:         b = kmem_cache_alloc(kmem_buckets_cache, GFP_KERNEL|__GFP_ZERO);  [in kmem_buckets_create()]
  405:         if (WARN_ON(!b))  [in kmem_buckets_create()]
  435:         if (!(*b)[aligned_idx]) {  [in kmem_buckets_create()]
  439:                 (*b)[aligned_idx] = kmem_cache_create_usercopy(cache_name, size,  [in kmem_buckets_create()]
  443:                 if (WARN_ON(!(*b)[aligned_idx]))  [in kmem_buckets_create()]
  448:                 (*b)[idx] = (*b)[aligned_idx];  [in kmem_buckets_create()]
  451:         return b;  [in kmem_buckets_create()]
  455:                 kmem_cache_destroy((*b)[idx]);  [in kmem_buckets_create()]
  456:         kmem_cache_free(kmem_buckets_cache, b);  [in kmem_buckets_create()]
  [all …]
page_poison.c
   41: static bool single_bit_flip(unsigned char a, unsigned char b)  [in single_bit_flip(), argument]
   43:         unsigned char error = a ^ b;  [in single_bit_flip()]
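Only the XOR at line 43 is shown above. The usual completion of this idiom, and presumably the rest of single_bit_flip(), is a nonzero power-of-two test on the difference mask: exactly one bit differs iff a ^ b has exactly one bit set. A self-contained userspace sketch:

    #include <stdbool.h>
    #include <stdio.h>

    static bool single_bit_flip(unsigned char a, unsigned char b)
    {
            unsigned char error = a ^ b;    /* set bits mark positions that differ */

            /* nonzero and power of two <=> exactly one differing bit */
            return error && !(error & (error - 1));
    }

    int main(void)
    {
            printf("%d\n", single_bit_flip(0xaa, 0xab));    /* 1: only bit 0 differs */
            printf("%d\n", single_bit_flip(0xaa, 0xa9));    /* 0: bits 0 and 1 differ */
            return 0;
    }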
slab.h
  389: kmalloc_slab(size_t size, kmem_buckets *b, gfp_t flags, unsigned long caller)  [in kmalloc_slab(), argument]
  393:         if (!b)  [in kmalloc_slab()]
  394:                 b = &kmalloc_caches[kmalloc_type(flags, caller)];  [in kmalloc_slab()]
  400:         return (*b)[index];  [in kmalloc_slab()]
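kmalloc_slab() shows the kmem_buckets indirection: a bucket set is an array of caches indexed by size class, and a NULL bucket pointer falls back to the default kmalloc_caches row for the allocation type before the size index selects a cache. A miniature of that lookup in plain C, with invented stand-in types:

    #include <stdio.h>

    typedef const char *cache;      /* stand-in for struct kmem_cache * */
    typedef cache buckets[3];       /* stand-in for kmem_buckets */

    static buckets default_caches[2] = {    /* stand-in for kmalloc_caches[] */
            { "kmalloc-8", "kmalloc-16", "kmalloc-32" },
            { "dma-8",     "dma-16",     "dma-32"     },
    };

    static cache lookup(buckets *b, int type, int index)
    {
            if (!b)                          /* no private bucket set supplied */
                    b = &default_caches[type];
            return (*b)[index];
    }

    int main(void)
    {
            printf("%s\n", lookup(NULL, 0, 1));     /* kmalloc-16 */
            return 0;
    }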
percpu-stats.c
   24: static int cmpint(const void *a, const void *b)  [in cmpint(), argument]
   26:         return *(int *)a - *(int *)b;  [in cmpint()]
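cmpint() is the classic three-way comparator shape that the kernel's sort() and userspace qsort() expect: negative, zero, or positive for less-than, equal, and greater-than. A runnable userspace sketch; note that the subtraction form can overflow for values near INT_MIN/INT_MAX, where (x > y) - (x < y) is the safe alternative (harmless here, where small counts are compared):

    #include <stdio.h>
    #include <stdlib.h>

    /* Same shape as cmpint() in percpu-stats.c. */
    static int cmpint(const void *a, const void *b)
    {
            return *(const int *)a - *(const int *)b;
    }

    int main(void)
    {
            int v[] = { 42, 7, 19, 3 };

            qsort(v, sizeof(v) / sizeof(v[0]), sizeof(v[0]), cmpint);
            for (size_t i = 0; i < sizeof(v) / sizeof(v[0]); i++)
                    printf("%d ", v[i]);    /* prints: 3 7 19 42 */
            printf("\n");
            return 0;
    }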
vma.c
 1915: static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b)  [in anon_vma_compatible(), argument]
 1917:         return a->vm_end == b->vm_start &&  [in anon_vma_compatible()]
 1918:                mpol_equal(vma_policy(a), vma_policy(b)) &&  [in anon_vma_compatible()]
 1919:                a->vm_file == b->vm_file &&  [in anon_vma_compatible()]
 1920:                !((a->vm_flags ^ b->vm_flags) & ~(VM_ACCESS_FLAGS | VM_SOFTDIRTY)) &&  [in anon_vma_compatible()]
 1921:                b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT);  [in anon_vma_compatible()]
 1948:                           struct vm_area_struct *b)  [in reusable_anon_vma(), argument]
 1950:         if (anon_vma_compatible(a, b)) {  [in reusable_anon_vma()]
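anon_vma_compatible() allows anon_vma reuse only when b begins exactly where a ends, both in memory (vm_end == vm_start) and in the backing file (the vm_pgoff check at line 1921). A worked example of that offset arithmetic, with invented numbers and assuming 4 KiB pages (PAGE_SHIFT == 12):

    #include <assert.h>

    #define PAGE_SHIFT 12   /* assumed: 4 KiB pages */

    int main(void)
    {
            /* VMA a covers [0x1000, 0x3000), mapping file pages 0..1;
             * VMA b starts at 0x3000, mapping from file page 2. */
            unsigned long a_start = 0x1000, a_end = 0x3000, a_pgoff = 0;
            unsigned long b_start = 0x3000, b_pgoff = 2;

            /* b continues a both in memory and in the file. */
            assert(a_end == b_start);
            assert(b_pgoff == a_pgoff + ((b_start - a_start) >> PAGE_SHIFT));
            return 0;
    }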
numa_memblks.c
  484: static int __init cmp_memblk(const void *a, const void *b)  [in cmp_memblk(), argument]
  487:         const struct numa_memblk *mb = *(const struct numa_memblk **)b;  [in cmp_memblk()]
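Unlike cmpint() above, cmp_memblk() sorts an array of pointers, so the comparator dereferences twice: the sort routine passes pointers to the array slots, and each slot is itself a struct numa_memblk pointer. A userspace sketch of that shape, with an invented struct and assuming the sort key is the block start:

    #include <stdio.h>
    #include <stdlib.h>

    struct memblk { long start; };  /* invented stand-in for struct numa_memblk */

    static int cmp_memblk(const void *a, const void *b)
    {
            const struct memblk *ma = *(const struct memblk **)a;
            const struct memblk *mb = *(const struct memblk **)b;

            return (ma->start > mb->start) - (ma->start < mb->start);
    }

    int main(void)
    {
            struct memblk m1 = { 30 }, m2 = { 10 };
            struct memblk *v[] = { &m1, &m2 };      /* array of pointers */

            qsort(v, 2, sizeof(v[0]), cmp_memblk);
            printf("%ld %ld\n", v[0]->start, v[1]->start);  /* prints: 10 30 */
            return 0;
    }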
mempolicy.c
 2774: bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)  [in __mpol_equal(), argument]
 2776:         if (!a || !b)  [in __mpol_equal()]
 2778:         if (a->mode != b->mode)  [in __mpol_equal()]
 2780:         if (a->flags != b->flags)  [in __mpol_equal()]
 2782:         if (a->home_node != b->home_node)  [in __mpol_equal()]
 2785:         if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))  [in __mpol_equal()]
 2794:         return !!nodes_equal(a->nodes, b->nodes);  [in __mpol_equal()]
slub.c
 4346: void *__do_kmalloc_node(size_t size, kmem_buckets *b, gfp_t flags, int node,  [in __do_kmalloc_node(), argument]
 4362:         s = kmalloc_slab(size, b, flags, caller);  [in __do_kmalloc_node()]
 4369: void *__kmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node)  [in __kmalloc_node_noprof(), argument]
 4371:         return __do_kmalloc_node(size, PASS_BUCKET_PARAM(b), flags, node, _RET_IP_);  [in __kmalloc_node_noprof()]
 4381: void *__kmalloc_node_track_caller_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags,  [in __kmalloc_node_track_caller_noprof(), argument]
 4384:         return __do_kmalloc_node(size, PASS_BUCKET_PARAM(b), flags, node, caller);  [in __kmalloc_node_track_caller_noprof()]
 5044: void *__kvmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node)  [in __kvmalloc_node_noprof(), argument]
 5052:         ret = __do_kmalloc_node(size, PASS_BUCKET_PARAM(b),  [in __kvmalloc_node_noprof()]
 7714: static int cmp_loc_by_count(const void *a, const void *b, const void *data)  [in cmp_loc_by_count(), argument]
 7717:         struct location *loc2 = (struct location *)b;  [in cmp_loc_by_count()]
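The DECL_BUCKET_PARAMS()/PASS_BUCKET_PARAM() pair lets the bucket argument compile away when the bucket feature is disabled, so every caller is written once. The definitions below are only a sketch of how such a conditional-parameter pair is typically built around CONFIG_SLAB_BUCKETS; see mm/slab.h for the real ones:

    /* Sketch: with buckets enabled, functions declare and pass the
     * extra parameter; with it disabled, the declaration shrinks and
     * call sites pass NULL, so callers compile unchanged either way. */
    #ifdef CONFIG_SLAB_BUCKETS
    #define DECL_BUCKET_PARAMS(_size, _b)   size_t (_size), kmem_buckets *(_b)
    #define PASS_BUCKET_PARAM(_b)           (_b)
    #else
    #define DECL_BUCKET_PARAMS(_size, _b)   size_t (_size)
    #define PASS_BUCKET_PARAM(_b)           NULL
    #endif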
memcontrol-v1.c
  698: static int compare_thresholds(const void *a, const void *b)  [in compare_thresholds(), argument]
  701:         const struct mem_cgroup_threshold *_b = b;  [in compare_thresholds()]
percpu.c
  615: static inline bool pcpu_region_overlap(int a, int b, int x, int y)  [in pcpu_region_overlap(), argument]
  617:         return (a < y) && (x < b);  [in pcpu_region_overlap()]
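pcpu_region_overlap() is the standard half-open interval test: [a, b) and [x, y) intersect iff each interval starts before the other ends. A self-contained check of the edge cases:

    #include <assert.h>
    #include <stdbool.h>

    /* Same test as pcpu_region_overlap() above. */
    static inline bool region_overlap(int a, int b, int x, int y)
    {
            return (a < y) && (x < b);
    }

    int main(void)
    {
            assert(region_overlap(0, 10, 5, 15));   /* [0,10) and [5,15) overlap */
            assert(!region_overlap(0, 10, 10, 20)); /* adjacent regions do not */
            assert(region_overlap(0, 10, 9, 10));   /* single-unit intersection */
            return 0;
    }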
shmem.c
 3887:         buf->f_fsid = uuid_to_fsid(dentry->d_sb->s_uuid.b);  [in shmem_statfs()]
 5146:         super_set_uuid(sb, uuid.b, sizeof(uuid));  [in shmem_fill_super()]
/mm/damon/
stat.c
   66: static int damon_stat_cmp_regions(const void *a, const void *b)  [in damon_stat_cmp_regions(), argument]
   69:         const struct damon_region *rb = *(const struct damon_region **)b;  [in damon_stat_cmp_regions()]