/mm/
Kconfig.debug
      35  By default this option will have a small overhead, e.g. by not
      77  # without KASAN, so mark it as a dependency of KASAN for now.
      93  KASAN builds, in case you want to test for such a bug.
     100  This is designed as a debugging feature, not a security feature.
     153  have a potential performance impact if enabled with the
     193  Generate a warning if any W+X mappings are found at boot.
     198  Look for a message in dmesg output like this:
     207  still fine, as W+X mappings are not a security hole in
     212  once the kernel has booted up - it's a one time check.
     232  It is probably not a good idea to enable this feature in a production
     [all …]
|
Kconfig
     137  making a right choice.
     273  out which slabs are relevant to a particular load.
     285  Typically one would choose no for a realtime system.
     313  6.2a specification for an example of how a platform advertises
     703  on a platform.
     805  if there are a lot of transient processes.
     858  memory footprint of applications without a guaranteed
     867  memory footprint of applications without a guaranteed
     907  a large allocation instead of the per-page mapcount.
     913  # simple helper to make the code a bit easier to read
     [all …]
|
nommu.c
    1289  struct mmap_arg_struct a;  [in SYSCALL_DEFINE1(), local]
    1291  if (copy_from_user(&a, arg, sizeof(a)))  [in SYSCALL_DEFINE1()]
    1293  if (offset_in_page(a.offset))  [in SYSCALL_DEFINE1()]
    1296  return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,  [in SYSCALL_DEFINE1()]
    1297  a.offset >> PAGE_SHIFT);  [in SYSCALL_DEFINE1()]
|
mmap.c
     630  struct mmap_arg_struct a;  [in SYSCALL_DEFINE1(), local]
     632  if (copy_from_user(&a, arg, sizeof(a)))  [in SYSCALL_DEFINE1()]
     634  if (offset_in_page(a.offset))  [in SYSCALL_DEFINE1()]
     637  return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,  [in SYSCALL_DEFINE1()]
     638  a.offset >> PAGE_SHIFT);  [in SYSCALL_DEFINE1()]
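Both legacy mmap wrappers above reject a file offset that is not page-aligned (offset_in_page()) before forwarding it in page units. A minimal user-space sketch of the effect visible to callers; the file path is an arbitrary choice for illustration, and a misaligned offset is expected to fail with EINVAL:

```c
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	int fd = open("/etc/hostname", O_RDONLY);	/* illustrative file */

	if (fd < 0)
		return 1;

	/* The offset must be a multiple of the page size; page + 1 is not,
	 * so the mapping request is rejected before any forwarding happens. */
	void *p = mmap(NULL, page, PROT_READ, MAP_PRIVATE, fd, page + 1);

	if (p == MAP_FAILED)
		printf("mmap failed as expected: %s\n", strerror(errno));
	else
		munmap(p, page);
	close(fd);
	return 0;
}
```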
|
page_poison.c
      41  static bool single_bit_flip(unsigned char a, unsigned char b)  [in single_bit_flip(), argument]
      43  unsigned char error = a ^ b;  [in single_bit_flip()]
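The page_poison.c match is cut off after the XOR; the standard completion of such a helper is the power-of-two test on the difference. A standalone sketch of that idiom (the final return line here is an assumption about the truncated code, not a verbatim quote):

```c
#include <stdbool.h>

/* Two bytes differ by exactly one bit iff their XOR has a single bit set. */
static bool single_bit_flip(unsigned char a, unsigned char b)
{
	unsigned char error = a ^ b;

	/* non-zero and a power of two => exactly one differing bit */
	return error && !(error & (error - 1));
}
```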
|
percpu-stats.c
      24  static int cmpint(const void *a, const void *b)  [in cmpint(), argument]
      26  return *(int *)a - *(int *)b;  [in cmpint()]
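Comparators of this shape are handed to the kernel's sort(); the same pattern works with libc qsort(), as sketched below. The subtraction form shown in the match can overflow for operands near INT_MIN/INT_MAX, so the sketch uses a three-way comparison instead (the sample data is made up):

```c
#include <stdio.h>
#include <stdlib.h>

/* Safe three-way comparison: avoids the overflow that "*a - *b" can hit. */
static int cmpint(const void *a, const void *b)
{
	int x = *(const int *)a;
	int y = *(const int *)b;

	return (x > y) - (x < y);
}

int main(void)
{
	int v[] = { 42, 7, -3, 19 };

	qsort(v, sizeof(v) / sizeof(v[0]), sizeof(v[0]), cmpint);
	for (size_t i = 0; i < sizeof(v) / sizeof(v[0]); i++)
		printf("%d\n", v[i]);
	return 0;
}
```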
|
debug.c
      27  #define EM(a, b) b,  [argument]
      28  #define EMe(a, b) b  [argument]
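EM()/EMe() pairs like these are the usual X-macro trick: one list is expanded twice, first to emit enum values and then to emit matching strings. A generic sketch of the pattern (the color list and names are invented for illustration; only the EM/EMe redefinition trick mirrors the match above):

```c
#include <stdio.h>

/* X-macro style list, expanded twice with different EM definitions. */
#define COLOR_LIST				\
	EM(COLOR_RED,   "red")			\
	EM(COLOR_GREEN, "green")		\
	EMe(COLOR_BLUE, "blue")

/* First expansion: build the enum from the first column. */
#define EM(a, b)  a,
#define EMe(a, b) a
enum color { COLOR_LIST };
#undef EM
#undef EMe

/* Second expansion: build the string table from the second column. */
#define EM(a, b)  b,
#define EMe(a, b) b
static const char *const color_names[] = { COLOR_LIST };
#undef EM
#undef EMe

int main(void)
{
	printf("%s\n", color_names[COLOR_GREEN]);	/* prints "green" */
	return 0;
}
```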
|
vma.c
    1915  static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b)  [in anon_vma_compatible(), argument]
    1917  return a->vm_end == b->vm_start &&  [in anon_vma_compatible()]
    1918  mpol_equal(vma_policy(a), vma_policy(b)) &&  [in anon_vma_compatible()]
    1919  a->vm_file == b->vm_file &&  [in anon_vma_compatible()]
    1920  !((a->vm_flags ^ b->vm_flags) & ~(VM_ACCESS_FLAGS | VM_SOFTDIRTY)) &&  [in anon_vma_compatible()]
    1921  b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT);  [in anon_vma_compatible()]
    1947  struct vm_area_struct *a,  [in reusable_anon_vma(), argument]
    1950  if (anon_vma_compatible(a, b)) {  [in reusable_anon_vma()]
|
mempolicy.c
    2774  bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)  [in __mpol_equal(), argument]
    2776  if (!a || !b)  [in __mpol_equal()]
    2778  if (a->mode != b->mode)  [in __mpol_equal()]
    2780  if (a->flags != b->flags)  [in __mpol_equal()]
    2782  if (a->home_node != b->home_node)  [in __mpol_equal()]
    2784  if (mpol_store_user_nodemask(a))  [in __mpol_equal()]
    2785  if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))  [in __mpol_equal()]
    2788  switch (a->mode) {  [in __mpol_equal()]
    2794  return !!nodes_equal(a->nodes, b->nodes);  [in __mpol_equal()]
|
numa_memblks.c
     484  static int __init cmp_memblk(const void *a, const void *b)  [in cmp_memblk(), argument]
     486  const struct numa_memblk *ma = *(const struct numa_memblk **)a;  [in cmp_memblk()]
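The double cast in cmp_memblk() shows that the array being sorted holds pointers, so the comparator receives a pointer to a pointer. A user-space sketch of that casting idiom with qsort(); the struct and field below are hypothetical, only the cast pattern is taken from the match:

```c
#include <stdio.h>
#include <stdlib.h>

struct blk {
	unsigned long start;	/* illustrative field, not the kernel's layout */
};

/* qsort() passes a pointer to each ELEMENT; the elements themselves are
 * pointers, hence the (const struct blk **) cast before dereferencing. */
static int cmp_blk(const void *a, const void *b)
{
	const struct blk *ba = *(const struct blk **)a;
	const struct blk *bb = *(const struct blk **)b;

	return (ba->start > bb->start) - (ba->start < bb->start);
}

int main(void)
{
	struct blk b1 = { 300 }, b2 = { 100 }, b3 = { 200 };
	struct blk *v[] = { &b1, &b2, &b3 };

	qsort(v, 3, sizeof(v[0]), cmp_blk);
	for (int i = 0; i < 3; i++)
		printf("%lu\n", v[i]->start);	/* 100, 200, 300 */
	return 0;
}
```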
|
slab_common.c
     779  #define __KMALLOC_RANDOM_CONCAT(a, b) a ## b  [argument]
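A raw `a ## b` macro pastes its operands before they are macro-expanded, which is why such helpers are normally wrapped in a second, argument-expanding level. A generic sketch of the two-level concatenation idiom (macro and identifier names are invented, not the kernel's):

```c
#include <stdio.h>

#define CONCAT_RAW(a, b) a ## b            /* pastes tokens literally */
#define CONCAT(a, b)     CONCAT_RAW(a, b)  /* expands a and b first, then pastes */

#define NAME   counter
#define SUFFIX _total

/* Expands to: long counter_total = 0;
 * CONCAT_RAW(NAME, SUFFIX) would instead produce the identifier NAMESUFFIX. */
long CONCAT(NAME, SUFFIX) = 0;

int main(void)
{
	counter_total += 3;
	printf("%ld\n", counter_total);
	return 0;
}
```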
|
memcontrol-v1.c
     698  static int compare_thresholds(const void *a, const void *b)  [in compare_thresholds(), argument]
     700  const struct mem_cgroup_threshold *_a = a;  [in compare_thresholds()]
|
percpu.c
     615  static inline bool pcpu_region_overlap(int a, int b, int x, int y)  [in pcpu_region_overlap(), argument]
     617  return (a < y) && (x < b);  [in pcpu_region_overlap()]
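pcpu_region_overlap() is the textbook half-open interval test: [a, b) and [x, y) intersect exactly when a < y and x < b. A self-contained sketch with a few illustrative cases:

```c
#include <assert.h>
#include <stdbool.h>

/* True iff the half-open ranges [a, b) and [x, y) share at least one point. */
static inline bool region_overlap(int a, int b, int x, int y)
{
	return (a < y) && (x < b);
}

int main(void)
{
	assert(region_overlap(0, 10, 5, 15));	/* partial overlap */
	assert(!region_overlap(0, 10, 10, 20));	/* touching ends do not overlap */
	assert(region_overlap(0, 10, 3, 7));	/* containment */
	return 0;
}
```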
|
slub.c
    7714  static int cmp_loc_by_count(const void *a, const void *b, const void *data)  [in cmp_loc_by_count(), argument]
    7716  struct location *loc1 = (struct location *)a;  [in cmp_loc_by_count()]
|
shmem.c
    5424  static ssize_t casefold_show(struct kobject *kobj, struct kobj_attribute *a,  [in casefold_show(), argument]
|
/mm/kmsan/
kmsan_test.c
     265  static int signed_sum3(int a, int b, int c)  [in signed_sum3(), argument]
     267  return a + b + c;  [in signed_sum3()]
     278  volatile int a;  [in test_uninit_multiple_params(), local]
     281  USE(signed_sum3(a, b, c));  [in test_uninit_multiple_params()]
     624  volatile char a[4], b[4];  [in test_unpoison_memory(), local]
     631  a[0] = 0;  [in test_unpoison_memory()]
     632  kmsan_check_memory((char *)&a[1], 3);  [in test_unpoison_memory()]
|
/mm/damon/
Kconfig
       8  This builds a framework that allows kernel subsystems to monitor
      82  that not accessed for a long time (cold) using DAMON and reclaim
      85  This is suggested to be used as a proactive and lightweight
|
stat.c
      66  static int damon_stat_cmp_regions(const void *a, const void *b)  [in damon_stat_cmp_regions(), argument]
      68  const struct damon_region *ra = *(const struct damon_region **)a;  [in damon_stat_cmp_regions()]
|
core.c
    2680  struct damon_system_ram_region *a = arg;  [in walk_system_ram(), local]
    2682  if (a->end - a->start < resource_size(res)) {  [in walk_system_ram()]
    2683  a->start = res->start;  [in walk_system_ram()]
    2684  a->end = res->end;  [in walk_system_ram()]
|