// SPDX-License-Identifier: GPL-2.0-only

#include <linux/coredump.h>
#include <linux/elfcore.h>
#include <linux/kernel.h>
#include <linux/mm.h>

#include <asm/cpufeature.h>
#include <asm/mte.h>
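
/*
 * Iterate over the coredump VMA metadata, visiting only the VMAs that
 * carry VM_MTE; the loop body is skipped entirely on CPUs without MTE.
 */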
#define for_each_mte_vma(cprm, i, m)					\
	if (system_supports_mte())					\
		for (i = 0, m = cprm->vma_meta;				\
		     i < cprm->vma_count;				\
		     i++, m = cprm->vma_meta + i)			\
			if (m->flags & VM_MTE)
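
/*
 * Tag storage is MTE_PAGE_TAG_STORAGE bytes for every page of the VMA
 * included in the dump (4 tag bits per 16-byte granule, i.e. 128 bytes
 * for a 4K page).
 */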
static unsigned long mte_vma_tag_dump_size(struct core_vma_metadata *m)
{
	return (m->dump_size >> PAGE_SHIFT) * MTE_PAGE_TAG_STORAGE;
}

/* Derived from dump_user_range(); start/end must be page-aligned */
static int mte_dump_tag_range(struct coredump_params *cprm,
			      unsigned long start, unsigned long len)
{
	int ret = 1;
	unsigned long addr;
	void *tags = NULL;
	int locked = 0;

	for (addr = start; addr < start + len; addr += PAGE_SIZE) {
		struct page *page = get_dump_page(addr, &locked);

		/*
		 * get_dump_page() returns NULL when encountering an empty
		 * page table entry that would otherwise have been filled with
		 * the zero page. Skip the equivalent tag dump which would
		 * have been all zeros.
		 */
		if (!page) {
			dump_skip(cprm, MTE_PAGE_TAG_STORAGE);
			continue;
		}

		/*
		 * Pages mapped in user space as !pte_access_permitted() (e.g.
		 * PROT_EXEC only) may not have the PG_mte_tagged flag set.
		 */
		if (!page_mte_tagged(page)) {
			put_page(page);
			dump_skip(cprm, MTE_PAGE_TAG_STORAGE);
			continue;
		}
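
		/*
		 * Allocate the tag buffer once, on the first tagged page
		 * encountered; it is reused for each subsequent page.
		 */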
		if (!tags) {
			tags = mte_allocate_tag_storage();
			if (!tags) {
				put_page(page);
				ret = 0;
				break;
			}
		}

		mte_save_page_tags(page_address(page), tags);
		put_page(page);
		if (!dump_emit(cprm, tags, MTE_PAGE_TAG_STORAGE)) {
			ret = 0;
			break;
		}
	}

	if (tags)
		mte_free_tag_storage(tags);

	return ret;
}
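
/*
 * Count the MTE-tagged VMAs: one extra PT_AARCH64_MEMTAG_MTE program
 * header is emitted for each of them.
 */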
Elf_Half elf_core_extra_phdrs(struct coredump_params *cprm)
{
	int i;
	struct core_vma_metadata *m;
	int vma_count = 0;

	for_each_mte_vma(cprm, i, m)
		vma_count++;

	return vma_count;
}
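
/*
 * Write one PT_AARCH64_MEMTAG_MTE program header per tagged VMA:
 * p_filesz is the size of the tag data in the file, p_memsz the size
 * of the VMA in memory, and @offset advances past each VMA's tags.
 */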
int elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset)
{
	int i;
	struct core_vma_metadata *m;

	for_each_mte_vma(cprm, i, m) {
		struct elf_phdr phdr;

		phdr.p_type = PT_AARCH64_MEMTAG_MTE;
		phdr.p_offset = offset;
		phdr.p_vaddr = m->start;
		phdr.p_paddr = 0;
		phdr.p_filesz = mte_vma_tag_dump_size(m);
		phdr.p_memsz = m->end - m->start;
		offset += phdr.p_filesz;
		phdr.p_flags = 0;
		phdr.p_align = 0;

		if (!dump_emit(cprm, &phdr, sizeof(phdr)))
			return 0;
	}

	return 1;
}
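
/*
 * Total size of the tag data appended after the regular coredump
 * contents; this matches the sum of the p_filesz values written above.
 */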
size_t elf_core_extra_data_size(struct coredump_params *cprm)
{
	int i;
	struct core_vma_metadata *m;
	size_t data_size = 0;

	for_each_mte_vma(cprm, i, m)
		data_size += mte_vma_tag_dump_size(m);

	return data_size;
}
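
/*
 * Dump the tags themselves, walking the tagged VMAs in the same order
 * as the extra program headers so that the p_offset values line up.
 */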
int elf_core_write_extra_data(struct coredump_params *cprm)
{
	int i;
	struct core_vma_metadata *m;

	for_each_mte_vma(cprm, i, m) {
		if (!mte_dump_tag_range(cprm, m->start, m->dump_size))
			return 0;
	}

	return 1;
}