// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2014-2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
 */

#include <linux/elf.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sort.h>
#include <linux/moduleloader.h>

#include <asm/cache.h>
#include <asm/opcodes.h>

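/*
 * A PLT entry is a single instruction that loads the PC from a literal word
 * stored PLT_ENT_STRIDE bytes further down:
 *
 *   ARM:	ldr   pc, [pc, #(PLT_ENT_STRIDE - 8)]	@ PC reads as '. + 8'
 *   Thumb2:	ldr.w pc, [pc, #(PLT_ENT_STRIDE - 4)]	@ PC reads as '. + 4'
 *
 * so the veneer at &plt->ldr[i] branches to the target kept in &plt->lit[i].
 */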
#ifdef CONFIG_THUMB2_KERNEL
#define PLT_ENT_LDR	__opcode_to_mem_thumb32(0xf8dff000 | \
						(PLT_ENT_STRIDE - 4))
#else
#define PLT_ENT_LDR	__opcode_to_mem_arm(0xe59ff000 | \
					    (PLT_ENT_STRIDE - 8))
#endif
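
/*
 * With CONFIG_DYNAMIC_FTRACE, each module gets veneers for FTRACE_ADDR and
 * MCOUNT_ADDR in the first, fixed PLT slots, so that the ftrace code
 * patching machinery can rely on reachable veneers for those targets no
 * matter where the module was loaded.
 */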
static const u32 fixed_plts[] = {
#ifdef CONFIG_DYNAMIC_FTRACE
	FTRACE_ADDR,
	MCOUNT_ADDR,
#endif
};

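/* does 'loc' point into the module's .init region? */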
static bool in_init(const struct module *mod, unsigned long loc)
{
	return loc - (u32)mod->init_layout.base < mod->init_layout.size;
}

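/*
 * Populate the first PLT group with veneers for the fixed_plts[] targets.
 * This runs at most once per PLT section, the first time get_module_plt()
 * is called for it: a nonzero plt_count means it has already been done.
 */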
static void prealloc_fixed(struct mod_plt_sec *pltsec, struct plt_entries *plt)
{
	int i;

	if (!ARRAY_SIZE(fixed_plts) || pltsec->plt_count)
		return;
	pltsec->plt_count = ARRAY_SIZE(fixed_plts);

	for (i = 0; i < ARRAY_SIZE(plt->ldr); ++i)
		plt->ldr[i] = PLT_ENT_LDR;

	BUILD_BUG_ON(sizeof(fixed_plts) > sizeof(plt->lit));
	memcpy(plt->lit, fixed_plts, sizeof(fixed_plts));
}

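/*
 * Return the address of a PLT veneer that branches to 'val', allocating a
 * new entry if no existing one targets that address yet. Called from
 * apply_relocate() when a jump/call relocation resolves out of range.
 *
 * The PLT is an array of struct plt_entries (see <asm/module.h>): groups of
 * PLT_ENT_COUNT 'ldr' slots followed by their PLT_ENT_COUNT literal words,
 * so the veneer at &plt->ldr[idx] loads its target from &plt->lit[idx].
 */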
u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val)
{
	struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core :
							  &mod->arch.init;
	struct plt_entries *plt;
	int idx;

	/* cache the address; the ELF header is available only during module load */
	if (!pltsec->plt_ent)
		pltsec->plt_ent = (struct plt_entries *)pltsec->plt->sh_addr;
	plt = pltsec->plt_ent;

	prealloc_fixed(pltsec, plt);

	for (idx = 0; idx < ARRAY_SIZE(fixed_plts); ++idx)
		if (plt->lit[idx] == val)
			return (u32)&plt->ldr[idx];

	idx = 0;
	/*
	 * Look for an existing entry pointing to 'val'. Given that the
	 * relocations are sorted, this will be the last entry we allocated
	 * (if one exists).
	 */
	if (pltsec->plt_count > 0) {
		plt += (pltsec->plt_count - 1) / PLT_ENT_COUNT;
		idx = (pltsec->plt_count - 1) % PLT_ENT_COUNT;

		if (plt->lit[idx] == val)
			return (u32)&plt->ldr[idx];

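		/* advance to the first free slot, moving on to the next group if needed */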
		idx = (idx + 1) % PLT_ENT_COUNT;
		if (!idx)
			plt++;
	}

	pltsec->plt_count++;
	BUG_ON(pltsec->plt_count * PLT_ENT_SIZE > pltsec->plt->sh_size);

	if (!idx)
		/* Populate a new set of entries */
		*plt = (struct plt_entries){
			{ [0 ... PLT_ENT_COUNT - 1] = PLT_ENT_LDR, },
			{ val, }
		};
	else
		plt->lit[idx] = val;

	return (u32)&plt->ldr[idx];
}

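/* memcmp() style three-way comparison, yielding -1, 0 or 1 */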
#define cmp_3way(a,b)	((a) < (b) ? -1 : (a) > (b))

static int cmp_rel(const void *a, const void *b)
{
	const Elf32_Rel *x = a, *y = b;
	int i;

	/* sort by type and symbol index */
	i = cmp_3way(ELF32_R_TYPE(x->r_info), ELF32_R_TYPE(y->r_info));
	if (i == 0)
		i = cmp_3way(ELF32_R_SYM(x->r_info), ELF32_R_SYM(y->r_info));
	return i;
}

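/*
 * Check whether the jump/call instruction at base + rel->r_offset carries a
 * zero addend, i.e., whether it branches to the bare symbol address. Only
 * such relocations are candidates for sharing a PLT entry with an earlier,
 * otherwise identical relocation (see duplicate_rel() below).
 */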
static bool is_zero_addend_relocation(Elf32_Addr base, const Elf32_Rel *rel)
{
	u32 *tval = (u32 *)(base + rel->r_offset);

	/*
	 * Do a bitwise compare on the raw addend rather than fully decoding
	 * the offset and doing an arithmetic comparison.
	 * Note that a zero-addend jump/call relocation is encoded taking the
	 * PC bias into account, i.e., -8 for ARM and -4 for Thumb2.
	 */
	switch (ELF32_R_TYPE(rel->r_info)) {
		u16 upper, lower;

	case R_ARM_THM_CALL:
	case R_ARM_THM_JUMP24:
		upper = __mem_to_opcode_thumb16(((u16 *)tval)[0]);
		lower = __mem_to_opcode_thumb16(((u16 *)tval)[1]);

		return (upper & 0x7ff) == 0x7ff && (lower & 0x2fff) == 0x2ffe;

	case R_ARM_CALL:
	case R_ARM_PC24:
	case R_ARM_JUMP24:
		return (__mem_to_opcode_arm(*tval) & 0xffffff) == 0xfffffe;
	}
	BUG();
}

static bool duplicate_rel(Elf32_Addr base, const Elf32_Rel *rel, int num)
{
	const Elf32_Rel *prev;

	/*
	 * Entries are sorted by type and symbol index. That means that,
	 * if a duplicate entry exists, it must be in the preceding
	 * slot.
	 */
	if (!num)
		return false;

	prev = rel + num - 1;
	return cmp_rel(rel + num, prev) == 0 &&
	       is_zero_addend_relocation(base, prev);
}

/* Count how many PLT entries we may need */
static unsigned int count_plts(const Elf32_Sym *syms, Elf32_Addr base,
			       const Elf32_Rel *rel, int num, Elf32_Word dstidx)
{
	unsigned int ret = 0;
	const Elf32_Sym *s;
	int i;

	for (i = 0; i < num; i++) {
		switch (ELF32_R_TYPE(rel[i].r_info)) {
		case R_ARM_CALL:
		case R_ARM_PC24:
		case R_ARM_JUMP24:
		case R_ARM_THM_CALL:
		case R_ARM_THM_JUMP24:
			/*
			 * We only have to consider branch targets that resolve
			 * to symbols that are defined in a different section.
			 * This is not simply a heuristic, it is a fundamental
			 * limitation, since there is no guaranteed way to emit
			 * PLT entries sufficiently close to the branch if the
			 * section size exceeds the range of a branch
			 * instruction. So ignore relocations against defined
			 * symbols if they live in the same section as the
			 * relocation target.
			 */
			s = syms + ELF32_R_SYM(rel[i].r_info);
			if (s->st_shndx == dstidx)
				break;

			/*
			 * Jump relocations with non-zero addends against
			 * undefined symbols are supported by the ELF spec, but
			 * do not occur in practice (e.g., 'jump n bytes past
			 * the entry point of undefined function symbol f').
			 * So we need to support them, but there is no need to
			 * take them into consideration when trying to optimize
			 * this code. So let's only check for duplicates when
			 * the addend is zero. (Note that calls into the core
			 * module via init PLT entries could involve section
			 * relative symbol references with non-zero addends, for
			 * which we may end up emitting duplicates, but the init
			 * PLT is released along with the rest of the .init
			 * region as soon as module loading completes.)
			 */
			if (!is_zero_addend_relocation(base, rel + i) ||
			    !duplicate_rel(base, rel, i))
				ret++;
		}
	}
	return ret;
}

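/*
 * Called by the core module loader while the full ELF image is still
 * mapped: size the .plt and .init.plt sections reserved by the module
 * linker script so that every out-of-range jump/call relocation can be
 * given a veneer once the relocations are applied.
 */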
int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
			      char *secstrings, struct module *mod)
{
	unsigned long core_plts = ARRAY_SIZE(fixed_plts);
	unsigned long init_plts = ARRAY_SIZE(fixed_plts);
	Elf32_Shdr *s, *sechdrs_end = sechdrs + ehdr->e_shnum;
	Elf32_Sym *syms = NULL;

	/*
	 * The PLTs for core module code and for initialization code live in
	 * separate sections; locate them, as well as the symbol table.
	 */
	for (s = sechdrs; s < sechdrs_end; ++s) {
		if (strcmp(".plt", secstrings + s->sh_name) == 0)
			mod->arch.core.plt = s;
		else if (strcmp(".init.plt", secstrings + s->sh_name) == 0)
			mod->arch.init.plt = s;
		else if (s->sh_type == SHT_SYMTAB)
			syms = (Elf32_Sym *)s->sh_addr;
	}

	if (!mod->arch.core.plt || !mod->arch.init.plt) {
		pr_err("%s: module PLT section(s) missing\n", mod->name);
		return -ENOEXEC;
	}
	if (!syms) {
		pr_err("%s: module symtab section missing\n", mod->name);
		return -ENOEXEC;
	}

	for (s = sechdrs + 1; s < sechdrs_end; ++s) {
		Elf32_Rel *rels = (void *)ehdr + s->sh_offset;
		int numrels = s->sh_size / sizeof(Elf32_Rel);
		Elf32_Shdr *dstsec = sechdrs + s->sh_info;

		if (s->sh_type != SHT_REL)
			continue;

		/* ignore relocations that operate on non-exec sections */
		if (!(dstsec->sh_flags & SHF_EXECINSTR))
			continue;

		/* sort by type and symbol index */
		sort(rels, numrels, sizeof(Elf32_Rel), cmp_rel, NULL);

		if (strncmp(secstrings + dstsec->sh_name, ".init", 5) != 0)
			core_plts += count_plts(syms, dstsec->sh_addr, rels,
						numrels, s->sh_info);
		else
			init_plts += count_plts(syms, dstsec->sh_addr, rels,
						numrels, s->sh_info);
	}

	mod->arch.core.plt->sh_type = SHT_NOBITS;
	mod->arch.core.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
	mod->arch.core.plt->sh_addralign = L1_CACHE_BYTES;
	mod->arch.core.plt->sh_size = round_up(core_plts * PLT_ENT_SIZE,
					       sizeof(struct plt_entries));
	mod->arch.core.plt_count = 0;
	mod->arch.core.plt_ent = NULL;

	mod->arch.init.plt->sh_type = SHT_NOBITS;
	mod->arch.init.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
	mod->arch.init.plt->sh_addralign = L1_CACHE_BYTES;
	mod->arch.init.plt->sh_size = round_up(init_plts * PLT_ENT_SIZE,
					       sizeof(struct plt_entries));
	mod->arch.init.plt_count = 0;
	mod->arch.init.plt_ent = NULL;

	pr_debug("%s: plt=%x, init.plt=%x\n", __func__,
		 mod->arch.core.plt->sh_size, mod->arch.init.plt->sh_size);
	return 0;
}

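/*
 * Return true if 'loc' points into the (still allocated) core or init PLT
 * veneers of the module owning that text address, false otherwise.
 */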
bool in_module_plt(unsigned long loc)
{
	struct module *mod;
	bool ret;

	preempt_disable();
	mod = __module_text_address(loc);
	ret = mod && (loc - (u32)mod->arch.core.plt_ent < mod->arch.core.plt_count * PLT_ENT_SIZE ||
		      loc - (u32)mod->arch.init.plt_ent < mod->arch.init.plt_count * PLT_ENT_SIZE);
	preempt_enable();

	return ret;
}