/*
 * Copyright (C) 2018-2022 Intel Corporation.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <types.h>
#include <reloc.h>
#include <asm/boot/ld_sym.h>

#ifdef CONFIG_RELOC
#define DT_NULL		0U	/* end of .dynamic section */
#define DT_RELA		7U	/* relocation table */
#define DT_RELASZ	8U	/* size of reloc table */
#define DT_RELAENT	9U	/* size of one entry */

#define R_X86_64_RELATIVE	8UL
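/*
 * R_X86_64_RELATIVE means "add the image load delta to the value stored at
 * r_offset"; no symbol lookup is involved, and it is the only relocation
 * type the fixup loop below handles.
 */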

struct Elf64_Dyn {
	uint64_t d_tag;
	uint64_t d_ptr;
};

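/*
 * Entries in a DT_RELA table are Elf64_Rela records (r_offset, r_info,
 * r_addend).  The explicit addend is not used by the fixup loop below, so
 * the third field is presumably kept only as reserved space so the entry
 * layout matches DT_RELAENT.
 */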
struct Elf64_Rel {
	uint64_t r_offset;
	uint64_t r_info;
	uint64_t reserved;
};

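/*
 * In r_info the upper 32 bits hold the symbol table index and the lower
 * 32 bits the relocation type; only the type is needed here.
 */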
static inline uint64_t elf64_r_type(uint64_t i)
{
	return (i & 0xffffffffUL);
}
#endif

/* get the delta between CONFIG_HV_RAM_START and the actual load address */
uint64_t get_hv_image_delta(void)
{
	uint64_t addr;

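	/*
	 * "call 0f" pushes the run-time address of label 0 on the stack and
	 * "pop %rax" retrieves it; subtracting the link-time address ($0b)
	 * leaves the load delta in RAX.
	 */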
	asm volatile (" call 0f\n"
		"0: pop %%rax\n"
		"	sub $0b, %%rax\n"
		"	mov %%rax, %0\n"
		: "=m" (addr)
		:
		: "%rax");

	return addr;
}

/* get the actual Hypervisor load address (HVA) */
uint64_t get_hv_image_base(void)
{
	return (get_hv_image_delta() + CONFIG_HV_RAM_START);
}

inline uint64_t get_hv_image_size(void)
{
	return (uint64_t)(&ld_ram_end - &ld_ram_start);
}

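/*
 * Patch the hypervisor image in place: walk the dynamic relocation table and
 * apply the load delta to every R_X86_64_RELATIVE entry so that absolute
 * addresses embedded in the image match the actual load address.
 */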
void relocate(void)
{
#ifdef CONFIG_RELOC
	struct Elf64_Dyn *dyn;
	struct Elf64_Rel *entry = NULL;
	uint8_t *rela_start = NULL, *rela_end = NULL;
	uint64_t rela_size = 0;
	uint64_t delta, entry_size = 0;
	uint64_t trampoline_end;
	uint64_t primary_entry_end;
	uint64_t *addr;

	/* get the delta that needs to be patched */
	delta = get_hv_image_delta();
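	/*
	 * A zero delta means the image already runs at its link address
	 * (CONFIG_HV_RAM_START), so there is nothing to patch.
	 */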
	if (delta != 0U) {

		/* Look for the description of the relocation sections */
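		/*
		 * The d_ptr of DT_RELA is a link-time address, hence the
		 * "+ delta" below to locate the table at run time; DT_RELASZ
		 * and DT_RELAENT are plain byte counts.
		 */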
		for (dyn = (struct Elf64_Dyn *)_DYNAMIC; dyn->d_tag != DT_NULL; dyn++) {
			switch (dyn->d_tag) {
			case DT_RELA:
				rela_start = (uint8_t *)(dyn->d_ptr + delta);
				break;
			case DT_RELASZ:
				rela_size = dyn->d_ptr;
				break;
			case DT_RELAENT:
				entry_size = dyn->d_ptr;
				break;
			default:
				/*
				 * If no DT_RELA/DT_RELASZ entry is found, rela_start and
				 * rela_end stay NULL and the while loop below is never
				 * executed.
				 */
				break;
			}
		}

		/*
		 * Need to subtract the relocation delta to get the correct
		 * absolute addresses
		 */
		trampoline_end = (uint64_t)(&ld_trampoline_end) - delta;
		primary_entry_end = (uint64_t)(&ld_entry_end) - delta;
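		/*
		 * r_offset in the relocation entries holds link-time addresses,
		 * so the run-time section-end symbols are converted back before
		 * the comparison in the loop below.
		 */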

		rela_end = rela_start + rela_size;
		while (rela_start < rela_end) {
			entry = (struct Elf64_Rel *)rela_start;
			if ((elf64_r_type(entry->r_info)) == R_X86_64_RELATIVE) {
				addr = (uint64_t *)(delta + entry->r_offset);
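				/*
				 * addr is the run-time location to patch; the "+= delta"
				 * fixup below converts the link-time value (presumably
				 * left there by the linker) into its run-time address.
				 */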

				/*
				 * We won't fix up any symbols from trampoline.S or cpu_primary.S
				 * for a number of reasons:
				 *
				 * - trampoline code itself takes another relocation,
				 *   so any entries for trampoline symbols can't be fixed up
				 *   through .rela sections
				 * - Linker option "-z noreloc-overflow" could force R_X86_32
				 *   to R_X86_64 in the relocation sections, which could make
				 *   the fixed-up code dirty.
				 */
				if ((entry->r_offset > trampoline_end) && (entry->r_offset > primary_entry_end)) {
					*addr += delta;
				}
			}
			rela_start += entry_size;
		}
	}
#endif
}