1/* SPDX-License-Identifier: (BSD-2-Clause AND MIT) */
2/*
3 * Copyright 2022-2023 NXP
4 */
5
6/*
7 * Copyright (c) 2014, Linaro Limited
8 * All rights reserved.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions are met:
12 *
13 * 1. Redistributions of source code must retain the above copyright notice,
14 * this list of conditions and the following disclaimer.
15 *
16 * 2. Redistributions in binary form must reproduce the above copyright notice,
17 * this list of conditions and the following disclaimer in the documentation
18 * and/or other materials provided with the distribution.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
24 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE.
31 */
32
33/*
34 * Copyright (c) 2008-2010 Travis Geiselbrecht
35 *
36 * Permission is hereby granted, free of charge, to any person obtaining
37 * a copy of this software and associated documentation files
38 * (the "Software"), to deal in the Software without restriction,
39 * including without limitation the rights to use, copy, modify, merge,
40 * publish, distribute, sublicense, and/or sell copies of the Software,
41 * and to permit persons to whom the Software is furnished to do so,
42 * subject to the following conditions:
43 *
44 * The above copyright notice and this permission notice shall be
45 * included in all copies or substantial portions of the Software.
46 *
47 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
48 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
49 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
50 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
51 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
52 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
53 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
54 */
55
56#include <mm/core_mmu.h>
57#include <platform_config.h>
58#include <util.h>
59
/*
 * TEE_RAM_VA_START:            The start virtual address of the TEE RAM
 * TEE_TEXT_VA_START:           The start virtual address of the OP-TEE text
 *
 * Since TEE_RAM_VA_START is defined as TEE_RAM_START, the second macro
 * algebraically reduces to TEE_LOAD_ADDR: the text is linked at the
 * address it is loaded at.
 */
#define TEE_RAM_VA_START        TEE_RAM_START
#define TEE_TEXT_VA_START       (TEE_RAM_VA_START + \
					(TEE_LOAD_ADDR - TEE_RAM_START))

/*
 * LD_ROUNDDOWN(x, y): round x down to the nearest multiple of y.
 *
 * Note:
 * Clang 11 (ld.lld) generates non-relocatable reference when using ROUNDDOWN()
 * from <util.h>, which does not work with ASLR.
 */
#define LD_ROUNDDOWN(x, y) ((x) - ((x) % (y)))
74
/*
 * Object format, target architecture and entry symbol. The CFG_* values
 * are macros expanded by the C preprocessor from the build configuration
 * (see the includes at the top of this script).
 */
OUTPUT_FORMAT(CFG_KERN_LINKER_FORMAT)
OUTPUT_ARCH(CFG_KERN_LINKER_ARCH)

ENTRY(_start)
/*
 * Core memory layout: text, rodata, rw data, bss, heap and "nozi" data,
 * plus the boundary symbols (__flatmap_*, __text_*, ...) the C code and
 * the MMU setup rely on. Section order is load-bearing; only the heap
 * padding alignment below was changed (magic 4 * 1024 -> SMALL_PAGE_SIZE).
 */
SECTIONS
{
	. = TEE_TEXT_VA_START;
	/* Ensure text section is page aligned */
	ASSERT(!(TEE_TEXT_VA_START & (SMALL_PAGE_SIZE - 1)),
	       "text start should align to 4Kb")

	__text_start = .;

	/*
	 * Memory between TEE_TEXT_VA_START and page aligned rounded down
	 * value will be mapped with unpaged "text" section attributes:
	 * likely to be read-only/executable.
	 */
	__flatmap_rx_start = LD_ROUNDDOWN(__text_start, SMALL_PAGE_SIZE);

	.text : {
		/* Entry code first, so _start sits at the start of .text */
		KEEP(*(.text._start))
		/*
		 * Code/data grouped for the identity (1:1) mapping used
		 * during early boot -- NOTE(review): confirm against the
		 * MMU init code consuming __identity_map_init_start/end.
		 */
		__identity_map_init_start = .;
		__text_data_start = .;
		*(.identity_map.data)
		__text_data_end = .;
		*(.identity_map .identity_map.*)
		__identity_map_init_end = .;
		*(.text .text.*)
		*(.sram.text.glue_7* .gnu.linkonce.t.*)
		. = ALIGN(8);
	}
	__text_end = .;

#ifdef CFG_CORE_RODATA_NOEXEC
	/* Pad so rodata can be mapped non-executable on its own pages */
	. = ALIGN(SMALL_PAGE_SIZE);
#endif
	__flatmap_rx_size = . - __flatmap_rx_start;
	__flatmap_ro_start = .;

	.rodata : ALIGN(8) {
		__rodata_start = .;
		*(.gnu.linkonce.r.*)
		*(.rodata .rodata.*)
#ifndef CFG_CORE_ASLR
		/* With ASLR these arrays are emitted elsewhere */
		. = ALIGN(8);
		KEEP(*(SORT(.scattered_array*)));
#endif
		. = ALIGN(8);
		__rodata_end = .;
	}

	.got : { *(.got.plt) *(.got) }
	.note.gnu.property : { *(.note.gnu.property) }
	.plt : { *(.plt) }

	/* Constructor/destructor function-pointer tables */
	.ctors : ALIGN(8) {
		__ctor_list = .;
		KEEP(*(.ctors .ctors.* .init_array .init_array.*))
		__ctor_end = .;
	}
	.dtors : ALIGN(8) {
		__dtor_list = .;
		KEEP(*(.dtors .dtors.* .fini_array .fini_array.*))
		__dtor_end = .;
	}

	/* Start page aligned read-write memory */
#ifdef CFG_CORE_RWDATA_NOEXEC
	. = ALIGN(SMALL_PAGE_SIZE);
#endif
	__flatmap_ro_size = . - __flatmap_ro_start;
	__flatmap_rw_start = .;

	.data : ALIGN(8) {
		/* writable data  */
		__data_start_rom = .;
		/* in one segment binaries, the rom data address is on top
		   of the ram data address */
		__data_start = .;
		*(.data .data.* .gnu.linkonce.d.*)
		. = ALIGN(8);
		/*
		 * To allow the linker relax accesses to global symbols,
		 * those need to be within imm12 (signed 12-bit) offsets
		 * from __global_pointer$.
		 */
		PROVIDE(__global_pointer$ = . + 0x800 );
	}

	/* uninitialized data */
	.bss : {
		__data_end = .;
		__bss_start = .;
		*(.bss .bss.*)
		*(.gnu.linkonce.b.*)
		*(COMMON)
		. = ALIGN(8);
		__bss_end = .;
	}

	.heap1 (NOLOAD) : {
		/*
		 * We're keeping track of the padding added before the
		 * .nozi section so we can do something useful with
		 * this otherwise wasted memory.
		 */
		__heap1_start = .;
		. += CFG_CORE_HEAP_SIZE;
		/*
		 * Pad the heap to a page boundary. Use the named constant
		 * (asserted as 4Kb above) rather than the magic 4 * 1024,
		 * for consistency with the other alignments in this script.
		 */
		. = ALIGN(SMALL_PAGE_SIZE);
		__heap1_end = .;
	}
	/*
	 * Uninitialized data that shouldn't be zero initialized at
	 * runtime.
	 */
	.nozi (NOLOAD) : {
		__nozi_start = .;
		KEEP(*(.nozi .nozi.*))
		. = ALIGN(16);
		__nozi_end = .;
		__nozi_stack_start = .;
		KEEP(*(.nozi_stack .nozi_stack.*))
		. = ALIGN(8);
		__nozi_stack_end = .;
	}

#ifdef CFG_CORE_SANITIZE_KADDRESS
	/*
	 * Reserve the top ninth of TEE RAM for the ASAN shadow: each
	 * shadow byte covers 8 bytes, leaving 8/9 of the range for the
	 * memory it shadows.
	 */
	. = TEE_RAM_VA_START + (TEE_RAM_VA_SIZE * 8) / 9 - 8;
	. = ALIGN(8);
	.asan_shadow : {
		__asan_shadow_start = .;
		. += TEE_RAM_VA_SIZE / 9;
		__asan_shadow_end = .;
		__asan_shadow_size = __asan_shadow_end - __asan_shadow_start;
	}
#endif /*CFG_CORE_SANITIZE_KADDRESS*/

	__end = .;
	__init_size = __data_end - TEE_TEXT_VA_START;

	/*
	 * Guard against moving the location counter backwards in the assignment
	 * below.
	 */
	ASSERT(. <= (TEE_RAM_VA_START + TEE_RAM_VA_SIZE),
		"TEE_RAM_VA_SIZE is too small")
	. = TEE_RAM_VA_START + TEE_RAM_VA_SIZE;

	_end_of_ram = .;

	__flatmap_rw_size = _end_of_ram - __flatmap_rw_start;
	__get_tee_init_end = .;

	/*
	 * These regions will not become a normal part of the dumped
	 * binary, instead some are interpreted by the dump script and
	 * converted into suitable format for OP-TEE itself to use.
	 */
	.dynamic : { *(.dynamic) }
	.hash : { *(.hash) }
	.dynsym : { *(.dynsym) }
	.dynstr : { *(.dynstr) }

	.rel : {
		*(.rel.*)
	}
	.rela : {
		*(.rela.*)
	}
#ifndef CFG_CORE_ASLR
	/* Relocation entries are only expected for a relocatable (ASLR) core */
	ASSERT(SIZEOF(.rel) == 0, "Relocation entries not expected")
	ASSERT(SIZEOF(.rela) == 0, "Relocation entries not expected")
#endif

	/DISCARD/ : {
		/* Strip unnecessary stuff */
		*(.comment .note .eh_frame .interp)
		/* Strip meta variables */
		*(__keep_meta_vars*)
	}

}
258
/* Unpaged read-only memories */
__vcore_unpg_rx_start = __flatmap_rx_start;
__vcore_unpg_ro_start = __flatmap_ro_start;
#ifdef CFG_CORE_RODATA_NOEXEC
/* rodata gets its own non-executable mapping */
__vcore_unpg_rx_size = __flatmap_rx_size;
__vcore_unpg_ro_size = __flatmap_ro_size;
#else
/* Without RODATA_NOEXEC the RO region is folded into the RX mapping */
__vcore_unpg_rx_size = __flatmap_rx_size + __flatmap_ro_size;
__vcore_unpg_ro_size = 0;
#endif
__vcore_unpg_rx_end = __vcore_unpg_rx_start + __vcore_unpg_rx_size;
__vcore_unpg_ro_end = __vcore_unpg_ro_start + __vcore_unpg_ro_size;
271
/* Unpaged read-write memory: from the start of .data to the end of RAM */
__vcore_unpg_rw_start = __flatmap_rw_start;
__vcore_unpg_rw_size = __flatmap_rw_size;
__vcore_unpg_rw_end = __vcore_unpg_rw_start + __vcore_unpg_rw_size;
276
#ifdef CFG_CORE_SANITIZE_KADDRESS
/*
 * Page-granular bounds of the ASAN shadow mapping. The divide-then-
 * multiply open-codes round-down (start) and round-up (end) to
 * SMALL_PAGE_SIZE; <util.h> ROUNDDOWN()/ROUNDUP() are avoided for the
 * same linker-relocation reason as LD_ROUNDDOWN() above.
 */
__asan_map_start = (__asan_shadow_start / SMALL_PAGE_SIZE) *
		   SMALL_PAGE_SIZE;
__asan_map_end = ((__asan_shadow_end - 1) / SMALL_PAGE_SIZE) *
		 SMALL_PAGE_SIZE + SMALL_PAGE_SIZE;
__asan_map_size = __asan_map_end - __asan_map_start;
#endif /*CFG_CORE_SANITIZE_KADDRESS*/
284