// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2024 Google LLC
 * Written by Simon Glass <sjg@chromium.org>
 */

#include <gzip.h>
#include <image.h>
#include <log.h>
#include <mapmem.h>
#include <spl.h>
#include <asm/global_data.h>
#include <asm/io.h>
#include <asm/sections.h>
#include <asm/unaligned.h>
#include <linux/types.h>
#include <lzma/LzmaTypes.h>
#include <lzma/LzmaDec.h>
#include <lzma/LzmaTools.h>
#include <u-boot/crc.h>
#include <u-boot/lz4.h>

DECLARE_GLOBAL_DATA_PTR;

/* provide a way to jump straight into the relocation code, for debugging */
#define DEBUG_JUMP	0

enum {
	/* margin to allow for stack growth */
	RELOC_STACK_MARGIN	= 0x800,

	/* align base address for DMA controllers which require it */
	BASE_ALIGN		= 0x200,

	STACK_PROT_VALUE	= 0x51ce4697,
};

typedef int (*rcode_func)(struct spl_image_info *image);

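/*
 * Work out where everything goes: the buffer for the loaded image and its
 * FDT sits just above the current FDT, the relocation code is copied to just
 * below the stack and a canary word between the two guards against overflow
 */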
static int setup_layout(struct spl_image_info *image, ulong *addrp)
{
	ulong base, fdt_size;
	ulong limit, rcode_base;
	uint rcode_size, need_size;
	int buf_size, margin;
	char *rcode_buf;

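	/*
	 * place a canary word a little below the current stack position; if
	 * it gets overwritten, the stack has grown into the relocation area
	 */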
	limit = ALIGN(map_to_sysmem(&limit) - RELOC_STACK_MARGIN, 8);
	image->stack_prot = map_sysmem(limit, sizeof(uint));
	*image->stack_prot = STACK_PROT_VALUE;

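	/* place the image buffer just above the FDT, aligned for DMA */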
	fdt_size = fdt_totalsize(gd->fdt_blob);
	base = ALIGN(map_to_sysmem(gd->fdt_blob) + fdt_size + BASE_ALIGN - 1,
		     BASE_ALIGN);

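	/* the relocation code goes at the top, just below the canary word */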
	rcode_size = _rcode_end - _rcode_start;
	rcode_base = limit - rcode_size;
	buf_size = rcode_base - base;
	need_size = image->size + image->fdt_size;
	margin = buf_size - need_size;
	log_debug("spl_reloc %s->%s: margin%s%x limit %lx fdt_size %lx base %lx avail %x image %x fdt %lx need %x\n",
		  spl_phase_name(spl_phase()), spl_phase_name(spl_phase() + 1),
		  margin >= 0 ? " " : " -", abs(margin), limit, fdt_size, base,
		  buf_size, image->size, image->fdt_size, need_size);
	if (margin < 0) {
		log_err("Image size %x but buffer is only %x\n", need_size,
			buf_size);
		return -ENOSPC;
	}

	rcode_buf = map_sysmem(rcode_base, rcode_size);
	log_debug("_rcode_start %p: %x -- func %p %x\n", _rcode_start,
		  *(uint *)_rcode_start, setup_layout, *(uint *)setup_layout);

	image->reloc_offset = rcode_buf - _rcode_start;
	log_debug("_rcode start %lx base %lx size %x offset %lx\n",
		  (ulong)map_to_sysmem(_rcode_start), rcode_base, rcode_size,
		  image->reloc_offset);

	memcpy(rcode_buf, _rcode_start, rcode_size);

	image->buf = map_sysmem(base, need_size);
	image->fdt_buf = image->buf + image->size;
	image->rcode_buf = rcode_buf;
	*addrp = base;

	return 0;
}

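/*
 * Set up the layout for the next phase; on success *addrp holds the address
 * at which the image should be loaded
 */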
int spl_reloc_prepare(struct spl_image_info *image, ulong *addrp)
{
	int ret;

	ret = setup_layout(image, addrp);
	if (ret)
		return ret;

	return 0;
}

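/* the next phase's entry point, given a checksum and the uncompressed size */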
typedef void __noreturn (*image_entry_noargs_t)(uint crc, uint unc_len);

/* this is the relocation + jump code that is copied to the top of memory */
__rcode int rcode_reloc_and_jump(struct spl_image_info *image)
{
	image_entry_noargs_t entry = (image_entry_noargs_t)image->entry_point;
	u32 *dst;
	ulong image_len;
	size_t unc_len;
	int ret, crc;
	uint magic;

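	/* space for the uncompressed image runs up to the relocated code */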
	dst = map_sysmem(image->load_addr, image->size);
	unc_len = (void *)image->rcode_buf - (void *)dst;
	image_len = image->size;
	if (*image->stack_prot != STACK_PROT_VALUE)
		return -EFAULT;
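	/* decompress the image to its load address, or copy it as is */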
	magic = get_unaligned_le32(image->buf);
	if (CONFIG_IS_ENABLED(LZMA)) {
		SizeT lzma_len = unc_len;

		ret = lzmaBuffToBuffDecompress((u8 *)dst, &lzma_len,
					       image->buf, image_len);
		unc_len = lzma_len;
	} else if (CONFIG_IS_ENABLED(GZIP)) {
		ret = gunzip(dst, unc_len, image->buf, &image_len);
	} else if (CONFIG_IS_ENABLED(LZ4) && magic == LZ4F_MAGIC) {
		ret = ulz4fn(image->buf, image_len, dst, &unc_len);
		if (ret)
			return ret;
	} else {
		u32 *src, *end, *ptr;

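		/* no compression: copy the image into place a word at a time */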
		unc_len = image->size;
		for (src = image->buf, end = (void *)src + image->size,
		     ptr = dst; src < end;)
			*ptr++ = *src++;
	}
	if (*image->stack_prot != STACK_PROT_VALUE)
		return -EFAULT;

	/* copy in the FDT if needed */
	if (image->fdt_size)
		memcpy(image->fdt_start, image->fdt_buf, image->fdt_size);

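	/* compute a checksum of the loaded image to pass to the next phase */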
	crc = crc8(0, (u8 *)dst, unc_len);

	/* jump to the entry point */
	entry(crc, unc_len);
}

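/*
 * Jump to the next phase via the relocated code, so that decompression does
 * not run from memory which the incoming image may overwrite
 */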
int spl_reloc_jump(struct spl_image_info *image, spl_jump_to_image_t jump)
{
	rcode_func loader;
	int ret;

	log_debug("malloc usage %x bytes (%d KB of %d KB)\n", gd->malloc_ptr,
		  gd->malloc_ptr / 1024, CONFIG_VAL(SYS_MALLOC_F_LEN) / 1024);

	if (*image->stack_prot != STACK_PROT_VALUE) {
		log_err("stack busted, cannot continue\n");
		return -EFAULT;
	}
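	/* get the address of rcode_reloc_and_jump() in the relocated code */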
	loader = (rcode_func)(void *)rcode_reloc_and_jump + image->reloc_offset;
	log_debug("Jumping via %p to %lx - image %p size %x load %lx\n", loader,
		  image->entry_point, image, image->size, image->load_addr);

	log_debug("unc_len %lx\n",
		  image->rcode_buf - map_sysmem(image->load_addr, image->size));
	if (DEBUG_JUMP) {
		rcode_reloc_and_jump(image);
	} else {
		/*
		 * Must disable LOG_DEBUG since the decompressor cannot call
		 * log functions, printf(), etc.
		 */
		_Static_assert(DEBUG_JUMP || !_DEBUG,
			       "Cannot have debug output from decompressor");
		ret = loader(image);
	}

	return -EFAULT;
}