1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/errno.h>
3 #include <linux/init.h>
4 #include <asm/setup.h>
5 #include <asm/processor.h>
6 #include <asm/sclp.h>
7 #include <asm/sections.h>
8 #include <asm/mem_detect.h>
9 #include <asm/sparsemem.h>
10 #include "decompressor.h"
11 #include "boot.h"
12 
/* Boot memory map; __bootdata shares it with the decompressed kernel. */
struct mem_detect_info __bootdata(mem_detect);

/* up to 256 storage elements, 1020 subincrements each */
/* NOTE(review): the /2 presumably accounts for adjacent-subincrement merging
 * (each block then spans at least two subincrements) — confirm against the
 * SCLP storage info layout. */
#define ENTRIES_EXTENDED_MAX						       \
	(256 * (1020 / 2) * sizeof(struct mem_detect_block))
18 
/*
 * Return a pointer to memory block entry @n. The first
 * MEM_INLINED_ENTRIES entries live inline in mem_detect itself; any
 * further entries reside in the separately placed extended array.
 */
static struct mem_detect_block *__get_mem_detect_block_ptr(u32 n)
{
	return (n < MEM_INLINED_ENTRIES) ?
		&mem_detect.entries[n] :
		&mem_detect.entries_extended[n - MEM_INLINED_ENTRIES];
}
25 
26 /*
27  * sequential calls to add_mem_detect_block with adjacent memory areas
28  * are merged together into single memory block.
29  */
add_mem_detect_block(u64 start,u64 end)30 void add_mem_detect_block(u64 start, u64 end)
31 {
32 	struct mem_detect_block *block;
33 
34 	if (mem_detect.count) {
35 		block = __get_mem_detect_block_ptr(mem_detect.count - 1);
36 		if (block->end == start) {
37 			block->end = end;
38 			return;
39 		}
40 	}
41 
42 	block = __get_mem_detect_block_ptr(mem_detect.count);
43 	block->start = start;
44 	block->end = end;
45 	mem_detect.count++;
46 }
47 
/*
 * Issue diagnose 0x260 with ry = 0x10 ("storage configuration") under a
 * temporary program check new PSW, so that an unsupported diagnose (which
 * raises a program check) fails gracefully instead of crashing.
 *
 * @rx1/@rx2: register pair passed to the diagnose (response buffer address
 *	      and size, per the caller).
 *
 * Returns the value the diagnose leaves in ry on success (cc 0), -1 on
 * failure or program check.
 */
static int __diag260(unsigned long rx1, unsigned long rx2)
{
	unsigned long reg1, reg2, ry;
	union register_pair rx;
	psw_t old;
	int rc;

	rx.even = rx1;
	rx.odd	= rx2;
	ry = 0x10; /* storage configuration */
	rc = -1;   /* fail */
	asm volatile(
		/* save the current program check new PSW */
		"	mvc	0(16,%[psw_old]),0(%[psw_pgm])\n"
		/* build a replacement PSW from the current PSW mask ... */
		"	epsw	%[reg1],%[reg2]\n"
		"	st	%[reg1],0(%[psw_pgm])\n"
		"	st	%[reg2],4(%[psw_pgm])\n"
		/* ... with an address that resumes at label 1 on a fault */
		"	larl	%[reg1],1f\n"
		"	stg	%[reg1],8(%[psw_pgm])\n"
		"	diag	%[rx],%[ry],0x260\n"
		/* condition code -> rc; skipped if the diag program-checks */
		"	ipm	%[rc]\n"
		"	srl	%[rc],28\n"
		/* restore the original program check new PSW */
		"1:	mvc	0(16,%[psw_pgm]),0(%[psw_old])\n"
		: [reg1] "=&d" (reg1),
		  [reg2] "=&a" (reg2),
		  [rc] "+&d" (rc),
		  [ry] "+&d" (ry),
		  "+Q" (S390_lowcore.program_new_psw),
		  "=Q" (old)
		: [rx] "d" (rx.pair),
		  [psw_old] "a" (&old),
		  [psw_pgm] "a" (&S390_lowcore.program_new_psw)
		: "cc", "memory");
	return rc == 0 ? ry : -1;
}
82 
diag260(void)83 static int diag260(void)
84 {
85 	int rc, i;
86 
87 	struct {
88 		unsigned long start;
89 		unsigned long end;
90 	} storage_extents[8] __aligned(16); /* VM supports up to 8 extends */
91 
92 	memset(storage_extents, 0, sizeof(storage_extents));
93 	rc = __diag260((unsigned long)storage_extents, sizeof(storage_extents));
94 	if (rc == -1)
95 		return -1;
96 
97 	for (i = 0; i < min_t(int, rc, ARRAY_SIZE(storage_extents)); i++)
98 		add_mem_detect_block(storage_extents[i].start, storage_extents[i].end + 1);
99 	return 0;
100 }
101 
tprot(unsigned long addr)102 static int tprot(unsigned long addr)
103 {
104 	unsigned long reg1, reg2;
105 	int rc = -EFAULT;
106 	psw_t old;
107 
108 	asm volatile(
109 		"	mvc	0(16,%[psw_old]),0(%[psw_pgm])\n"
110 		"	epsw	%[reg1],%[reg2]\n"
111 		"	st	%[reg1],0(%[psw_pgm])\n"
112 		"	st	%[reg2],4(%[psw_pgm])\n"
113 		"	larl	%[reg1],1f\n"
114 		"	stg	%[reg1],8(%[psw_pgm])\n"
115 		"	tprot	0(%[addr]),0\n"
116 		"	ipm	%[rc]\n"
117 		"	srl	%[rc],28\n"
118 		"1:	mvc	0(16,%[psw_pgm]),0(%[psw_old])\n"
119 		: [reg1] "=&d" (reg1),
120 		  [reg2] "=&a" (reg2),
121 		  [rc] "+&d" (rc),
122 		  "=Q" (S390_lowcore.program_new_psw.addr),
123 		  "=Q" (old)
124 		: [psw_old] "a" (&old),
125 		  [psw_pgm] "a" (&S390_lowcore.program_new_psw),
126 		  [addr] "a" (addr)
127 		: "cc", "memory");
128 	return rc;
129 }
130 
search_mem_end(void)131 static unsigned long search_mem_end(void)
132 {
133 	unsigned long range = 1 << (MAX_PHYSMEM_BITS - 20); /* in 1MB blocks */
134 	unsigned long offset = 0;
135 	unsigned long pivot;
136 
137 	while (range > 1) {
138 		range >>= 1;
139 		pivot = offset + range;
140 		if (!tprot(pivot << 20))
141 			offset = pivot;
142 	}
143 	return (offset + 1) << 20;
144 }
145 
detect_memory(unsigned long * safe_addr)146 unsigned long detect_memory(unsigned long *safe_addr)
147 {
148 	unsigned long max_physmem_end = 0;
149 
150 	sclp_early_get_memsize(&max_physmem_end);
151 	mem_detect.entries_extended = (struct mem_detect_block *)ALIGN(*safe_addr, sizeof(u64));
152 
153 	if (!sclp_early_read_storage_info()) {
154 		mem_detect.info_source = MEM_DETECT_SCLP_STOR_INFO;
155 	} else if (!diag260()) {
156 		mem_detect.info_source = MEM_DETECT_DIAG260;
157 		max_physmem_end = max_physmem_end ?: get_mem_detect_end();
158 	} else if (max_physmem_end) {
159 		add_mem_detect_block(0, max_physmem_end);
160 		mem_detect.info_source = MEM_DETECT_SCLP_READ_INFO;
161 	} else {
162 		max_physmem_end = search_mem_end();
163 		add_mem_detect_block(0, max_physmem_end);
164 		mem_detect.info_source = MEM_DETECT_BIN_SEARCH;
165 	}
166 
167 	if (mem_detect.count > MEM_INLINED_ENTRIES) {
168 		*safe_addr += (mem_detect.count - MEM_INLINED_ENTRIES) *
169 			     sizeof(struct mem_detect_block);
170 	}
171 
172 	return max_physmem_end;
173 }
174 
mem_detect_set_usable_limit(unsigned long limit)175 void mem_detect_set_usable_limit(unsigned long limit)
176 {
177 	struct mem_detect_block *block;
178 	int i;
179 
180 	/* make sure mem_detect.usable ends up within online memory block */
181 	for (i = 0; i < mem_detect.count; i++) {
182 		block = __get_mem_detect_block_ptr(i);
183 		if (block->start >= limit)
184 			break;
185 		if (block->end >= limit) {
186 			mem_detect.usable = limit;
187 			break;
188 		}
189 		mem_detect.usable = block->end;
190 	}
191 }
192