/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_S390_MEM_DETECT_H
#define _ASM_S390_MEM_DETECT_H

#include <linux/types.h>

enum mem_info_source {
	MEM_DETECT_NONE = 0,
	MEM_DETECT_SCLP_STOR_INFO,
	MEM_DETECT_DIAG260,
	MEM_DETECT_SCLP_READ_INFO,
	MEM_DETECT_BIN_SEARCH
};

struct mem_detect_block {
	u64 start;
	u64 end;
};

/*
 * Storage element id is defined as 1 byte (up to 256 storage elements).
 * In practice only storage element ids 0 and 1 are used.
 * According to the architecture one storage element can have as many as
 * 1020 subincrements. 255 mem_detect_blocks are embedded in mem_detect_info.
 * If more mem_detect_blocks are required, a block of memory from an already
 * known mem_detect_block is taken (entries_extended points to it).
 */
#define MEM_INLINED_ENTRIES 255 /* (PAGE_SIZE - 16) / 16 */
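/*
 * Layout sketch (assuming 4 KiB pages): the 16 bytes of header fields in
 * struct mem_detect_info below (count, info_source, padding, usable) plus
 * 255 * sizeof(struct mem_detect_block) = 4080 bytes of inlined entries
 * add up to exactly one page.
 */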

struct mem_detect_info {
	u32 count;
	u8 info_source;
	unsigned long usable;
	struct mem_detect_block entries[MEM_INLINED_ENTRIES];
	struct mem_detect_block *entries_extended;
};
extern struct mem_detect_info mem_detect;

void add_mem_detect_block(u64 start, u64 end);

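/*
 * Copy the nth detected memory block into @start/@end. Returns 0 on success
 * and -1 if @n is out of range. When @respect_usable_limit is set, blocks
 * starting at or above mem_detect.usable are reported as out of range and
 * @end is clamped to the usable limit.
 */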
static inline int __get_mem_detect_block(u32 n, unsigned long *start,
					 unsigned long *end, bool respect_usable_limit)
{
	if (n >= mem_detect.count) {
		*start = 0;
		*end = 0;
		return -1;
	}

	if (n < MEM_INLINED_ENTRIES) {
		*start = (unsigned long)mem_detect.entries[n].start;
		*end = (unsigned long)mem_detect.entries[n].end;
	} else {
		*start = (unsigned long)mem_detect.entries_extended[n - MEM_INLINED_ENTRIES].start;
		*end = (unsigned long)mem_detect.entries_extended[n - MEM_INLINED_ENTRIES].end;
	}

	if (respect_usable_limit && mem_detect.usable) {
		if (*start >= mem_detect.usable)
			return -1;
		if (*end > mem_detect.usable)
			*end = mem_detect.usable;
	}
	return 0;
}

/**
 * for_each_mem_detect_usable_block - early online memory range iterator
 * @i: an integer used as loop variable
 * @p_start: ptr to unsigned long for start address of the range
 * @p_end: ptr to unsigned long for end address of the range
 *
 * Walks over detected online memory ranges below the usable limit.
 */
#define for_each_mem_detect_usable_block(i, p_start, p_end)	\
	for (i = 0; !__get_mem_detect_block(i, p_start, p_end, true); i++)
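
/*
 * Usage sketch (illustrative): early setup code can register every detected
 * usable range with memblock like this:
 *
 *	unsigned long start, end;
 *	int i;
 *
 *	for_each_mem_detect_usable_block(i, &start, &end)
 *		memblock_add(start, end - start);
 */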

/* Walks over all detected online memory ranges disregarding the usable limit. */
#define for_each_mem_detect_block(i, p_start, p_end)	\
	for (i = 0; !__get_mem_detect_block(i, p_start, p_end, false); i++)

static inline unsigned long get_mem_detect_usable_total(void)
{
	unsigned long start, end, total = 0;
	int i;

	for_each_mem_detect_usable_block(i, &start, &end)
		total += end - start;

	return total;
}

static inline void get_mem_detect_reserved(unsigned long *start,
					   unsigned long *size)
{
	*start = (unsigned long)mem_detect.entries_extended;
	if (mem_detect.count > MEM_INLINED_ENTRIES)
		*size = (mem_detect.count - MEM_INLINED_ENTRIES) * sizeof(struct mem_detect_block);
	else
		*size = 0;
}
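
/*
 * Usage sketch (illustrative): callers that populate memblock typically keep
 * the memory holding the extended entries out of the free memory pool:
 *
 *	unsigned long start, size;
 *
 *	get_mem_detect_reserved(&start, &size);
 *	if (size)
 *		memblock_reserve(start, size);
 */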

/*
 * Return the usable limit if one is set, otherwise the end address of the
 * last detected block (0 if nothing was detected).
 */
static inline unsigned long get_mem_detect_end(void)
{
	unsigned long start;
	unsigned long end;

	if (mem_detect.usable)
		return mem_detect.usable;
	if (mem_detect.count) {
		__get_mem_detect_block(mem_detect.count - 1, &start, &end, false);
		return end;
	}
	return 0;
}

#endif