// SPDX-License-Identifier: GPL-2.0
/*
 * The main purpose of the tests here is to exercise the migration entry code
 * paths in the kernel.
 */

#include "../kselftest_harness.h"
#include "thp_settings.h"

#include <strings.h>
#include <pthread.h>
#include <numa.h>
#include <numaif.h>
#include <sys/mman.h>
#include <sys/prctl.h>
#include <sys/types.h>
#include <signal.h>
#include <time.h>
#include "vm_util.h"

#define TWOMEG		(2<<20)
#define RUNTIME		(20)
#define MAX_RETRIES	100
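/* Round x up to the next multiple of a; a must be a power of two. */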
#define ALIGN(x, a)	(((x) + (a - 1)) & (~((a) - 1)))

FIXTURE(migration)
{
	pthread_t *threads;
	pid_t *pids;
	int nthreads;
	int n1;
	int n2;
};

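/*
 * Size the worker arrays from the CPUs available to the task and record the
 * first two NUMA nodes the task may use. n1/n2 stay at -1 if fewer than two
 * nodes are found, which makes the tests below skip.
 */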
FIXTURE_SETUP(migration)
{
	int n;

	ASSERT_EQ(numa_available(), 0);
	self->nthreads = numa_num_task_cpus() - 1;
	self->n1 = -1;
	self->n2 = -1;

	for (n = 0; n < numa_max_possible_node(); n++)
		if (numa_bitmask_isbitset(numa_all_nodes_ptr, n)) {
			if (self->n1 == -1) {
				self->n1 = n;
			} else {
				self->n2 = n;
				break;
			}
		}

	self->threads = malloc(self->nthreads * sizeof(*self->threads));
	ASSERT_NE(self->threads, NULL);
	self->pids = malloc(self->nthreads * sizeof(*self->pids));
	ASSERT_NE(self->pids, NULL);
};

FIXTURE_TEARDOWN(migration)
{
	free(self->threads);
	free(self->pids);
}

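/*
 * Ping-pong the page under ptr between nodes n1 and n2 for RUNTIME seconds.
 * Returns 0 once the time is up, -1 if reading the clock fails, or -2 if a
 * migration attempt fails outright or soft failures exceed MAX_RETRIES.
 */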
int migrate(uint64_t *ptr, int n1, int n2)
{
	int ret, tmp;
	int status = 0;
	struct timespec ts1, ts2;
	int failures = 0;

	if (clock_gettime(CLOCK_MONOTONIC, &ts1))
		return -1;

	while (1) {
		if (clock_gettime(CLOCK_MONOTONIC, &ts2))
			return -1;

		if (ts2.tv_sec - ts1.tv_sec >= RUNTIME)
			return 0;

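		/*
		 * Request migration of the single page containing ptr to
		 * node n2. A positive return value is the number of pages
		 * that could not be moved (retried below); a negative one
		 * is a hard error.
		 */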
		ret = move_pages(0, 1, (void **) &ptr, &n2, &status,
				MPOL_MF_MOVE_ALL);
		if (ret) {
			if (ret > 0) {
				/* Migration is best effort; try again */
				if (++failures < MAX_RETRIES)
					continue;
				printf("Didn't migrate %d pages\n", ret);
			} else {
				perror("Couldn't migrate pages");
			}
			return -2;
		}
		failures = 0;
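		/* Swap nodes so the next iteration moves the page back. */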
		tmp = n2;
		n2 = n1;
		n1 = tmp;
	}

	return 0;
}

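/*
 * Worker body: keep reading the start of the mapping so accesses race with
 * migration. Runs until the thread is cancelled or the child process is
 * killed.
 */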
void *access_mem(void *ptr)
{
	while (1) {
		pthread_testcancel();
		/* Force a read from the memory pointed to by ptr. This ensures
		 * the memory access actually happens and prevents the compiler
		 * from optimizing away this entire loop.
		 */
		FORCE_READ((uint64_t *)ptr);
	}

	return NULL;
}

/*
 * Basic migration entry testing. One thread moves pages back and forth
 * between nodes whilst other threads try to access them, triggering the
 * migration entry wait paths in the kernel.
 */
TEST_F_TIMEOUT(migration, private_anon, 2*RUNTIME)
{
	uint64_t *ptr;
	int i;

	if (self->nthreads < 2 || self->n1 < 0 || self->n2 < 0)
		SKIP(return, "Not enough threads or NUMA nodes available");

	ptr = mmap(NULL, TWOMEG, PROT_READ | PROT_WRITE,
		MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);

	memset(ptr, 0xde, TWOMEG);
	for (i = 0; i < self->nthreads - 1; i++)
		if (pthread_create(&self->threads[i], NULL, access_mem, ptr))
			perror("Couldn't create thread");

	ASSERT_EQ(migrate(ptr, self->n1, self->n2), 0);
	for (i = 0; i < self->nthreads - 1; i++)
		ASSERT_EQ(pthread_cancel(self->threads[i]), 0);
}

/*
 * Same as the previous test but with a shared mapping accessed by child
 * processes instead of threads.
 */
TEST_F_TIMEOUT(migration, shared_anon, 2*RUNTIME)
{
	pid_t pid;
	uint64_t *ptr;
	int i;

	if (self->nthreads < 2 || self->n1 < 0 || self->n2 < 0)
		SKIP(return, "Not enough threads or NUMA nodes available");

	ptr = mmap(NULL, TWOMEG, PROT_READ | PROT_WRITE,
		MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);

	memset(ptr, 0xde, TWOMEG);
	for (i = 0; i < self->nthreads - 1; i++) {
		pid = fork();
		if (!pid) {
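			/* Have the child die (SIGHUP) if the parent exits first. */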
			prctl(PR_SET_PDEATHSIG, SIGHUP);
			/* Parent may have died before prctl so check now. */
			if (getppid() == 1)
				kill(getpid(), SIGHUP);
			access_mem(ptr);
		} else {
			self->pids[i] = pid;
		}
	}

	ASSERT_EQ(migrate(ptr, self->n1, self->n2), 0);
	for (i = 0; i < self->nthreads - 1; i++)
		ASSERT_EQ(kill(self->pids[i], SIGTERM), 0);
}

/*
 * Tests the PMD migration entry paths using a 2MB-aligned region that is
 * madvised MADV_HUGEPAGE so it can be backed by a PMD-mapped THP.
 */
TEST_F_TIMEOUT(migration, private_anon_thp, 2*RUNTIME)
{
	uint64_t *ptr;
	int i;

	if (!thp_is_enabled())
		SKIP(return, "Transparent Hugepages not available");

	if (self->nthreads < 2 || self->n1 < 0 || self->n2 < 0)
		SKIP(return, "Not enough threads or NUMA nodes available");

	ptr = mmap(NULL, 2*TWOMEG, PROT_READ | PROT_WRITE,
		MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);

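	/*
	 * The mapping is twice the THP size so that rounding ptr up to a 2MB
	 * boundary still leaves a full, naturally aligned 2MB range for
	 * MADV_HUGEPAGE to back with a single PMD-mapped hugepage.
	 */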
	ptr = (uint64_t *) ALIGN((uintptr_t) ptr, TWOMEG);
	ASSERT_EQ(madvise(ptr, TWOMEG, MADV_HUGEPAGE), 0);
	memset(ptr, 0xde, TWOMEG);
	for (i = 0; i < self->nthreads - 1; i++)
		if (pthread_create(&self->threads[i], NULL, access_mem, ptr))
			perror("Couldn't create thread");

	ASSERT_EQ(migrate(ptr, self->n1, self->n2), 0);
	for (i = 0; i < self->nthreads - 1; i++)
		ASSERT_EQ(pthread_cancel(self->threads[i]), 0);
}

/*
 * Migration test with a shared anonymous THP; child processes access the
 * mapping while the parent migrates it.
 */
TEST_F_TIMEOUT(migration, shared_anon_thp, 2*RUNTIME)
{
	pid_t pid;
	uint64_t *ptr;
	int i;

	if (!thp_is_enabled())
		SKIP(return, "Transparent Hugepages not available");

	if (self->nthreads < 2 || self->n1 < 0 || self->n2 < 0)
		SKIP(return, "Not enough threads or NUMA nodes available");

	ptr = mmap(NULL, 2 * TWOMEG, PROT_READ | PROT_WRITE,
		MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);

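	/* As above: align up to 2MB within the over-sized mapping for THP. */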
	ptr = (uint64_t *) ALIGN((uintptr_t) ptr, TWOMEG);
	ASSERT_EQ(madvise(ptr, TWOMEG, MADV_HUGEPAGE), 0);

	memset(ptr, 0xde, TWOMEG);
	for (i = 0; i < self->nthreads - 1; i++) {
		pid = fork();
		if (!pid) {
			prctl(PR_SET_PDEATHSIG, SIGHUP);
			/* Parent may have died before prctl so check now. */
			if (getppid() == 1)
				kill(getpid(), SIGHUP);
			access_mem(ptr);
		} else {
			self->pids[i] = pid;
		}
	}

	ASSERT_EQ(migrate(ptr, self->n1, self->n2), 0);
	for (i = 0; i < self->nthreads - 1; i++)
		ASSERT_EQ(kill(self->pids[i], SIGTERM), 0);
}

/*
 * Migration test with a private anonymous hugetlb page.
 */
TEST_F_TIMEOUT(migration, private_anon_htlb, 2*RUNTIME)
{
	uint64_t *ptr;
	int i;

	if (self->nthreads < 2 || self->n1 < 0 || self->n2 < 0)
		SKIP(return, "Not enough threads or NUMA nodes available");

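	/*
	 * MAP_HUGETLB is backed by the hugetlb pool; if no hugepages have
	 * been reserved (e.g. via vm.nr_hugepages), the mmap() below is
	 * expected to fail and the ASSERT catches it.
	 */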
	ptr = mmap(NULL, TWOMEG, PROT_READ | PROT_WRITE,
		MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);

	memset(ptr, 0xde, TWOMEG);
	for (i = 0; i < self->nthreads - 1; i++)
		if (pthread_create(&self->threads[i], NULL, access_mem, ptr))
			perror("Couldn't create thread");

	ASSERT_EQ(migrate(ptr, self->n1, self->n2), 0);
	for (i = 0; i < self->nthreads - 1; i++)
		ASSERT_EQ(pthread_cancel(self->threads[i]), 0);
}

/*
 * Migration test with a shared anonymous hugetlb page.
 */
TEST_F_TIMEOUT(migration, shared_anon_htlb, 2*RUNTIME)
{
	pid_t pid;
	uint64_t *ptr;
	int i;

	if (self->nthreads < 2 || self->n1 < 0 || self->n2 < 0)
		SKIP(return, "Not enough threads or NUMA nodes available");

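	/* As above, hugepages must already be reserved in the hugetlb pool. */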
	ptr = mmap(NULL, TWOMEG, PROT_READ | PROT_WRITE,
		MAP_SHARED | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);

	memset(ptr, 0xde, TWOMEG);
	for (i = 0; i < self->nthreads - 1; i++) {
		pid = fork();
		if (!pid) {
			prctl(PR_SET_PDEATHSIG, SIGHUP);
			/* Parent may have died before prctl so check now. */
			if (getppid() == 1)
				kill(getpid(), SIGHUP);
			access_mem(ptr);
		} else {
			self->pids[i] = pid;
		}
	}

	ASSERT_EQ(migrate(ptr, self->n1, self->n2), 0);
	for (i = 0; i < self->nthreads - 1; i++)
		ASSERT_EQ(kill(self->pids[i], SIGTERM), 0);
}

TEST_HARNESS_MAIN