/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com),
 *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/uaccess.h"
 */
#ifndef __S390_UACCESS_H
#define __S390_UACCESS_H

/*
 * User space memory access functions
 */
#include <asm/asm-extable.h>
#include <asm/processor.h>
#include <asm/ctl_reg.h>
#include <asm/extable.h>
#include <asm/facility.h>
#include <asm-generic/access_ok.h>

void debug_user_asce(int exit);

unsigned long __must_check
raw_copy_from_user(void *to, const void __user *from, unsigned long n);

unsigned long __must_check
raw_copy_to_user(void __user *to, const void *from, unsigned long n);

#ifndef CONFIG_KASAN
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER
#endif

unsigned long __must_check
_copy_from_user_key(void *to, const void __user *from, unsigned long n, unsigned long key);

static __always_inline unsigned long __must_check
copy_from_user_key(void *to, const void __user *from, unsigned long n, unsigned long key)
{
	if (check_copy_size(to, n, false))
		n = _copy_from_user_key(to, from, n, key);
	return n;
}

unsigned long __must_check
_copy_to_user_key(void __user *to, const void *from, unsigned long n, unsigned long key);

static __always_inline unsigned long __must_check
copy_to_user_key(void __user *to, const void *from, unsigned long n, unsigned long key)
{
	if (check_copy_size(from, n, true))
		n = _copy_to_user_key(to, from, n, key);
	return n;
}
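
/*
 * Illustrative sketch (not part of this header's API): the _key variants
 * behave like copy_{from,to}_user(), returning the number of bytes that
 * could not be copied, but perform the access with the given access key so
 * storage key protection is honored. The names below are made up:
 *
 *	static int example_read_keyed(void *buf, const void __user *uaddr,
 *				      unsigned long len, u8 access_key)
 *	{
 *		if (copy_from_user_key(buf, uaddr, len, access_key))
 *			return -EFAULT;
 *		return 0;
 *	}
 */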
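/*
 * Operand-access control for MVCOS ("move with optional specifications"):
 * the union is loaded into GR0 before the instruction. oac1, the upper
 * halfword, describes the first operand (the store target), oac2 the
 * second operand (the fetch source). "key" is the access key and "as" the
 * address space to be used; each only takes effect if the corresponding
 * validity bit ("k" resp. "a") is set.
 */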
union oac {
	unsigned int val;
	struct {
		struct {
			unsigned short key : 4;
			unsigned short	   : 4;
			unsigned short as  : 2;
			unsigned short	   : 4;
			unsigned short k   : 1;
			unsigned short a   : 1;
		} oac1;
		struct {
			unsigned short key : 4;
			unsigned short	   : 4;
			unsigned short as  : 2;
			unsigned short	   : 4;
			unsigned short k   : 1;
			unsigned short a   : 1;
		} oac2;
	};
};

int __noreturn __put_user_bad(void);

#define __put_user_asm(to, from, size)					\
({									\
	union oac __oac_spec = {					\
		.oac1.as = PSW_BITS_AS_SECONDARY,			\
		.oac1.a = 1,						\
	};								\
	int __rc;							\
									\
	asm volatile(							\
		"	lr	0,%[spec]\n"				\
		"0:	mvcos	%[_to],%[_from],%[_size]\n"		\
		"1:	xr	%[rc],%[rc]\n"				\
		"2:\n"							\
		EX_TABLE_UA_STORE(0b, 2b, %[rc])			\
		EX_TABLE_UA_STORE(1b, 2b, %[rc])			\
		: [rc] "=&d" (__rc), [_to] "+Q" (*(to))			\
		: [_size] "d" (size), [_from] "Q" (*(from)),		\
		  [spec] "d" (__oac_spec.val)				\
		: "cc", "0");						\
	__rc;								\
})
static __always_inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
{
	int rc;

	switch (size) {
	case 1:
		rc = __put_user_asm((unsigned char __user *)ptr,
				    (unsigned char *)x,
				    size);
		break;
	case 2:
		rc = __put_user_asm((unsigned short __user *)ptr,
				    (unsigned short *)x,
				    size);
		break;
	case 4:
		rc = __put_user_asm((unsigned int __user *)ptr,
				    (unsigned int *)x,
				    size);
		break;
	case 8:
		rc = __put_user_asm((unsigned long __user *)ptr,
				    (unsigned long *)x,
				    size);
		break;
	default:
		__put_user_bad();
		break;
	}
	return rc;
}

int __noreturn __get_user_bad(void);

#define __get_user_asm(to, from, size)					\
({									\
	union oac __oac_spec = {					\
		.oac2.as = PSW_BITS_AS_SECONDARY,			\
		.oac2.a = 1,						\
	};								\
	int __rc;							\
									\
	asm volatile(							\
		"	lr	0,%[spec]\n"				\
		"0:	mvcos	0(%[_to]),%[_from],%[_size]\n"		\
		"1:	xr	%[rc],%[rc]\n"				\
		"2:\n"							\
		EX_TABLE_UA_LOAD_MEM(0b, 2b, %[rc], %[_to], %[_ksize])	\
		EX_TABLE_UA_LOAD_MEM(1b, 2b, %[rc], %[_to], %[_ksize])	\
		: [rc] "=&d" (__rc), "=Q" (*(to))			\
		: [_size] "d" (size), [_from] "Q" (*(from)),		\
		  [spec] "d" (__oac_spec.val), [_to] "a" (to),		\
		  [_ksize] "K" (size)					\
		: "cc", "0");						\
	__rc;								\
})
static __always_inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
{
	int rc;

	switch (size) {
	case 1:
		rc = __get_user_asm((unsigned char *)x,
				    (unsigned char __user *)ptr,
				    size);
		break;
	case 2:
		rc = __get_user_asm((unsigned short *)x,
				    (unsigned short __user *)ptr,
				    size);
		break;
	case 4:
		rc = __get_user_asm((unsigned int *)x,
				    (unsigned int __user *)ptr,
				    size);
		break;
	case 8:
		rc = __get_user_asm((unsigned long *)x,
				    (unsigned long __user *)ptr,
				    size);
		break;
	default:
		__get_user_bad();
		break;
	}
	return rc;
}

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 */
#define __put_user(x, ptr)						\
({									\
	__typeof__(*(ptr)) __x = (x);					\
	int __pu_err = -EFAULT;						\
									\
	__chk_user_ptr(ptr);						\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
	case 2:								\
	case 4:								\
	case 8:								\
		__pu_err = __put_user_fn(&__x, ptr, sizeof(*(ptr)));	\
		break;							\
	default:							\
		__put_user_bad();					\
		break;							\
	}								\
	__builtin_expect(__pu_err, 0);					\
})

#define put_user(x, ptr)						\
({									\
	might_fault();							\
	__put_user(x, ptr);						\
})

#define __get_user(x, ptr)						\
({									\
	int __gu_err = -EFAULT;						\
									\
	__chk_user_ptr(ptr);						\
	switch (sizeof(*(ptr))) {					\
	case 1: {							\
		unsigned char __x;					\
									\
		__gu_err = __get_user_fn(&__x, ptr, sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *)&__x;		\
		break;							\
	};								\
	case 2: {							\
		unsigned short __x;					\
									\
		__gu_err = __get_user_fn(&__x, ptr, sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *)&__x;		\
		break;							\
	};								\
	case 4: {							\
		unsigned int __x;					\
									\
		__gu_err = __get_user_fn(&__x, ptr, sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *)&__x;		\
		break;							\
	};								\
	case 8: {							\
		unsigned long __x;					\
									\
		__gu_err = __get_user_fn(&__x, ptr, sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *)&__x;		\
		break;							\
	};								\
	default:							\
		__get_user_bad();					\
		break;							\
	}								\
	__builtin_expect(__gu_err, 0);					\
})

#define get_user(x, ptr)						\
({									\
	might_fault();							\
	__get_user(x, ptr);						\
})
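
/*
 * Both put_user() and get_user() return 0 on success and -EFAULT on fault,
 * with the access size derived from the pointer type. Illustrative use
 * (uptr is an assumed __u32 __user pointer):
 *
 *	__u32 val;
 *
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 *	if (put_user(val + 1, uptr))
 *		return -EFAULT;
 */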

/*
 * Copy a null terminated string from userspace.
 */
long __must_check strncpy_from_user(char *dst, const char __user *src, long count);

long __must_check strnlen_user(const char __user *src, long count);
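
/*
 * Sketch of typical use: strncpy_from_user() returns the string length on
 * success and a negative error on fault; if no terminator was found within
 * @count bytes it returns @count and the result is not NUL-terminated, so
 * callers must check for that (the buffer name below is made up):
 *
 *	char name[32];
 *	long len = strncpy_from_user(name, uname, sizeof(name));
 *
 *	if (len < 0)
 *		return len;
 *	if (len == sizeof(name))
 *		return -ENAMETOOLONG;
 */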

/*
 * Zero Userspace
 */
unsigned long __must_check __clear_user(void __user *to, unsigned long size);

static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
	might_fault();
	return __clear_user(to, n);
}
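
/*
 * Like the copy routines, clear_user() returns the number of bytes that
 * could not be zeroed; 0 means complete success. For example:
 *
 *	if (clear_user(uaddr, size))
 *		return -EFAULT;
 */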

void *s390_kernel_write(void *dst, const void *src, size_t size);

int __noreturn __put_kernel_bad(void);

#define __put_kernel_asm(val, to, insn)					\
({									\
	int __rc;							\
									\
	asm volatile(							\
		"0:   " insn "  %[_val],%[_to]\n"			\
		"1:	xr	%[rc],%[rc]\n"				\
		"2:\n"							\
		EX_TABLE_UA_STORE(0b, 2b, %[rc])			\
		EX_TABLE_UA_STORE(1b, 2b, %[rc])			\
		: [rc] "=d" (__rc), [_to] "+Q" (*(to))			\
		: [_val] "d" (val)					\
		: "cc");						\
	__rc;								\
})

#define __put_kernel_nofault(dst, src, type, err_label)			\
do {									\
	unsigned long __x = (unsigned long)(*((type *)(src)));		\
	int __pk_err;							\
									\
	switch (sizeof(type)) {						\
	case 1:								\
		__pk_err = __put_kernel_asm(__x, (type *)(dst), "stc"); \
		break;							\
	case 2:								\
		__pk_err = __put_kernel_asm(__x, (type *)(dst), "sth"); \
		break;							\
	case 4:								\
		__pk_err = __put_kernel_asm(__x, (type *)(dst), "st");	\
		break;							\
	case 8:								\
		__pk_err = __put_kernel_asm(__x, (type *)(dst), "stg"); \
		break;							\
	default:							\
		__pk_err = __put_kernel_bad();				\
		break;							\
	}								\
	if (unlikely(__pk_err))						\
		goto err_label;						\
} while (0)

int __noreturn __get_kernel_bad(void);

#define __get_kernel_asm(val, from, insn)				\
({									\
	int __rc;							\
									\
	asm volatile(							\
		"0:   " insn "  %[_val],%[_from]\n"			\
		"1:	xr	%[rc],%[rc]\n"				\
		"2:\n"							\
		EX_TABLE_UA_LOAD_REG(0b, 2b, %[rc], %[_val])		\
		EX_TABLE_UA_LOAD_REG(1b, 2b, %[rc], %[_val])		\
		: [rc] "=d" (__rc), [_val] "=d" (val)			\
		: [_from] "Q" (*(from))					\
		: "cc");						\
	__rc;								\
})

#define __get_kernel_nofault(dst, src, type, err_label)			\
do {									\
	int __gk_err;							\
									\
	switch (sizeof(type)) {						\
	case 1: {							\
		unsigned char __x;					\
									\
		__gk_err = __get_kernel_asm(__x, (type *)(src), "ic");	\
		*((type *)(dst)) = (type)__x;				\
		break;							\
	};								\
	case 2: {							\
		unsigned short __x;					\
									\
		__gk_err = __get_kernel_asm(__x, (type *)(src), "lh");	\
		*((type *)(dst)) = (type)__x;				\
		break;							\
	};								\
	case 4: {							\
		unsigned int __x;					\
									\
		__gk_err = __get_kernel_asm(__x, (type *)(src), "l");	\
		*((type *)(dst)) = (type)__x;				\
		break;							\
	};								\
	case 8: {							\
		unsigned long __x;					\
									\
		__gk_err = __get_kernel_asm(__x, (type *)(src), "lg");	\
		*((type *)(dst)) = (type)__x;				\
		break;							\
	};								\
	default:							\
		__gk_err = __get_kernel_bad();				\
		break;							\
	}								\
	if (unlikely(__gk_err))						\
		goto err_label;						\
} while (0)
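
/*
 * The two *_nofault macros above back the generic get_kernel_nofault() and
 * copy_{from,to}_kernel_nofault() helpers from <linux/uaccess.h>, which
 * probe kernel addresses that may be unmapped without risking an oops.
 * Sketch (addr is an assumed, possibly invalid kernel pointer):
 *
 *	unsigned long word;
 *
 *	if (get_kernel_nofault(word, (unsigned long *)addr))
 *		return -EFAULT;
 */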

void __cmpxchg_user_key_called_with_bad_pointer(void);

#define CMPXCHG_USER_KEY_MAX_LOOPS 128

static __always_inline int __cmpxchg_user_key(unsigned long address, void *uval,
					      __uint128_t old, __uint128_t new,
					      unsigned long key, int size)
{
	int rc = 0;

	switch (size) {
	case 1: {
		unsigned int prev, shift, mask, _old, _new;
		unsigned long count;

		shift = (3 ^ (address & 3)) << 3;
		address ^= address & 3;
		_old = ((unsigned int)old & 0xff) << shift;
		_new = ((unsigned int)new & 0xff) << shift;
		mask = ~(0xff << shift);
		asm volatile(
			"	spka	0(%[key])\n"
			"	sacf	256\n"
			"	llill	%[count],%[max_loops]\n"
			"0:	l	%[prev],%[address]\n"
			"1:	nr	%[prev],%[mask]\n"
			"	xilf	%[mask],0xffffffff\n"
			"	or	%[new],%[prev]\n"
			"	or	%[prev],%[tmp]\n"
			"2:	lr	%[tmp],%[prev]\n"
			"3:	cs	%[prev],%[new],%[address]\n"
			"4:	jnl	5f\n"
			"	xr	%[tmp],%[prev]\n"
			"	xr	%[new],%[tmp]\n"
			"	nr	%[tmp],%[mask]\n"
			"	jnz	5f\n"
			"	brct	%[count],2b\n"
			"5:	sacf	768\n"
			"	spka	%[default_key]\n"
			EX_TABLE_UA_LOAD_REG(0b, 5b, %[rc], %[prev])
			EX_TABLE_UA_LOAD_REG(1b, 5b, %[rc], %[prev])
			EX_TABLE_UA_LOAD_REG(3b, 5b, %[rc], %[prev])
			EX_TABLE_UA_LOAD_REG(4b, 5b, %[rc], %[prev])
			: [rc] "+&d" (rc),
			  [prev] "=&d" (prev),
			  [address] "+Q" (*(int *)address),
			  [tmp] "+&d" (_old),
			  [new] "+&d" (_new),
			  [mask] "+&d" (mask),
			  [count] "=a" (count)
			: [key] "%[count]" (key << 4),
			  [default_key] "J" (PAGE_DEFAULT_KEY),
			  [max_loops] "J" (CMPXCHG_USER_KEY_MAX_LOOPS)
			: "memory", "cc");
		*(unsigned char *)uval = prev >> shift;
		if (!count)
			rc = -EAGAIN;
		return rc;
	}
	case 2: {
		unsigned int prev, shift, mask, _old, _new;
		unsigned long count;

		shift = (2 ^ (address & 2)) << 3;
		address ^= address & 2;
		_old = ((unsigned int)old & 0xffff) << shift;
		_new = ((unsigned int)new & 0xffff) << shift;
		mask = ~(0xffff << shift);
		asm volatile(
			"	spka	0(%[key])\n"
			"	sacf	256\n"
			"	llill	%[count],%[max_loops]\n"
			"0:	l	%[prev],%[address]\n"
			"1:	nr	%[prev],%[mask]\n"
			"	xilf	%[mask],0xffffffff\n"
			"	or	%[new],%[prev]\n"
			"	or	%[prev],%[tmp]\n"
			"2:	lr	%[tmp],%[prev]\n"
			"3:	cs	%[prev],%[new],%[address]\n"
			"4:	jnl	5f\n"
			"	xr	%[tmp],%[prev]\n"
			"	xr	%[new],%[tmp]\n"
			"	nr	%[tmp],%[mask]\n"
			"	jnz	5f\n"
			"	brct	%[count],2b\n"
			"5:	sacf	768\n"
			"	spka	%[default_key]\n"
			EX_TABLE_UA_LOAD_REG(0b, 5b, %[rc], %[prev])
			EX_TABLE_UA_LOAD_REG(1b, 5b, %[rc], %[prev])
			EX_TABLE_UA_LOAD_REG(3b, 5b, %[rc], %[prev])
			EX_TABLE_UA_LOAD_REG(4b, 5b, %[rc], %[prev])
			: [rc] "+&d" (rc),
			  [prev] "=&d" (prev),
			  [address] "+Q" (*(int *)address),
			  [tmp] "+&d" (_old),
			  [new] "+&d" (_new),
			  [mask] "+&d" (mask),
			  [count] "=a" (count)
			: [key] "%[count]" (key << 4),
			  [default_key] "J" (PAGE_DEFAULT_KEY),
			  [max_loops] "J" (CMPXCHG_USER_KEY_MAX_LOOPS)
			: "memory", "cc");
		*(unsigned short *)uval = prev >> shift;
		if (!count)
			rc = -EAGAIN;
		return rc;
	}
	case 4: {
		unsigned int prev = old;

		asm volatile(
			"	spka	0(%[key])\n"
			"	sacf	256\n"
			"0:	cs	%[prev],%[new],%[address]\n"
			"1:	sacf	768\n"
			"	spka	%[default_key]\n"
			EX_TABLE_UA_LOAD_REG(0b, 1b, %[rc], %[prev])
			EX_TABLE_UA_LOAD_REG(1b, 1b, %[rc], %[prev])
			: [rc] "+&d" (rc),
			  [prev] "+&d" (prev),
			  [address] "+Q" (*(int *)address)
			: [new] "d" ((unsigned int)new),
			  [key] "a" (key << 4),
			  [default_key] "J" (PAGE_DEFAULT_KEY)
			: "memory", "cc");
		*(unsigned int *)uval = prev;
		return rc;
	}
	case 8: {
		unsigned long prev = old;

		asm volatile(
			"	spka	0(%[key])\n"
			"	sacf	256\n"
			"0:	csg	%[prev],%[new],%[address]\n"
			"1:	sacf	768\n"
			"	spka	%[default_key]\n"
			EX_TABLE_UA_LOAD_REG(0b, 1b, %[rc], %[prev])
			EX_TABLE_UA_LOAD_REG(1b, 1b, %[rc], %[prev])
			: [rc] "+&d" (rc),
			  [prev] "+&d" (prev),
			  [address] "+QS" (*(long *)address)
			: [new] "d" ((unsigned long)new),
			  [key] "a" (key << 4),
			  [default_key] "J" (PAGE_DEFAULT_KEY)
			: "memory", "cc");
		*(unsigned long *)uval = prev;
		return rc;
	}
	case 16: {
		__uint128_t prev = old;

		asm volatile(
			"	spka	0(%[key])\n"
			"	sacf	256\n"
			"0:	cdsg	%[prev],%[new],%[address]\n"
			"1:	sacf	768\n"
			"	spka	%[default_key]\n"
			EX_TABLE_UA_LOAD_REGPAIR(0b, 1b, %[rc], %[prev])
			EX_TABLE_UA_LOAD_REGPAIR(1b, 1b, %[rc], %[prev])
			: [rc] "+&d" (rc),
			  [prev] "+&d" (prev),
			  [address] "+QS" (*(__int128_t *)address)
			: [new] "d" (new),
			  [key] "a" (key << 4),
			  [default_key] "J" (PAGE_DEFAULT_KEY)
			: "memory", "cc");
		*(__uint128_t *)uval = prev;
		return rc;
	}
	}
	__cmpxchg_user_key_called_with_bad_pointer();
	return rc;
}

/**
 * cmpxchg_user_key() - cmpxchg with user space target, honoring storage keys
 * @ptr: User space address of value to compare to @old and exchange with
 *	 @new. Must be aligned to sizeof(*@ptr).
 * @uval: Address where the old value of *@ptr is written to.
 * @old: Old value. Compared to the content pointed to by @ptr in order to
 *	 determine if the exchange occurs. The old value read from *@ptr is
 *	 written to *@uval.
 * @new: New value to place at *@ptr.
 * @key: Access key to use for checking storage key protection.
 *
 * Perform a cmpxchg on a user space target, honoring storage key protection.
 * @key alone determines how key checking is performed, neither
 * storage-protection-override nor fetch-protection-override apply.
 * The caller must compare *@uval and @old to determine if values have been
 * exchanged. In case of an exception *@uval is set to zero.
 *
 * Return:     0: cmpxchg executed
 *	       -EFAULT: an exception happened when trying to access *@ptr
 *	       -EAGAIN: maxed out number of retries (byte and short only)
 */
#define cmpxchg_user_key(ptr, uval, old, new, key)			\
({									\
	__typeof__(ptr) __ptr = (ptr);					\
	__typeof__(uval) __uval = (uval);				\
									\
	BUILD_BUG_ON(sizeof(*(__ptr)) != sizeof(*(__uval)));		\
	might_fault();							\
	__chk_user_ptr(__ptr);						\
	__cmpxchg_user_key((unsigned long)(__ptr), (void *)(__uval),	\
			   (old), (new), (key), sizeof(*(__ptr)));	\
})
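
/*
 * Illustrative retry loop (variable names are made up): per the description
 * above, a zero return only means the cmpxchg executed; whether the exchange
 * actually happened must be derived by comparing *@uval with @old.
 *
 *	u32 uval;
 *	int rc;
 *
 *	do {
 *		rc = cmpxchg_user_key(uptr, &uval, old, new, key);
 *		if (rc)
 *			return rc;	(-EFAULT, or -EAGAIN for 1/2 bytes)
 *		if (uval == old)
 *			break;		(exchange took place)
 *		old = uval;		(lost a race, retry with current value)
 *	} while (1);
 */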

#endif /* __S390_UACCESS_H */