/* SPDX-License-Identifier: GPL-2.0 */
/* thread_info.h: common low-level thread information accessors
 *
 * Copyright (C) 2002  David Howells (dhowells@redhat.com)
 * - Incorporating suggestions made by Linus Torvalds
 */

#ifndef _LINUX_THREAD_INFO_H
#define _LINUX_THREAD_INFO_H

#include <linux/types.h>
#include <linux/limits.h>
#include <linux/bug.h>
#include <linux/restart_block.h>
#include <linux/errno.h>

#ifdef CONFIG_THREAD_INFO_IN_TASK
/*
 * For CONFIG_THREAD_INFO_IN_TASK kernels we need <asm/current.h> for the
 * definition of current, but for !CONFIG_THREAD_INFO_IN_TASK kernels,
 * including <asm/current.h> can cause a circular dependency on some platforms.
 */
#include <asm/current.h>
#define current_thread_info() ((struct thread_info *)current)
#endif

#include <linux/bitops.h>

/*
 * For per-arch arch_within_stack_frames() implementations, defined in
 * asm/thread_info.h.
 */
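/*
 * Return values, roughly (see the default stub and the hardened usercopy
 * checks further down):
 * BAD_STACK  - the object overlaps a frame or stack boundary and must be
 *              rejected,
 * NOT_STACK  - the object is not on this task's stack (or no frame-level
 *              verdict is available),
 * GOOD_FRAME - the object is wholly contained within a valid stack frame,
 * GOOD_STACK - the object is on the stack, but frame-granular checking
 *              was not performed.
 */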
enum {
	BAD_STACK = -1,
	NOT_STACK = 0,
	GOOD_FRAME,
	GOOD_STACK,
};

#ifdef CONFIG_GENERIC_ENTRY
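/*
 * With the generic syscall entry/exit code, syscall work (seccomp, tracing,
 * auditing, ...) is tracked in thread_info::syscall_work rather than in
 * per-arch TIF_* flags.  The SYSCALL_WORK_BIT_* values index that word;
 * the SYSCALL_WORK_* masks are their bit-mask counterparts, used by the
 * entry code and by the helpers further down in this header.
 */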
enum syscall_work_bit {
	SYSCALL_WORK_BIT_SECCOMP,
	SYSCALL_WORK_BIT_SYSCALL_TRACEPOINT,
	SYSCALL_WORK_BIT_SYSCALL_TRACE,
	SYSCALL_WORK_BIT_SYSCALL_EMU,
	SYSCALL_WORK_BIT_SYSCALL_AUDIT,
	SYSCALL_WORK_BIT_SYSCALL_USER_DISPATCH,
	SYSCALL_WORK_BIT_SYSCALL_EXIT_TRAP,
};

#define SYSCALL_WORK_SECCOMP		BIT(SYSCALL_WORK_BIT_SECCOMP)
#define SYSCALL_WORK_SYSCALL_TRACEPOINT	BIT(SYSCALL_WORK_BIT_SYSCALL_TRACEPOINT)
#define SYSCALL_WORK_SYSCALL_TRACE	BIT(SYSCALL_WORK_BIT_SYSCALL_TRACE)
#define SYSCALL_WORK_SYSCALL_EMU	BIT(SYSCALL_WORK_BIT_SYSCALL_EMU)
#define SYSCALL_WORK_SYSCALL_AUDIT	BIT(SYSCALL_WORK_BIT_SYSCALL_AUDIT)
#define SYSCALL_WORK_SYSCALL_USER_DISPATCH BIT(SYSCALL_WORK_BIT_SYSCALL_USER_DISPATCH)
#define SYSCALL_WORK_SYSCALL_EXIT_TRAP	BIT(SYSCALL_WORK_BIT_SYSCALL_EXIT_TRAP)
#endif

#include <asm/thread_info.h>

#ifdef __KERNEL__

#ifndef arch_set_restart_data
#define arch_set_restart_data(restart) do { } while (0)
#endif

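/*
 * Helper for syscalls that are interrupted and must be restarted with saved
 * state via the restart_block mechanism: it records the restart handler,
 * lets the architecture stash extra per-restart data via
 * arch_set_restart_data(), and returns -ERESTART_RESTARTBLOCK so callers can
 * simply do "return set_restart_fn(restart, my_restart)" (handler name here
 * is illustrative; nanosleep- and futex-style waits are typical users).
 */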
static inline long set_restart_fn(struct restart_block *restart,
					long (*fn)(struct restart_block *))
{
	restart->fn = fn;
	arch_set_restart_data(restart);
	return -ERESTART_RESTARTBLOCK;
}

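/*
 * THREAD_ALIGN is the alignment required for kernel stacks; unless the
 * architecture overrides it, stacks are simply aligned to their size.
 * THREADINFO_GFP is used by the core kernel when allocating thread stacks:
 * zeroed and charged to the kernel memory cgroup.
 */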
#ifndef THREAD_ALIGN
#define THREAD_ALIGN	THREAD_SIZE
#endif

#define THREADINFO_GFP		(GFP_KERNEL_ACCOUNT | __GFP_ZERO)

/*
 * flag set/clear/test wrappers
 * - pass TIF_xxxx constants to these functions
 */

static inline void set_ti_thread_flag(struct thread_info *ti, int flag)
{
	set_bit(flag, (unsigned long *)&ti->flags);
}

static inline void clear_ti_thread_flag(struct thread_info *ti, int flag)
{
	clear_bit(flag, (unsigned long *)&ti->flags);
}

static inline void update_ti_thread_flag(struct thread_info *ti, int flag,
					 bool value)
{
	if (value)
		set_ti_thread_flag(ti, flag);
	else
		clear_ti_thread_flag(ti, flag);
}

static inline int test_and_set_ti_thread_flag(struct thread_info *ti, int flag)
{
	return test_and_set_bit(flag, (unsigned long *)&ti->flags);
}

static inline int test_and_clear_ti_thread_flag(struct thread_info *ti, int flag)
{
	return test_and_clear_bit(flag, (unsigned long *)&ti->flags);
}

static inline int test_ti_thread_flag(struct thread_info *ti, int flag)
{
	return test_bit(flag, (unsigned long *)&ti->flags);
}

/*
 * This may be used in noinstr code, and needs to be __always_inline to prevent
 * inadvertent instrumentation.
 */
static __always_inline unsigned long read_ti_thread_flags(struct thread_info *ti)
{
	return READ_ONCE(ti->flags);
}

#define set_thread_flag(flag) \
	set_ti_thread_flag(current_thread_info(), flag)
#define clear_thread_flag(flag) \
	clear_ti_thread_flag(current_thread_info(), flag)
#define update_thread_flag(flag, value) \
	update_ti_thread_flag(current_thread_info(), flag, value)
#define test_and_set_thread_flag(flag) \
	test_and_set_ti_thread_flag(current_thread_info(), flag)
#define test_and_clear_thread_flag(flag) \
	test_and_clear_ti_thread_flag(current_thread_info(), flag)
#define test_thread_flag(flag) \
	test_ti_thread_flag(current_thread_info(), flag)
#define read_thread_flags() \
	read_ti_thread_flags(current_thread_info())
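
/*
 * Illustrative use only: the *_thread_flag() wrappers above operate on the
 * current task, e.g. checking whether a signal is pending:
 *
 *	if (test_thread_flag(TIF_SIGPENDING))
 *		return -ERESTARTSYS;
 */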

#define read_task_thread_flags(t) \
	read_ti_thread_flags(task_thread_info(t))

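/*
 * The {set,test,clear}_syscall_work() helpers give arch and core code a
 * single API for syscall-work bookkeeping: with CONFIG_GENERIC_ENTRY they
 * operate on thread_info::syscall_work using the SYSCALL_WORK_BIT_* indices
 * defined above, otherwise they fall back to the corresponding TIF_* thread
 * flags.  The *_task_* variants take an explicit task.
 */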
#ifdef CONFIG_GENERIC_ENTRY
#define set_syscall_work(fl) \
	set_bit(SYSCALL_WORK_BIT_##fl, &current_thread_info()->syscall_work)
#define test_syscall_work(fl) \
	test_bit(SYSCALL_WORK_BIT_##fl, &current_thread_info()->syscall_work)
#define clear_syscall_work(fl) \
	clear_bit(SYSCALL_WORK_BIT_##fl, &current_thread_info()->syscall_work)

#define set_task_syscall_work(t, fl) \
	set_bit(SYSCALL_WORK_BIT_##fl, &task_thread_info(t)->syscall_work)
#define test_task_syscall_work(t, fl) \
	test_bit(SYSCALL_WORK_BIT_##fl, &task_thread_info(t)->syscall_work)
#define clear_task_syscall_work(t, fl) \
	clear_bit(SYSCALL_WORK_BIT_##fl, &task_thread_info(t)->syscall_work)

#else /* CONFIG_GENERIC_ENTRY */

#define set_syscall_work(fl)						\
	set_ti_thread_flag(current_thread_info(), TIF_##fl)
#define test_syscall_work(fl) \
	test_ti_thread_flag(current_thread_info(), TIF_##fl)
#define clear_syscall_work(fl) \
	clear_ti_thread_flag(current_thread_info(), TIF_##fl)

#define set_task_syscall_work(t, fl) \
	set_ti_thread_flag(task_thread_info(t), TIF_##fl)
#define test_task_syscall_work(t, fl) \
	test_ti_thread_flag(task_thread_info(t), TIF_##fl)
#define clear_task_syscall_work(t, fl) \
	clear_ti_thread_flag(task_thread_info(t), TIF_##fl)
#endif /* !CONFIG_GENERIC_ENTRY */

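/*
 * tif_need_resched() may be called from low-level scheduling, idle and
 * entry paths where explicit instrumentation is undesirable.  When the
 * instrumented generic bitops are in use, go through arch_test_bit() so
 * the sanitizer hooks are not pulled into those paths; otherwise plain
 * test_bit() is already uninstrumented.
 */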
#ifdef _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H

static __always_inline bool tif_need_resched(void)
{
	return arch_test_bit(TIF_NEED_RESCHED,
			     (unsigned long *)(&current_thread_info()->flags));
}

#else

static __always_inline bool tif_need_resched(void)
{
	return test_bit(TIF_NEED_RESCHED,
			(unsigned long *)(&current_thread_info()->flags));
}

#endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H */

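/*
 * Default stub for architectures without
 * CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES: returning 0 (NOT_STACK) tells the
 * hardened usercopy checks that no frame-granular verdict is available, so
 * only the coarser whole-stack bounds checks apply.
 */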
#ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES
static inline int arch_within_stack_frames(const void * const stack,
					   const void * const stackend,
					   const void *obj, unsigned long len)
{
	return 0;
}
#endif

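/*
 * Hardened usercopy: __check_object_size() verifies that a copy of @n bytes
 * starting at @ptr does not overflow the containing object (slab object,
 * stack frame, etc.).  The wrapper skips compile-time-constant sizes, which
 * the compile-time checks in check_copy_size() below are expected to catch.
 */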
#ifdef CONFIG_HARDENED_USERCOPY
extern void __check_object_size(const void *ptr, unsigned long n,
					bool to_user);

static __always_inline void check_object_size(const void *ptr, unsigned long n,
					      bool to_user)
{
	if (!__builtin_constant_p(n))
		__check_object_size(ptr, n, to_user);
}
#else
static inline void check_object_size(const void *ptr, unsigned long n,
				     bool to_user)
{ }
#endif /* CONFIG_HARDENED_USERCOPY */

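/*
 * __bad_copy_from()/__bad_copy_to() are never defined: any surviving
 * reference turns a provable compile-time overflow into a build error via
 * __compiletime_error().  copy_overflow() handles the runtime case and only
 * warns when CONFIG_BUG is enabled.
 */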
extern void __compiletime_error("copy source size is too small")
__bad_copy_from(void);
extern void __compiletime_error("copy destination size is too small")
__bad_copy_to(void);

void __copy_overflow(int size, unsigned long count);

static inline void copy_overflow(int size, unsigned long count)
{
	if (IS_ENABLED(CONFIG_BUG))
		__copy_overflow(size, count);
}

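/*
 * check_copy_size() is the front end used by copy_to_user()/copy_from_user()
 * and friends: __builtin_object_size() supplies the compile-time size of the
 * kernel buffer (or -1 if unknown), overflows with a constant @bytes become
 * build errors, non-constant overflows warn at runtime, and copies that pass
 * are additionally vetted by hardened usercopy.  Returns true if the copy
 * may proceed.
 */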
static __always_inline __must_check bool
check_copy_size(const void *addr, size_t bytes, bool is_source)
{
	int sz = __builtin_object_size(addr, 0);
	if (unlikely(sz >= 0 && sz < bytes)) {
		if (!__builtin_constant_p(bytes))
			copy_overflow(sz, bytes);
		else if (is_source)
			__bad_copy_from();
		else
			__bad_copy_to();
		return false;
	}
	if (WARN_ON_ONCE(bytes > INT_MAX))
		return false;
	check_object_size(addr, bytes, is_source);
	return true;
}

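/*
 * Hook invoked from the exec path so that an architecture can reset
 * per-task architecture state for the new program image; the default is a
 * no-op.
 */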
#ifndef arch_setup_new_exec
static inline void arch_setup_new_exec(void) { }
#endif

#endif	/* __KERNEL__ */

#endif /* _LINUX_THREAD_INFO_H */