// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic waiting primitives.
 *
 * (C) 2004 Nadia Yvette Chambers, Oracle
 */

void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *key)
{
	spin_lock_init(&wq_head->lock);
	lockdep_set_class_and_name(&wq_head->lock, key, name);
	INIT_LIST_HEAD(&wq_head->head);
}
EXPORT_SYMBOL(__init_waitqueue_head);

void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	__add_wait_queue(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue);

void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	__add_wait_queue_entry_tail(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue_exclusive);

void add_wait_queue_priority(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	wq_entry->flags |= WQ_FLAG_EXCLUSIVE | WQ_FLAG_PRIORITY;
	spin_lock_irqsave(&wq_head->lock, flags);
	__add_wait_queue(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL_GPL(add_wait_queue_priority);
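
/*
 * Note: a priority waiter is queued at the head of the list and is also
 * marked exclusive, so __wake_up_common() below visits it first; if its
 * wake function reports success, it alone can consume the event before
 * any other waiter is considered.
 */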

void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	spin_lock_irqsave(&wq_head->lock, flags);
	__remove_wait_queue(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(remove_wait_queue);

/*
 * Scan threshold to break wait queue walk.
 * This allows a waker to take a break from holding the
 * wait queue lock during the wait queue walk.
 */
#define WAITQUEUE_WALK_BREAK_CNT 64

/*
 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
 * wake everything up. If it's an exclusive wakeup (nr_exclusive == a small
 * positive number) then we wake that number of exclusive tasks, and
 * potentially all the non-exclusive tasks. Normally, exclusive tasks will
 * be at the end of the list and any non-exclusive tasks will be woken
 * first. A priority task may be at the head of the list, and can consume
 * the event without any other tasks being woken.
 *
 * There are circumstances in which we can try to wake a task which has already
 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
 * zero in this (rare) case, and we handle it by continuing to scan the queue.
 */
static int __wake_up_common(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, int wake_flags, void *key,
			wait_queue_entry_t *bookmark)
{
	wait_queue_entry_t *curr, *next;
	int cnt = 0;

	lockdep_assert_held(&wq_head->lock);

	if (bookmark && (bookmark->flags & WQ_FLAG_BOOKMARK)) {
		curr = list_next_entry(bookmark, entry);

		list_del(&bookmark->entry);
		bookmark->flags = 0;
	} else
		curr = list_first_entry(&wq_head->head, wait_queue_entry_t, entry);

	if (&curr->entry == &wq_head->head)
		return nr_exclusive;

	list_for_each_entry_safe_from(curr, next, &wq_head->head, entry) {
		unsigned flags = curr->flags;
		int ret;

		if (flags & WQ_FLAG_BOOKMARK)
			continue;

		ret = curr->func(curr, mode, wake_flags, key);
		if (ret < 0)
			break;
		if (ret && (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
			break;

		if (bookmark && (++cnt > WAITQUEUE_WALK_BREAK_CNT) &&
				(&next->entry != &wq_head->head)) {
			bookmark->flags = WQ_FLAG_BOOKMARK;
			list_add_tail(&bookmark->entry, &next->entry);
			break;
		}
	}

	return nr_exclusive;
}

static int __wake_up_common_lock(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, int wake_flags, void *key)
{
	unsigned long flags;
	wait_queue_entry_t bookmark;
	int remaining = nr_exclusive;

	bookmark.flags = 0;
	bookmark.private = NULL;
	bookmark.func = NULL;
	INIT_LIST_HEAD(&bookmark.entry);

	do {
		spin_lock_irqsave(&wq_head->lock, flags);
		remaining = __wake_up_common(wq_head, mode, remaining,
						wake_flags, key, &bookmark);
		spin_unlock_irqrestore(&wq_head->lock, flags);
	} while (bookmark.flags & WQ_FLAG_BOOKMARK);

	return nr_exclusive - remaining;
}

/**
 * __wake_up - wake up threads blocked on a waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: is directly passed to the wakeup function
 *
 * If this function wakes up a task, it executes a full memory barrier
 * before accessing the task state.  Returns the number of exclusive
 * tasks that were woken up.
 */
int __wake_up(struct wait_queue_head *wq_head, unsigned int mode,
	      int nr_exclusive, void *key)
{
	return __wake_up_common_lock(wq_head, mode, nr_exclusive, 0, key);
}
EXPORT_SYMBOL(__wake_up);
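
/*
 * Illustrative sketch, not part of this file ('my_wq' and 'my_cond' are
 * made-up names): a typical waker sets the condition before waking,
 * usually via the wake_up() wrapper, which expands to
 * __wake_up(x, TASK_NORMAL, 1, NULL):
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_wq);
 *	static bool my_cond;
 *
 *	my_cond = true;
 *	wake_up(&my_wq);
 */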

/*
 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
 */
void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr)
{
	__wake_up_common(wq_head, mode, nr, 0, NULL, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked);

void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key)
{
	__wake_up_common(wq_head, mode, 1, 0, key, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key);

void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
		unsigned int mode, void *key, wait_queue_entry_t *bookmark)
{
	__wake_up_common(wq_head, mode, 1, 0, key, bookmark);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key_bookmark);

/**
 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @key: opaque value to be passed to wakeup targets
 *
 * The sync wakeup differs in that the waker knows that it will schedule
 * away soon, so while the target thread will be woken up, it will not
 * be migrated to another CPU - ie. the two threads are 'synchronized'
 * with each other. This can prevent needless bouncing between CPUs.
 *
 * On UP it can prevent extra preemption.
 *
 * If this function wakes up a task, it executes a full memory barrier before
 * accessing the task state.
 */
void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode,
			void *key)
{
	if (unlikely(!wq_head))
		return;

	__wake_up_common_lock(wq_head, mode, 1, WF_SYNC, key);
}
EXPORT_SYMBOL_GPL(__wake_up_sync_key);
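
/*
 * Illustrative sketch with hypothetical names ('buf', 'have_space()'): a
 * producer that is itself about to sleep can use the sync variant, e.g.
 * wake_up_interruptible_sync(), which expands to
 * __wake_up_sync(x, TASK_INTERRUPTIBLE):
 *
 *	buf->ready = true;
 *	wake_up_interruptible_sync(&buf->wait);
 *	wait_event_interruptible(buf->space_wait, have_space(buf));
 */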

/**
 * __wake_up_locked_sync_key - wake up a thread blocked on a locked waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @key: opaque value to be passed to wakeup targets
 *
 * The sync wakeup differs in that the waker knows that it will schedule
 * away soon, so while the target thread will be woken up, it will not
 * be migrated to another CPU - ie. the two threads are 'synchronized'
 * with each other. This can prevent needless bouncing between CPUs.
 *
 * On UP it can prevent extra preemption.
 *
 * If this function wakes up a task, it executes a full memory barrier before
 * accessing the task state.
 */
void __wake_up_locked_sync_key(struct wait_queue_head *wq_head,
			       unsigned int mode, void *key)
{
	__wake_up_common(wq_head, mode, 1, WF_SYNC, key, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_sync_key);

/*
 * __wake_up_sync - see __wake_up_sync_key()
 */
void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode)
{
	__wake_up_sync_key(wq_head, mode, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_sync);	/* For internal use only */

void __wake_up_pollfree(struct wait_queue_head *wq_head)
{
	__wake_up(wq_head, TASK_NORMAL, 0, poll_to_key(EPOLLHUP | POLLFREE));
	/* POLLFREE must have cleared the queue. */
	WARN_ON_ONCE(waitqueue_active(wq_head));
}
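
/*
 * Note: callers normally go through the wake_up_pollfree() wrapper in
 * <linux/wait.h>, which checks waitqueue_active() first and is meant to
 * be called just before the memory containing wq_head is freed, forcing
 * all poll waiters off the queue.
 */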

/*
 * Note: we use "set_current_state()" _after_ the wait-queue add,
 * because we need a memory barrier there on SMP, so that any
 * wake-function that tests for the wait-queue being active
 * will be guaranteed to see the waitqueue addition _or_ the
 * subsequent tests in this thread will see the wakeup having
 * taken place.
 *
 * The spin_unlock() itself is semi-permeable and only protects
 * one way (it only protects stuff inside the critical region and
 * stops it from bleeding out - it would still allow subsequent
 * loads to move into the critical region).
 */
void
prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;

	wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	if (list_empty(&wq_entry->entry))
		__add_wait_queue(wq_head, wq_entry);
	set_current_state(state);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait);
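
/*
 * Illustrative sketch (hypothetical 'my_wq' and 'condition'): the
 * canonical open-coded wait loop pairs prepare_to_wait() with
 * finish_wait(); DEFINE_WAIT() supplies an entry whose wake function is
 * autoremove_wake_function():
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&my_wq, &wait, TASK_INTERRUPTIBLE);
 *		if (condition || signal_pending(current))
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&my_wq, &wait);
 */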

/* Returns true if we are the first waiter in the queue, false otherwise. */
bool
prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;
	bool was_empty = false;

	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	if (list_empty(&wq_entry->entry)) {
		was_empty = list_empty(&wq_head->head);
		__add_wait_queue_entry_tail(wq_head, wq_entry);
	}
	set_current_state(state);
	spin_unlock_irqrestore(&wq_head->lock, flags);
	return was_empty;
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);

void init_wait_entry(struct wait_queue_entry *wq_entry, int flags)
{
	wq_entry->flags = flags;
	wq_entry->private = current;
	wq_entry->func = autoremove_wake_function;
	INIT_LIST_HEAD(&wq_entry->entry);
}
EXPORT_SYMBOL(init_wait_entry);

long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;
	long ret = 0;

	spin_lock_irqsave(&wq_head->lock, flags);
	if (signal_pending_state(state, current)) {
		/*
		 * An exclusive waiter must not fail if it was selected by a
		 * wakeup; it should "consume" the condition we were waiting
		 * for.
		 *
		 * The caller will recheck the condition and return success if
		 * we were already woken up; we cannot miss the event because
		 * wakeup locks/unlocks the same wq_head->lock.
		 *
		 * But we need to ensure that a set-condition + wakeup after
		 * that can't see us; it should wake up another exclusive
		 * waiter if we fail.
		 */
		list_del_init(&wq_entry->entry);
		ret = -ERESTARTSYS;
	} else {
		if (list_empty(&wq_entry->entry)) {
			if (wq_entry->flags & WQ_FLAG_EXCLUSIVE)
				__add_wait_queue_entry_tail(wq_head, wq_entry);
			else
				__add_wait_queue(wq_head, wq_entry);
		}
		set_current_state(state);
	}
	spin_unlock_irqrestore(&wq_head->lock, flags);

	return ret;
}
EXPORT_SYMBOL(prepare_to_wait_event);
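
/*
 * Illustrative sketch, simplified from ___wait_event() in <linux/wait.h>
 * (hypothetical 'wq' and 'condition'): the wait_event*() macros loop on
 * prepare_to_wait_event(), which returns -ERESTARTSYS once a pending
 * signal aborts an interruptible wait:
 *
 *	struct wait_queue_entry wait;
 *	long ret = 0;
 *
 *	init_wait_entry(&wait, 0);
 *	for (;;) {
 *		long err = prepare_to_wait_event(&wq, &wait, TASK_INTERRUPTIBLE);
 *
 *		if (condition)
 *			break;
 *		if (err) {
 *			ret = err;
 *			break;
 *		}
 *		schedule();
 *	}
 *	finish_wait(&wq, &wait);
 */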

/*
 * Note! These two wait functions are entered with the
 * wait-queue lock held (and interrupts off in the _irq
 * case), so there is no race with testing the wakeup
 * condition in the caller before they add the wait
 * entry to the wait queue.
 */
int do_wait_intr(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
	if (likely(list_empty(&wait->entry)))
		__add_wait_queue_entry_tail(wq, wait);

	set_current_state(TASK_INTERRUPTIBLE);
	if (signal_pending(current))
		return -ERESTARTSYS;

	spin_unlock(&wq->lock);
	schedule();
	spin_lock(&wq->lock);

	return 0;
}
EXPORT_SYMBOL(do_wait_intr);

int do_wait_intr_irq(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
	if (likely(list_empty(&wait->entry)))
		__add_wait_queue_entry_tail(wq, wait);

	set_current_state(TASK_INTERRUPTIBLE);
	if (signal_pending(current))
		return -ERESTARTSYS;

	spin_unlock_irq(&wq->lock);
	schedule();
	spin_lock_irq(&wq->lock);

	return 0;
}
EXPORT_SYMBOL(do_wait_intr_irq);
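
/*
 * Illustrative sketch, simplified from __wait_event_interruptible_locked()
 * in <linux/wait.h> (hypothetical 'wq' and 'condition'); the cleanup uses
 * __remove_wait_queue() rather than finish_wait() because wq.lock is
 * already held:
 *
 *	DEFINE_WAIT(wait);
 *	int err = 0;
 *
 *	spin_lock(&wq.lock);
 *	while (!condition) {
 *		err = do_wait_intr(&wq, &wait);
 *		if (err)
 *			break;
 *	}
 *	__remove_wait_queue(&wq, &wait);
 *	__set_current_state(TASK_RUNNING);
 *	spin_unlock(&wq.lock);
 */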

/**
 * finish_wait - clean up after waiting in a queue
 * @wq_head: waitqueue waited on
 * @wq_entry: wait descriptor
 *
 * Sets current thread back to running state and removes
 * the wait descriptor from the given waitqueue if still
 * queued.
 */
void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	__set_current_state(TASK_RUNNING);
	/*
	 * We can check for list emptiness outside the lock
	 * IFF:
	 *  - we use the "careful" check that verifies both
	 *    the next and prev pointers, so that there cannot
	 *    be any half-pending updates in progress on other
	 *    CPUs that we haven't seen yet (and that might
	 *    still change the stack area)
	 * and
	 *  - all other users take the lock (ie we can only
	 *    have _one_ other CPU that looks at or modifies
	 *    the list).
	 */
	if (!list_empty_careful(&wq_entry->entry)) {
		spin_lock_irqsave(&wq_head->lock, flags);
		list_del_init(&wq_entry->entry);
		spin_unlock_irqrestore(&wq_head->lock, flags);
	}
}
EXPORT_SYMBOL(finish_wait);

int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
{
	int ret = default_wake_function(wq_entry, mode, sync, key);

	if (ret)
		list_del_init_careful(&wq_entry->entry);

	return ret;
}
EXPORT_SYMBOL(autoremove_wake_function);
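
/*
 * Note: this is the wake function installed by DEFINE_WAIT(), which is
 * DEFINE_WAIT_FUNC(name, autoremove_wake_function) in <linux/wait.h>.
 * Because a successfully woken entry is unlinked here, on the waker's
 * side, finish_wait() can usually take its lockless
 * list_empty_careful() path.
 */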

static inline bool is_kthread_should_stop(void)
{
	return (current->flags & PF_KTHREAD) && kthread_should_stop();
}

/*
 * DEFINE_WAIT_FUNC(wait, woken_wake_func);
 *
 * add_wait_queue(&wq_head, &wait);
 * for (;;) {
 *     if (condition)
 *         break;
 *
 *     // in wait_woken()			// in woken_wake_function()
 *
 *     p->state = mode;				wq_entry->flags |= WQ_FLAG_WOKEN;
 *     smp_mb(); // A				try_to_wake_up():
 *     if (!(wq_entry->flags & WQ_FLAG_WOKEN))	   <full barrier>
 *         schedule()				   if (p->state & mode)
 *     p->state = TASK_RUNNING;			      p->state = TASK_RUNNING;
 *     wq_entry->flags &= ~WQ_FLAG_WOKEN;	~~~~~~~~~~~~~~~~~~
 *     smp_mb(); // B				condition = true;
 * }						smp_mb(); // C
 * remove_wait_queue(&wq_head, &wait);		wq_entry->flags |= WQ_FLAG_WOKEN;
 */
long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout)
{
	/*
	 * The below executes an smp_mb(), which matches with the full barrier
	 * executed by the try_to_wake_up() in woken_wake_function() such that
	 * either we see the store to wq_entry->flags in woken_wake_function()
	 * or woken_wake_function() sees our store to current->state.
	 */
	set_current_state(mode); /* A */
	if (!(wq_entry->flags & WQ_FLAG_WOKEN) && !is_kthread_should_stop())
		timeout = schedule_timeout(timeout);
	__set_current_state(TASK_RUNNING);

	/*
	 * The below executes an smp_mb(), which matches with the smp_mb() (C)
	 * in woken_wake_function() such that either we see the wait condition
	 * being true or the store to wq_entry->flags in woken_wake_function()
	 * follows ours in the coherence order.
	 */
	smp_store_mb(wq_entry->flags, wq_entry->flags & ~WQ_FLAG_WOKEN); /* B */

	return timeout;
}
EXPORT_SYMBOL(wait_woken);
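
/*
 * Illustrative sketch of the schematic above, with a timeout and
 * hypothetical 'condition' (a real user would also check
 * signal_pending() when waiting interruptibly):
 *
 *	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 *	long timeout = msecs_to_jiffies(100);
 *
 *	add_wait_queue(&wq_head, &wait);
 *	while (!condition && timeout)
 *		timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
 *	remove_wait_queue(&wq_head, &wait);
 */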

int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
{
	/* Pairs with the smp_store_mb() in wait_woken(). */
	smp_mb(); /* C */
	wq_entry->flags |= WQ_FLAG_WOKEN;

	return default_wake_function(wq_entry, mode, sync, key);
}
EXPORT_SYMBOL(woken_wake_function);