1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Basic worker thread pool for io_uring
4 *
5 * Copyright (C) 2019 Jens Axboe
6 *
7 */
8 #include <linux/kernel.h>
9 #include <linux/init.h>
10 #include <linux/errno.h>
11 #include <linux/sched/signal.h>
12 #include <linux/percpu.h>
13 #include <linux/slab.h>
14 #include <linux/rculist_nulls.h>
15 #include <linux/cpu.h>
16 #include <linux/task_work.h>
17 #include <linux/audit.h>
18 #include <uapi/linux/io_uring.h>
19
20 #include "io-wq.h"
21 #include "slist.h"
22 #include "io_uring.h"
23
24 #define WORKER_IDLE_TIMEOUT (5 * HZ)
25
26 enum {
27 IO_WORKER_F_UP = 1, /* up and active */
28 IO_WORKER_F_RUNNING = 2, /* account as running */
29 IO_WORKER_F_FREE = 4, /* worker on free list */
30 IO_WORKER_F_BOUND = 8, /* is doing bounded work */
31 };
32
33 enum {
34 IO_WQ_BIT_EXIT = 0, /* wq exiting */
35 };
36
37 enum {
38 IO_ACCT_STALLED_BIT = 0, /* stalled on hash */
39 };
40
41 /*
42 * One for each thread in a wqe pool
43 */
44 struct io_worker {
45 refcount_t ref;
46 unsigned flags;
47 struct hlist_nulls_node nulls_node;
48 struct list_head all_list;
49 struct task_struct *task;
50 struct io_wqe *wqe;
51
52 struct io_wq_work *cur_work;
53 struct io_wq_work *next_work;
54 raw_spinlock_t lock;
55
56 struct completion ref_done;
57
58 unsigned long create_state;
59 struct callback_head create_work;
60 int create_index;
61
62 union {
63 struct rcu_head rcu;
64 struct work_struct work;
65 };
66 };
67
68 #if BITS_PER_LONG == 64
69 #define IO_WQ_HASH_ORDER 6
70 #else
71 #define IO_WQ_HASH_ORDER 5
72 #endif
73
74 #define IO_WQ_NR_HASH_BUCKETS (1u << IO_WQ_HASH_ORDER)
75
76 struct io_wqe_acct {
77 unsigned nr_workers;
78 unsigned max_workers;
79 int index;
80 atomic_t nr_running;
81 raw_spinlock_t lock;
82 struct io_wq_work_list work_list;
83 unsigned long flags;
84 };
85
86 enum {
87 IO_WQ_ACCT_BOUND,
88 IO_WQ_ACCT_UNBOUND,
89 IO_WQ_ACCT_NR,
90 };
91
92 /*
93 * Per-node worker thread pool
94 */
95 struct io_wqe {
96 raw_spinlock_t lock;
97 struct io_wqe_acct acct[IO_WQ_ACCT_NR];
98
99 int node;
100
101 struct hlist_nulls_head free_list;
102 struct list_head all_list;
103
104 struct wait_queue_entry wait;
105
106 struct io_wq *wq;
107 struct io_wq_work *hash_tail[IO_WQ_NR_HASH_BUCKETS];
108
109 cpumask_var_t cpu_mask;
110 };
111
112 /*
113 * Per io_wq state
114 */
115 struct io_wq {
116 unsigned long state;
117
118 free_work_fn *free_work;
119 io_wq_work_fn *do_work;
120
121 struct io_wq_hash *hash;
122
123 atomic_t worker_refs;
124 struct completion worker_done;
125
126 struct hlist_node cpuhp_node;
127
128 struct task_struct *task;
129
130 struct io_wqe *wqes[];
131 };
132
133 static enum cpuhp_state io_wq_online;
134
135 struct io_cb_cancel_data {
136 work_cancel_fn *fn;
137 void *data;
138 int nr_running;
139 int nr_pending;
140 bool cancel_all;
141 };
142
143 static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index);
144 static void io_wqe_dec_running(struct io_worker *worker);
145 static bool io_acct_cancel_pending_work(struct io_wqe *wqe,
146 struct io_wqe_acct *acct,
147 struct io_cb_cancel_data *match);
148 static void create_worker_cb(struct callback_head *cb);
149 static void io_wq_cancel_tw_create(struct io_wq *wq);
150
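/*
 * Take a reference on a worker; fails if the worker is already on its
 * way out (refcount has dropped to zero).
 */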
151 static bool io_worker_get(struct io_worker *worker)
152 {
153 return refcount_inc_not_zero(&worker->ref);
154 }
155
156 static void io_worker_release(struct io_worker *worker)
157 {
158 if (refcount_dec_and_test(&worker->ref))
159 complete(&worker->ref_done);
160 }
161
162 static inline struct io_wqe_acct *io_get_acct(struct io_wqe *wqe, bool bound)
163 {
164 return &wqe->acct[bound ? IO_WQ_ACCT_BOUND : IO_WQ_ACCT_UNBOUND];
165 }
166
167 static inline struct io_wqe_acct *io_work_get_acct(struct io_wqe *wqe,
168 struct io_wq_work *work)
169 {
170 return io_get_acct(wqe, !(work->flags & IO_WQ_WORK_UNBOUND));
171 }
172
173 static inline struct io_wqe_acct *io_wqe_get_acct(struct io_worker *worker)
174 {
175 return io_get_acct(worker->wqe, worker->flags & IO_WORKER_F_BOUND);
176 }
177
178 static void io_worker_ref_put(struct io_wq *wq)
179 {
180 if (atomic_dec_and_test(&wq->worker_refs))
181 complete(&wq->worker_done);
182 }
183
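/*
 * A queued worker-creation task_work item is being cancelled before it
 * could run: drop the running/worker accounting, the wq worker reference
 * and the create_state claim associated with it.
 */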
184 static void io_worker_cancel_cb(struct io_worker *worker)
185 {
186 struct io_wqe_acct *acct = io_wqe_get_acct(worker);
187 struct io_wqe *wqe = worker->wqe;
188 struct io_wq *wq = wqe->wq;
189
190 atomic_dec(&acct->nr_running);
191 raw_spin_lock(&worker->wqe->lock);
192 acct->nr_workers--;
193 raw_spin_unlock(&worker->wqe->lock);
194 io_worker_ref_put(wq);
195 clear_bit_unlock(0, &worker->create_state);
196 io_worker_release(worker);
197 }
198
199 static bool io_task_worker_match(struct callback_head *cb, void *data)
200 {
201 struct io_worker *worker;
202
203 if (cb->func != create_worker_cb)
204 return false;
205 worker = container_of(cb, struct io_worker, create_work);
206 return worker == data;
207 }
208
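/*
 * Worker thread is exiting: cancel any worker-creation task_work still
 * queued for it, wait for outstanding references to drop, unlink it from
 * the wqe lists, and finally free the worker and exit the task.
 */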
209 static void io_worker_exit(struct io_worker *worker)
210 {
211 struct io_wqe *wqe = worker->wqe;
212 struct io_wq *wq = wqe->wq;
213
214 while (1) {
215 struct callback_head *cb = task_work_cancel_match(wq->task,
216 io_task_worker_match, worker);
217
218 if (!cb)
219 break;
220 io_worker_cancel_cb(worker);
221 }
222
223 io_worker_release(worker);
224 wait_for_completion(&worker->ref_done);
225
226 raw_spin_lock(&wqe->lock);
227 if (worker->flags & IO_WORKER_F_FREE)
228 hlist_nulls_del_rcu(&worker->nulls_node);
229 list_del_rcu(&worker->all_list);
230 raw_spin_unlock(&wqe->lock);
231 io_wqe_dec_running(worker);
232 worker->flags = 0;
233 preempt_disable();
234 current->flags &= ~PF_IO_WORKER;
235 preempt_enable();
236
237 kfree_rcu(worker, rcu);
238 io_worker_ref_put(wqe->wq);
239 do_exit(0);
240 }
241
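/*
 * Returns true if this accounting class has pending work and isn't
 * currently stalled waiting on a hashed work item.
 */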
242 static inline bool io_acct_run_queue(struct io_wqe_acct *acct)
243 {
244 bool ret = false;
245
246 raw_spin_lock(&acct->lock);
247 if (!wq_list_empty(&acct->work_list) &&
248 !test_bit(IO_ACCT_STALLED_BIT, &acct->flags))
249 ret = true;
250 raw_spin_unlock(&acct->lock);
251
252 return ret;
253 }
254
255 /*
256 * Check head of free list for an available worker. If one isn't available,
257 * caller must create one.
258 */
259 static bool io_wqe_activate_free_worker(struct io_wqe *wqe,
260 struct io_wqe_acct *acct)
261 __must_hold(RCU)
262 {
263 struct hlist_nulls_node *n;
264 struct io_worker *worker;
265
266 /*
267 * Iterate free_list and see if we can find an idle worker to
268 * activate. If a given worker is on the free_list but in the process
269 * of exiting, keep trying.
270 */
271 hlist_nulls_for_each_entry_rcu(worker, n, &wqe->free_list, nulls_node) {
272 if (!io_worker_get(worker))
273 continue;
274 if (io_wqe_get_acct(worker) != acct) {
275 io_worker_release(worker);
276 continue;
277 }
278 if (wake_up_process(worker->task)) {
279 io_worker_release(worker);
280 return true;
281 }
282 io_worker_release(worker);
283 }
284
285 return false;
286 }
287
288 /*
289 * We need a worker. If we find a free one, we're good. If not, and we're
290 * below the max number of workers, create one.
291 */
292 static bool io_wqe_create_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
293 {
294 /*
295 * Most likely an attempt to queue unbounded work on an io_wq that
296 * wasn't setup with any unbounded workers.
297 */
298 if (unlikely(!acct->max_workers))
299 pr_warn_once("io-wq is not configured for unbound workers");
300
301 raw_spin_lock(&wqe->lock);
302 if (acct->nr_workers >= acct->max_workers) {
303 raw_spin_unlock(&wqe->lock);
304 return true;
305 }
306 acct->nr_workers++;
307 raw_spin_unlock(&wqe->lock);
308 atomic_inc(&acct->nr_running);
309 atomic_inc(&wqe->wq->worker_refs);
310 return create_io_worker(wqe->wq, wqe, acct->index);
311 }
312
313 static void io_wqe_inc_running(struct io_worker *worker)
314 {
315 struct io_wqe_acct *acct = io_wqe_get_acct(worker);
316
317 atomic_inc(&acct->nr_running);
318 }
319
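/*
 * task_work callback that creates the new worker, provided the accounting
 * class is still below its max_workers limit. Otherwise the accounting
 * taken when the creation was queued is dropped again.
 */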
320 static void create_worker_cb(struct callback_head *cb)
321 {
322 struct io_worker *worker;
323 struct io_wq *wq;
324 struct io_wqe *wqe;
325 struct io_wqe_acct *acct;
326 bool do_create = false;
327
328 worker = container_of(cb, struct io_worker, create_work);
329 wqe = worker->wqe;
330 wq = wqe->wq;
331 acct = &wqe->acct[worker->create_index];
332 raw_spin_lock(&wqe->lock);
333 if (acct->nr_workers < acct->max_workers) {
334 acct->nr_workers++;
335 do_create = true;
336 }
337 raw_spin_unlock(&wqe->lock);
338 if (do_create) {
339 create_io_worker(wq, wqe, worker->create_index);
340 } else {
341 atomic_dec(&acct->nr_running);
342 io_worker_ref_put(wq);
343 }
344 clear_bit_unlock(0, &worker->create_state);
345 io_worker_release(worker);
346 }
347
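/*
 * Queue a worker-creation request as task_work on the wq's owning task.
 * On failure (wq exiting, worker going away, or a creation already
 * pending for this worker), the caller's nr_running and worker ref
 * accounting is undone and false is returned.
 */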
348 static bool io_queue_worker_create(struct io_worker *worker,
349 struct io_wqe_acct *acct,
350 task_work_func_t func)
351 {
352 struct io_wqe *wqe = worker->wqe;
353 struct io_wq *wq = wqe->wq;
354
355 /* raced with exit, just ignore create call */
356 if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
357 goto fail;
358 if (!io_worker_get(worker))
359 goto fail;
360 /*
361 * create_state manages ownership of create_work/index. We should
362 * only need one entry per worker, as the worker going to sleep
363 * will trigger the condition, and waking will clear it once it
364 * runs the task_work.
365 */
366 if (test_bit(0, &worker->create_state) ||
367 test_and_set_bit_lock(0, &worker->create_state))
368 goto fail_release;
369
370 atomic_inc(&wq->worker_refs);
371 init_task_work(&worker->create_work, func);
372 worker->create_index = acct->index;
373 if (!task_work_add(wq->task, &worker->create_work, TWA_SIGNAL)) {
374 /*
375 * EXIT may have been set after checking it above, check after
376 * adding the task_work and remove any creation item if it is
377 * now set. wq exit does that too, but we can have added this
378 * work item after we canceled in io_wq_exit_workers().
379 */
380 if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
381 io_wq_cancel_tw_create(wq);
382 io_worker_ref_put(wq);
383 return true;
384 }
385 io_worker_ref_put(wq);
386 clear_bit_unlock(0, &worker->create_state);
387 fail_release:
388 io_worker_release(worker);
389 fail:
390 atomic_dec(&acct->nr_running);
391 io_worker_ref_put(wq);
392 return false;
393 }
394
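/*
 * A worker is going to sleep. If it was the last running worker for its
 * accounting class and work is still pending, arrange for another worker
 * to be created so the queue keeps making progress.
 */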
395 static void io_wqe_dec_running(struct io_worker *worker)
396 {
397 struct io_wqe_acct *acct = io_wqe_get_acct(worker);
398 struct io_wqe *wqe = worker->wqe;
399
400 if (!(worker->flags & IO_WORKER_F_UP))
401 return;
402
403 if (!atomic_dec_and_test(&acct->nr_running))
404 return;
405 if (!io_acct_run_queue(acct))
406 return;
407
408 atomic_inc(&acct->nr_running);
409 atomic_inc(&wqe->wq->worker_refs);
410 io_queue_worker_create(worker, acct, create_worker_cb);
411 }
412
413 /*
414 * Worker will start processing some work. Move it to the busy list, if
415 * it's currently on the freelist
416 */
417 static void __io_worker_busy(struct io_wqe *wqe, struct io_worker *worker)
418 {
419 if (worker->flags & IO_WORKER_F_FREE) {
420 worker->flags &= ~IO_WORKER_F_FREE;
421 raw_spin_lock(&wqe->lock);
422 hlist_nulls_del_init_rcu(&worker->nulls_node);
423 raw_spin_unlock(&wqe->lock);
424 }
425 }
426
427 /*
428 * No work, worker going to sleep. Move the worker to the freelist if
429 * it isn't there already, so that io_wqe_activate_free_worker() can
430 * find and wake it when new work is queued. Caller holds wqe->lock.
433 */
434 static void __io_worker_idle(struct io_wqe *wqe, struct io_worker *worker)
435 __must_hold(wqe->lock)
436 {
437 if (!(worker->flags & IO_WORKER_F_FREE)) {
438 worker->flags |= IO_WORKER_F_FREE;
439 hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
440 }
441 }
442
443 static inline unsigned int io_get_work_hash(struct io_wq_work *work)
444 {
445 return work->flags >> IO_WQ_HASH_SHIFT;
446 }
447
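/*
 * Add this wqe to the hash waitqueue so it is woken when the contended
 * hash bit is cleared. Returns true if the bit already became clear
 * again, in which case the caller can retry straight away.
 */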
448 static bool io_wait_on_hash(struct io_wqe *wqe, unsigned int hash)
449 {
450 struct io_wq *wq = wqe->wq;
451 bool ret = false;
452
453 spin_lock_irq(&wq->hash->wait.lock);
454 if (list_empty(&wqe->wait.entry)) {
455 __add_wait_queue(&wq->hash->wait, &wqe->wait);
456 if (!test_bit(hash, &wq->hash->map)) {
457 __set_current_state(TASK_RUNNING);
458 list_del_init(&wqe->wait.entry);
459 ret = true;
460 }
461 }
462 spin_unlock_irq(&wq->hash->wait.lock);
463 return ret;
464 }
465
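/*
 * Pick the next runnable item off the pending list. Unhashed work can run
 * immediately; hashed work only runs if no other worker currently owns
 * that hash. If everything left is blocked on busy hashes, mark the acct
 * stalled and wait for a hash to be released.
 */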
466 static struct io_wq_work *io_get_next_work(struct io_wqe_acct *acct,
467 struct io_worker *worker)
468 __must_hold(acct->lock)
469 {
470 struct io_wq_work_node *node, *prev;
471 struct io_wq_work *work, *tail;
472 unsigned int stall_hash = -1U;
473 struct io_wqe *wqe = worker->wqe;
474
475 wq_list_for_each(node, prev, &acct->work_list) {
476 unsigned int hash;
477
478 work = container_of(node, struct io_wq_work, list);
479
480 /* not hashed, can run anytime */
481 if (!io_wq_is_hashed(work)) {
482 wq_list_del(&acct->work_list, node, prev);
483 return work;
484 }
485
486 hash = io_get_work_hash(work);
487 /* all items with this hash lie in [work, tail] */
488 tail = wqe->hash_tail[hash];
489
490 /* hashed, can run if not already running */
491 if (!test_and_set_bit(hash, &wqe->wq->hash->map)) {
492 wqe->hash_tail[hash] = NULL;
493 wq_list_cut(&acct->work_list, &tail->list, prev);
494 return work;
495 }
496 if (stall_hash == -1U)
497 stall_hash = hash;
498 /* fast forward to a next hash, for-each will fix up @prev */
499 node = &tail->list;
500 }
501
502 if (stall_hash != -1U) {
503 bool unstalled;
504
505 /*
506 * Set this before dropping the lock to avoid racing with new
507 * work being added and clearing the stalled bit.
508 */
509 set_bit(IO_ACCT_STALLED_BIT, &acct->flags);
510 raw_spin_unlock(&acct->lock);
511 unstalled = io_wait_on_hash(wqe, stall_hash);
512 raw_spin_lock(&acct->lock);
513 if (unstalled) {
514 clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);
515 if (wq_has_sleeper(&wqe->wq->hash->wait))
516 wake_up(&wqe->wq->hash->wait);
517 }
518 }
519
520 return NULL;
521 }
522
523 static void io_assign_current_work(struct io_worker *worker,
524 struct io_wq_work *work)
525 {
526 if (work) {
527 io_run_task_work();
528 cond_resched();
529 }
530
531 raw_spin_lock(&worker->lock);
532 worker->cur_work = work;
533 worker->next_work = NULL;
534 raw_spin_unlock(&worker->lock);
535 }
536
537 static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work);
538
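/*
 * Main work-processing loop: repeatedly pull work off the pending list
 * and execute it, including any dependent (linked) items, until the list
 * is empty or we stall on hashed work.
 */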
539 static void io_worker_handle_work(struct io_worker *worker)
540 {
541 struct io_wqe_acct *acct = io_wqe_get_acct(worker);
542 struct io_wqe *wqe = worker->wqe;
543 struct io_wq *wq = wqe->wq;
544 bool do_kill = test_bit(IO_WQ_BIT_EXIT, &wq->state);
545
546 do {
547 struct io_wq_work *work;
548
549 /*
550 * If we got some work, mark us as busy. If we didn't, but
551 * the list isn't empty, it means we stalled on hashed work.
552 * Mark us stalled so we don't keep looking for work when we
553 * can't make progress, any work completion or insertion will
554 * clear the stalled flag.
555 */
556 raw_spin_lock(&acct->lock);
557 work = io_get_next_work(acct, worker);
558 raw_spin_unlock(&acct->lock);
559 if (work) {
560 __io_worker_busy(wqe, worker);
561
562 /*
563 * Make sure cancelation can find this, even before
564 * it becomes the active work. That avoids a window
565 * where the work has been removed from our general
566 * work list, but isn't yet discoverable as the
567 * current work item for this worker.
568 */
569 raw_spin_lock(&worker->lock);
570 worker->next_work = work;
571 raw_spin_unlock(&worker->lock);
572 } else {
573 break;
574 }
575 io_assign_current_work(worker, work);
576 __set_current_state(TASK_RUNNING);
577
578 /* handle a whole dependent link */
579 do {
580 struct io_wq_work *next_hashed, *linked;
581 unsigned int hash = io_get_work_hash(work);
582
583 next_hashed = wq_next_work(work);
584
585 if (unlikely(do_kill) && (work->flags & IO_WQ_WORK_UNBOUND))
586 work->flags |= IO_WQ_WORK_CANCEL;
587 wq->do_work(work);
588 io_assign_current_work(worker, NULL);
589
590 linked = wq->free_work(work);
591 work = next_hashed;
592 if (!work && linked && !io_wq_is_hashed(linked)) {
593 work = linked;
594 linked = NULL;
595 }
596 io_assign_current_work(worker, work);
597 if (linked)
598 io_wqe_enqueue(wqe, linked);
599
600 if (hash != -1U && !next_hashed) {
601 /* serialize hash clear with wake_up() */
602 spin_lock_irq(&wq->hash->wait.lock);
603 clear_bit(hash, &wq->hash->map);
604 clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);
605 spin_unlock_irq(&wq->hash->wait.lock);
606 if (wq_has_sleeper(&wq->hash->wait))
607 wake_up(&wq->hash->wait);
608 }
609 } while (work);
610 } while (1);
611 }
612
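/*
 * Thread function for an io-wq worker. Processes work while it's
 * available, otherwise idles with a timeout; an idle timeout makes the
 * worker exit unless it is the last one for its accounting class and its
 * CPU affinity still matches.
 */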
613 static int io_wqe_worker(void *data)
614 {
615 struct io_worker *worker = data;
616 struct io_wqe_acct *acct = io_wqe_get_acct(worker);
617 struct io_wqe *wqe = worker->wqe;
618 struct io_wq *wq = wqe->wq;
619 bool exit_mask = false, last_timeout = false;
620 char buf[TASK_COMM_LEN];
621
622 worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING);
623
624 snprintf(buf, sizeof(buf), "iou-wrk-%d", wq->task->pid);
625 set_task_comm(current, buf);
626
627 while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
628 long ret;
629
630 set_current_state(TASK_INTERRUPTIBLE);
631 while (io_acct_run_queue(acct))
632 io_worker_handle_work(worker);
633
634 raw_spin_lock(&wqe->lock);
635 /*
636 * Last sleep timed out. Exit if we're not the last worker,
637 * or if someone modified our affinity.
638 */
639 if (last_timeout && (exit_mask || acct->nr_workers > 1)) {
640 acct->nr_workers--;
641 raw_spin_unlock(&wqe->lock);
642 __set_current_state(TASK_RUNNING);
643 break;
644 }
645 last_timeout = false;
646 __io_worker_idle(wqe, worker);
647 raw_spin_unlock(&wqe->lock);
648 if (io_run_task_work())
649 continue;
650 ret = schedule_timeout(WORKER_IDLE_TIMEOUT);
651 if (signal_pending(current)) {
652 struct ksignal ksig;
653
654 if (!get_signal(&ksig))
655 continue;
656 break;
657 }
658 if (!ret) {
659 last_timeout = true;
660 exit_mask = !cpumask_test_cpu(raw_smp_processor_id(),
661 wqe->cpu_mask);
662 }
663 }
664
665 if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
666 io_worker_handle_work(worker);
667
668 io_worker_exit(worker);
669 return 0;
670 }
671
672 /*
673 * Called when a worker is scheduled in. Mark us as currently running.
674 */
675 void io_wq_worker_running(struct task_struct *tsk)
676 {
677 struct io_worker *worker = tsk->worker_private;
678
679 if (!worker)
680 return;
681 if (!(worker->flags & IO_WORKER_F_UP))
682 return;
683 if (worker->flags & IO_WORKER_F_RUNNING)
684 return;
685 worker->flags |= IO_WORKER_F_RUNNING;
686 io_wqe_inc_running(worker);
687 }
688
689 /*
690 * Called when worker is going to sleep. If there are no workers currently
691 * running and we have work pending, wake up a free one or create a new one.
692 */
693 void io_wq_worker_sleeping(struct task_struct *tsk)
694 {
695 struct io_worker *worker = tsk->worker_private;
696
697 if (!worker)
698 return;
699 if (!(worker->flags & IO_WORKER_F_UP))
700 return;
701 if (!(worker->flags & IO_WORKER_F_RUNNING))
702 return;
703
704 worker->flags &= ~IO_WORKER_F_RUNNING;
705 io_wqe_dec_running(worker);
706 }
707
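/*
 * Hook a freshly created task up to its io_worker, add the worker to the
 * wqe's free and all lists, and let the new task start running.
 */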
708 static void io_init_new_worker(struct io_wqe *wqe, struct io_worker *worker,
709 struct task_struct *tsk)
710 {
711 tsk->worker_private = worker;
712 worker->task = tsk;
713 set_cpus_allowed_ptr(tsk, wqe->cpu_mask);
714
715 raw_spin_lock(&wqe->lock);
716 hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
717 list_add_tail_rcu(&worker->all_list, &wqe->all_list);
718 worker->flags |= IO_WORKER_F_FREE;
719 raw_spin_unlock(&wqe->lock);
720 wake_up_new_task(tsk);
721 }
722
723 static bool io_wq_work_match_all(struct io_wq_work *work, void *data)
724 {
725 return true;
726 }
727
728 static inline bool io_should_retry_thread(long err)
729 {
730 /*
731 * Prevent perpetual task_work retry, if the task (or its group) is
732 * exiting.
733 */
734 if (fatal_signal_pending(current))
735 return false;
736
737 switch (err) {
738 case -EAGAIN:
739 case -ERESTARTSYS:
740 case -ERESTARTNOINTR:
741 case -ERESTARTNOHAND:
742 return true;
743 default:
744 return false;
745 }
746 }
747
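/*
 * task_work continuation of a deferred worker creation. On success the
 * new task is initialised; a hard failure drops the accounting and, if no
 * workers remain for the class, cancels its pending work; a transient
 * failure is retried from workqueue context.
 */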
748 static void create_worker_cont(struct callback_head *cb)
749 {
750 struct io_worker *worker;
751 struct task_struct *tsk;
752 struct io_wqe *wqe;
753
754 worker = container_of(cb, struct io_worker, create_work);
755 clear_bit_unlock(0, &worker->create_state);
756 wqe = worker->wqe;
757 tsk = create_io_thread(io_wqe_worker, worker, wqe->node);
758 if (!IS_ERR(tsk)) {
759 io_init_new_worker(wqe, worker, tsk);
760 io_worker_release(worker);
761 return;
762 } else if (!io_should_retry_thread(PTR_ERR(tsk))) {
763 struct io_wqe_acct *acct = io_wqe_get_acct(worker);
764
765 atomic_dec(&acct->nr_running);
766 raw_spin_lock(&wqe->lock);
767 acct->nr_workers--;
768 if (!acct->nr_workers) {
769 struct io_cb_cancel_data match = {
770 .fn = io_wq_work_match_all,
771 .cancel_all = true,
772 };
773
774 raw_spin_unlock(&wqe->lock);
775 while (io_acct_cancel_pending_work(wqe, acct, &match))
776 ;
777 } else {
778 raw_spin_unlock(&wqe->lock);
779 }
780 io_worker_ref_put(wqe->wq);
781 kfree(worker);
782 return;
783 }
784
785 /* re-create attempts grab a new worker ref, drop the existing one */
786 io_worker_release(worker);
787 schedule_work(&worker->work);
788 }
789
790 static void io_workqueue_create(struct work_struct *work)
791 {
792 struct io_worker *worker = container_of(work, struct io_worker, work);
793 struct io_wqe_acct *acct = io_wqe_get_acct(worker);
794
795 if (!io_queue_worker_create(worker, acct, create_worker_cont))
796 kfree(worker);
797 }
798
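/*
 * Allocate a new io_worker and create a thread for it. Transient thread
 * creation failures are retried from workqueue context; hard failures
 * undo the accounting taken by the caller.
 */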
799 static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index)
800 {
801 struct io_wqe_acct *acct = &wqe->acct[index];
802 struct io_worker *worker;
803 struct task_struct *tsk;
804
805 __set_current_state(TASK_RUNNING);
806
807 worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, wqe->node);
808 if (!worker) {
809 fail:
810 atomic_dec(&acct->nr_running);
811 raw_spin_lock(&wqe->lock);
812 acct->nr_workers--;
813 raw_spin_unlock(&wqe->lock);
814 io_worker_ref_put(wq);
815 return false;
816 }
817
818 refcount_set(&worker->ref, 1);
819 worker->wqe = wqe;
820 raw_spin_lock_init(&worker->lock);
821 init_completion(&worker->ref_done);
822
823 if (index == IO_WQ_ACCT_BOUND)
824 worker->flags |= IO_WORKER_F_BOUND;
825
826 tsk = create_io_thread(io_wqe_worker, worker, wqe->node);
827 if (!IS_ERR(tsk)) {
828 io_init_new_worker(wqe, worker, tsk);
829 } else if (!io_should_retry_thread(PTR_ERR(tsk))) {
830 kfree(worker);
831 goto fail;
832 } else {
833 INIT_WORK(&worker->work, io_workqueue_create);
834 schedule_work(&worker->work);
835 }
836
837 return true;
838 }
839
840 /*
841 * Iterate the passed in list and call the specific function for each
842 * worker that isn't exiting
843 */
844 static bool io_wq_for_each_worker(struct io_wqe *wqe,
845 bool (*func)(struct io_worker *, void *),
846 void *data)
847 {
848 struct io_worker *worker;
849 bool ret = false;
850
851 list_for_each_entry_rcu(worker, &wqe->all_list, all_list) {
852 if (io_worker_get(worker)) {
853 /* no task if node is/was offline */
854 if (worker->task)
855 ret = func(worker, data);
856 io_worker_release(worker);
857 if (ret)
858 break;
859 }
860 }
861
862 return ret;
863 }
864
865 static bool io_wq_worker_wake(struct io_worker *worker, void *data)
866 {
867 __set_notify_signal(worker->task);
868 wake_up_process(worker->task);
869 return false;
870 }
871
872 static void io_run_cancel(struct io_wq_work *work, struct io_wqe *wqe)
873 {
874 struct io_wq *wq = wqe->wq;
875
876 do {
877 work->flags |= IO_WQ_WORK_CANCEL;
878 wq->do_work(work);
879 work = wq->free_work(work);
880 } while (work);
881 }
882
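/*
 * Append work to the pending list. Hashed work is chained behind the
 * current tail for its hash value so that items sharing a hash stay in
 * order.
 */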
883 static void io_wqe_insert_work(struct io_wqe *wqe, struct io_wq_work *work)
884 {
885 struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
886 unsigned int hash;
887 struct io_wq_work *tail;
888
889 if (!io_wq_is_hashed(work)) {
890 append:
891 wq_list_add_tail(&work->list, &acct->work_list);
892 return;
893 }
894
895 hash = io_get_work_hash(work);
896 tail = wqe->hash_tail[hash];
897 wqe->hash_tail[hash] = work;
898 if (!tail)
899 goto append;
900
901 wq_list_add_after(&work->list, &tail->list, &acct->work_list);
902 }
903
904 static bool io_wq_work_match_item(struct io_wq_work *work, void *data)
905 {
906 return work == data;
907 }
908
909 static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
910 {
911 struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
912 struct io_cb_cancel_data match;
913 unsigned work_flags = work->flags;
914 bool do_create;
915
916 /*
917 * If io-wq is exiting for this task, or if the request has explicitly
918 * been marked as one that should not get executed, cancel it here.
919 */
920 if (test_bit(IO_WQ_BIT_EXIT, &wqe->wq->state) ||
921 (work->flags & IO_WQ_WORK_CANCEL)) {
922 io_run_cancel(work, wqe);
923 return;
924 }
925
926 raw_spin_lock(&acct->lock);
927 io_wqe_insert_work(wqe, work);
928 clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);
929 raw_spin_unlock(&acct->lock);
930
931 raw_spin_lock(&wqe->lock);
932 rcu_read_lock();
933 do_create = !io_wqe_activate_free_worker(wqe, acct);
934 rcu_read_unlock();
935
936 raw_spin_unlock(&wqe->lock);
937
938 if (do_create && ((work_flags & IO_WQ_WORK_CONCURRENT) ||
939 !atomic_read(&acct->nr_running))) {
940 bool did_create;
941
942 did_create = io_wqe_create_worker(wqe, acct);
943 if (likely(did_create))
944 return;
945
946 raw_spin_lock(&wqe->lock);
947 if (acct->nr_workers) {
948 raw_spin_unlock(&wqe->lock);
949 return;
950 }
951 raw_spin_unlock(&wqe->lock);
952
953 /* fatal condition, failed to create the first worker */
954 match.fn = io_wq_work_match_item,
955 match.data = work,
956 match.cancel_all = false,
957
958 io_acct_cancel_pending_work(wqe, acct, &match);
959 }
960 }
961
962 void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work)
963 {
964 struct io_wqe *wqe = wq->wqes[numa_node_id()];
965
966 io_wqe_enqueue(wqe, work);
967 }
968
969 /*
970 * Work items that hash to the same value will not be done in parallel.
971 * Used to limit concurrent writes, generally hashed by inode.
972 */
973 void io_wq_hash_work(struct io_wq_work *work, void *val)
974 {
975 unsigned int bit;
976
977 bit = hash_ptr(val, IO_WQ_HASH_ORDER);
978 work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT));
979 }
980
981 static bool __io_wq_worker_cancel(struct io_worker *worker,
982 struct io_cb_cancel_data *match,
983 struct io_wq_work *work)
984 {
985 if (work && match->fn(work, match->data)) {
986 work->flags |= IO_WQ_WORK_CANCEL;
987 __set_notify_signal(worker->task);
988 return true;
989 }
990
991 return false;
992 }
993
994 static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
995 {
996 struct io_cb_cancel_data *match = data;
997
998 /*
999 * Hold the lock to avoid ->cur_work going out of scope, caller
1000 * may dereference the passed in work.
1001 */
1002 raw_spin_lock(&worker->lock);
1003 if (__io_wq_worker_cancel(worker, match, worker->cur_work) ||
1004 __io_wq_worker_cancel(worker, match, worker->next_work))
1005 match->nr_running++;
1006 raw_spin_unlock(&worker->lock);
1007
1008 return match->nr_running && !match->cancel_all;
1009 }
1010
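/*
 * Unlink a pending work item, updating the hash tail pointer if this was
 * the last queued item for its hash.
 */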
1011 static inline void io_wqe_remove_pending(struct io_wqe *wqe,
1012 struct io_wq_work *work,
1013 struct io_wq_work_node *prev)
1014 {
1015 struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
1016 unsigned int hash = io_get_work_hash(work);
1017 struct io_wq_work *prev_work = NULL;
1018
1019 if (io_wq_is_hashed(work) && work == wqe->hash_tail[hash]) {
1020 if (prev)
1021 prev_work = container_of(prev, struct io_wq_work, list);
1022 if (prev_work && io_get_work_hash(prev_work) == hash)
1023 wqe->hash_tail[hash] = prev_work;
1024 else
1025 wqe->hash_tail[hash] = NULL;
1026 }
1027 wq_list_del(&acct->work_list, &work->list, prev);
1028 }
1029
1030 static bool io_acct_cancel_pending_work(struct io_wqe *wqe,
1031 struct io_wqe_acct *acct,
1032 struct io_cb_cancel_data *match)
1033 {
1034 struct io_wq_work_node *node, *prev;
1035 struct io_wq_work *work;
1036
1037 raw_spin_lock(&acct->lock);
1038 wq_list_for_each(node, prev, &acct->work_list) {
1039 work = container_of(node, struct io_wq_work, list);
1040 if (!match->fn(work, match->data))
1041 continue;
1042 io_wqe_remove_pending(wqe, work, prev);
1043 raw_spin_unlock(&acct->lock);
1044 io_run_cancel(work, wqe);
1045 match->nr_pending++;
1046 /* not safe to continue after unlock */
1047 return true;
1048 }
1049 raw_spin_unlock(&acct->lock);
1050
1051 return false;
1052 }
1053
1054 static void io_wqe_cancel_pending_work(struct io_wqe *wqe,
1055 struct io_cb_cancel_data *match)
1056 {
1057 int i;
1058 retry:
1059 for (i = 0; i < IO_WQ_ACCT_NR; i++) {
1060 struct io_wqe_acct *acct = io_get_acct(wqe, i == 0);
1061
1062 if (io_acct_cancel_pending_work(wqe, acct, match)) {
1063 if (match->cancel_all)
1064 goto retry;
1065 break;
1066 }
1067 }
1068 }
1069
1070 static void io_wqe_cancel_running_work(struct io_wqe *wqe,
1071 struct io_cb_cancel_data *match)
1072 {
1073 rcu_read_lock();
1074 io_wq_for_each_worker(wqe, io_wq_worker_cancel, match);
1075 rcu_read_unlock();
1076 }
1077
1078 enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
1079 void *data, bool cancel_all)
1080 {
1081 struct io_cb_cancel_data match = {
1082 .fn = cancel,
1083 .data = data,
1084 .cancel_all = cancel_all,
1085 };
1086 int node;
1087
1088 /*
1089 * First check pending list, if we're lucky we can just remove it
1090 * from there. CANCEL_OK means that the work is returned as-new,
1091 * no completion will be posted for it.
1092 *
1093 * Then check if a free (going busy) or busy worker has the work
1094 * currently running. If we find it there, we'll return CANCEL_RUNNING
1095 * as an indication that we attempt to signal cancellation. The
1096 * completion will run normally in this case.
1097 *
1098 * Do both of these while holding the wqe->lock, to ensure that
1099 * we'll find a work item regardless of state.
1100 */
1101 for_each_node(node) {
1102 struct io_wqe *wqe = wq->wqes[node];
1103
1104 io_wqe_cancel_pending_work(wqe, &match);
1105 if (match.nr_pending && !match.cancel_all)
1106 return IO_WQ_CANCEL_OK;
1107
1108 raw_spin_lock(&wqe->lock);
1109 io_wqe_cancel_running_work(wqe, &match);
1110 raw_spin_unlock(&wqe->lock);
1111 if (match.nr_running && !match.cancel_all)
1112 return IO_WQ_CANCEL_RUNNING;
1113 }
1114
1115 if (match.nr_running)
1116 return IO_WQ_CANCEL_RUNNING;
1117 if (match.nr_pending)
1118 return IO_WQ_CANCEL_OK;
1119 return IO_WQ_CANCEL_NOTFOUND;
1120 }
1121
1122 static int io_wqe_hash_wake(struct wait_queue_entry *wait, unsigned mode,
1123 int sync, void *key)
1124 {
1125 struct io_wqe *wqe = container_of(wait, struct io_wqe, wait);
1126 int i;
1127
1128 list_del_init(&wait->entry);
1129
1130 rcu_read_lock();
1131 for (i = 0; i < IO_WQ_ACCT_NR; i++) {
1132 struct io_wqe_acct *acct = &wqe->acct[i];
1133
1134 if (test_and_clear_bit(IO_ACCT_STALLED_BIT, &acct->flags))
1135 io_wqe_activate_free_worker(wqe, acct);
1136 }
1137 rcu_read_unlock();
1138 return 1;
1139 }
1140
1141 struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
1142 {
1143 int ret, node, i;
1144 struct io_wq *wq;
1145
1146 if (WARN_ON_ONCE(!data->free_work || !data->do_work))
1147 return ERR_PTR(-EINVAL);
1148 if (WARN_ON_ONCE(!bounded))
1149 return ERR_PTR(-EINVAL);
1150
1151 wq = kzalloc(struct_size(wq, wqes, nr_node_ids), GFP_KERNEL);
1152 if (!wq)
1153 return ERR_PTR(-ENOMEM);
1154 ret = cpuhp_state_add_instance_nocalls(io_wq_online, &wq->cpuhp_node);
1155 if (ret)
1156 goto err_wq;
1157
1158 refcount_inc(&data->hash->refs);
1159 wq->hash = data->hash;
1160 wq->free_work = data->free_work;
1161 wq->do_work = data->do_work;
1162
1163 ret = -ENOMEM;
1164 for_each_node(node) {
1165 struct io_wqe *wqe;
1166 int alloc_node = node;
1167
1168 if (!node_online(alloc_node))
1169 alloc_node = NUMA_NO_NODE;
1170 wqe = kzalloc_node(sizeof(struct io_wqe), GFP_KERNEL, alloc_node);
1171 if (!wqe)
1172 goto err;
1173 wq->wqes[node] = wqe;
1174 if (!alloc_cpumask_var(&wqe->cpu_mask, GFP_KERNEL))
1175 goto err;
1176 cpumask_copy(wqe->cpu_mask, cpumask_of_node(node));
1177 wqe->node = alloc_node;
1178 wqe->acct[IO_WQ_ACCT_BOUND].max_workers = bounded;
1179 wqe->acct[IO_WQ_ACCT_UNBOUND].max_workers =
1180 task_rlimit(current, RLIMIT_NPROC);
1181 INIT_LIST_HEAD(&wqe->wait.entry);
1182 wqe->wait.func = io_wqe_hash_wake;
1183 for (i = 0; i < IO_WQ_ACCT_NR; i++) {
1184 struct io_wqe_acct *acct = &wqe->acct[i];
1185
1186 acct->index = i;
1187 atomic_set(&acct->nr_running, 0);
1188 INIT_WQ_LIST(&acct->work_list);
1189 raw_spin_lock_init(&acct->lock);
1190 }
1191 wqe->wq = wq;
1192 raw_spin_lock_init(&wqe->lock);
1193 INIT_HLIST_NULLS_HEAD(&wqe->free_list, 0);
1194 INIT_LIST_HEAD(&wqe->all_list);
1195 }
1196
1197 wq->task = get_task_struct(data->task);
1198 atomic_set(&wq->worker_refs, 1);
1199 init_completion(&wq->worker_done);
1200 return wq;
1201 err:
1202 io_wq_put_hash(data->hash);
1203 cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);
1204 for_each_node(node) {
1205 if (!wq->wqes[node])
1206 continue;
1207 free_cpumask_var(wq->wqes[node]->cpu_mask);
1208 kfree(wq->wqes[node]);
1209 }
1210 err_wq:
1211 kfree(wq);
1212 return ERR_PTR(ret);
1213 }
1214
1215 static bool io_task_work_match(struct callback_head *cb, void *data)
1216 {
1217 struct io_worker *worker;
1218
1219 if (cb->func != create_worker_cb && cb->func != create_worker_cont)
1220 return false;
1221 worker = container_of(cb, struct io_worker, create_work);
1222 return worker->wqe->wq == data;
1223 }
1224
1225 void io_wq_exit_start(struct io_wq *wq)
1226 {
1227 set_bit(IO_WQ_BIT_EXIT, &wq->state);
1228 }
1229
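/*
 * Cancel any worker-creation task_work items still queued on the wq's
 * owning task, undoing their accounting as we go.
 */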
1230 static void io_wq_cancel_tw_create(struct io_wq *wq)
1231 {
1232 struct callback_head *cb;
1233
1234 while ((cb = task_work_cancel_match(wq->task, io_task_work_match, wq)) != NULL) {
1235 struct io_worker *worker;
1236
1237 worker = container_of(cb, struct io_worker, create_work);
1238 io_worker_cancel_cb(worker);
1239 /*
1240 * Only the worker continuation helper has worker allocated and
1241 * hence needs freeing.
1242 */
1243 if (cb->func == create_worker_cont)
1244 kfree(worker);
1245 }
1246 }
1247
1248 static void io_wq_exit_workers(struct io_wq *wq)
1249 {
1250 int node;
1251
1252 if (!wq->task)
1253 return;
1254
1255 io_wq_cancel_tw_create(wq);
1256
1257 rcu_read_lock();
1258 for_each_node(node) {
1259 struct io_wqe *wqe = wq->wqes[node];
1260
1261 io_wq_for_each_worker(wqe, io_wq_worker_wake, NULL);
1262 }
1263 rcu_read_unlock();
1264 io_worker_ref_put(wq);
1265 wait_for_completion(&wq->worker_done);
1266
1267 for_each_node(node) {
1268 spin_lock_irq(&wq->hash->wait.lock);
1269 list_del_init(&wq->wqes[node]->wait.entry);
1270 spin_unlock_irq(&wq->hash->wait.lock);
1271 }
1272 put_task_struct(wq->task);
1273 wq->task = NULL;
1274 }
1275
1276 static void io_wq_destroy(struct io_wq *wq)
1277 {
1278 int node;
1279
1280 cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);
1281
1282 for_each_node(node) {
1283 struct io_wqe *wqe = wq->wqes[node];
1284 struct io_cb_cancel_data match = {
1285 .fn = io_wq_work_match_all,
1286 .cancel_all = true,
1287 };
1288 io_wqe_cancel_pending_work(wqe, &match);
1289 free_cpumask_var(wqe->cpu_mask);
1290 kfree(wqe);
1291 }
1292 io_wq_put_hash(wq->hash);
1293 kfree(wq);
1294 }
1295
1296 void io_wq_put_and_exit(struct io_wq *wq)
1297 {
1298 WARN_ON_ONCE(!test_bit(IO_WQ_BIT_EXIT, &wq->state));
1299
1300 io_wq_exit_workers(wq);
1301 io_wq_destroy(wq);
1302 }
1303
1304 struct online_data {
1305 unsigned int cpu;
1306 bool online;
1307 };
1308
1309 static bool io_wq_worker_affinity(struct io_worker *worker, void *data)
1310 {
1311 struct online_data *od = data;
1312
1313 if (od->online)
1314 cpumask_set_cpu(od->cpu, worker->wqe->cpu_mask);
1315 else
1316 cpumask_clear_cpu(od->cpu, worker->wqe->cpu_mask);
1317 return false;
1318 }
1319
1320 static int __io_wq_cpu_online(struct io_wq *wq, unsigned int cpu, bool online)
1321 {
1322 struct online_data od = {
1323 .cpu = cpu,
1324 .online = online
1325 };
1326 int i;
1327
1328 rcu_read_lock();
1329 for_each_node(i)
1330 io_wq_for_each_worker(wq->wqes[i], io_wq_worker_affinity, &od);
1331 rcu_read_unlock();
1332 return 0;
1333 }
1334
1335 static int io_wq_cpu_online(unsigned int cpu, struct hlist_node *node)
1336 {
1337 struct io_wq *wq = hlist_entry_safe(node, struct io_wq, cpuhp_node);
1338
1339 return __io_wq_cpu_online(wq, cpu, true);
1340 }
1341
1342 static int io_wq_cpu_offline(unsigned int cpu, struct hlist_node *node)
1343 {
1344 struct io_wq *wq = hlist_entry_safe(node, struct io_wq, cpuhp_node);
1345
1346 return __io_wq_cpu_online(wq, cpu, false);
1347 }
1348
1349 int io_wq_cpu_affinity(struct io_wq *wq, cpumask_var_t mask)
1350 {
1351 int i;
1352
1353 rcu_read_lock();
1354 for_each_node(i) {
1355 struct io_wqe *wqe = wq->wqes[i];
1356
1357 if (mask)
1358 cpumask_copy(wqe->cpu_mask, mask);
1359 else
1360 cpumask_copy(wqe->cpu_mask, cpumask_of_node(i));
1361 }
1362 rcu_read_unlock();
1363 return 0;
1364 }
1365
1366 /*
1367 * Set max number of unbounded workers, returns old value. If new_count is 0,
1368 * then just return the old value.
1369 */
1370 int io_wq_max_workers(struct io_wq *wq, int *new_count)
1371 {
1372 int prev[IO_WQ_ACCT_NR];
1373 bool first_node = true;
1374 int i, node;
1375
1376 BUILD_BUG_ON((int) IO_WQ_ACCT_BOUND != (int) IO_WQ_BOUND);
1377 BUILD_BUG_ON((int) IO_WQ_ACCT_UNBOUND != (int) IO_WQ_UNBOUND);
1378 BUILD_BUG_ON((int) IO_WQ_ACCT_NR != 2);
1379
1380 for (i = 0; i < IO_WQ_ACCT_NR; i++) {
1381 if (new_count[i] > task_rlimit(current, RLIMIT_NPROC))
1382 new_count[i] = task_rlimit(current, RLIMIT_NPROC);
1383 }
1384
1385 for (i = 0; i < IO_WQ_ACCT_NR; i++)
1386 prev[i] = 0;
1387
1388 rcu_read_lock();
1389 for_each_node(node) {
1390 struct io_wqe *wqe = wq->wqes[node];
1391 struct io_wqe_acct *acct;
1392
1393 raw_spin_lock(&wqe->lock);
1394 for (i = 0; i < IO_WQ_ACCT_NR; i++) {
1395 acct = &wqe->acct[i];
1396 if (first_node)
1397 prev[i] = max_t(int, acct->max_workers, prev[i]);
1398 if (new_count[i])
1399 acct->max_workers = new_count[i];
1400 }
1401 raw_spin_unlock(&wqe->lock);
1402 first_node = false;
1403 }
1404 rcu_read_unlock();
1405
1406 for (i = 0; i < IO_WQ_ACCT_NR; i++)
1407 new_count[i] = prev[i];
1408
1409 return 0;
1410 }
1411
1412 static __init int io_wq_init(void)
1413 {
1414 int ret;
1415
1416 ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "io-wq/online",
1417 io_wq_cpu_online, io_wq_cpu_offline);
1418 if (ret < 0)
1419 return ret;
1420 io_wq_online = ret;
1421 return 0;
1422 }
1423 subsys_initcall(io_wq_init);
1424