Lines Matching refs:pwq (struct pool_workqueue references in kernel/workqueue.c)

381 static void show_pwq(struct pool_workqueue *pwq);
447 #define for_each_pwq(pwq, wq) \ argument
448 list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node, \
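for_each_pwq() (line 447) is an RCU-protected iteration over the workqueue's pwqs list; every per-workqueue walk below (flush, drain, destroy, freeze) goes through it. A minimal sketch of the iteration shape, using a plain singly linked list and omitting the RCU read-side protection the real macro requires:

struct pwq { struct pwq *next_on_wq; };
struct wq  { struct pwq *pwqs; };

/* Singly linked stand-in for list_for_each_entry_rcu() over wq->pwqs.
 * The real macro additionally requires RCU read-side protection or
 * wq->mutex, which this model omits. */
#define for_each_pwq(pwq, wq) \
        for ((pwq) = (wq)->pwqs; (pwq); (pwq) = (pwq)->next_on_wq)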
642 static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq, in set_work_pwq() argument
645 set_work_data(work, (unsigned long)pwq, in set_work_pwq()
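set_work_pwq() (line 642) packs the pwq pointer and per-work flag bits into the single word work->data; get_work_pwq() recovers the pointer by masking the flags back off. This only works because pwq allocations are aligned so the low bits are always zero, which is exactly what the BUG_ON() in init_pwq() (line 3912) asserts. A small sketch of the tagging scheme, with a hypothetical 8-bit flag mask standing in for WORK_STRUCT_FLAG_MASK:

#include <assert.h>
#include <stdint.h>

/* FLAG_MASK is a hypothetical stand-in for WORK_STRUCT_FLAG_MASK; the
 * real flag layout lives in include/linux/workqueue.h. */
#define FLAG_BITS 8
#define FLAG_MASK ((1UL << FLAG_BITS) - 1)

struct pwq;

static inline uintptr_t pack_work_data(struct pwq *pwq, unsigned long flags)
{
        uintptr_t p = (uintptr_t)pwq;

        assert(!(p & FLAG_MASK));          /* mirrors the BUG_ON() in init_pwq() */
        return p | (flags & FLAG_MASK);
}

static inline struct pwq *unpack_work_data(uintptr_t data)
{
        return (struct pwq *)(data & ~FLAG_MASK);   /* what get_work_pwq() does */
}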
1113 static void get_pwq(struct pool_workqueue *pwq) in get_pwq() argument
1115 lockdep_assert_held(&pwq->pool->lock); in get_pwq()
1116 WARN_ON_ONCE(pwq->refcnt <= 0); in get_pwq()
1117 pwq->refcnt++; in get_pwq()
1127 static void put_pwq(struct pool_workqueue *pwq) in put_pwq() argument
1129 lockdep_assert_held(&pwq->pool->lock); in put_pwq()
1130 if (likely(--pwq->refcnt)) in put_pwq()
1132 if (WARN_ON_ONCE(!(pwq->wq->flags & WQ_UNBOUND))) in put_pwq()
1142 schedule_work(&pwq->unbound_release_work); in put_pwq()
1151 static void put_pwq_unlocked(struct pool_workqueue *pwq) in put_pwq_unlocked() argument
1153 if (pwq) { in put_pwq_unlocked()
1158 raw_spin_lock_irq(&pwq->pool->lock); in put_pwq_unlocked()
1159 put_pwq(pwq); in put_pwq_unlocked()
1160 raw_spin_unlock_irq(&pwq->pool->lock); in put_pwq_unlocked()
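Lines 1113-1160 are the pwq reference-counting discipline: get_pwq() and put_pwq() both require pool->lock, and put_pwq_unlocked() wraps the lock for callers that do not hold it. When the last reference to an unbound pwq drops, put_pwq() cannot free in place (the pool lock is held), so it schedules unbound_release_work, whose handler (pwq_unbound_release_workfn(), line 3817) unlinks the pwq and frees it via call_rcu(). A userspace model of the discipline, with a pthread mutex standing in for pool->lock and a hypothetical release() callback standing in for the deferred release work:

#include <assert.h>
#include <pthread.h>

struct pool { pthread_mutex_t lock; };

struct pwq {
        struct pool *pool;
        int refcnt;                        /* protected by pool->lock */
        void (*release)(struct pwq *);     /* stands in for unbound_release_work */
};

static void get_pwq(struct pwq *pwq)       /* caller holds pool->lock */
{
        assert(pwq->refcnt > 0);           /* WARN_ON_ONCE(pwq->refcnt <= 0) */
        pwq->refcnt++;
}

static void put_pwq(struct pwq *pwq)       /* caller holds pool->lock */
{
        if (--pwq->refcnt)
                return;
        /* Last reference.  The kernel schedules unbound_release_work here
         * rather than freeing in place, since pool->lock is still held;
         * this model simply calls the release hook directly. */
        pwq->release(pwq);
}

static void put_pwq_unlocked(struct pwq *pwq)
{
        if (pwq) {
                pthread_mutex_lock(&pwq->pool->lock);
                put_pwq(pwq);
                pthread_mutex_unlock(&pwq->pool->lock);
        }
}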
1166 struct pool_workqueue *pwq = get_work_pwq(work); in pwq_activate_inactive_work() local
1169 if (list_empty(&pwq->pool->worklist)) in pwq_activate_inactive_work()
1170 pwq->pool->watchdog_ts = jiffies; in pwq_activate_inactive_work()
1171 move_linked_works(work, &pwq->pool->worklist, NULL); in pwq_activate_inactive_work()
1173 pwq->nr_active++; in pwq_activate_inactive_work()
1176 static void pwq_activate_first_inactive(struct pool_workqueue *pwq) in pwq_activate_first_inactive() argument
1178 struct work_struct *work = list_first_entry(&pwq->inactive_works, in pwq_activate_first_inactive()
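pwq_activate_inactive_work() and pwq_activate_first_inactive() (lines 1166-1178) move a throttled work from pwq->inactive_works onto the pool's shared worklist and count it against nr_active. A sketch of that hand-off, using a tail-pointer queue for the worklist:

#include <stddef.h>

struct work { struct work *next; };

struct pwq {
        struct work *inactive;             /* head of inactive_works */
        struct work **worklist_tail;       /* tail link of the pool's worklist */
        int nr_active;
};

/* Move the oldest inactive work onto the pool's worklist and count it
 * as active: the core of pwq_activate_first_inactive(). */
static void activate_first_inactive(struct pwq *pwq)
{
        struct work *work = pwq->inactive;

        if (!work)
                return;
        pwq->inactive = work->next;        /* unlink from inactive_works */
        work->next = NULL;
        *pwq->worklist_tail = work;        /* append to pool->worklist */
        pwq->worklist_tail = &work->next;
        pwq->nr_active++;
}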
1195 static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, unsigned long work_data) in pwq_dec_nr_in_flight() argument
1200 pwq->nr_active--; in pwq_dec_nr_in_flight()
1201 if (!list_empty(&pwq->inactive_works)) { in pwq_dec_nr_in_flight()
1203 if (pwq->nr_active < pwq->max_active) in pwq_dec_nr_in_flight()
1204 pwq_activate_first_inactive(pwq); in pwq_dec_nr_in_flight()
1208 pwq->nr_in_flight[color]--; in pwq_dec_nr_in_flight()
1211 if (likely(pwq->flush_color != color)) in pwq_dec_nr_in_flight()
1215 if (pwq->nr_in_flight[color]) in pwq_dec_nr_in_flight()
1219 pwq->flush_color = -1; in pwq_dec_nr_in_flight()
1225 if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush)) in pwq_dec_nr_in_flight()
1226 complete(&pwq->wq->first_flusher->done); in pwq_dec_nr_in_flight()
1228 put_pwq(pwq); in pwq_dec_nr_in_flight()
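pwq_dec_nr_in_flight() (line 1195) is the completion-side bookkeeping: release an nr_active slot, pull in an inactive work if there is now headroom, drop the per-color in-flight count, and, when this pwq was the last one still holding the in-progress flush color, complete the first flusher. A condensed model of the color accounting; NR_COLORS is a hypothetical stand-in for WORK_NR_COLORS, and the wq-wide nr_pwqs_to_flush counter is folded into the pwq for brevity:

#include <stdbool.h>

#define NR_COLORS 16                       /* hypothetical; WORK_NR_COLORS in the kernel */

struct pwq {
        int nr_in_flight[NR_COLORS];
        int flush_color;                   /* -1 when this pwq is not flushing */
        int nr_pwqs_to_flush;              /* lives on the workqueue in the kernel */
};

/* Returns true when this call retired the last work of the in-progress
 * flush color on the last pwq the flusher was waiting for, i.e. when
 * the kernel would complete(&wq->first_flusher->done). */
static bool dec_nr_in_flight(struct pwq *pwq, int color)
{
        pwq->nr_in_flight[color]--;

        if (pwq->flush_color != color)     /* not part of the current flush */
                return false;
        if (pwq->nr_in_flight[color])      /* flush color still has works */
                return false;

        pwq->flush_color = -1;             /* this pwq is done flushing */
        return --pwq->nr_pwqs_to_flush == 0;
}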
1265 struct pool_workqueue *pwq; in try_to_grab_pending() local
1304 pwq = get_work_pwq(work); in try_to_grab_pending()
1305 if (pwq && pwq->pool == pool) { in try_to_grab_pending()
1323 pwq_dec_nr_in_flight(pwq, *work_data_bits(work)); in try_to_grab_pending()
1355 static void insert_work(struct pool_workqueue *pwq, struct work_struct *work, in insert_work() argument
1358 struct worker_pool *pool = pwq->pool; in insert_work()
1364 set_work_pwq(work, pwq, extra_flags); in insert_work()
1366 get_pwq(pwq); in insert_work()
1424 struct pool_workqueue *pwq; in __queue_work() local
1453 pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu)); in __queue_work()
1457 pwq = per_cpu_ptr(wq->cpu_pwqs, cpu); in __queue_work()
1466 if (last_pool && last_pool != pwq->pool) { in __queue_work()
1474 pwq = worker->current_pwq; in __queue_work()
1478 raw_spin_lock(&pwq->pool->lock); in __queue_work()
1481 raw_spin_lock(&pwq->pool->lock); in __queue_work()
1492 if (unlikely(!pwq->refcnt)) { in __queue_work()
1494 raw_spin_unlock(&pwq->pool->lock); in __queue_work()
1504 trace_workqueue_queue_work(req_cpu, pwq, work); in __queue_work()
1509 pwq->nr_in_flight[pwq->work_color]++; in __queue_work()
1510 work_flags = work_color_to_flags(pwq->work_color); in __queue_work()
1512 if (likely(pwq->nr_active < pwq->max_active)) { in __queue_work()
1514 pwq->nr_active++; in __queue_work()
1515 worklist = &pwq->pool->worklist; in __queue_work()
1517 pwq->pool->watchdog_ts = jiffies; in __queue_work()
1520 worklist = &pwq->inactive_works; in __queue_work()
1524 insert_work(pwq, work, worklist, work_flags); in __queue_work()
1527 raw_spin_unlock(&pwq->pool->lock); in __queue_work()
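__queue_work() (lines 1424-1527) first picks the pwq (per-CPU for bound workqueues at line 1457, by NUMA node for unbound ones at line 1453, with a detour to the worker's current pwq if the work is still running on its last pool), then makes the throttling decision: below max_active the work goes straight to the pool's worklist, otherwise it is parked on pwq->inactive_works. A sketch of just the accounting branch at lines 1509-1520:

struct pwq {
        int nr_active;
        int max_active;
        int work_color;
        int nr_in_flight[16];              /* 16 is hypothetical (WORK_NR_COLORS) */
};

enum dest { DEST_WORKLIST, DEST_INACTIVE };

/* The accounting branch of __queue_work(): charge the work to the
 * current color, then throttle against max_active. */
static enum dest pick_worklist(struct pwq *pwq)
{
        pwq->nr_in_flight[pwq->work_color]++;

        if (pwq->nr_active < pwq->max_active) {
                pwq->nr_active++;
                return DEST_WORKLIST;      /* runnable now: pool->worklist */
        }
        return DEST_INACTIVE;              /* throttled: pwq->inactive_works */
}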
2133 struct pool_workqueue *pwq = get_work_pwq(work); in send_mayday() local
2134 struct workqueue_struct *wq = pwq->wq; in send_mayday()
2142 if (list_empty(&pwq->mayday_node)) { in send_mayday()
2148 get_pwq(pwq); in send_mayday()
2149 list_add_tail(&pwq->mayday_node, &wq->maydays); in send_mayday()
2286 struct pool_workqueue *pwq = get_work_pwq(work); in process_one_work() local
2288 bool cpu_intensive = pwq->wq->flags & WQ_CPU_INTENSIVE; in process_one_work()
2324 worker->current_pwq = pwq; in process_one_work()
2332 strscpy(worker->desc, pwq->wq->name, WORKER_DESC_LEN); in process_one_work()
2365 lock_map_acquire(&pwq->wq->lockdep_map); in process_one_work()
2397 lock_map_release(&pwq->wq->lockdep_map); in process_one_work()
2433 pwq_dec_nr_in_flight(pwq, work_data); in process_one_work()
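process_one_work() (line 2286) brackets the callback: it publishes worker->current_pwq so flushers and the rescuer can find the execution, acquires and releases the wq's lockdep map around the callback to catch flush-from-work deadlocks, and finishes with pwq_dec_nr_in_flight(). A skeleton of that bracketing, with the lockdep annotations omitted:

#include <stddef.h>

struct pwq;
struct worker { struct pwq *current_pwq; };

/* Skeleton of process_one_work(): publish which pwq the worker is
 * executing for (flushers and show_pwq() read this), run the callback,
 * then do the completion-side accounting. */
static void process_one(struct worker *w, struct pwq *pwq,
                        void (*fn)(void *), void *arg,
                        void (*dec_nr_in_flight)(struct pwq *))
{
        w->current_pwq = pwq;
        fn(arg);                           /* the work callback itself */
        dec_nr_in_flight(pwq);             /* pwq_dec_nr_in_flight() in the kernel */
        w->current_pwq = NULL;
}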
2614 struct pool_workqueue *pwq = list_first_entry(&wq->maydays, in rescuer_thread() local
2616 struct worker_pool *pool = pwq->pool; in rescuer_thread()
2621 list_del_init(&pwq->mayday_node); in rescuer_thread()
2635 if (get_work_pwq(work) == pwq) { in rescuer_thread()
2655 if (pwq->nr_active && need_to_create_worker(pool)) { in rescuer_thread()
2661 if (wq->rescuer && list_empty(&pwq->mayday_node)) { in rescuer_thread()
2662 get_pwq(pwq); in rescuer_thread()
2663 list_add_tail(&pwq->mayday_node, &wq->maydays); in rescuer_thread()
2673 put_pwq(pwq); in rescuer_thread()
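send_mayday() (line 2133) puts a starving pwq on wq->maydays, taking a reference so the pwq cannot be released while queued; rescuer_thread() (line 2614) drains that list and, if the pwq still has active work and the pool still cannot create workers, re-arms the mayday before dropping its own reference. A model of the requeue-or-release step at lines 2655-2673:

#include <stdbool.h>

struct pwq {
        int refcnt;
        bool on_mayday_list;               /* !list_empty(&pwq->mayday_node) */
        int nr_active;
};

/* After the rescuer has processed a pwq: re-arm the mayday if the pool
 * still needs help, then drop the reference this rescue pass was holding. */
static void rescuer_done_with(struct pwq *pwq, bool pool_still_starving)
{
        if (pwq->nr_active && pool_still_starving && !pwq->on_mayday_list) {
                pwq->refcnt++;             /* get_pwq(): pin it for the mayday list */
                pwq->on_mayday_list = true;
        }
        pwq->refcnt--;                     /* put_pwq() */
}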
2772 static void insert_wq_barrier(struct pool_workqueue *pwq, in insert_wq_barrier() argument
2813 pwq->nr_in_flight[work_color]++; in insert_wq_barrier()
2817 insert_work(pwq, &barr->work, head, work_flags); in insert_wq_barrier()
2855 struct pool_workqueue *pwq; in flush_workqueue_prep_pwqs() local
2862 for_each_pwq(pwq, wq) { in flush_workqueue_prep_pwqs()
2863 struct worker_pool *pool = pwq->pool; in flush_workqueue_prep_pwqs()
2868 WARN_ON_ONCE(pwq->flush_color != -1); in flush_workqueue_prep_pwqs()
2870 if (pwq->nr_in_flight[flush_color]) { in flush_workqueue_prep_pwqs()
2871 pwq->flush_color = flush_color; in flush_workqueue_prep_pwqs()
2878 WARN_ON_ONCE(work_color != work_next_color(pwq->work_color)); in flush_workqueue_prep_pwqs()
2879 pwq->work_color = work_color; in flush_workqueue_prep_pwqs()
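flush_workqueue_prep_pwqs() (line 2855) walks every pwq once per flush step: pwqs that still have works of the flush color in flight get stamped with that color and counted toward nr_pwqs_to_flush, and every pwq's work color is advanced so newly queued works land on the next color. The per-pwq piece, sketched; NR_COLORS is again hypothetical:

#include <stdbool.h>

#define NR_COLORS 16                       /* hypothetical; WORK_NR_COLORS in the kernel */

struct pwq {
        int nr_in_flight[NR_COLORS];
        int flush_color;
        int work_color;
};

/* Per-pwq piece of flush_workqueue_prep_pwqs(): stamp the flush color on
 * pwqs that still have that color in flight, and roll the work color
 * forward so newly queued works get the next color.  Returns whether the
 * flusher must wait on this pwq. */
static bool prep_one_pwq(struct pwq *pwq, int flush_color, int work_color,
                         int *nr_pwqs_to_flush)
{
        bool wait = false;

        if (flush_color >= 0 && pwq->nr_in_flight[flush_color]) {
                pwq->flush_color = flush_color;
                (*nr_pwqs_to_flush)++;
                wait = true;
        }
        if (work_color >= 0)
                pwq->work_color = work_color;

        return wait;
}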
3063 struct pool_workqueue *pwq; in drain_workqueue() local
3079 for_each_pwq(pwq, wq) { in drain_workqueue()
3082 raw_spin_lock_irq(&pwq->pool->lock); in drain_workqueue()
3083 drained = !pwq->nr_active && list_empty(&pwq->inactive_works); in drain_workqueue()
3084 raw_spin_unlock_irq(&pwq->pool->lock); in drain_workqueue()
3109 struct pool_workqueue *pwq; in start_flush_work() local
3122 pwq = get_work_pwq(work); in start_flush_work()
3123 if (pwq) { in start_flush_work()
3124 if (unlikely(pwq->pool != pool)) in start_flush_work()
3130 pwq = worker->current_pwq; in start_flush_work()
3133 check_flush_dependency(pwq->wq, work); in start_flush_work()
3135 insert_wq_barrier(pwq, barr, work, worker); in start_flush_work()
3148 (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer)) { in start_flush_work()
3149 lock_map_acquire(&pwq->wq->lockdep_map); in start_flush_work()
3150 lock_map_release(&pwq->wq->lockdep_map); in start_flush_work()
3817 struct pool_workqueue *pwq = container_of(work, struct pool_workqueue, in pwq_unbound_release_workfn() local
3819 struct workqueue_struct *wq = pwq->wq; in pwq_unbound_release_workfn()
3820 struct worker_pool *pool = pwq->pool; in pwq_unbound_release_workfn()
3827 if (!list_empty(&pwq->pwqs_node)) { in pwq_unbound_release_workfn()
3832 list_del_rcu(&pwq->pwqs_node); in pwq_unbound_release_workfn()
3841 call_rcu(&pwq->rcu, rcu_free_pwq); in pwq_unbound_release_workfn()
3861 static void pwq_adjust_max_active(struct pool_workqueue *pwq) in pwq_adjust_max_active() argument
3863 struct workqueue_struct *wq = pwq->wq; in pwq_adjust_max_active()
3871 if (!freezable && pwq->max_active == wq->saved_max_active) in pwq_adjust_max_active()
3875 raw_spin_lock_irqsave(&pwq->pool->lock, flags); in pwq_adjust_max_active()
3885 pwq->max_active = wq->saved_max_active; in pwq_adjust_max_active()
3887 while (!list_empty(&pwq->inactive_works) && in pwq_adjust_max_active()
3888 pwq->nr_active < pwq->max_active) { in pwq_adjust_max_active()
3889 pwq_activate_first_inactive(pwq); in pwq_adjust_max_active()
3900 wake_up_worker(pwq->pool); in pwq_adjust_max_active()
3902 pwq->max_active = 0; in pwq_adjust_max_active()
3905 raw_spin_unlock_irqrestore(&pwq->pool->lock, flags); in pwq_adjust_max_active()
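pwq_adjust_max_active() (line 3861) is the single knob behind workqueue_set_max_active(), freezing and thawing: on a frozen freezable workqueue the effective max_active becomes 0 so nothing new starts, otherwise it is restored to wq->saved_max_active and the backlog on inactive_works is activated up to the new limit. A sketch of the core, with activate_first() standing in for pwq_activate_first_inactive():

#include <stdbool.h>

struct pwq {
        int max_active;
        int nr_active;
        int nr_inactive;                   /* length of inactive_works here */
};

/* Core of pwq_adjust_max_active(): pick the effective limit, then drain
 * the inactive backlog up to it.  activate_first() stands in for
 * pwq_activate_first_inactive() and must bump nr_active itself. */
static void adjust_max_active(struct pwq *pwq, int saved_max_active,
                              bool frozen,
                              void (*activate_first)(struct pwq *))
{
        if (frozen) {
                pwq->max_active = 0;       /* freezable wq on a frozen system */
                return;
        }
        pwq->max_active = saved_max_active;
        while (pwq->nr_inactive && pwq->nr_active < pwq->max_active)
                activate_first(pwq);
}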
3909 static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq, in init_pwq() argument
3912 BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK); in init_pwq()
3914 memset(pwq, 0, sizeof(*pwq)); in init_pwq()
3916 pwq->pool = pool; in init_pwq()
3917 pwq->wq = wq; in init_pwq()
3918 pwq->flush_color = -1; in init_pwq()
3919 pwq->refcnt = 1; in init_pwq()
3920 INIT_LIST_HEAD(&pwq->inactive_works); in init_pwq()
3921 INIT_LIST_HEAD(&pwq->pwqs_node); in init_pwq()
3922 INIT_LIST_HEAD(&pwq->mayday_node); in init_pwq()
3923 INIT_WORK(&pwq->unbound_release_work, pwq_unbound_release_workfn); in init_pwq()
3927 static void link_pwq(struct pool_workqueue *pwq) in link_pwq() argument
3929 struct workqueue_struct *wq = pwq->wq; in link_pwq()
3934 if (!list_empty(&pwq->pwqs_node)) in link_pwq()
3938 pwq->work_color = wq->work_color; in link_pwq()
3941 pwq_adjust_max_active(pwq); in link_pwq()
3944 list_add_rcu(&pwq->pwqs_node, &wq->pwqs); in link_pwq()
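init_pwq() (line 3909) starts every pwq with refcnt 1 (the creator's reference) and flush_color -1 (no flush in progress); link_pwq() (line 3927) is idempotent thanks to the list_empty(&pwq->pwqs_node) check at line 3934, and it also syncs the pwq's work color with the workqueue's before publishing it. A compressed model of both:

#include <stdbool.h>
#include <string.h>

struct pwq {
        int refcnt;
        int flush_color;
        int work_color;
        bool linked;                       /* !list_empty(&pwq->pwqs_node) */
};

static void init_pwq_model(struct pwq *pwq)
{
        memset(pwq, 0, sizeof(*pwq));
        pwq->refcnt = 1;                   /* the creator's reference */
        pwq->flush_color = -1;             /* no flush in progress */
}

/* link_pwq() may run twice on the same pwq; the linked check makes the
 * second call a no-op.  Linking also syncs the pwq to the workqueue's
 * current work color before the pwq becomes visible to flushers. */
static void link_pwq_model(struct pwq *pwq, int wq_work_color)
{
        if (pwq->linked)
                return;
        pwq->work_color = wq_work_color;
        pwq->linked = true;
}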
3952 struct pool_workqueue *pwq; in alloc_unbound_pwq() local
3960 pwq = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node); in alloc_unbound_pwq()
3961 if (!pwq) { in alloc_unbound_pwq()
3966 init_pwq(pwq, wq, pool); in alloc_unbound_pwq()
3967 return pwq; in alloc_unbound_pwq()
4025 struct pool_workqueue *pwq) in numa_pwq_tbl_install() argument
4033 link_pwq(pwq); in numa_pwq_tbl_install()
4036 rcu_assign_pointer(wq->numa_pwq_tbl[node], pwq); in numa_pwq_tbl_install()
4260 struct pool_workqueue *old_pwq = NULL, *pwq; in wq_update_unbound_numa() local
4279 pwq = unbound_pwq_by_node(wq, node); in wq_update_unbound_numa()
4288 if (cpumask_equal(cpumask, pwq->pool->attrs->cpumask)) in wq_update_unbound_numa()
4295 pwq = alloc_unbound_pwq(wq, target_attrs); in wq_update_unbound_numa()
4296 if (!pwq) { in wq_update_unbound_numa()
4304 old_pwq = numa_pwq_tbl_install(wq, node, pwq); in wq_update_unbound_numa()
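numa_pwq_tbl_install() (line 4025) publishes the new pwq for a node with rcu_assign_pointer() and returns the old one, so wq_update_unbound_numa() can drop the displaced pwq's reference with put_pwq_unlocked() once it is out of the update path. The swap-and-return pattern in miniature, without the RCU publication semantics:

struct pwq;

/* The swap in numa_pwq_tbl_install(): publish the new pwq for the node
 * (rcu_assign_pointer() in the kernel) and hand the old one back so the
 * caller can drop its reference with put_pwq_unlocked(). */
static struct pwq *numa_tbl_install(struct pwq **slot, struct pwq *new_pwq)
{
        struct pwq *old = *slot;

        *slot = new_pwq;
        return old;
}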
4329 struct pool_workqueue *pwq = in alloc_and_link_pwqs() local
4334 init_pwq(pwq, wq, &cpu_pools[highpri]); in alloc_and_link_pwqs()
4337 link_pwq(pwq); in alloc_and_link_pwqs()
4409 struct pool_workqueue *pwq; in alloc_workqueue() local
4476 for_each_pwq(pwq, wq) in alloc_workqueue()
4477 pwq_adjust_max_active(pwq); in alloc_workqueue()
4499 static bool pwq_busy(struct pool_workqueue *pwq) in pwq_busy() argument
4504 if (pwq->nr_in_flight[i]) in pwq_busy()
4507 if ((pwq != pwq->wq->dfl_pwq) && (pwq->refcnt > 1)) in pwq_busy()
4509 if (pwq->nr_active || !list_empty(&pwq->inactive_works)) in pwq_busy()
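pwq_busy() (line 4499) is the destroy-time sanity check: a pwq still counts as busy if any flush color has works in flight, if anything is active or parked on inactive_works, or if someone other than the workqueue itself holds a reference; the default pwq legitimately keeps one extra reference, hence the refcnt > 1 exception at line 4507. Sketched, with NR_COLORS again hypothetical:

#include <stdbool.h>

#define NR_COLORS 16                       /* hypothetical; WORK_NR_COLORS in the kernel */

struct pwq {
        int nr_in_flight[NR_COLORS];
        int refcnt;
        int nr_active;
        int nr_inactive;
        bool is_dfl_pwq;                   /* pwq == pwq->wq->dfl_pwq */
};

/* Any true condition here means destroy_workqueue() was called while the
 * workqueue still had pending work or external references. */
static bool pwq_busy_model(const struct pwq *pwq)
{
        for (int i = 0; i < NR_COLORS; i++)
                if (pwq->nr_in_flight[i])
                        return true;
        if (!pwq->is_dfl_pwq && pwq->refcnt > 1)
                return true;
        return pwq->nr_active || pwq->nr_inactive;
}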
4523 struct pool_workqueue *pwq; in destroy_workqueue() local
4560 for_each_pwq(pwq, wq) { in destroy_workqueue()
4561 raw_spin_lock_irq(&pwq->pool->lock); in destroy_workqueue()
4562 if (WARN_ON(pwq_busy(pwq))) { in destroy_workqueue()
4565 show_pwq(pwq); in destroy_workqueue()
4566 raw_spin_unlock_irq(&pwq->pool->lock); in destroy_workqueue()
4572 raw_spin_unlock_irq(&pwq->pool->lock); in destroy_workqueue()
4597 pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]); in destroy_workqueue()
4599 put_pwq_unlocked(pwq); in destroy_workqueue()
4606 pwq = wq->dfl_pwq; in destroy_workqueue()
4608 put_pwq_unlocked(pwq); in destroy_workqueue()
4625 struct pool_workqueue *pwq; in workqueue_set_max_active() local
4638 for_each_pwq(pwq, wq) in workqueue_set_max_active()
4639 pwq_adjust_max_active(pwq); in workqueue_set_max_active()
4696 struct pool_workqueue *pwq; in workqueue_congested() local
4706 pwq = per_cpu_ptr(wq->cpu_pwqs, cpu); in workqueue_congested()
4708 pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu)); in workqueue_congested()
4710 ret = !list_empty(&pwq->inactive_works); in workqueue_congested()
4793 struct pool_workqueue *pwq = NULL; in print_worker_info() local
4811 copy_from_kernel_nofault(&pwq, &worker->current_pwq, sizeof(pwq)); in print_worker_info()
4812 copy_from_kernel_nofault(&wq, &pwq->wq, sizeof(wq)); in print_worker_info()
4876 static void show_pwq(struct pool_workqueue *pwq) in show_pwq() argument
4879 struct worker_pool *pool = pwq->pool; in show_pwq()
4889 pwq->nr_active, pwq->max_active, pwq->refcnt, in show_pwq()
4890 !list_empty(&pwq->mayday_node) ? " MAYDAY" : ""); in show_pwq()
4893 if (worker->current_pwq == pwq) { in show_pwq()
4903 if (worker->current_pwq != pwq) in show_pwq()
4919 if (get_work_pwq(work) == pwq) { in show_pwq()
4929 if (get_work_pwq(work) != pwq) in show_pwq()
4939 if (!list_empty(&pwq->inactive_works)) { in show_pwq()
4943 list_for_each_entry(work, &pwq->inactive_works, entry) { in show_pwq()
4958 struct pool_workqueue *pwq; in show_one_workqueue() local
4962 for_each_pwq(pwq, wq) { in show_one_workqueue()
4963 if (pwq->nr_active || !list_empty(&pwq->inactive_works)) { in show_one_workqueue()
4973 for_each_pwq(pwq, wq) { in show_one_workqueue()
4974 raw_spin_lock_irqsave(&pwq->pool->lock, flags); in show_one_workqueue()
4975 if (pwq->nr_active || !list_empty(&pwq->inactive_works)) { in show_one_workqueue()
4982 show_pwq(pwq); in show_one_workqueue()
4985 raw_spin_unlock_irqrestore(&pwq->pool->lock, flags); in show_one_workqueue()
5389 struct pool_workqueue *pwq; in freeze_workqueues_begin() local
5398 for_each_pwq(pwq, wq) in freeze_workqueues_begin()
5399 pwq_adjust_max_active(pwq); in freeze_workqueues_begin()
5423 struct pool_workqueue *pwq; in freeze_workqueues_busy() local
5437 for_each_pwq(pwq, wq) { in freeze_workqueues_busy()
5438 WARN_ON_ONCE(pwq->nr_active < 0); in freeze_workqueues_busy()
5439 if (pwq->nr_active) { in freeze_workqueues_busy()
5464 struct pool_workqueue *pwq; in thaw_workqueues() local
5476 for_each_pwq(pwq, wq) in thaw_workqueues()
5477 pwq_adjust_max_active(pwq); in thaw_workqueues()
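freeze_workqueues_begin() and thaw_workqueues() both just re-run pwq_adjust_max_active() over every pwq of every workqueue, so freezing clamps max_active to 0 and thawing restores it and releases the accumulated backlog; in between, freeze_workqueues_busy() polls nr_active to tell when in-flight works have drained. The busy poll, in miniature:

#include <stdbool.h>

struct pwq { int nr_active; };

/* freeze_workqueues_busy() in miniature: once max_active has been
 * clamped to 0, freezing is complete when no pwq still has works
 * executing or activated. */
static bool any_pwq_still_busy(const struct pwq *pwqs, int n)
{
        for (int i = 0; i < n; i++)
                if (pwqs[i].nr_active)
                        return true;
        return false;
}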