| /kernel/sched/ |
| swait.c |
    10   raw_spin_lock_init(&q->lock);  in __init_swait_queue_head()
    12   INIT_LIST_HEAD(&q->task_list);  in __init_swait_queue_head()
    26   if (list_empty(&q->task_list))  in swake_up_locked()
    45   swake_up_locked(q, 0);  in swake_up_all_locked()
    53   swake_up_locked(q, 0);  in swake_up_one()
    67   raw_spin_lock_irq(&q->lock);  in swake_up_all()
    78   raw_spin_unlock_irq(&q->lock);  in swake_up_all()
    79   raw_spin_lock_irq(&q->lock);  in swake_up_all()
    81   raw_spin_unlock_irq(&q->lock);  in swake_up_all()
    97   __prepare_to_swait(q, wait);  in prepare_to_swait_exclusive()
    [all …]
|
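The swait.c hits above trace the simple wait queue primitives: waiters are linked onto q->task_list under q->lock, and the wake paths wake only the first entry after a list_empty() check. Below is a minimal userspace analog of that wake-one pattern, not the kernel implementation: a pthread mutex stands in for the raw spinlock, a per-waiter condition variable stands in for the scheduler wakeup, and all structure and function names are invented for the sketch.

```c
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

/* Userspace stand-ins for the swait queue head and a queued waiter. */
struct waiter {
	struct waiter *next;
	pthread_cond_t cond;
	int woken;
};

struct wait_head {
	pthread_mutex_t lock;		/* plays the role of q->lock */
	struct waiter *first, *last;	/* plays the role of q->task_list */
};

static void wait_on(struct wait_head *q)
{
	struct waiter w = { .woken = 0 };

	pthread_cond_init(&w.cond, NULL);
	pthread_mutex_lock(&q->lock);
	if (q->last)			/* append ourselves to the list */
		q->last->next = &w;
	else
		q->first = &w;
	q->last = &w;
	while (!w.woken)		/* guard against spurious wakeups */
		pthread_cond_wait(&w.cond, &q->lock);
	pthread_mutex_unlock(&q->lock);
	pthread_cond_destroy(&w.cond);
}

static void wake_up_one(struct wait_head *q)
{
	pthread_mutex_lock(&q->lock);
	if (q->first) {			/* the list_empty() check above */
		struct waiter *w = q->first;

		q->first = w->next;
		if (!q->first)
			q->last = NULL;
		w->woken = 1;
		pthread_cond_signal(&w->cond);
	}
	pthread_mutex_unlock(&q->lock);
}

static void *waiter_thread(void *arg)
{
	wait_on(arg);
	printf("thread %lu woken\n", (unsigned long)pthread_self());
	return NULL;
}

int main(void)
{
	struct wait_head q = { .first = NULL, .last = NULL };
	pthread_t t[2];

	pthread_mutex_init(&q.lock, NULL);
	for (int i = 0; i < 2; i++)
		pthread_create(&t[i], NULL, waiter_thread, &q);
	sleep(1);			/* crude: let both waiters enqueue */
	wake_up_one(&q);		/* wakes exactly one waiter */
	wake_up_one(&q);		/* wakes the other */
	for (int i = 0; i < 2; i++)
		pthread_join(t[i], NULL);
	return 0;
}
```

Like the kernel helper, wake_up_one() is a no-op when no waiter is queued, which is why the sketch waits before waking; build with -pthread.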
| /kernel/futex/ |
| requeue.c |
    96    q->key = *key2;  in requeue_futex()
    128   q->pi_state = pi_state;  in futex_requeue_pi_prepare()
    233   q->key = *key;  in requeue_pi_wake_futex()
    235   __futex_unqueue(q);  in requeue_pi_wake_futex()
    237   WARN_ON(!q->rt_waiter);  in requeue_pi_wake_futex()
    238   q->rt_waiter = NULL;  in requeue_pi_wake_futex()
    244   q->drop_hb_ref = true;  in requeue_pi_wake_futex()
    810   q.bitset = bitset;  in futex_wait_requeue_pi()
    823   futex_do_wait(&q, to);  in futex_wait_requeue_pi()
    838   if (q.pi_state && (q.pi_state->owner != current)) {  in futex_wait_requeue_pi()
    [all …]
|
| pi.c |
    792    spin_unlock(q->lock_ptr);  in __fixup_pi_state_owner()
    809    futex_q_lockptr_lock(q);  in __fixup_pi_state_owner()
    942    CLASS(hb, hb)(&q.key);  in futex_lock_pi()
    944    futex_q_lock(&q, hb);  in futex_lock_pi()
    946    ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current,  in futex_lock_pi()
    982    WARN_ON(!q.pi_state);  in futex_lock_pi()
    1028   spin_unlock(q.lock_ptr);  in futex_lock_pi()
    1093   futex_unqueue_pi(&q);  in futex_lock_pi()
    1094   spin_unlock(q.lock_ptr);  in futex_lock_pi()
    1095   if (q.drop_hb_ref) {  in futex_lock_pi()
    [all …]
|
| waitwake.c |
    112   if (WARN(q->pi_state || q->rt_waiter, "refusing to wake PI futex\n"))  in __futex_wake_mark()
    115   __futex_unqueue(q);  in __futex_wake_mark()
    140   if (!__futex_wake_mark(q)) {  in futex_wake_mark()
    445   struct futex_q *q = &vs[i].q;  in futex_wait_multiple_setup()  local
    449   CLASS(hb, hb)(&q->key);  in futex_wait_multiple_setup()
    451   futex_q_lock(q, hb);  in futex_wait_multiple_setup()
    623   CLASS(hb, hb)(&q->key);  in futex_wait_setup()
    625   futex_q_lock(q, hb);  in futex_wait_setup()
    675   q.bitset = bitset;  in __futex_wait()
    687   futex_do_wait(&q, to);  in __futex_wait()
    [all …]
|
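The futex_q plumbing in the hits above (futex_q_lock(), futex_do_wait(), __futex_wake_mark() and friends) is what services the futex(2) system call. As a reminder of the userspace contract those paths implement, here is a minimal wait/wake pair; it assumes only the standard FUTEX_WAIT/FUTEX_WAKE operations and must be built with -pthread.

```c
#define _GNU_SOURCE
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t futex_word;

static long futex(uint32_t *uaddr, int op, uint32_t val)
{
	/* No timeout, no second futex word, no bitset. */
	return syscall(SYS_futex, uaddr, op, val, NULL, NULL, 0);
}

static void *waiter(void *arg)
{
	/*
	 * FUTEX_WAIT sleeps only while the word still holds the expected
	 * value 0; re-checking in a loop handles EAGAIN and spurious
	 * returns, and makes the wait/wake pair race-free.
	 */
	while (__atomic_load_n(&futex_word, __ATOMIC_ACQUIRE) == 0)
		futex(&futex_word, FUTEX_WAIT, 0);
	printf("waiter: woken, futex_word=%u\n", futex_word);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, waiter, NULL);
	sleep(1);

	/* Publish the new value, then wake one task queued on the word. */
	__atomic_store_n(&futex_word, 1, __ATOMIC_RELEASE);
	futex(&futex_word, FUTEX_WAKE, 1);

	pthread_join(t, NULL);
	return 0;
}
```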
| futex.h |
    163   typedef void (futex_wake_fn)(struct wake_q_head *wake_q, struct futex_q *q);
    220   extern void futex_q_lockptr_lock(struct futex_q *q);
    264   struct futex_q *q, union futex_key *key2,
    267   extern bool __futex_wake_mark(struct futex_q *q);
    343   extern void __futex_unqueue(struct futex_q *q);
    344   extern void __futex_queue(struct futex_q *q, struct futex_hash_bucket *hb,
    346   extern int futex_unqueue(struct futex_q *q);
    367   __futex_queue(q, hb, task);  in futex_queue()
    371   extern void futex_unqueue_pi(struct futex_q *q);
    413   extern void futex_q_lock(struct futex_q *q, struct futex_hash_bucket *hb);
    [all …]
|
| core.c |
    856    if (WARN_ON_SMP(!q->lock_ptr) || WARN_ON(plist_node_empty(&q->list)))  in __futex_unqueue()
    861    plist_del(&q->list, &hb->chain);  in __futex_unqueue()
    879    q->lock_ptr = &hb->lock;  in futex_q_lock()
    906    plist_node_init(&q->list, prio);  in __futex_queue()
    908    q->task = task;  in __futex_queue()
    956    __futex_unqueue(q);  in futex_unqueue()
    958    BUG_ON(q->pi_state);  in futex_unqueue()
    1000   __futex_unqueue(q);  in futex_unqueue_pi()
    1002   BUG_ON(!q->pi_state);  in futex_unqueue_pi()
    1003   put_pi_state(q->pi_state);  in futex_unqueue_pi()
    [all …]
|
| syscalls.c |
    219   futexv[i].q = futex_q_init;  in futex_parse_waitv()
    220   futexv[i].q.wake = wake;  in futex_parse_waitv()
    221   futexv[i].q.wake_data = wake_data;  in futex_parse_waitv()
|
| /kernel/trace/ |
| blktrace.c |
    384    blk_trace_free(q, bt);  in blk_trace_cleanup()
    397    blk_trace_cleanup(q, bt);  in __blk_trace_remove()
    407    ret = __blk_trace_remove(q);  in blk_trace_remove()
    549    dir = q->debugfs_dir;  in do_blk_trace_setup()
    598    blk_trace_free(q, bt);  in do_blk_trace_setup()
    620    blk_trace_remove(q);  in blk_trace_setup()
    655    blk_trace_remove(q);  in compat_blk_trace_setup()
    727    ret = blk_trace_remove(q);  in blk_trace_ioctl()
    745    __blk_trace_remove(q);  in blk_trace_shutdown()
    1580   blk_trace_free(q, bt);  in blk_trace_remove_queue()
    [all …]
|
| trace_boot.c |
    564   char *q;  in trace_boot_set_ftrace_filter()  local
    567   q = kstrdup(p, GFP_KERNEL);  in trace_boot_set_ftrace_filter()
    568   if (!q)  in trace_boot_set_ftrace_filter()
    570   if (ftrace_set_filter(tr->ops, q, strlen(q), 0) < 0)  in trace_boot_set_ftrace_filter()
    574   kfree(q);  in trace_boot_set_ftrace_filter()
    577   q = kstrdup(p, GFP_KERNEL);  in trace_boot_set_ftrace_filter()
    578   if (!q)  in trace_boot_set_ftrace_filter()
    580   if (ftrace_set_notrace(tr->ops, q, strlen(q), 0) < 0)  in trace_boot_set_ftrace_filter()
    584   kfree(q);  in trace_boot_set_ftrace_filter()
|
| trace_events_inject.c |
    105   char q = str[i];  in parse_field()  local
    116   if (str[i] == q)  in parse_field()
|
| trace_events_filter.c |
    1643   char q;  in parse_pred()  local
    1802   q = str[i];  in parse_pred()
    1804   q = 0;  in parse_pred()
    1807   if (q && str[i] == q)  in parse_pred()
    1809   if (!q && (str[i] == ')' || str[i] == '&' ||  in parse_pred()
    1814   if (q)  in parse_pred()
    1949   char q = str[i];  in parse_pred()  local
    1971   if (str[i] == q)  in parse_pred()
|
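trace_events_inject.c and trace_events_filter.c above use the same tokenizing idiom: stash the opening quote character in q, then scan until the matching character, or, when the value is unquoted, until a terminator. A standalone sketch of that step; scan_value(), the terminator set and the demo strings are invented, only the quote-handling shape comes from the listing.

```c
#include <stdio.h>
#include <stddef.h>

/*
 * Extract one value token.  A value is either bare (ended by ')', '&'
 * or '|') or wrapped in single or double quotes; the opening quote is
 * remembered in 'q' and scanning stops at the matching close quote.
 * Returns the number of characters consumed, or -1 on an unterminated
 * quoted value.
 */
static int scan_value(const char *str, char *out, size_t outlen)
{
	size_t i = 0, n = 0;
	char q = 0;

	if (str[0] == '"' || str[0] == '\'') {
		q = str[0];
		i = 1;
	}

	for (; str[i]; i++) {
		if (q && str[i] == q) {
			out[n] = '\0';
			return (int)(i + 1);	/* consumed through the close quote */
		}
		if (!q && (str[i] == ')' || str[i] == '&' || str[i] == '|'))
			break;
		if (n + 1 < outlen)
			out[n++] = str[i];
	}
	out[n] = '\0';
	return q ? -1 : (int)i;
}

int main(void)
{
	char buf[64];

	if (scan_value("\"hello world\") && next", buf, sizeof(buf)) > 0)
		printf("quoted:   '%s'\n", buf);
	if (scan_value("42) || other", buf, sizeof(buf)) > 0)
		printf("unquoted: '%s'\n", buf);
	return 0;
}
```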
| /kernel/ |
| latencytop.c |
    122   int q, same = 1;  in account_global_scheduler_latency()  local
    130   for (q = 0; q < LT_BACKTRACEDEPTH; q++) {  in account_global_scheduler_latency()
    131   unsigned long record = lat->backtrace[q];  in account_global_scheduler_latency()
    133   if (latency_record[i].backtrace[q] != record) {  in account_global_scheduler_latency()
    179   int i, q;  in __account_scheduler_latency()  local
    207   for (q = 0; q < LT_BACKTRACEDEPTH; q++) {  in __account_scheduler_latency()
    208   unsigned long record = lat.backtrace[q];  in __account_scheduler_latency()
    210   if (mylat->backtrace[q] != record) {  in __account_scheduler_latency()
    252   int q;  in lstats_show()  local
    255   for (q = 0; q < LT_BACKTRACEDEPTH; q++) {  in lstats_show()
    [all …]
|
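The q loops in latencytop.c above walk two saved backtraces in lockstep to decide whether a new latency sample can be merged into an existing record. A compact userspace rendering of that comparison; the depth value, same_backtrace() and the sample traces are illustrative, and a zero entry is treated as the end of a shorter trace.

```c
#include <stdbool.h>
#include <stdio.h>

#define LT_BACKTRACEDEPTH 12	/* depth chosen for the sketch */

/*
 * Two latency records count as "the same" when their saved backtraces
 * match entry for entry; a zero entry ends a shorter trace early.
 */
static bool same_backtrace(const unsigned long *a, const unsigned long *b)
{
	for (int q = 0; q < LT_BACKTRACEDEPTH; q++) {
		if (a[q] != b[q])
			return false;
		if (!a[q])		/* both traces end here */
			break;
	}
	return true;
}

int main(void)
{
	unsigned long t1[LT_BACKTRACEDEPTH] = { 0x1111, 0x2222, 0x3333 };
	unsigned long t2[LT_BACKTRACEDEPTH] = { 0x1111, 0x2222, 0x3333 };
	unsigned long t3[LT_BACKTRACEDEPTH] = { 0x1111, 0x9999 };

	printf("t1 vs t2: %s\n", same_backtrace(t1, t2) ? "same" : "different");
	printf("t1 vs t3: %s\n", same_backtrace(t1, t3) ? "same" : "different");
	return 0;
}
```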
| signal.c |
    450    struct sigqueue *q;  in sigqueue_alloc()  local
    456    if (!q) {  in sigqueue_alloc()
    462    return q;  in sigqueue_alloc()
    471    if (q->ucounts) {  in __sigqueue_free()
    473    q->ucounts = NULL;  in __sigqueue_free()
    480    struct sigqueue *q;  in flush_sigqueue()  local
    486    __sigqueue_free(q);  in flush_sigqueue()
    566    first = q;  in collect_signal()
    687    sync = q;  in dequeue_synchronous_signal()
    1089   if (q) {  in __send_signal_locked()
    [all …]
|
| watch_queue.c |
    320   struct watch_type_filter *q;  in watch_queue_set_filter()  local
    366   q = wfilter->filters;  in watch_queue_set_filter()
    371   q->type = tf[i].type;  in watch_queue_set_filter()
    372   q->info_filter = tf[i].info_filter;  in watch_queue_set_filter()
    373   q->info_mask = tf[i].info_mask;  in watch_queue_set_filter()
    374   q->subtype_filter[0] = tf[i].subtype_filter[0];  in watch_queue_set_filter()
    375   __set_bit(q->type, wfilter->type_filter);  in watch_queue_set_filter()
    376   q++;  in watch_queue_set_filter()
|
| crash_reserve.c |
    229   char *q;  in get_last_crashkernel()  local
    239   q = end_p - strlen(suffix_tbl[i]);  in get_last_crashkernel()
    240   if (!strncmp(q, suffix_tbl[i],  in get_last_crashkernel()
    246   q = end_p - strlen(suffix);  in get_last_crashkernel()
    247   if (!strncmp(q, suffix, strlen(suffix)))  in get_last_crashkernel()
|
| auditfilter.c |
    1080   static void audit_list_rules(int seq, struct sk_buff_head *q)  in audit_list_rules()  argument
    1099   skb_queue_tail(q, skb);  in audit_list_rules()
    1105   skb_queue_tail(q, skb);  in audit_list_rules()
    1189   skb_queue_head_init(&dest->q);  in audit_list_rules_send()
    1192   audit_list_rules(seq, &dest->q);  in audit_list_rules_send()
    1197   skb_queue_purge(&dest->q);  in audit_list_rules_send()
|
| auditsc.c |
    274   struct audit_tree_refs *q;  in unroll_tree_refs()  local
    286   for (q = p; q != ctx->trees; q = q->next, n = 31) {  in unroll_tree_refs()
    288   audit_put_chunk(q->c[n]);  in unroll_tree_refs()
    289   q->c[n] = NULL;  in unroll_tree_refs()
    293   audit_put_chunk(q->c[n]);  in unroll_tree_refs()
    294   q->c[n] = NULL;  in unroll_tree_refs()
    302   struct audit_tree_refs *p, *q;  in free_tree_refs()  local
    304   for (p = ctx->first_trees; p; p = q) {  in free_tree_refs()
    305   q = p->next;  in free_tree_refs()
|
| audit_tree.c |
    611   struct list_head *p, *q;  in trim_marked()  local
    618   for (p = tree->chunks.next; p != &tree->chunks; p = q) {  in trim_marked()
    620   q = p->next;  in trim_marked()
|
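free_tree_refs() and trim_marked() above both rely on the same safe-iteration idiom: read the next pointer into q before the current node may be freed or unlinked, so the traversal survives the modification. A standalone illustration; struct node and free_all() are invented for the sketch.

```c
#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
	int value;
};

/* Free every node: fetch p->next into q before free() invalidates p. */
static void free_all(struct node *head)
{
	struct node *p, *q;

	for (p = head; p; p = q) {
		q = p->next;		/* saved before p is gone */
		printf("freeing %d\n", p->value);
		free(p);
	}
}

int main(void)
{
	struct node *head = NULL;

	/* Build a small list 2 -> 1 -> 0. */
	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));

		n->value = i;
		n->next = head;
		head = n;
	}
	free_all(head);
	return 0;
}
```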
| ptrace.c |
    715   struct sigqueue *q;  in ptrace_peek_siginfo()  local
    744   list_for_each_entry(q, &pending->list, list) {  in ptrace_peek_siginfo()
    747   copy_siginfo(&info, &q->info);  in ptrace_peek_siginfo()
|
| audit.h |
    244   struct sk_buff_head q;  member
|
| /kernel/cgroup/ |
| pids.c |
    168   struct pids_cgroup *p, *q;  in pids_try_charge()  local
    193   for (q = pids; q != p; q = parent_pids(q))  in pids_try_charge()
    194   pids_cancel(q, num);  in pids_try_charge()
|
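The pids_try_charge() hits above show the rollback half of a charge-up-the-hierarchy pattern: if charging some ancestor fails, walk the same chain again, stopping at the level that failed, and undo the partial charges. A hypothetical userspace miniature of that control flow; struct counter, try_charge() and the limits are invented, only the "for (q = ...; q != p; ...)" rollback shape comes from the listing.

```c
#include <stdio.h>
#include <stddef.h>

struct counter {
	struct counter *parent;
	long count;
	long limit;
};

static int try_charge(struct counter *c, long num)
{
	struct counter *p, *q;

	/* Charge every level up to the root, checking the limit as we go. */
	for (p = c; p; p = p->parent) {
		p->count += num;
		if (p->count > p->limit)
			goto revert;
	}
	return 0;

revert:
	p->count -= num;		/* undo the level that went over */
	for (q = c; q != p; q = q->parent)
		q->count -= num;	/* undo everything charged below it */
	return -1;
}

int main(void)
{
	struct counter root  = { .parent = NULL,  .limit = 100 };
	struct counter child = { .parent = &root, .limit = 3 };

	printf("charge 2 -> %d\n", try_charge(&child, 2));	/* fits    */
	printf("charge 2 -> %d\n", try_charge(&child, 2));	/* exceeds */
	printf("child=%ld root=%ld (unchanged by the failed charge)\n",
	       child.count, root.count);
	return 0;
}
```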
| cpuset-v1.c |
    341   static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)  in is_cpuset_subset()  argument
    343   return cpumask_subset(p->cpus_allowed, q->cpus_allowed) &&  in is_cpuset_subset()
    344   nodes_subset(p->mems_allowed, q->mems_allowed) &&  in is_cpuset_subset()
    345   is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&  in is_cpuset_subset()
    346   is_mem_exclusive(p) <= is_mem_exclusive(q);  in is_cpuset_subset()
|
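The two `<=` comparisons in is_cpuset_subset() above are boolean implication spelled with integers: for 0/1 flags, `a <= b` is exactly "a implies b", so p only counts as a subset of q if p being exclusive implies q is exclusive too. A tiny demonstration of the trick (the loop and output are just a truth table):

```c
#include <stdio.h>

int main(void)
{
	/* For 0/1 values, "a <= b" has the truth table of "a implies b". */
	for (int a = 0; a <= 1; a++)
		for (int b = 0; b <= 1; b++)
			printf("a=%d b=%d  a<=b -> %d\n", a, b, a <= b);
	return 0;
}
```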
| /kernel/bpf/ |
| cpumap.c |
    51    void *q[CPU_MAP_BULK_SIZE];  member
    717   struct ptr_ring *q;  in bq_flush_to_queue()  local
    723   q = rcpu->queue;  in bq_flush_to_queue()
    724   spin_lock(&q->producer_lock);  in bq_flush_to_queue()
    727   struct xdp_frame *xdpf = bq->q[i];  in bq_flush_to_queue()
    730   err = __ptr_ring_produce(q, xdpf);  in bq_flush_to_queue()
    738   spin_unlock(&q->producer_lock);  in bq_flush_to_queue()
    765   bq->q[bq->count++] = xdpf;  in bq_enqueue()
|
| devmap.c |
    57    struct xdp_frame *q[DEV_MAP_BULK_SIZE];  member
    388   struct xdp_frame *xdpf = bq->q[i];  in bq_xmit_all()
    394   to_send = dev_map_bpf_prog_run(bq->xdp_prog, bq->q, cnt, dev, bq->dev_rx);  in bq_xmit_all()
    399   sent = dev->netdev_ops->ndo_xdp_xmit(dev, to_send, bq->q, flags);  in bq_xmit_all()
    412   xdp_return_frame_rx_napi(bq->q[i]);  in bq_xmit_all()
    479   bq->q[bq->count++] = xdpf;  in bq_enqueue()
|
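cpumap.c and devmap.c above share a batching pattern: bq_enqueue() only appends frame pointers to a small per-producer array q, and the expensive shared operation (taking the ptr_ring producer lock, or calling ndo_xdp_xmit()) is paid once per batch in the flush path. A userspace miniature of that batching, modeled loosely on the bq_enqueue()/flush pair in the listing; BULK_SIZE, consume() and the frame strings are invented.

```c
#include <stdio.h>
#include <stddef.h>

#define BULK_SIZE 8	/* stands in for CPU_MAP_BULK_SIZE / DEV_MAP_BULK_SIZE */

struct bulk_queue {
	void *q[BULK_SIZE];
	int count;
};

/* Stand-in for the per-batch work: __ptr_ring_produce() or ndo_xdp_xmit(). */
static void consume(void *item)
{
	printf("flushed %s\n", (const char *)item);
}

static void bq_flush(struct bulk_queue *bq)
{
	/* In the kernel, the producer lock is taken once for the whole batch. */
	for (int i = 0; i < bq->count; i++)
		consume(bq->q[i]);
	bq->count = 0;
}

static void bq_enqueue(struct bulk_queue *bq, void *item)
{
	if (bq->count == BULK_SIZE)	/* array full: pay the batch cost now */
		bq_flush(bq);
	bq->q[bq->count++] = item;
}

int main(void)
{
	struct bulk_queue bq = { .count = 0 };
	char name[20][16];

	for (int i = 0; i < 20; i++) {
		snprintf(name[i], sizeof(name[i]), "frame-%d", i);
		bq_enqueue(&bq, name[i]);
	}
	bq_flush(&bq);			/* drain whatever is still buffered */
	return 0;
}
```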
| /kernel/time/ |
| timeconst.bc |
    55   print "#error \qinclude/generated/timeconst.h has the wrong HZ value!\q\n"
|