/kernel/bpf/
tnum.c
    66   sm = a.mask + b.mask;   in tnum_add()
    70   mu = chi | a.mask | b.mask;   in tnum_add()
    79   alpha = dv + a.mask;   in tnum_sub()
    80   beta = dv - b.mask;   in tnum_sub()
    82   mu = chi | a.mask | b.mask;   in tnum_sub()
    106  mu = a.mask | b.mask;   in tnum_or()
    115  mu = a.mask | b.mask;   in tnum_xor()
    154  mu = a.mask & b.mask;   in tnum_intersect()
    174  if (b.mask & ~a.mask)   in tnum_in()
    186  if (a.mask & 1)   in tnum_sbin()
    [all …]
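The tnum ("tristate number") hits above show how the BPF verifier tracks partially known values: `value` holds the known bits and `mask` the unknown ones. Below is a minimal sketch of the addition rule suggested by the line 66/70 hits; the struct layout, the `TNUM()` helper, and the `u64` typedef are assumptions for illustration, not quotes from tnum.c.

```c
typedef unsigned long long u64;		/* stand-in for the kernel's u64 */

struct tnum { u64 value; u64 mask; };	/* a set mask bit means that bit is unknown */
#define TNUM(_v, _m) ((struct tnum){ .value = (_v), .mask = (_m) })

/* Sketch of tristate addition: any bit position an unknown carry can reach
 * becomes unknown in the result. */
static struct tnum tnum_add_sketch(struct tnum a, struct tnum b)
{
	u64 sm, sv, sigma, chi, mu;

	sm = a.mask + b.mask;		/* carries generated by unknown bits alone */
	sv = a.value + b.value;		/* carries generated by known bits alone */
	sigma = sm + sv;		/* both combined */
	chi = sigma ^ sv;		/* positions an unknown carry may have touched */
	mu = chi | a.mask | b.mask;	/* ...plus the originally unknown bits */
	return TNUM(sv & ~mu, mu);
}
```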
|
token.c
    71   u64 mask;   in bpf_token_show_fdinfo() local
    74   mask = BIT_ULL(__MAX_BPF_CMD) - 1;   in bpf_token_show_fdinfo()
    75   if ((token->allowed_cmds & mask) == mask)   in bpf_token_show_fdinfo()
    81   mask = BIT_ULL(__MAX_BPF_MAP_TYPE) - 1;   in bpf_token_show_fdinfo()
    82   if ((token->allowed_maps & mask) == mask)   in bpf_token_show_fdinfo()
    88   mask = BIT_ULL(__MAX_BPF_PROG_TYPE) - 1;   in bpf_token_show_fdinfo()
    89   if ((token->allowed_progs & mask) == mask)   in bpf_token_show_fdinfo()
    95   mask = BIT_ULL(__MAX_BPF_ATTACH_TYPE) - 1;   in bpf_token_show_fdinfo()
    96   if ((token->allowed_attachs & mask) == mask)   in bpf_token_show_fdinfo()
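The token.c hits repeat one idiom: `BIT_ULL(n) - 1` builds a mask with bits 0..n-1 set, and `(allowed & mask) == mask` asks whether every defined command, map, program, or attach type is permitted. A standalone sketch of that idiom, with the limit value invented for illustration:

```c
#include <stdio.h>

#define BIT_ULL(nr)	(1ULL << (nr))	/* mirrors the kernel macro */
#define __MAX_BPF_CMD	38		/* illustrative value only, not the real enum limit */

int main(void)
{
	unsigned long long allowed_cmds = ~0ULL;		/* token permits everything */
	unsigned long long mask = BIT_ULL(__MAX_BPF_CMD) - 1;	/* bits 0 .. __MAX_BPF_CMD-1 */

	if ((allowed_cmds & mask) == mask)
		puts("allowed_cmds: any");	/* all defined command bits are set */
	return 0;
}
```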
|
rqspinlock.c
    94   if (!(atomic_read_acquire(&lock->val) & (mask)))   in is_lock_released()
    99   static noinline int check_deadlock_AA(rqspinlock_t *lock, u32 mask,   in check_deadlock_AA() argument
    145  if (is_lock_released(lock, mask, ts))   in check_deadlock_ABBA()
    201  static noinline int check_deadlock(rqspinlock_t *lock, u32 mask,   in check_deadlock() argument
    206  ret = check_deadlock_AA(lock, mask, ts);   in check_deadlock()
    209  ret = check_deadlock_ABBA(lock, mask, ts);   in check_deadlock()
    216  static noinline int check_timeout(rqspinlock_t *lock, u32 mask,   in check_timeout() argument
    237  return check_deadlock(lock, mask, ts);   in check_timeout()
    251  (ret) = check_timeout((lock), (mask), &(ts)); \
    255  #define RES_CHECK_TIMEOUT(ts, ret, mask) \   argument
    [all …]
|
ringbuf.c
    30   u64 mask;   member
    182  rb->mask = data_sz - 1;   in bpf_ringbuf_alloc()
    307  return rb->mask + 1;   in ringbuf_total_data_sz()
    428  hdr = (void *)rb->data + (pend_pos & rb->mask);   in __bpf_ringbuf_reserve()
    444  if (new_prod_pos - cons_pos > rb->mask ||   in __bpf_ringbuf_reserve()
    445  new_prod_pos - pend_pos > rb->mask) {   in __bpf_ringbuf_reserve()
    450  hdr = (void *)rb->data + (prod_pos & rb->mask);   in __bpf_ringbuf_reserve()
    502  cons_pos = smp_load_acquire(&rb->consumer_pos) & rb->mask;   in bpf_ringbuf_commit()
    685  hdr = (u32 *)((uintptr_t)rb->data + (uintptr_t)(cons_pos & rb->mask));   in __bpf_user_ringbuf_peek()
    719  (uintptr_t)((cons_pos + BPF_RINGBUF_HDR_SZ) & rb->mask));   in __bpf_user_ringbuf_peek()
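ringbuf.c stores `mask = data_sz - 1` at allocation time and then wraps every producer/consumer position with `pos & rb->mask`: the standard power-of-two ring-buffer trick, where the AND is a cheap modulo and `mask + 1` recovers the total size. A userspace toy with hypothetical names, just to show the wrap-around:

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Toy ring buffer illustrating the mask = size - 1 wrap-around used in ringbuf.c.
 * This only works when the size is a power of two, so (pos & mask) == (pos % size). */
struct ring {
	uint64_t mask;			/* size - 1 */
	unsigned char data[64];
};

static void *ring_slot(struct ring *rb, uint64_t pos)
{
	return rb->data + (pos & rb->mask);	/* cheap modulo for power-of-two sizes */
}

int main(void)
{
	struct ring rb = { .mask = sizeof(rb.data) - 1 };

	assert((sizeof(rb.data) & (sizeof(rb.data) - 1)) == 0);	/* power of two */
	printf("slot for position 70 is at offset %td\n",
	       (unsigned char *)ring_slot(&rb, 70) - rb.data);	/* 70 & 63 == 6 */
	return 0;
}
```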
|
/kernel/irq/
generic-chip.c
    41   u32 mask = d->mask;   in irq_gc_mask_disable_reg() local
    45   *ct->mask_cache &= ~mask;   in irq_gc_mask_disable_reg()
    60   u32 mask = d->mask;   in irq_gc_mask_set_bit() local
    63   *ct->mask_cache |= mask;   in irq_gc_mask_set_bit()
    79   u32 mask = d->mask;   in irq_gc_mask_clr_bit() local
    98   u32 mask = d->mask;   in irq_gc_unmask_enable_reg() local
    114  u32 mask = d->mask;   in irq_gc_ack_set_bit() local
    129  u32 mask = ~d->mask;   in irq_gc_ack_clr_bit() local
    151  u32 mask = d->mask;   in irq_gc_mask_disable_and_ack_set() local
    168  u32 mask = d->mask;   in irq_gc_eoi() local
    [all …]
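The generic-chip helpers all start from `mask = d->mask`, the bit for this interrupt line, and then update `*ct->mask_cache`, a shadow copy of the hardware mask register, before writing it out. A schematic of that shadow-register pattern; the structure and the raw register write are invented for illustration and are not how the kernel actually touches the device:

```c
/* Shadow-register sketch: keep a cached copy of the mask register, flip the
 * bit for one interrupt line, then write the whole word back to hardware. */
struct chip_ct {
	unsigned int mask_cache;		/* software copy of the mask register */
	volatile unsigned int *mask_reg;	/* memory-mapped hardware register (illustrative) */
};

static void gc_mask_irq(struct chip_ct *ct, unsigned int mask)
{
	ct->mask_cache |= mask;			/* polarity depends on the chip; some use &= ~mask */
	*ct->mask_reg = ct->mask_cache;		/* push the updated word to the device */
}
```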
|
internals.h
    225  static inline void irqd_clear(struct irq_data *d, unsigned int mask)   in irqd_clear() argument
    227  __irqd_to_state(d) &= ~mask;   in irqd_clear()
    230  static inline void irqd_set(struct irq_data *d, unsigned int mask)   in irqd_set() argument
    232  __irqd_to_state(d) |= mask;   in irqd_set()
    237  return __irqd_to_state(d) & mask;   in irqd_has_set()
    424  cpumask_copy(desc->pending_mask, mask);   in irq_copy_pending()
    427  irq_get_pending(struct cpumask *mask, struct irq_desc *desc)   in irq_get_pending() argument
    429  cpumask_copy(mask, desc->pending_mask);   in irq_get_pending()
    451  irq_get_pending(struct cpumask *mask, struct irq_desc *desc)   in irq_get_pending() argument
    490  unsigned int mask;   member
    [all …]
|
autoprobe.c
    33   unsigned long mask = 0;   in probe_irq_on() local
    91   mask |= 1 << i;   in probe_irq_on()
    96   return mask;   in probe_irq_on()
    114  unsigned int mask = 0;   in probe_irq_mask() local
    122  mask |= 1 << i;   in probe_irq_mask()
    130  return mask & val;   in probe_irq_mask()
|
proc.c
    49   const struct cpumask *mask;   in show_irq_affinity() local
    54   mask = desc->irq_common_data.affinity;   in show_irq_affinity()
    56   mask = irq_desc_get_pending_mask(desc);   in show_irq_affinity()
    61   mask = irq_data_get_effective_affinity_mask(&desc->irq_data);   in show_irq_affinity()
    71   seq_printf(m, "%*pbl\n", cpumask_pr_args(mask));   in show_irq_affinity()
    75   seq_printf(m, "%*pb\n", cpumask_pr_args(mask));   in show_irq_affinity()
    84   cpumask_var_t mask;   in irq_affinity_hint_proc_show() local
    86   if (!zalloc_cpumask_var(&mask, GFP_KERNEL))   in irq_affinity_hint_proc_show()
    91   cpumask_copy(mask, desc->affinity_hint);   in irq_affinity_hint_proc_show()
    94   seq_printf(m, "%*pb\n", cpumask_pr_args(mask));   in irq_affinity_hint_proc_show()
    [all …]
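show_irq_affinity() prints a cpumask either as a hex bitmap (`%*pb`) or as a CPU list (`%*pbl`); `cpumask_pr_args()` expands to the bit count plus the bitmap pointer those specifiers expect. A small kernel-side sketch of the same calls, with the function name made up; this compiles only in kernel context, not userspace:

```c
#include <linux/cpumask.h>
#include <linux/printk.h>

/* Print one cpumask in both formats used by show_irq_affinity(). */
static void print_affinity_example(const struct cpumask *mask)
{
	pr_info("mask: %*pb\n",  cpumask_pr_args(mask));	/* e.g. "0f"  */
	pr_info("list: %*pbl\n", cpumask_pr_args(mask));	/* e.g. "0-3" */
}
```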
|
manage.c
    257   cpumask_and(tmp_mask, mask, hk_mask);   in irq_do_set_affinity()
    259   prog_mask = mask;   in irq_do_set_affinity()
    263   prog_mask = mask;   in irq_do_set_affinity()
    327   const struct cpumask *mask)   in irq_set_affinity_deactivated() argument
    367   irq_copy_pending(desc, mask);   in irq_set_affinity_locked()
    577   static struct cpumask mask;   in irq_setup_affinity() local
    598   if (cpumask_empty(&mask))   in irq_setup_affinity()
    606   cpumask_and(&mask, &mask, nodemask);   in irq_setup_affinity()
    1003  cpumask_var_t mask;   in irq_thread_check_affinity() local
    1029  cpumask_copy(mask, m);   in irq_thread_check_affinity()
    [all …]
|
/kernel/rcu/
tree_exp.h
    81   unsigned long mask;   in sync_exp_reset_tree_hotplug() local
    113  mask = rnp->grpmask;   in sync_exp_reset_tree_hotplug()
    124  mask = rnp_up->grpmask;   in sync_exp_reset_tree_hotplug()
    196  unsigned long mask;   in __rcu_report_exp_rnp() local
    214  mask = rnp->grpmask;   in __rcu_report_exp_rnp()
    244  unsigned long mask;   in rcu_report_exp_cpu_mult() local
    378  mask_ofl_test |= mask;   in __sync_rcu_exp_select_node_cpus()
    412  mask_ofl_test |= mask;   in __sync_rcu_exp_select_node_cpus()
    416  mask_ofl_test |= mask;   in __sync_rcu_exp_select_node_cpus()
    561  unsigned long mask;   in synchronize_rcu_expedited_stall() local
    [all …]
|
tree.c
    1807  unsigned long mask;   in rcu_gp_init() local
    2338  if ((!(rnp->qsmask & mask) && mask) || rnp->gp_seq != gps) {   in rcu_report_qs_rnp()
    2396  unsigned long mask;   in rcu_report_unblock_qs_rnp() local
    2420  mask = rnp->grpmask;   in rcu_report_unblock_qs_rnp()
    2434  unsigned long mask;   in rcu_report_qs_rdp() local
    2453  mask = rdp->grpmask;   in rcu_report_qs_rdp()
    2764  if (mask != 0) {   in force_qs_rnp()
    4070  long mask;   in rcu_cleanup_dead_rnp() local
    4104  long mask;   in rcu_init_new_rnp() local
    4324  unsigned long mask;   in rcutree_report_cpu_starting() local
    [all …]
|
/kernel/time/
timecounter.c
    15   tc->mask = (1ULL << cc->shift) - 1;   in timecounter_init()
    40   cycle_delta = (cycle_now - tc->cycle_last) & tc->cc->mask;   in timecounter_read_delta()
    44   tc->mask, &tc->frac);   in timecounter_read_delta()
    70   u64 cycles, u64 mask, u64 frac)   in cc_cyc2ns_backwards() argument
    82   u64 delta = (cycle_tstamp - tc->cycle_last) & tc->cc->mask;   in timecounter_cyc2time()
    90   if (delta > tc->cc->mask / 2) {   in timecounter_cyc2time()
    91   delta = (tc->cycle_last - cycle_tstamp) & tc->cc->mask;   in timecounter_cyc2time()
    92   nsec -= cc_cyc2ns_backwards(tc->cc, delta, tc->mask, frac);   in timecounter_cyc2time()
    94   nsec += cyclecounter_cyc2ns(tc->cc, delta, tc->mask, &frac);   in timecounter_cyc2time()
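timecounter.c confines every subtraction to the counter's width with `& tc->cc->mask`, so a hardware counter narrower than 64 bits still yields the right delta across a wrap. A userspace illustration of that wrap-safe delta, with a hypothetical 32-bit counter:

```c
#include <stdint.h>
#include <stdio.h>

/* Wrap-safe delta on a counter narrower than 64 bits, as in
 * timecounter_read_delta(): mask = (1 << width) - 1 confines the
 * subtraction to the counter's width, so a wrap still gives the
 * right elapsed count. The 32-bit width here is illustrative. */
int main(void)
{
	uint64_t mask = (1ULL << 32) - 1;
	uint64_t cycle_last = 0xFFFFFFF0;	/* just before the counter wraps */
	uint64_t cycle_now  = 0x00000010;	/* shortly after the wrap */
	uint64_t delta = (cycle_now - cycle_last) & mask;

	printf("delta = %llu cycles\n", (unsigned long long)delta);	/* prints 32 */
	return 0;
}
```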
|
clocksource.c
    27    u64 delta = clocksource_delta(end, start, cs->mask, cs->max_raw_delta);   in cycles_to_nsec_safe()
    389   delta = (s64)((csnow_mid - csnow_begin) & cs->mask);   in clocksource_verify_percpu()
    392   delta = (csnow_end - csnow_mid) & cs->mask;   in clocksource_verify_percpu()
    526   watchdog->name, wd_nsec, wdnow, wdlast, watchdog->mask);   in clocksource_watchdog()
    528   cs->name, cs_nsec, csnow, cslast, cs->mask);   in clocksource_watchdog()
    969   max_cycles = min(max_cycles, mask);   in clocks_calc_max_nsecs()
    990   cs->maxadj, cs->mask,   in clocksource_update_max_deferment()
    1000  cs->max_raw_delta = (cs->mask >> 1) + (cs->mask >> 2) + (cs->mask >> 3);   in clocksource_update_max_deferment()
    1163  sec = cs->mask;   in __clocksource_update_freq_scale()
    1168  else if (sec > 600 && cs->mask > UINT_MAX)   in __clocksource_update_freq_scale()
    [all …]
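The line 1000 hit caps `max_raw_delta` at roughly 7/8 of the counter mask using only shifts (1/2 + 1/4 + 1/8 of the mask), a cheap approximation that needs no multiply or divide on a value that may already be near the top of the u64 range. A quick userspace check of that arithmetic:

```c
#include <stdint.h>
#include <stdio.h>

/* Verify that (mask >> 1) + (mask >> 2) + (mask >> 3) is about 7/8 of mask. */
int main(void)
{
	uint64_t mask = UINT64_MAX;		/* a full 64-bit clocksource, for illustration */
	uint64_t max_raw_delta = (mask >> 1) + (mask >> 2) + (mask >> 3);

	printf("%.4f of the mask\n", (double)max_raw_delta / (double)mask);	/* ~0.8750 */
	return 0;
}
```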
|
tick-broadcast.c
    227  static void err_broadcast(const struct cpumask *mask)   in err_broadcast() argument
    346  static bool tick_do_broadcast(struct cpumask *mask)   in tick_do_broadcast() argument
    355  if (cpumask_test_cpu(cpu, mask)) {   in tick_do_broadcast()
    358  cpumask_clear_cpu(cpu, mask);   in tick_do_broadcast()
    374  if (!cpumask_empty(mask)) {   in tick_do_broadcast()
    381  td = &per_cpu(tick_cpu_device, cpumask_first(mask));   in tick_do_broadcast()
    382  td->evtdev->broadcast(mask);   in tick_do_broadcast()
    992  static void tick_broadcast_init_next_event(struct cpumask *mask,   in tick_broadcast_init_next_event() argument
    998  for_each_cpu(cpu, mask) {   in tick_broadcast_init_next_event()
|
/kernel/
compat.c
    148  cpumask_var_t mask;   in COMPAT_SYSCALL_DEFINE3() local
    155  if (!zalloc_cpumask_var(&mask, GFP_KERNEL))   in COMPAT_SYSCALL_DEFINE3()
    158  ret = sched_getaffinity(pid, mask);   in COMPAT_SYSCALL_DEFINE3()
    162  if (compat_put_bitmap(user_mask_ptr, cpumask_bits(mask), retlen * 8))   in COMPAT_SYSCALL_DEFINE3()
    167  free_cpumask_var(mask);   in COMPAT_SYSCALL_DEFINE3()
    193  long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask,   in compat_get_bitmap() argument
    209  *mask++ = ((unsigned long)l2 << BITS_PER_COMPAT_LONG) | l1;   in compat_get_bitmap()
    213  unsafe_get_user(*mask, umask++, Efault);   in compat_get_bitmap()
    222  long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask,   in compat_put_bitmap() argument
    235  unsigned long m = *mask++;   in compat_put_bitmap()
    [all …]
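The compat_get_bitmap() hit at line 209 packs two 32-bit words from a compat (32-bit userspace) bitmap into one native unsigned long, low word first; compat_put_bitmap() does the reverse split. A minimal demonstration of that word merging, assuming a 64-bit `unsigned long`; the constant is written out here rather than taken from the kernel headers:

```c
#include <stdint.h>
#include <stdio.h>

#define BITS_PER_COMPAT_LONG 32		/* stand-in for the kernel constant */

/* Merge two consecutive 32-bit compat bitmap words into one 64-bit native word,
 * low word first, as in compat_get_bitmap(). Assumes a 64-bit unsigned long. */
int main(void)
{
	uint32_t l1 = 0x000000FF;	/* first (low) compat word */
	uint32_t l2 = 0x00000001;	/* second (high) compat word */
	unsigned long word = ((unsigned long)l2 << BITS_PER_COMPAT_LONG) | l1;

	printf("merged word = %#lx\n", word);	/* 0x1000000ff on a 64-bit build */
	return 0;
}
```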
|
taskstats.c
    302  if (!cpumask_subset(mask, cpu_possible_mask))   in add_del_listener()
    312  for_each_cpu(cpu, mask) {   in add_del_listener()
    339  for_each_cpu(cpu, mask) {   in add_del_listener()
    354  static int parse(struct nlattr *na, struct cpumask *mask)   in parse() argument
    371  ret = cpulist_parse(data, mask);   in parse()
    452  cpumask_var_t mask;   in cmd_attr_register_cpumask() local
    455  if (!alloc_cpumask_var(&mask, GFP_KERNEL))   in cmd_attr_register_cpumask()
    462  free_cpumask_var(mask);   in cmd_attr_register_cpumask()
    468  cpumask_var_t mask;   in cmd_attr_deregister_cpumask() local
    471  if (!alloc_cpumask_var(&mask, GFP_KERNEL))   in cmd_attr_deregister_cpumask()
    [all …]
|
smp.c
    125   send_call_function_ipi_mask(struct cpumask *mask)   in send_call_function_ipi_mask() argument
    127   trace_ipi_send_cpumask(mask, _RET_IP_,   in send_call_function_ipi_mask()
    129   arch_send_call_function_ipi_mask(mask);   in send_call_function_ipi_mask()
    754   int smp_call_function_any(const struct cpumask *mask,   in smp_call_function_any() argument
    762   if (!cpumask_test_cpu(cpu, mask))   in smp_call_function_any()
    763   cpu = sched_numa_find_nth_cpu(mask, 0, cpu_to_node(cpu));   in smp_call_function_any()
    780   static void smp_call_function_many_cond(const struct cpumask *mask,   in smp_call_function_many_cond() argument
    814   cpumask_and(cfd->cpumask, mask, cpu_online_mask);   in smp_call_function_many_cond()
    898   void smp_call_function_many(const struct cpumask *mask,   in smp_call_function_many() argument
    1036  void *info, bool wait, const struct cpumask *mask)   in on_each_cpu_cond_mask() argument
    [all …]
|
signal.c
    209   m = mask->sig;   in next_signal()
    284   BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));   in task_set_jobctl_pending()
    292   task->jobctl |= mask;   in task_set_jobctl_pending()
    339   task->jobctl &= ~mask;   in task_clear_jobctl_pending()
    3750  sigset_t mask = *which;   in do_sigtimedwait() local
    3765  signotset(&mask);   in do_sigtimedwait()
    4284  sigset_t mask;   in kernel_sigaction() local
    4286  sigemptyset(&mask);   in kernel_sigaction()
    4306  sigset_t mask;   in do_sigaction() local
    4357  sigemptyset(&mask);   in do_sigaction()
    [all …]
|
auditfilter.c
    205  if (mask[i] & classes[class][i])   in audit_match_class_bits()
    219  entry->rule.mask) &&   in audit_match_signal()
    221  entry->rule.mask));   in audit_match_signal()
    227  entry->rule.mask));   in audit_match_signal()
    230  entry->rule.mask));   in audit_match_signal()
    282  entry->rule.mask[i] = rule->mask[i];   in audit_to_entry_common()
    286  __u32 *p = &entry->rule.mask[AUDIT_WORD(bit)];   in audit_to_entry_common()
    296  entry->rule.mask[j] |= class[j];   in audit_to_entry_common()
    699  data->mask[i] = krule->mask[i];   in audit_krule_to_data()
    781  if (a->mask[i] != b->mask[i])   in audit_compare_rule()
    [all …]
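auditfilter.c addresses its per-syscall rule mask as an array of 32-bit words: `AUDIT_WORD()` selects the word for a syscall number and `AUDIT_BIT()` the bit inside it (line 286 hit). The macro bodies and the array size below follow that word/bit split but are written out here for illustration rather than copied from uapi/linux/audit.h:

```c
#include <stdio.h>

#define AUDIT_BITMASK_SIZE 64		/* illustrative array size */
#define AUDIT_WORD(nr)	((nr) / 32)	/* which 32-bit word holds this syscall */
#define AUDIT_BIT(nr)	(1U << ((nr) % 32))	/* which bit inside that word */

int main(void)
{
	unsigned int mask[AUDIT_BITMASK_SIZE] = { 0 };
	int bit = 59;				/* e.g. a syscall number to audit */

	mask[AUDIT_WORD(bit)] |= AUDIT_BIT(bit);
	printf("word %d = %#x\n", AUDIT_WORD(bit), mask[AUDIT_WORD(bit)]);	/* word 1 = 0x8000000 */
	return 0;
}
```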
|
/kernel/sched/
cpupri.c
    100  if (cpumask_any_and(&p->cpus_mask, vec->mask) >= nr_cpu_ids)   in __cpupri_find()
    104  cpumask_and(lowest_mask, &p->cpus_mask, vec->mask);   in __cpupri_find()
    233  cpumask_set_cpu(cpu, vec->mask);   in cpupri_set()
    267  cpumask_clear_cpu(cpu, vec->mask);   in cpupri_set()
    287  if (!zalloc_cpumask_var(&vec->mask, GFP_KERNEL))   in cpupri_init()
    302  free_cpumask_var(cp->pri_to_cpu[i].mask);   in cpupri_init()
    316  free_cpumask_var(cp->pri_to_cpu[i].mask);   in cpupri_cleanup()
|
topology.c
    918   cpumask_clear(mask);   in build_balance_mask()
    935   cpumask_set_cpu(i, mask);   in build_balance_mask()
    939   WARN_ON_ONCE(cpumask_empty(mask));   in build_balance_mask()
    979   build_balance_mask(sd, sg, mask);   in init_overlap_sched_group()
    980   cpu = cpumask_first(mask);   in init_overlap_sched_group()
    1293  cpumask_copy(mask, sched_group_span(sg));   in init_sched_groups_capacity()
    1294  for_each_cpu(cpu, mask) {   in init_sched_groups_capacity()
    1297  cpumask_andnot(mask, mask, cpu_smt_mask(cpu));   in init_sched_groups_capacity()
    1976  if (!mask)   in sched_init_numa()
    1979  masks[i][j] = mask;   in sched_init_numa()
    [all …]
|
/kernel/debug/kdb/
kdb_bt.c
    77   kdb_bt1(struct task_struct *p, const char *mask, bool btaprompt)   in kdb_bt1() argument
    84   if (!kdb_task_state(p, mask))   in kdb_bt1()
    141  const char *mask = argc ? argv[1] : kdbgetenv("PS");   in kdb_bt() local
    148  if (kdb_bt1(p, mask, btaprompt))   in kdb_bt()
    157  if (kdb_bt1(p, mask, btaprompt))   in kdb_bt()
|
/kernel/kcsan/
report.c
    391  u64 old, u64 new, u64 mask)   in print_report() argument
    476  if (mask)   in print_report()
    477  diff &= mask;   in print_report()
    482  if (mask) {   in print_report()
    484  hex_len, diff, hex_len, mask);   in print_report()
    668  int watchpoint_idx, u64 old, u64 new, u64 mask)   in kcsan_report_known_origin() argument
    692  print_report(value_change, &ai, other_info, old, new, mask);   in kcsan_report_known_origin()
    701  unsigned long ip, u64 old, u64 new, u64 mask)   in kcsan_report_unknown_origin() argument
    710  print_report(KCSAN_VALUE_CHANGE_TRUE, &ai, NULL, old, new, mask);   in kcsan_report_unknown_origin()
|
/kernel/dma/
mapping.c
    131  return min_not_zero(mask, dev->bus_dma_limit) >=   in dma_go_direct()
    875  return ops->dma_supported(dev, mask);   in dma_supported()
    878  return dma_direct_supported(dev, mask);   in dma_supported()
    896  int dma_set_mask(struct device *dev, u64 mask)   in dma_set_mask() argument
    902  mask = (dma_addr_t)mask;   in dma_set_mask()
    904  if (!dev->dma_mask || !dma_supported(dev, mask))   in dma_set_mask()
    907  arch_dma_set_mask(dev, mask);   in dma_set_mask()
    908  *dev->dma_mask = mask;   in dma_set_mask()
    921  mask = (dma_addr_t)mask;   in dma_set_coherent_mask()
    923  if (!dma_supported(dev, mask))   in dma_set_coherent_mask()
    [all …]
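dma_set_mask() only stores the new mask after dma_supported() approves it, so drivers typically request the widest addressing first and retry with a narrower mask. A hedged sketch of that usage; the probe helper is hypothetical, while dma_set_mask_and_coherent() and DMA_BIT_MASK() are the standard kernel helpers:

```c
#include <linux/dma-mapping.h>

/* Typical driver-side use of the mask APIs listed above: ask for 64-bit DMA
 * addressing and fall back to 32-bit if the bus cannot support it. Error
 * handling is trimmed to the essentials. */
static int example_probe_dma(struct device *dev)
{
	int ret;

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret)
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	return ret;	/* non-zero: neither mask is usable on this bus */
}
```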
|
/kernel/trace/
trace_events_filter.c
    81    struct cpumask *mask;   member
    204   kfree(pred->mask);   in free_predicate()
    657   return cpumask_equal(mask, cmp);   in do_filter_cpumask()
    659   return !cpumask_equal(mask, cmp);   in do_filter_cpumask()
    681   return cpumask_test_cpu(cpu, mask);   in do_filter_scalar_cpumask()
    693   cpumask_nth(1, mask) >= nr_cpu_ids;   in do_filter_cpumask_scalar()
    696   cpumask_nth(1, mask) < nr_cpu_ids;   in do_filter_cpumask_scalar()
    698   return cpumask_test_cpu(cpu, mask);   in do_filter_cpumask_scalar()
    1883  if (!pred->mask) {   in parse_pred()
    1906  kfree(pred->mask);   in parse_pred()
    [all …]
|