/linux/kernel/rcu/

  tree_stall.h
    203  if (!READ_ONCE(rcu_kick_kthreads))  in rcu_stall_kick_kthreads()
    582  gpk && !READ_ONCE(gpk->on_rq)) {  in rcu_check_gp_kthread_expired_fqs_timer()
    783  gs1 = READ_ONCE(rcu_state.gp_seq);  in check_cpu_stall()
    787  gps = READ_ONCE(rcu_state.gp_start);  in check_cpu_stall()
    789  gs2 = READ_ONCE(rcu_state.gp_seq);  in check_cpu_stall()
    862  if (READ_ONCE(rnp->gp_tasks))  in rcu_check_boost_fail()
    922  if (ULONG_CMP_GE(READ_ONCE(rcu_state.gp_seq), READ_ONCE(rnp->gp_seq_needed)) &&  in show_rcu_gp_kthreads()
    923  !data_race(READ_ONCE(rnp->qsmask)) && !data_race(READ_ONCE(rnp->boost_tasks)) &&  in show_rcu_gp_kthreads()
    924  !data_race(READ_ONCE(rnp->exp_tasks)) && !data_race(READ_ONCE(rnp->gp_tasks)))  in show_rcu_gp_kthreads()
    930  data_race(READ_ONCE(rnp->qsmask)),  in show_rcu_gp_kthreads()
    [all …]

  srcutiny.c
    102  newval = READ_ONCE(ssp->srcu_lock_nesting[idx]) - 1;  in __srcu_read_unlock()
    105  if (!newval && READ_ONCE(ssp->srcu_gp_waiting) && in_task())  in __srcu_read_unlock()
    124  if (ssp->srcu_gp_running || ULONG_CMP_GE(ssp->srcu_idx, READ_ONCE(ssp->srcu_idx_max))) {  in srcu_drive_gp()
    140  swait_event_exclusive(ssp->srcu_wq, !READ_ONCE(ssp->srcu_lock_nesting[idx]));  in srcu_drive_gp()
    164  idx = ULONG_CMP_LT(ssp->srcu_idx, READ_ONCE(ssp->srcu_idx_max));  in srcu_drive_gp()
    177  if (ULONG_CMP_GE(READ_ONCE(ssp->srcu_idx_max), cookie)) {  in srcu_gp_start_if_needed()
    182  if (!READ_ONCE(ssp->srcu_gp_running)) {  in srcu_gp_start_if_needed()
    247  ret = (READ_ONCE(ssp->srcu_idx) + 3) & ~0x1;  in get_state_synchronize_srcu()
    277  unsigned long cur_s = READ_ONCE(ssp->srcu_idx);  in poll_state_synchronize_srcu()

  rcu_segcblist.h
    15   return READ_ONCE(rclp->len);  in rcu_cblist_n_cbs()
    45   return !READ_ONCE(rsclp->head);  in rcu_segcblist_empty()
    54   return READ_ONCE(rsclp->len);  in rcu_segcblist_n_cbs()
    73   return READ_ONCE(rsclp->flags) & flags;  in rcu_segcblist_test_flags()
    105  return !READ_ONCE(*READ_ONCE(rsclp->tails[seg]));  in rcu_segcblist_restempty()
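The rcu_segcblist.h hits above are one-line accessors that can be called by contexts not holding the callback list's lock, so each field is fetched with READ_ONCE() to get one untorn load the compiler cannot refetch or split. A minimal userspace sketch of that accessor pattern (the volatile-cast READ_ONCE() here is a deliberate simplification of the kernel macro, and struct cblist is hypothetical):

```c
#include <stdio.h>

/* Simplified stand-in for the kernel's READ_ONCE(): force exactly one load
 * through a volatile-qualified pointer. The real macro also handles
 * non-scalar types and cooperates with KCSAN, which this sketch ignores. */
#define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

/* Hypothetical callback list whose 'len' is updated by other contexts. */
struct cblist {
    long len;
};

/* Lockless accessor in the style of rcu_cblist_n_cbs(): the caller gets a
 * possibly stale, but never half-written, snapshot of an aligned word. */
long cblist_n_cbs(const struct cblist *clp)
{
    return READ_ONCE(clp->len);
}

int main(void)
{
    struct cblist cl = { .len = 3 };

    printf("%ld callbacks queued\n", cblist_n_cbs(&cl));
    return 0;
}
```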
/linux/net/ipv4/

  tcp_plb.c
    30  if (!READ_ONCE(net->ipv4.sysctl_tcp_plb_enabled))  in tcp_plb_update_state()
    34  if (cong_ratio < READ_ONCE(net->ipv4.sysctl_tcp_plb_cong_thresh))  in tcp_plb_update_state()
    37  READ_ONCE(net->ipv4.sysctl_tcp_plb_rehash_rounds))  in tcp_plb_update_state()
    52  if (!READ_ONCE(net->ipv4.sysctl_tcp_plb_enabled))  in tcp_plb_check_rehash()
    56  READ_ONCE(net->ipv4.sysctl_tcp_plb_rehash_rounds);  in tcp_plb_check_rehash()
    58  idle_rehash = READ_ONCE(net->ipv4.sysctl_tcp_plb_idle_rehash_rounds) &&  in tcp_plb_check_rehash()
    61  READ_ONCE(net->ipv4.sysctl_tcp_plb_idle_rehash_rounds);  in tcp_plb_check_rehash()
    71  max_suspend = 2 * READ_ONCE(net->ipv4.sysctl_tcp_plb_suspend_rto_sec) * HZ;  in tcp_plb_check_rehash()
    96  if (!READ_ONCE(net->ipv4.sysctl_tcp_plb_enabled))  in tcp_plb_update_state_upon_rto()
    99  pause = READ_ONCE(net->ipv4.sysctl_tcp_plb_suspend_rto_sec) * HZ;  in tcp_plb_update_state_upon_rto()
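The values loaded in tcp_plb.c are per-netns sysctls that an administrator can change at any moment, so every decision point re-reads them with READ_ONCE(): each test gets a single tear-free load, and the compiler cannot cache one value across the whole function (tcp_timer.c, listed next, mixes in per-socket fields such as icsk_user_timeout, which userspace can change concurrently via setsockopt). A hedged sketch of the idiom with hypothetical tunables:

```c
#include <stdbool.h>

/* Simplified READ_ONCE(), as in the sketch above. */
#define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

/* Hypothetical tunables, written by a sysctl handler at any time. */
int sysctl_plb_enabled;
int sysctl_plb_cong_thresh;

/* Each decision point performs its own READ_ONCE(). Two reads in the same
 * function may legitimately see different values while an admin is flipping
 * the knob, and the code has to tolerate that. */
bool plb_should_rehash(int cong_ratio)
{
    if (!READ_ONCE(sysctl_plb_enabled))
        return false;

    return cong_ratio >= READ_ONCE(sysctl_plb_cong_thresh);
}
```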
  tcp_timer.c
    34   user_timeout = READ_ONCE(icsk->icsk_user_timeout);  in tcp_clamp_rto_to_user_timeout()
    55   user_timeout = READ_ONCE(icsk->icsk_user_timeout);  in tcp_clamp_probe0_to_user_timeout()
    116  if (READ_ONCE(sk->sk_err_soft))  in tcp_out_of_resources()
    153  if (READ_ONCE(sk->sk_err_soft) && !alive)  in tcp_orphan_retries()
    170  if (!READ_ONCE(net->ipv4.sysctl_tcp_mtu_probing))  in tcp_mtu_probing()
    253  retry_until = READ_ONCE(icsk->icsk_syn_retries) ? :  in tcp_write_timeout()
    254  READ_ONCE(net->ipv4.sysctl_tcp_syn_retries);  in tcp_write_timeout()
    283  READ_ONCE(icsk->icsk_user_timeout));  in tcp_write_timeout()
    455  max_retries = READ_ONCE(icsk->icsk_syn_retries) ? :  in tcp_fastopen_synack_timer()
    484  u32 user_timeout = READ_ONCE(icsk->icsk_user_timeout);  in tcp_rtx_probe0_timed_out()
    [all …]
/linux/Documentation/translations/ko_KR/

  memory-barriers.txt
    259  Q = READ_ONCE(P); D = READ_ONCE(*Q);
    701  q = READ_ONCE(a);
    714  q = READ_ONCE(a);
    724  q = READ_ONCE(a);
    747  q = READ_ONCE(a);
    761  q = READ_ONCE(a);
    778  q = READ_ONCE(a);
    790  q = READ_ONCE(a);
    806  q = READ_ONCE(a);
    818  q = READ_ONCE(a);
    [all …]
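These hits are from the Korean translation of memory-barriers.txt: line 259 is the classic address-dependency pair `Q = READ_ONCE(P); D = READ_ONCE(*Q);`, and the repeated `q = READ_ONCE(a);` lines open its control-dependency examples. A compilable rendering of the control-dependency pattern those examples revolve around (READ_ONCE()/WRITE_ONCE() are simplified stand-ins; the caveats in the comments paraphrase the document):

```c
/* Simplified stand-ins for the kernel macros. */
#define READ_ONCE(x)     (*(const volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, v) (*(volatile __typeof__(x) *)&(x) = (v))

int a, b;

/* Control dependency: the load of 'a' is ordered before the store to 'b'
 * only because the store sits in a branch that depends on the loaded value.
 * Later loads are not ordered by this, and a compiler may destroy the
 * dependency outright (for example when both branch arms store the same
 * value), which is what the surrounding examples in memory-barriers.txt
 * walk through case by case. */
void control_dependency(void)
{
    int q = READ_ONCE(a);

    if (q)
        WRITE_ONCE(b, 1);
}
```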
/linux/io_uring/

  fs.c
    60   ren->old_dfd = READ_ONCE(sqe->fd);  in io_renameat_prep()
    61   oldf = u64_to_user_ptr(READ_ONCE(sqe->addr));  in io_renameat_prep()
    63   ren->new_dfd = READ_ONCE(sqe->len);  in io_renameat_prep()
    64   ren->flags = READ_ONCE(sqe->rename_flags);  in io_renameat_prep()
    114  un->dfd = READ_ONCE(sqe->fd);  in io_unlinkat_prep()
    116  un->flags = READ_ONCE(sqe->unlink_flags);  in io_unlinkat_prep()
    164  mkd->dfd = READ_ONCE(sqe->fd);  in io_mkdirat_prep()
    165  mkd->mode = READ_ONCE(sqe->len);  in io_mkdirat_prep()
    208  sl->new_dfd = READ_ONCE(sqe->fd);  in io_symlinkat_prep()
    251  lnk->old_dfd = READ_ONCE(sqe->fd);  in io_linkat_prep()
    [all …]
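The io_uring hits here, and in the sync.c, advise.c, splice.c, uring_cmd.c, statx.c and xattr.c entries that follow, all obey one prep-time rule: the SQE sits in the submission ring, which is shared with userspace and can be modified while the kernel is reading it, so each field is pulled out exactly once with READ_ONCE() into kernel-private request state before it is validated or used. A hedged sketch with hypothetical structures (io_uring's real types and prep helpers are more involved):

```c
#include <stdint.h>
#include <errno.h>

#define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

/* Hypothetical, heavily simplified stand-ins for the shared SQE and the
 * kernel-private request state that a prep handler fills in. */
struct sqe { int32_t fd; uint32_t len; uint32_t flags; };   /* user-writable */
struct req { int32_t dfd; uint32_t mode; uint32_t flags; }; /* kernel-only   */

int prep(struct req *r, const struct sqe *sqe)
{
    /* Pull every field out exactly once; validation and use operate on the
     * private copy, so a concurrent userspace write cannot create a
     * check-versus-use mismatch. */
    r->dfd   = READ_ONCE(sqe->fd);
    r->mode  = READ_ONCE(sqe->len);
    r->flags = READ_ONCE(sqe->flags);

    if (r->flags & ~0x3u)    /* hypothetical mask of supported flags */
        return -EINVAL;

    return 0;
}
```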
  sync.c
    32  sync->off = READ_ONCE(sqe->off);  in io_sfr_prep()
    33  sync->len = READ_ONCE(sqe->len);  in io_sfr_prep()
    34  sync->flags = READ_ONCE(sqe->sync_range_flags);  in io_sfr_prep()
    60  sync->flags = READ_ONCE(sqe->fsync_flags);  in io_fsync_prep()
    64  sync->off = READ_ONCE(sqe->off);  in io_fsync_prep()
    65  sync->len = READ_ONCE(sqe->len);  in io_fsync_prep()
    92  sync->off = READ_ONCE(sqe->off);  in io_fallocate_prep()
    93  sync->len = READ_ONCE(sqe->addr);  in io_fallocate_prep()
    94  sync->mode = READ_ONCE(sqe->len);  in io_fallocate_prep()
  advise.c
    39  ma->addr = READ_ONCE(sqe->addr);  in io_madvise_prep()
    40  ma->len = READ_ONCE(sqe->off);  in io_madvise_prep()
    42  ma->len = READ_ONCE(sqe->len);  in io_madvise_prep()
    43  ma->advice = READ_ONCE(sqe->fadvise_advice);  in io_madvise_prep()
    86  fa->offset = READ_ONCE(sqe->off);  in io_fadvise_prep()
    87  fa->len = READ_ONCE(sqe->addr);  in io_fadvise_prep()
    89  fa->len = READ_ONCE(sqe->len);  in io_fadvise_prep()
    90  fa->advice = READ_ONCE(sqe->fadvise_advice);  in io_fadvise_prep()
  splice.c
    32  sp->len = READ_ONCE(sqe->len);  in __io_splice_prep()
    33  sp->flags = READ_ONCE(sqe->splice_flags);  in __io_splice_prep()
    36  sp->splice_fd_in = READ_ONCE(sqe->splice_fd_in);  in __io_splice_prep()
    43  if (READ_ONCE(sqe->splice_off_in) || READ_ONCE(sqe->off))  in io_tee_prep()
    83  sp->off_in = READ_ONCE(sqe->splice_off_in);  in io_splice_prep()
    84  sp->off_out = READ_ONCE(sqe->off);  in io_splice_prep()
  uring_cmd.c
    206  ioucmd->flags = READ_ONCE(sqe->uring_cmd_flags);  in io_uring_cmd_prep()
    214  req->buf_index = READ_ONCE(sqe->buf_index);  in io_uring_cmd_prep()
    221  ioucmd->cmd_op = READ_ONCE(sqe->cmd_op);  in io_uring_cmd_prep()
    295  level = READ_ONCE(cmd->sqe->level);  in io_uring_cmd_getsockopt()
    299  optval = u64_to_user_ptr(READ_ONCE(cmd->sqe->optval));  in io_uring_cmd_getsockopt()
    300  optname = READ_ONCE(cmd->sqe->optname);  in io_uring_cmd_getsockopt()
    301  optlen = READ_ONCE(cmd->sqe->optlen);  in io_uring_cmd_getsockopt()
    323  optname = READ_ONCE(cmd->sqe->optname);  in io_uring_cmd_setsockopt()
    324  optlen = READ_ONCE(cmd->sqe->optlen);  in io_uring_cmd_setsockopt()
    325  level = READ_ONCE(cmd->sqe->level);  in io_uring_cmd_setsockopt()
    [all …]
  statx.c
    33  sx->dfd = READ_ONCE(sqe->fd);  in io_statx_prep()
    34  sx->mask = READ_ONCE(sqe->len);  in io_statx_prep()
    35  path = u64_to_user_ptr(READ_ONCE(sqe->addr));  in io_statx_prep()
    36  sx->buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2));  in io_statx_prep()
    37  sx->flags = READ_ONCE(sqe->statx_flags);  in io_statx_prep()
  xattr.c
    56   name = u64_to_user_ptr(READ_ONCE(sqe->addr));  in __io_getxattr_prep()
    57   ix->ctx.cvalue = u64_to_user_ptr(READ_ONCE(sqe->addr2));  in __io_getxattr_prep()
    58   ix->ctx.size = READ_ONCE(sqe->len);  in __io_getxattr_prep()
    59   ix->ctx.flags = READ_ONCE(sqe->xattr_flags);  in __io_getxattr_prep()
    97   path = u64_to_user_ptr(READ_ONCE(sqe->addr3));  in io_getxattr_prep()
    159  name = u64_to_user_ptr(READ_ONCE(sqe->addr));  in __io_setxattr_prep()
    160  ix->ctx.cvalue = u64_to_user_ptr(READ_ONCE(sqe->addr2));  in __io_setxattr_prep()
    162  ix->ctx.size = READ_ONCE(sqe->len);  in __io_setxattr_prep()
    163  ix->ctx.flags = READ_ONCE(sqe->xattr_flags);  in __io_setxattr_prep()
    190  path = u64_to_user_ptr(READ_ONCE(sqe->addr3));  in io_setxattr_prep()
/linux/include/net/

  busy_poll.h
    37   return READ_ONCE(sysctl_net_busy_poll);  in net_busy_loop_on()
    42   return READ_ONCE(sk->sk_ll_usec) && !signal_pending(current);  in sk_can_busy_loop()
    81   unsigned long bp_usec = READ_ONCE(sysctl_net_busy_poll);  in busy_loop_timeout()
    97   unsigned long bp_usec = READ_ONCE(sk->sk_ll_usec);  in sk_busy_loop_timeout()
    112  unsigned int napi_id = READ_ONCE(sk->sk_napi_id);  in sk_busy_loop()
    116  READ_ONCE(sk->sk_prefer_busy_poll),  in sk_busy_loop()
    117  READ_ONCE(sk->sk_busy_poll_budget) ?: BUSY_POLL_BUDGET);  in sk_busy_loop()
    138  if (unlikely(READ_ONCE(sk->sk_napi_id) != skb->napi_id))  in sk_mark_napi_id()
    160  if (!READ_ONCE(sk->sk_napi_id))  in __sk_mark_napi_id_once()
/linux/include/linux/

  srcutiny.h
    68  idx = ((READ_ONCE(ssp->srcu_idx) + 1) & 0x2) >> 1;  in __srcu_read_lock()
    69  WRITE_ONCE(ssp->srcu_lock_nesting[idx], READ_ONCE(ssp->srcu_lock_nesting[idx]) + 1);  in __srcu_read_lock()
    90  idx = ((data_race(READ_ONCE(ssp->srcu_idx)) + 1) & 0x2) >> 1;  in srcu_torture_stats_print()
    93  data_race(READ_ONCE(ssp->srcu_lock_nesting[!idx])),  in srcu_torture_stats_print()
    94  data_race(READ_ONCE(ssp->srcu_lock_nesting[idx])),  in srcu_torture_stats_print()
    95  data_race(READ_ONCE(ssp->srcu_idx)),  in srcu_torture_stats_print()
    96  data_race(READ_ONCE(ssp->srcu_idx_max)));  in srcu_torture_stats_print()
  objpool.h
    132  while (head != READ_ONCE(slot->last)) {  in __objpool_try_get_slot()
    147  if (READ_ONCE(slot->last) - head - 1 >= pool->nr_objs) {  in __objpool_try_get_slot()
    148  head = READ_ONCE(slot->head);  in __objpool_try_get_slot()
    153  obj = READ_ONCE(slot->entries[head & slot->mask]);  in __objpool_try_get_slot()
    198  tail = READ_ONCE(slot->tail);  in __objpool_try_add_slot()
    201  head = READ_ONCE(slot->head);  in __objpool_try_add_slot()
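objpool is the kernel's lockless object pool; each per-CPU slot is a ring whose indices are sampled with READ_ONCE() and claimed with a compare-and-swap, retrying when another consumer wins the race. Below is a loose consumer-side sketch using C11 atomics in place of the kernel's READ_ONCE()/try_cmpxchg(); the slot layout is hypothetical and much simpler than the real header:

```c
#include <stdatomic.h>
#include <stddef.h>
#include <stdint.h>

/* Hypothetical, simplified ring slot; the real struct objpool_slot differs. */
struct slot {
    _Atomic uint32_t head;   /* next index to consume */
    _Atomic uint32_t last;   /* one past the newest committed entry */
    uint32_t mask;           /* capacity - 1, capacity is a power of two */
    void *_Atomic entries[]; /* ring of object pointers */
};

/* Consumer path, loosely modeled on __objpool_try_get_slot(): snapshot the
 * indices, read the candidate entry, then try to claim it by advancing
 * 'head'. On CAS failure 'head' is reloaded and the loop retries. */
void *slot_try_get(struct slot *s)
{
    uint32_t head = atomic_load_explicit(&s->head, memory_order_acquire);

    while (head != atomic_load_explicit(&s->last, memory_order_acquire)) {
        void *obj = atomic_load_explicit(&s->entries[head & s->mask],
                                         memory_order_relaxed);

        if (atomic_compare_exchange_weak_explicit(&s->head, &head, head + 1,
                                                  memory_order_acq_rel,
                                                  memory_order_acquire))
            return obj;
    }

    return NULL;    /* ring drained: head caught up with 'last' */
}
```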
/linux/mm/

  page_counter.c
    30   protected = min(usage, READ_ONCE(c->min));  in propagate_protected_usage()
    39   protected = min(usage, READ_ONCE(c->low));  in propagate_protected_usage()
    101  if (new > READ_ONCE(c->local_watermark)) {  in page_counter_charge()
    103  if (new > READ_ONCE(c->watermark))  in page_counter_charge()
    157  if (new > READ_ONCE(c->local_watermark)) {  in page_counter_try_charge()
    159  if (new > READ_ONCE(c->watermark))  in page_counter_try_charge()
    444  counter->emin = READ_ONCE(counter->min);  in page_counter_calculate_protection()
    452  READ_ONCE(counter->min),  in page_counter_calculate_protection()
    453  READ_ONCE(parent->emin),  in page_counter_calculate_protection()
    458  READ_ONCE(counter->low),  in page_counter_calculate_protection()
    [all …]
/linux/Documentation/litmus-tests/locking/

  DCL-broken.litmus
    21  r0 = READ_ONCE(*flag);
    24  r1 = READ_ONCE(*flag);
    31  r2 = READ_ONCE(*data);
    40  r0 = READ_ONCE(*flag);
    43  r1 = READ_ONCE(*flag);
    50  r2 = READ_ONCE(*data);
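DCL-broken.litmus models double-checked locking done with plain READ_ONCE()/WRITE_ONCE(): each process peeks at *flag without the lock, takes the lock and rechecks *flag, initializes *data and sets *flag if needed, and finally reads *data. The test's point is that this is broken, because nothing orders the lockless read of *flag against the later read of *data. A userspace rendering of the same broken shape (a pthread mutex stands in for the litmus test's spin lock, and the macros are simplified):

```c
#include <pthread.h>

#define READ_ONCE(x)     (*(const volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, v) (*(volatile __typeof__(x) *)&(x) = (v))

int flag, data;
pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Both threads run this. The lockless READ_ONCE(flag) is only an
 * optimization to skip the lock; it provides no ordering, so the final
 * READ_ONCE(data) may still observe 0 on a weakly ordered machine even
 * though flag was seen as 1. That is the outcome the litmus test's
 * "exists" clause checks for. */
int broken_dcl(void)
{
    if (!READ_ONCE(flag)) {
        pthread_mutex_lock(&lock);
        if (!READ_ONCE(flag)) {
            WRITE_ONCE(data, 1);  /* initialize */
            WRITE_ONCE(flag, 1);  /* publish, but without release ordering */
        }
        pthread_mutex_unlock(&lock);
    }

    return READ_ONCE(data);
}
```

The usual repair is to publish with a release store (smp_store_release() in the kernel) and to read the flag with an acquire load, so that seeing flag == 1 guarantees the initialized data is visible.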
/linux/arch/arm64/include/asm/

  preempt.h
    13  return READ_ONCE(current_thread_info()->preempt.count);  in preempt_count()
    47  u32 pc = READ_ONCE(current_thread_info()->preempt.count);  in __preempt_count_add()
    54  u32 pc = READ_ONCE(current_thread_info()->preempt.count);  in __preempt_count_sub()
    62  u64 pc = READ_ONCE(ti->preempt_count);  in __preempt_count_dec_and_test()
    74  return !pc || !READ_ONCE(ti->preempt_count);  in __preempt_count_dec_and_test()
    79  u64 pc = READ_ONCE(current_thread_info()->preempt_count);  in should_resched()
/linux/drivers/md/dm-vdo/

  funnel-queue.c
    46   struct funnel_queue_entry *next = READ_ONCE(oldest->next);  in get_oldest()
    61   next = READ_ONCE(oldest->next);  in get_oldest()
    69   struct funnel_queue_entry *newest = READ_ONCE(queue->newest);  in get_oldest()
    86   next = READ_ONCE(oldest->next);  in get_oldest()
    116  queue->oldest = READ_ONCE(oldest->next);  in vdo_funnel_queue_poll()
    166  if (READ_ONCE(queue->newest) != &queue->stub)  in vdo_is_funnel_queue_idle()
/linux/arch/s390/lib/

  spinlock.c
    131  old = READ_ONCE(lp->lock);  in arch_spin_lock_queued()
    168  while (READ_ONCE(node->prev) != NULL) {  in arch_spin_lock_queued()
    182  old = READ_ONCE(lp->lock);  in arch_spin_lock_queued()
    202  while ((next = READ_ONCE(node->next)) == NULL)  in arch_spin_lock_queued()
    218  owner = arch_spin_yield_target(READ_ONCE(lp->lock), NULL);  in arch_spin_lock_classic()
    258  owner = READ_ONCE(lp->lock);  in arch_spin_trylock_retry()
    272  while (READ_ONCE(rw->cnts) & 0x10000)  in arch_read_lock_wait()
    284  while (READ_ONCE(rw->cnts) & 0x10000)  in arch_read_lock_wait()
    301  old = READ_ONCE(rw->cnts);  in arch_write_lock_wait()
    317  cpu = READ_ONCE(lp->lock) & _Q_LOCK_CPU_MASK;  in arch_spin_relax()
/linux/drivers/net/ethernet/mellanox/mlx4/

  en_port.c
    165  packets += READ_ONCE(ring->packets);  in mlx4_en_fold_software_stats()
    166  bytes += READ_ONCE(ring->bytes);  in mlx4_en_fold_software_stats()
    176  packets += READ_ONCE(ring->packets);  in mlx4_en_fold_software_stats()
    177  bytes += READ_ONCE(ring->bytes);  in mlx4_en_fold_software_stats()
    254  sw_rx_dropped += READ_ONCE(ring->dropped);  in mlx4_en_DUMP_ETH_STATS()
    255  priv->port_stats.rx_chksum_good += READ_ONCE(ring->csum_ok);  in mlx4_en_DUMP_ETH_STATS()
    256  priv->port_stats.rx_chksum_none += READ_ONCE(ring->csum_none);  in mlx4_en_DUMP_ETH_STATS()
    259  priv->xdp_stats.rx_xdp_drop += READ_ONCE(ring->xdp_drop);  in mlx4_en_DUMP_ETH_STATS()
    262  priv->xdp_stats.rx_xdp_tx += READ_ONCE(ring->xdp_tx);  in mlx4_en_DUMP_ETH_STATS()
    263  priv->xdp_stats.rx_xdp_tx_full += READ_ONCE(ring->xdp_tx_full);  in mlx4_en_DUMP_ETH_STATS()
    [all …]
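mlx4_en_fold_software_stats() walks the RX/TX rings and sums per-ring counters that the datapath updates without any lock; READ_ONCE() makes each read a single explicit load from memory, and occasional staleness is acceptable for statistics. A minimal sketch of that fold with a hypothetical ring layout:

```c
#include <stdint.h>

#define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

/* Hypothetical per-ring software counters, bumped locklessly by the CPU
 * that owns the ring. */
struct ring_stats {
    uint64_t packets;
    uint64_t bytes;
};

/* Fold the per-ring counters into one total. Slightly stale or mutually
 * inconsistent values are fine for statistics; READ_ONCE() just turns each
 * counter into one explicit load instead of whatever the optimizer would
 * otherwise do with a plain access. */
void fold_stats(uint64_t *packets, uint64_t *bytes,
                const struct ring_stats *rings, int n)
{
    *packets = 0;
    *bytes = 0;

    for (int i = 0; i < n; i++) {
        *packets += READ_ONCE(rings[i].packets);
        *bytes   += READ_ONCE(rings[i].bytes);
    }
}
```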
/linux/drivers/cpufreq/

  amd-pstate.c
    243   READ_ONCE(cpudata->cppc_req_cached));  in pstate_update_perf()
    578   cap_perf = READ_ONCE(cpudata->highest_perf);  in amd_pstate_update_freq()
    579   min_perf = READ_ONCE(cpudata->lowest_perf);  in amd_pstate_update_freq()
    647   min_perf = READ_ONCE(cpudata->lowest_perf);  in amd_pstate_adjust_perf()
    675   max_freq = READ_ONCE(cpudata->max_freq);  in amd_pstate_cpu_boost_update()
    983   min_freq = READ_ONCE(cpudata->min_freq);  in amd_pstate_cpu_init()
    984   max_freq = READ_ONCE(cpudata->max_freq);  in amd_pstate_cpu_init()
    1079  max_freq = READ_ONCE(cpudata->max_freq);  in show_amd_pstate_max_freq()
    1109  perf = READ_ONCE(cpudata->highest_perf);  in show_amd_pstate_highest_perf()
    1449  min_freq = READ_ONCE(cpudata->min_freq);  in amd_pstate_epp_cpu_init()
    [all …]
/linux/drivers/powercap/

  idle_inject.c
    133  duration_us = READ_ONCE(ii_dev->run_duration_us);  in idle_inject_timer_fn()
    134  duration_us += READ_ONCE(ii_dev->idle_duration_us);  in idle_inject_timer_fn()
    161  play_idle_precise(READ_ONCE(ii_dev->idle_duration_us) * NSEC_PER_USEC,  in idle_inject_fn()
    162  READ_ONCE(ii_dev->latency_us) * NSEC_PER_USEC);  in idle_inject_fn()
    194  *run_duration_us = READ_ONCE(ii_dev->run_duration_us);  in idle_inject_get_duration()
    195  *idle_duration_us = READ_ONCE(ii_dev->idle_duration_us);  in idle_inject_get_duration()
    223  unsigned int idle_duration_us = READ_ONCE(ii_dev->idle_duration_us);  in idle_inject_start()
    224  unsigned int run_duration_us = READ_ONCE(ii_dev->run_duration_us);  in idle_inject_start()
/linux/tools/memory-model/Documentation/

  locking.txt
    38   r0 = READ_ONCE(y);
    40   r1 = READ_ONCE(x);
    61   r0 = READ_ONCE(y);
    63   r1 = READ_ONCE(x);
    111  READ_ONCE() of "flag" and the READ_ONCE() of "data". Second, there is
    166  r0 = READ_ONCE(y);
    175  r1 = READ_ONCE(x);
    200  r0 = READ_ONCE(y);
    209  r1 = READ_ONCE(x);
    243  r0 = READ_ONCE(x);
    [all …]
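locking.txt is the memory-model companion text on locking; its r0 = READ_ONCE(y) / r1 = READ_ONCE(x) pairs come from litmus tests contrasting fully locked code with lockless observers, and the line-111 hit is its discussion of why nothing orders a lockless READ_ONCE() of "flag" against a later READ_ONCE() of "data". The practical rule it draws is that a thread reading shared variables without the lock can see a lock-holding writer's stores out of order. A compilable illustration of that hazard, loosely modeled on the document's examples rather than copied from them:

```c
#include <pthread.h>

#define READ_ONCE(x)     (*(const volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, v) (*(volatile __typeof__(x) *)&(x) = (v))

int x, y;
pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Writer: both stores happen inside the critical section, in program order. */
void *writer(void *arg)
{
    pthread_mutex_lock(&lock);
    WRITE_ONCE(x, 1);
    WRITE_ONCE(y, 1);
    pthread_mutex_unlock(&lock);
    return arg;
}

/* Lockless observer: because it never takes the lock, nothing forbids it
 * from seeing y == 1 while x still reads as 0. Taking the lock around both
 * reads, or pairing a release store of y with an acquire load, rules that
 * outcome out. */
void *observer(void *result)
{
    int r0 = READ_ONCE(y);
    int r1 = READ_ONCE(x);

    *(int *)result = (r0 == 1 && r1 == 0);  /* surprising, but allowed */
    return NULL;
}
```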