/include/net/
busy_poll.h
    42   return READ_ONCE(sysctl_net_busy_poll);    in net_busy_loop_on()
    47   return READ_ONCE(sk->sk_ll_usec) && !signal_pending(current);    in sk_can_busy_loop()
    89   unsigned long bp_usec = READ_ONCE(sysctl_net_busy_poll);    in busy_loop_timeout()
    105  unsigned long bp_usec = READ_ONCE(sk->sk_ll_usec);    in sk_busy_loop_timeout()
    120  unsigned int napi_id = READ_ONCE(sk->sk_napi_id);    in sk_busy_loop()
    124  READ_ONCE(sk->sk_prefer_busy_poll),    in sk_busy_loop()
    125  READ_ONCE(sk->sk_busy_poll_budget) ?: BUSY_POLL_BUDGET);    in sk_busy_loop()
    152  if (unlikely(READ_ONCE(sk->sk_napi_id) != skb->napi_id))    in sk_mark_napi_id()
    174  if (!READ_ONCE(sk->sk_napi_id))    in __sk_mark_napi_id_once()
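
Every hit above follows the same discipline: a field that another thread or CPU can rewrite at any time (a sysctl, sk_ll_usec, sk_napi_id) is sampled with one marked load. A minimal userspace sketch of the net_busy_loop_on() shape, with a stand-in READ_ONCE macro (the kernel's real macro is more elaborate):

    #include <stdbool.h>

    /* Userspace stand-in for the kernel macro: a volatile access the
     * compiler may not tear, fuse, or re-issue. */
    #define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

    /* Hypothetical tunable, rewritten concurrently by a "sysctl" thread. */
    static unsigned int sysctl_net_busy_poll;

    /* Same shape as net_busy_loop_on() above: one marked load, so the
     * caller decides on a single coherent snapshot of the tunable. */
    static bool net_busy_loop_on(void)
    {
            return READ_ONCE(sysctl_net_busy_poll) != 0;
    }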
|
proto_memory.h
    21   return !!READ_ONCE(*prot->memory_pressure);    in proto_memory_pressure()
    38   return !!READ_ONCE(*sk->sk_prot->memory_pressure);    in sk_under_memory_pressure()
    68   if (unlikely(val >= READ_ONCE(net_hotdata.sysctl_mem_pcpu_rsv)))    in sk_memory_allocated_add()
    79   if (unlikely(val <= -READ_ONCE(net_hotdata.sysctl_mem_pcpu_rsv)))    in sk_memory_allocated_sub()
|
inet_sock.h
    111  u32 mark = READ_ONCE(sk->sk_mark);    in inet_request_mark()
    122  int bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);    in inet_request_bound_dev_if()
    126  if (!bound_dev_if && READ_ONCE(net->ipv4.sysctl_tcp_l3mdev_accept))    in inet_request_bound_dev_if()
    138  if (!READ_ONCE(net->ipv4.sysctl_tcp_l3mdev_accept))    in inet_sk_bound_l3mdev()
    303  return READ_ONCE(inet->inet_flags) & IP_CMSG_ALL;    in inet_cmsg_flags()
    308  return inet_dsfield_to_dscp(READ_ONCE(inet->tos));    in inet_sk_dscp()
    330  if (sk && READ_ONCE(sk->sk_state) == TCP_NEW_SYN_RECV)    in sk_to_full_sk()
    332  if (sk && READ_ONCE(sk->sk_state) == TCP_TIME_WAIT)    in sk_to_full_sk()
    342  if (sk && READ_ONCE(sk->sk_state) == TCP_NEW_SYN_RECV)    in sk_const_to_full_sk()
    344  if (sk && READ_ONCE(sk->sk_state) == TCP_TIME_WAIT)    in sk_const_to_full_sk()
    [all …]
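
Note the shape at 122 and 126: a racy field consulted more than once is first copied into a local via READ_ONCE, because two separate plain loads could observe two different values. A hedged sketch of that idiom (the l3mdev parameter and the 0 fallback are placeholders, not the real logic):

    /* Stand-in for the kernel macro, as in the first sketch. */
    #define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

    struct sock { int sk_bound_dev_if; /* simplified */ };

    /* Idiom from inet_request_bound_dev_if(): copy the racy field into a
     * local once, then branch and return from that one snapshot. With
     * two plain loads, the test and the return value could disagree. */
    static int bound_dev_if_sketch(const struct sock *sk, int l3mdev_accept)
    {
            int bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);

            if (!bound_dev_if && l3mdev_accept)
                    return 0; /* placeholder fallback for the sketch */
            return bound_dev_if;
    }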
|
rps.h
    81   if (READ_ONCE(table->ents[index]) != val)    in rps_record_sock_flow()
    120  sock_rps_record_flow_hash(READ_ONCE(sk->sk_rxhash));    in sock_rps_record_flow()
    135  hash = READ_ONCE(sk->sk_rxhash);    in sock_rps_delete_flow()
    143  if (READ_ONCE(table->ents[index]) != RPS_NO_CPU)    in sock_rps_delete_flow()
|
sock.h
    705   return READ_ONCE(sk->sk_peek_off);    in sk_peek_offset()
    713   s32 off = READ_ONCE(sk->sk_peek_off);    in sk_peek_offset_bwd()
    1062  return READ_ONCE(sk->sk_ack_backlog) > READ_ONCE(sk->sk_max_ack_backlog);    in sk_acceptq_is_full()
    1075  return READ_ONCE(sk->sk_sndbuf) - READ_ONCE(sk->sk_wmem_queued);    in sk_stream_wspace()
    1375  if (READ_ONCE(sk->sk_wmem_queued) >= READ_ONCE(sk->sk_sndbuf))    in __sk_stream_memory_free()
    1877  .mark = READ_ONCE(sk->sk_mark),    in sockcm_init()
    1878  .tsflags = READ_ONCE(sk->sk_tsflags),    in sockcm_init()
    2098  return READ_ONCE(sk->sk_uid);    in sk_uid()
    2377  u32 txhash = READ_ONCE(sk->sk_txhash);    in skb_set_hash_from_sk()
    2666  return READ_ONCE(sk->sk_stamp);    in sock_read_timestamp()
    [all …]
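
sk_acceptq_is_full() at 1062 is the canonical two-sided case: both counters move under the reader, so each side of the comparison is one marked load, and the caller must tolerate a stale answer. A compilable sketch, assuming simplified socket fields:

    #include <stdbool.h>

    #define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

    struct sock {
            unsigned int sk_ack_backlog;     /* grows as connections queue up */
            unsigned int sk_max_ack_backlog; /* set by listen(), can change */
    };

    /* Both counters move under the reader: each side is one untorn load,
     * and the verdict may already be stale when the caller uses it. That
     * is acceptable here; READ_ONCE only rules out compiler mischief. */
    static bool sk_acceptq_is_full(const struct sock *sk)
    {
            return READ_ONCE(sk->sk_ack_backlog) >
                   READ_ONCE(sk->sk_max_ack_backlog);
    }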
|
act_api.h
    79   if (READ_ONCE(tm->lastuse) != now)    in tcf_lastuse_update()
    81   if (unlikely(!READ_ONCE(tm->firstuse)))    in tcf_lastuse_update()
    89   dtm->install = jiffies_to_clock_t(now - READ_ONCE(stm->install));    in tcf_tm_dump()
    90   dtm->lastuse = jiffies_to_clock_t(now - READ_ONCE(stm->lastuse));    in tcf_tm_dump()
    92   firstuse = READ_ONCE(stm->firstuse);    in tcf_tm_dump()
    96   dtm->expires = jiffies_to_clock_t(READ_ONCE(stm->expires));    in tcf_tm_dump()
|
ip.h
    97   .tos = READ_ONCE(inet->tos),    in ipcm_init_sk()
    102  ipcm->oif = READ_ONCE(inet->sk.sk_bound_dev_if);    in ipcm_init_sk()
    354  u32 range = READ_ONCE(net->ipv4.ip_local_ports.range);    in inet_get_local_port_range()
    376  return port < READ_ONCE(net->ipv4.sysctl_ip_prot_sock);    in inet_port_requires_bind_service()
    403  (READ_ONCE((net)->ipv4.sysctl_fwmark_reflect) ? (mark) : 0)
    439  u8 pmtudisc = READ_ONCE(inet_sk(sk)->pmtudisc);    in ip_dont_fragment()
    448  u8 pmtudisc = READ_ONCE(inet_sk(sk)->pmtudisc);    in ip_sk_accept_pmtu()
    461  u8 pmtudisc = READ_ONCE(inet_sk(sk)->pmtudisc);    in ip_sk_ignore_df()
    476  if (READ_ONCE(net->ipv4.sysctl_ip_fwd_use_pmtu) ||    in ip_dst_mtu_maybe_forward()
    489  mtu = READ_ONCE(dst_dev(dst)->mtu);    in ip_dst_mtu_maybe_forward()
    [all …]
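
The inet_get_local_port_range() hit at 354 shows a related trick: both port bounds are packed into one u32 so a single READ_ONCE yields a mutually consistent pair without any lock. A sketch of the idea; the exact halfword layout here is an assumption for illustration:

    #include <stdint.h>

    #define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

    /* Both bounds live in one 32-bit word so a single marked load returns
     * a consistent pair; the layout below is assumed for the sketch. */
    static uint32_t ip_local_port_range; /* lo in bits 0-15, hi in 16-31 */

    static void get_local_port_range(int *low, int *high)
    {
            uint32_t range = READ_ONCE(ip_local_port_range);

            *low  = range & 0xffff;
            *high = range >> 16;
    }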
|
inet_connection_sock.h
    190  return READ_ONCE(icsk->icsk_retransmit_timer.expires);    in icsk_timeout()
    196  return READ_ONCE(icsk->icsk_delack_timer.expires);    in icsk_delack_timeout()
    288  return inet_csk_reqsk_queue_len(sk) > READ_ONCE(sk->sk_max_ack_backlog);    in inet_csk_reqsk_queue_is_full()
    333  READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_pingpong_thresh);    in inet_csk_enter_pingpong_mode()
    344  READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_pingpong_thresh);    in inet_csk_in_pingpong_mode()
|
/include/linux/
srcutiny.h
    68   idx = ((READ_ONCE(ssp->srcu_idx) + 1) & 0x2) >> 1;    in __srcu_read_lock()
    69   WRITE_ONCE(ssp->srcu_lock_nesting[idx], READ_ONCE(ssp->srcu_lock_nesting[idx]) + 1);    in __srcu_read_lock()
    115  idx = ((data_race(READ_ONCE(ssp->srcu_idx)) + 1) & 0x2) >> 1;    in srcu_torture_stats_print()
    118  data_race(READ_ONCE(ssp->srcu_lock_nesting[!idx])),    in srcu_torture_stats_print()
    119  data_race(READ_ONCE(ssp->srcu_lock_nesting[idx])),    in srcu_torture_stats_print()
    120  data_race(READ_ONCE(ssp->srcu_idx)),    in srcu_torture_stats_print()
    121  data_race(READ_ONCE(ssp->srcu_idx_max)));    in srcu_torture_stats_print()
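
The __srcu_read_lock() pair at 68-69 is a read-modify-write done as separate marked accesses: Tiny SRCU runs on !SMP kernels, so there is a single updater and no atomic RMW is needed, but the accesses are still marked so the compiler cannot tear them and concurrent observers (the grace-period code, KCSAN) read sane values. A userspace sketch with stand-in macros and globals:

    #define READ_ONCE(x)     (*(const volatile __typeof__(x) *)&(x))
    #define WRITE_ONCE(x, v) (*(volatile __typeof__(x) *)&(x) = (v))

    static short srcu_lock_nesting[2]; /* one counter per reader phase */
    static unsigned short srcu_idx;    /* selects the current phase */

    /* Shape of __srcu_read_lock() above: a single updater, so no atomic
     * RMW, but both halves of the increment are marked so the compiler
     * cannot tear them and concurrent readers see sane values. */
    static int srcu_read_lock_sketch(void)
    {
            int idx = ((READ_ONCE(srcu_idx) + 1) & 0x2) >> 1;

            WRITE_ONCE(srcu_lock_nesting[idx],
                       READ_ONCE(srcu_lock_nesting[idx]) + 1);
            return idx;
    }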
|
objpool.h
    132  while (head != READ_ONCE(slot->last)) {    in __objpool_try_get_slot()
    147  if (READ_ONCE(slot->last) - head - 1 >= pool->nr_objs) {    in __objpool_try_get_slot()
    148  head = READ_ONCE(slot->head);    in __objpool_try_get_slot()
    153  obj = READ_ONCE(slot->entries[head & slot->mask]);    in __objpool_try_get_slot()
    197  tail = READ_ONCE(slot->tail);    in __objpool_try_add_slot()
    200  head = READ_ONCE(slot->head);    in __objpool_try_add_slot()
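
The objpool accessors read the ring indexes owned by the other side with READ_ONCE; a stale snapshot only makes the ring look more full or more empty than it really is, which is safe. A heavily simplified single-consumer sketch of that discipline; the real objpool additionally uses cmpxchg and memory barriers, and SMP acquire/release ordering is deliberately omitted here:

    #include <stddef.h>

    #define READ_ONCE(x)     (*(const volatile __typeof__(x) *)&(x))
    #define WRITE_ONCE(x, v) (*(volatile __typeof__(x) *)&(x) = (v))

    #define RING_SIZE 16 /* hypothetical; power of two */

    struct ring {
            void *entries[RING_SIZE];
            unsigned int head; /* written by the consumer only */
            unsigned int tail; /* written by the producer only */
    };

    /* Each side owns one index and reads the other side's index with a
     * marked load; a stale snapshot errs toward "empty" and is safe. */
    static void *ring_pop(struct ring *r)
    {
            unsigned int head = r->head;      /* our own index: plain load */

            if (head == READ_ONCE(r->tail))   /* producer's index */
                    return NULL;              /* (possibly stale) empty */

            void *obj = READ_ONCE(r->entries[head & (RING_SIZE - 1)]);

            WRITE_ONCE(r->head, head + 1);    /* publish the consumption */
            return obj;
    }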
|
rcupdate_trace.h
    53   WRITE_ONCE(t->trc_reader_nesting, READ_ONCE(t->trc_reader_nesting) + 1);    in rcu_read_lock_trace()
    76   nesting = READ_ONCE(t->trc_reader_nesting) - 1;    in rcu_read_unlock_trace()
    80   if (likely(!READ_ONCE(t->trc_reader_special.s)) || nesting) {    in rcu_read_unlock_trace()
|
hung_task.h
    42   WARN_ON_ONCE(READ_ONCE(current->blocker));    in hung_task_set_blocker()
    56   WARN_ON_ONCE(!READ_ONCE(current->blocker));    in hung_task_clear_blocker()
|
dynamic_queue_limits.h
    142  if (READ_ONCE(dql->stall_thrs))    in dql_queued()
    149  return READ_ONCE(dql->adj_limit) - READ_ONCE(dql->num_queued);    in dql_avail()
|
llist.h
    100  return READ_ONCE(node->next) != node;    in llist_on_list()
    218  return READ_ONCE(head->first) == NULL;    in llist_empty()
    223  return READ_ONCE(node->next);    in llist_next()
    238  struct llist_node *first = READ_ONCE(head->first);    in llist_add_batch()
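
llist_empty() at 218 is the classic lockless peek: one marked load of the head pointer, with the documented caveat that the answer is only a hint and may be stale by the time the caller acts on it. A compilable sketch:

    #include <stdbool.h>
    #include <stddef.h>

    #define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

    struct llist_node { struct llist_node *next; };
    struct llist_head { struct llist_node *first; };

    /* One marked load of the head pointer; the list may gain or lose
     * entries immediately afterwards, so callers treat this as a hint. */
    static bool llist_empty_sketch(const struct llist_head *head)
    {
            return READ_ONCE(head->first) == NULL;
    }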
|
entry-common.h
    92   unsigned long work = READ_ONCE(current_thread_info()->syscall_work);    in syscall_enter_from_user_mode_work()
    155  unsigned long work = READ_ONCE(current_thread_info()->syscall_work);    in syscall_exit_to_user_mode_work()
|
rcupdate.h
    81    #define rcu_preempt_depth() READ_ONCE(current->rcu_read_lock_nesting)
    166   if (!(preempt) && READ_ONCE((t)->rcu_tasks_holdout)) \
    188   int ___rttq_nesting = READ_ONCE((t)->trc_reader_nesting); \
    190   if (unlikely(READ_ONCE((t)->trc_reader_special.b.need_qs) == TRC_NEED_QS) && \
    194   !READ_ONCE((t)->trc_reader_special.b.blocked)) { \
    525   typeof(*p) *local = (typeof(*p) *__force)READ_ONCE(p); \
    532   typeof(*p) *local = (typeof(*p) *__force)READ_ONCE(p); \
    546   typeof(p) local = READ_ONCE(p); \
    1143  rcu_callback_t func = READ_ONCE(rhp->func);    in rcu_head_after_call_rcu()
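
Lines 525, 532, and 546 are the heart of rcu_dereference(): load the shared pointer exactly once into a local, then do every field access through that local. A userspace sketch of why; the real macro also relies on address-dependency ordering, sparse annotations, and lockdep checks omitted here:

    #include <stddef.h>

    #define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

    struct config { int a, b; };

    static struct config *global_cfg; /* swapped by an updater thread */

    /* Load the shared pointer once; dereference only the local copy.
     * Re-reading global_cfg between the NULL check and the field access
     * could yield a different (or NULL) pointer if an update raced. */
    static int read_config_a(void)
    {
            struct config *cfg = READ_ONCE(global_cfg);

            return cfg ? cfg->a : -1;
    }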
|
livepatch_sched.h
    17   READ_ONCE(curr->__state) & TASK_FREEZABLE)    in klp_sched_try_switch()
|
rcu_sync.h
    36   return !READ_ONCE(rsp->gp_state); /* GP_IDLE */    in rcu_sync_is_idle()
|
list_nulls.h
    84   return !READ_ONCE(h->pprev);    in hlist_nulls_unhashed_lockless()
    89   return is_a_nulls(READ_ONCE(h->first));    in hlist_nulls_empty()
|
task_work.h
    26   return READ_ONCE(task->task_works);    in task_work_pending()
|
/include/clocksource/
hyperv_timer.h
    67   sequence = READ_ONCE(tsc_pg->tsc_sequence);    in hv_read_tsc_page_tsc()
    76   scale = READ_ONCE(tsc_pg->tsc_scale);    in hv_read_tsc_page_tsc()
    77   offset = READ_ONCE(tsc_pg->tsc_offset);    in hv_read_tsc_page_tsc()
    86   } while (READ_ONCE(tsc_pg->tsc_sequence) != sequence);    in hv_read_tsc_page_tsc()
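
hv_read_tsc_page_tsc() is a sequence-retry read: sample the sequence, copy the payload, then re-check the sequence and retry if an update raced with the reads. A simplified sketch of the loop shape; the real function also pairs these loads with barriers and has additional validity handling:

    #include <stdint.h>

    #define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

    /* Simplified TSC-page layout; the hypervisor rewrites these fields. */
    struct tsc_page {
            uint32_t tsc_sequence; /* bumped around every update */
            uint64_t tsc_scale;
            uint64_t tsc_offset;
    };

    /* Sample the sequence, copy the payload, re-check the sequence; a
     * change means an update overlapped our reads, so go around again. */
    static void read_tsc_page(const struct tsc_page *pg,
                              uint64_t *scale, uint64_t *offset)
    {
            uint32_t seq;

            do {
                    seq = READ_ONCE(pg->tsc_sequence);
                    *scale  = READ_ONCE(pg->tsc_scale);
                    *offset = READ_ONCE(pg->tsc_offset);
            } while (READ_ONCE(pg->tsc_sequence) != seq);
    }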
|
/include/drm/
spsc_queue.h
    98   node = READ_ONCE(queue->head);    in spsc_queue_pop()
    103  next = READ_ONCE(node->next);    in spsc_queue_pop()
    114  } while (unlikely(!(queue->head = READ_ONCE(node->next))));    in spsc_queue_pop()
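
spsc_queue_pop() reads node->next with READ_ONCE because the single producer may be linking a new node in concurrently. The sketch below sidesteps the producer/consumer race on the final element (which the real code resolves with a tail cmpxchg) by never popping the last node; it is a simplified assumption, not the drm implementation:

    #include <stddef.h>

    #define READ_ONCE(x)     (*(const volatile __typeof__(x) *)&(x))
    #define WRITE_ONCE(x, v) (*(volatile __typeof__(x) *)&(x) = (v))

    struct node  { struct node *next; };
    struct queue { struct node *head; };

    /* The producer publishes nodes by writing node->next, so the consumer
     * loads it with READ_ONCE. Popping only when a successor exists keeps
     * the consumer off the node the producer may still be linking. */
    static struct node *spsc_pop_sketch(struct queue *q)
    {
            struct node *node = READ_ONCE(q->head);
            struct node *next = node ? READ_ONCE(node->next) : NULL;

            if (!next)
                    return NULL; /* empty, or last node still being linked */

            WRITE_ONCE(q->head, next);
            return node;
    }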
|
/include/asm-generic/bitops/
lock.h
    25   if (READ_ONCE(*p) & mask)    in arch_test_and_set_bit_lock()
    64   old = READ_ONCE(*p);    in arch___clear_bit_unlock()
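
arch_test_and_set_bit_lock() at 25 shows the "peek before RMW" fast path: a cheap marked load skips the expensive atomic when the bit is already set, and a racy "clear" reading is harmless because the atomic decides for real. A C11 sketch, substituting a relaxed atomic load for the kernel's READ_ONCE:

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Returns true if the bit was already set (lock not acquired). */
    static bool test_and_set_bit_lock_sketch(unsigned long bit,
                                             _Atomic unsigned long *word)
    {
            unsigned long mask = 1UL << bit;

            /* Cheap peek: if the bit is visibly set, skip the atomic RMW
             * and the cacheline contention it would cause. */
            if (atomic_load_explicit(word, memory_order_relaxed) & mask)
                    return true;

            /* The atomic decides for real; a stale "clear" peek is safe. */
            return atomic_fetch_or_explicit(word, mask,
                                            memory_order_acquire) & mask;
    }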
|
/include/vdso/
helpers.h
    14   while (unlikely((seq = READ_ONCE(vc->seq)) & 1))    in vdso_read_begin()
    27   seq = READ_ONCE(vc->seq);    in vdso_read_retry()
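
vdso_read_begin()/vdso_read_retry() split the sequence-counter protocol in two: spin while the count is odd (a writer is mid-update), then after the data reads check whether the count changed. A sketch of the pair; the real helpers insert smp_rmb() between the steps, omitted here:

    #include <stdint.h>

    #define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

    /* Begin: spin until the count is even (no writer mid-update). */
    static uint32_t vdso_read_begin_sketch(const uint32_t *seq)
    {
            uint32_t s;

            while ((s = READ_ONCE(*seq)) & 1)
                    ; /* cpu_relax() in the real helper */
            return s;
    }

    /* Retry check: nonzero means the data reads raced with an update. */
    static int vdso_read_retry_sketch(const uint32_t *seq, uint32_t start)
    {
            return READ_ONCE(*seq) != start;
    }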
|
/include/asm-generic/
barrier.h
    150  __unqual_scalar_typeof(*p) ___p1 = READ_ONCE(*p); \
    204  __unqual_scalar_typeof(*p) ___p1 = READ_ONCE(*p); \
    250  VAL = READ_ONCE(*__PTR); \
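
Line 250 is the reload inside the smp_cond_load*() spin loops, and 150/204 are the load half of smp_load_acquire(). The marked load is what keeps the compiler from hoisting the read out of the loop and spinning on a stale register forever. A minimal sketch of the cond-load shape (the function name is made up; barriers and cpu_relax() are omitted):

    #include <stdint.h>

    #define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

    /* Spin with a marked reload until another thread sets the flag;
     * without READ_ONCE the load could be lifted out of the loop. */
    static uint32_t wait_for_nonzero(uint32_t *flag)
    {
            uint32_t val;

            for (;;) {
                    val = READ_ONCE(*flag);
                    if (val)
                            break;
            }
            return val;
    }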
|