/linux/fs/fscache/
stats.c
    158  atomic_read(&fscache_n_marks),  in fscache_stats_show()
    159  atomic_read(&fscache_n_uncaches));  in fscache_stats_show()
    163  atomic_read(&fscache_n_acquires),  in fscache_stats_show()
    182  atomic_read(&fscache_n_updates),  in fscache_stats_show()
    200  atomic_read(&fscache_n_allocs),  in fscache_stats_show()
    201  atomic_read(&fscache_n_allocs_ok),  in fscache_stats_show()
    206  atomic_read(&fscache_n_alloc_ops),  in fscache_stats_show()
    225  atomic_read(&fscache_n_stores),  in fscache_stats_show()
    245  atomic_read(&fscache_n_op_pend),  in fscache_stats_show()
    246  atomic_read(&fscache_n_op_run),  in fscache_stats_show()
    [all …]
/linux/fs/netfs/
stats.c
    35  atomic_read(&netfs_n_rh_readahead),  in netfs_stats_show()
    36  atomic_read(&netfs_n_rh_readpage),  in netfs_stats_show()
    37  atomic_read(&netfs_n_rh_write_begin),  in netfs_stats_show()
    39  atomic_read(&netfs_n_rh_rreq),  in netfs_stats_show()
    40  atomic_read(&netfs_n_rh_sreq));  in netfs_stats_show()
    42  atomic_read(&netfs_n_rh_zero),  in netfs_stats_show()
    43  atomic_read(&netfs_n_rh_short_read),  in netfs_stats_show()
    46  atomic_read(&netfs_n_rh_download),  in netfs_stats_show()
    51  atomic_read(&netfs_n_rh_read),  in netfs_stats_show()
    52  atomic_read(&netfs_n_rh_read_done),  in netfs_stats_show()
    [all …]
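Both stats.c files above show the same pattern: event counters kept in
atomic_t, bumped on hot paths, and dumped lock-free by a seq_file show
routine. A minimal userspace model using C11 atomics (counter and
function names are hypothetical; atomic_uint stands in for the kernel's
atomic_t). Each load is tear-free on its own, but the set of counters
is not one consistent snapshot, which is acceptable for statistics:

    #include <stdatomic.h>
    #include <stdio.h>

    /* Hypothetical counters standing in for netfs_n_rh_* / fscache_n_*. */
    static atomic_uint n_read, n_read_done;

    static void record_read(void)
    {
            atomic_fetch_add_explicit(&n_read, 1, memory_order_relaxed);
    }

    static void record_read_done(void)
    {
            atomic_fetch_add_explicit(&n_read_done, 1, memory_order_relaxed);
    }

    /* Like netfs_stats_show(): read every counter without a lock. The
     * pair may be skewed if writers run concurrently. */
    static void stats_show(void)
    {
            printf("reads=%u done=%u\n",
                   atomic_load_explicit(&n_read, memory_order_relaxed),
                   atomic_load_explicit(&n_read_done, memory_order_relaxed));
    }

    int main(void)
    {
            record_read();
            record_read_done();
            stats_show();
            return 0;
    }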
/linux/net/netfilter/ipvs/
ip_vs_lblcr.c
    173  if ((atomic_read(&least->weight) > 0)  in ip_vs_dest_set_min()
    202  atomic_read(&least->activeconns),  in ip_vs_dest_set_min()
    204  atomic_read(&least->weight), loh);  in ip_vs_dest_set_min()
    222  if (atomic_read(&most->weight) > 0) {  in ip_vs_dest_set_max()
    247  atomic_read(&most->activeconns),  in ip_vs_dest_set_max()
    249  atomic_read(&most->weight), moh);  in ip_vs_dest_set_max()
    584  if (atomic_read(&dest->weight) > 0) {  in __ip_vs_lblcr_schedule()
    627  if (atomic_read(&dest->activeconns) > atomic_read(&dest->weight)) {  in is_overloaded()
    631  if (atomic_read(&d->activeconns)*2  in is_overloaded()
    632  < atomic_read(&d->weight)) {  in is_overloaded()
    [all …]
ip_vs_nq.c
    45  return atomic_read(&dest->activeconns) + 1;  in ip_vs_nq_dest_overhead()
    77  !atomic_read(&dest->weight))  in ip_vs_nq_schedule()
    83  if (atomic_read(&dest->activeconns) == 0) {  in ip_vs_nq_schedule()
    90  ((__s64)loh * atomic_read(&dest->weight) >  in ip_vs_nq_schedule()
    91  (__s64)doh * atomic_read(&least->weight))) {  in ip_vs_nq_schedule()
    107  atomic_read(&least->activeconns),  in ip_vs_nq_schedule()
    109  atomic_read(&least->weight), loh);  in ip_vs_nq_schedule()
ip_vs_sed.c
    49  return atomic_read(&dest->activeconns) + 1;  in ip_vs_sed_dest_overhead()
    80  atomic_read(&dest->weight) > 0) {  in ip_vs_sed_schedule()
    97  if ((__s64)loh * atomic_read(&dest->weight) >  in ip_vs_sed_schedule()
    98  (__s64)doh * atomic_read(&least->weight)) {  in ip_vs_sed_schedule()
    108  atomic_read(&least->activeconns),  in ip_vs_sed_schedule()
    110  atomic_read(&least->weight), loh);  in ip_vs_sed_schedule()
ip_vs_wlc.c
    52  atomic_read(&dest->weight) > 0) {  in ip_vs_wlc_schedule()
    69  if ((__s64)loh * atomic_read(&dest->weight) >  in ip_vs_wlc_schedule()
    70  (__s64)doh * atomic_read(&least->weight)) {  in ip_vs_wlc_schedule()
    80  atomic_read(&least->activeconns),  in ip_vs_wlc_schedule()
    82  atomic_read(&least->weight), loh);  in ip_vs_wlc_schedule()
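The nq, sed, wlc and lblc schedulers all pick a destination by the same
comparison visible in the hits above: server A beats server B when
overhead(A)/weight(A) < overhead(B)/weight(B), evaluated as the
cross-multiplication loh * weight(dest) > doh * weight(least) so no
division is needed, with the (__s64) casts widening the products past
32 bits. A single-threaded userspace sketch of that selection loop
(struct and helper names are made up; the real code reads atomic_t
fields):

    #include <stddef.h>
    #include <stdint.h>

    struct dest {
            int activeconns;
            int inactconns;
            int weight;
    };

    /* WLC-style overhead: active connections dominate, inactive ones
     * count at a fraction (the kernel helper weighs them 256:1). */
    static int64_t wlc_overhead(const struct dest *d)
    {
            return (int64_t)d->activeconns * 256 + d->inactconns;
    }

    /* Minimise overhead/weight without dividing:
     * loh/lw > doh/dw  <=>  loh*dw > doh*lw, given positive weights. */
    static const struct dest *wlc_schedule(const struct dest *tbl, size_t n)
    {
            const struct dest *least = NULL;
            int64_t loh = 0;

            for (size_t i = 0; i < n; i++) {
                    int64_t doh = wlc_overhead(&tbl[i]);

                    if (tbl[i].weight <= 0)
                            continue;
                    if (!least || loh * tbl[i].weight > doh * least->weight) {
                            least = &tbl[i];
                            loh = doh;
                    }
            }
            return least;
    }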
ip_vs_lblc.c
    311  if (atomic_read(&tbl->entries) <= tbl->max_size) {  in ip_vs_lblc_check_expire()
    316  goal = (atomic_read(&tbl->entries) - tbl->max_size)*4/3;  in ip_vs_lblc_check_expire()
    420  if (atomic_read(&dest->weight) > 0) {  in __ip_vs_lblc_schedule()
    437  if ((__s64)loh * atomic_read(&dest->weight) >  in __ip_vs_lblc_schedule()
    438  (__s64)doh * atomic_read(&least->weight)) {  in __ip_vs_lblc_schedule()
    448  atomic_read(&least->activeconns),  in __ip_vs_lblc_schedule()
    450  atomic_read(&least->weight), loh);  in __ip_vs_lblc_schedule()
    463  if (atomic_read(&dest->activeconns) > atomic_read(&dest->weight)) {  in is_overloaded()
    467  if (atomic_read(&d->activeconns)*2  in is_overloaded()
    468  < atomic_read(&d->weight)) {  in is_overloaded()
    [all …]
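Lines 463-468 here (and 627-632 in ip_vs_lblcr.c) are the locality
schedulers' overload test: a cached server only counts as overloaded
when it exceeds its own weight while some other server still has at
least half its weight free. A sketch under the same assumptions as the
scheduler sketch above (plain ints in place of atomic_t):

    #include <stddef.h>

    struct dest {
            int activeconns;
            int weight;
    };

    /* Overloaded only if this server is past its weight AND a better
     * home exists: some server with activeconns*2 < weight. */
    static int is_overloaded(const struct dest *d,
                             const struct dest *tbl, size_t n)
    {
            if (d->activeconns <= d->weight)
                    return 0;

            for (size_t i = 0; i < n; i++)
                    if (tbl[i].activeconns * 2 < tbl[i].weight)
                            return 1;
            return 0;
    }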
ip_vs_fo.c
    34  atomic_read(&dest->weight) > hw) {  in ip_vs_fo_schedule()
    36  hw = atomic_read(&dest->weight);  in ip_vs_fo_schedule()
    44  atomic_read(&hweight->activeconns),  in ip_vs_fo_schedule()
    45  atomic_read(&hweight->weight));  in ip_vs_fo_schedule()
ip_vs_ovf.c
    36  w = atomic_read(&dest->weight);  in ip_vs_ovf_schedule()
    38  atomic_read(&dest->activeconns) > w ||  in ip_vs_ovf_schedule()
    51  atomic_read(&h->activeconns),  in ip_vs_ovf_schedule()
    52  atomic_read(&h->weight));  in ip_vs_ovf_schedule()
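ip_vs_fo picks the highest-weight available server outright; ip_vs_ovf
does the same but, per line 38 above, skips any server whose active
connections already exceed its weight, overflowing to lighter servers
only when the heavier ones are saturated. A sketch of the ovf variant
(fields beyond activeconns/weight are hypothetical):

    #include <stddef.h>

    struct dest {
            int activeconns;
            int weight;
            int unavailable;        /* hypothetical availability flag */
    };

    /* Highest weight wins, but only while under its connection budget. */
    static const struct dest *ovf_schedule(const struct dest *tbl, size_t n)
    {
            const struct dest *h = NULL;
            int hw = 0;

            for (size_t i = 0; i < n; i++) {
                    int w = tbl[i].weight;

                    if (tbl[i].unavailable ||
                        tbl[i].activeconns > w || w <= 0)
                            continue;
                    if (!h || w > hw) {
                            h = &tbl[i];
                            hw = w;
                    }
            }
            return h;
    }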
/linux/drivers/crypto/bcm/
util.c
    376  atomic_read(&ipriv->session_count));  in spu_debugfs_read()
    379  atomic_read(&ipriv->stream_count));  in spu_debugfs_read()
    388  op_cnt = atomic_read(&ipriv->cipher_cnt[alg][mode]);  in spu_debugfs_read()
    399  atomic_read(&ipriv->op_counts[SPU_OP_HASH]));  in spu_debugfs_read()
    401  op_cnt = atomic_read(&ipriv->hash_cnt[alg]);  in spu_debugfs_read()
    414  atomic_read(&ipriv->op_counts[SPU_OP_HMAC]));  in spu_debugfs_read()
    416  op_cnt = atomic_read(&ipriv->hmac_cnt[alg]);  in spu_debugfs_read()
    432  op_cnt = atomic_read(&ipriv->aead_cnt[alg]);  in spu_debugfs_read()
    448  atomic_read(&ipriv->mb_no_spc));  in spu_debugfs_read()
    451  atomic_read(&ipriv->mb_send_fail));  in spu_debugfs_read()
    [all …]
/linux/drivers/infiniband/hw/vmw_pvrdma/
pvrdma_ring.h
    71  const unsigned int idx = atomic_read(var);  in pvrdma_idx()
    80  __u32 idx = atomic_read(var) + 1; /* Increment. */  in pvrdma_idx_ring_inc()
    89  const __u32 tail = atomic_read(&r->prod_tail);  in pvrdma_idx_ring_has_space()
    90  const __u32 head = atomic_read(&r->cons_head);  in pvrdma_idx_ring_has_space()
    103  const __u32 tail = atomic_read(&r->prod_tail);  in pvrdma_idx_ring_has_data()
    104  const __u32 head = atomic_read(&r->cons_head);  in pvrdma_idx_ring_has_data()
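pvrdma_ring.h reads a producer tail and a consumer head to decide
whether a shared ring has space or data. A generic sketch of that
two-index technique, not the pvrdma layout: here the indices run over
[0, 2N) with N a power of two, so "empty" and "full" stay
distinguishable without a separate count:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define RING_SLOTS 64u          /* N; hypothetical ring size */

    struct ring {
            atomic_uint prod_tail;  /* next slot the producer fills */
            atomic_uint cons_head;  /* next slot the consumer drains */
    };

    /* Occupancy is (tail - head) mod 2N, which stays in 0..N as long
     * as the producer never runs more than N slots ahead. */
    static uint32_t ring_occupancy(const struct ring *r)
    {
            uint32_t tail = atomic_load(&r->prod_tail);
            uint32_t head = atomic_load(&r->cons_head);

            return (tail - head) & (2 * RING_SLOTS - 1);
    }

    static bool ring_has_data(const struct ring *r)
    {
            return ring_occupancy(r) != 0;
    }

    static bool ring_has_space(const struct ring *r)
    {
            return ring_occupancy(r) != RING_SLOTS;
    }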
/linux/kernel/sched/
membarrier.c
    206  atomic_read(&mm->membarrier_state));  in ipi_sync_rq_state()
    238  membarrier_state = atomic_read(&next_mm->membarrier_state);  in membarrier_update_current_mm()
    318  if (!(atomic_read(&mm->membarrier_state) &  in membarrier_private_expedited()
    325  if (!(atomic_read(&mm->membarrier_state) &  in membarrier_private_expedited()
    331  if (!(atomic_read(&mm->membarrier_state) &  in membarrier_private_expedited()
    337  (atomic_read(&mm->mm_users) == 1 || num_online_cpus() == 1))  in membarrier_private_expedited()
    427  int membarrier_state = atomic_read(&mm->membarrier_state);  in sync_runqueues_membarrier_state()
    431  if (atomic_read(&mm->mm_users) == 1 || num_online_cpus() == 1) {  in sync_runqueues_membarrier_state()
    488  if (atomic_read(&mm->membarrier_state) &  in membarrier_register_global_expedited()
    528  if ((atomic_read(&mm->membarrier_state) & ready_state) == ready_state)  in membarrier_register_private_expedited()
/linux/drivers/s390/scsi/
zfcp_erp.c
    139  if (atomic_read(&port->status) &  in zfcp_erp_handle_failed()
    148  if (atomic_read(&adapter->status) &  in zfcp_erp_handle_failed()
    176  p_status = atomic_read(&port->status);  in zfcp_erp_required_act()
    184  p_status = atomic_read(&port->status);  in zfcp_erp_required_act()
    189  p_status = atomic_read(&port->status);  in zfcp_erp_required_act()
    192  a_status = atomic_read(&adapter->status);  in zfcp_erp_required_act()
    202  a_status = atomic_read(&adapter->status);  in zfcp_erp_required_act()
    239  if (!(atomic_read(&zfcp_sdev->status) &  in zfcp_erp_setup_act()
    264  if (!(atomic_read(&adapter->status) &  in zfcp_erp_setup_act()
    767  if (!(atomic_read(&adapter->status) &  in zfcp_erp_adapter_strat_fsf_xconf()
    [all …]
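zfcp, like membarrier_state above, keeps a bitmask of status flags in
one atomic word: writers set and clear bits with atomic RMW operations,
readers test bits against a plain atomic_read snapshot. A userspace
sketch with hypothetical flag names:

    #include <stdatomic.h>
    #include <stdbool.h>

    #define STATUS_OPEN     0x1u    /* hypothetical flag values */
    #define STATUS_FAILED   0x2u

    static atomic_uint port_status;

    static void port_set_failed(void)
    {
            atomic_fetch_or(&port_status, STATUS_FAILED);
    }

    static void port_clear_failed(void)
    {
            atomic_fetch_and(&port_status, ~STATUS_FAILED);
    }

    /* Like "atomic_read(&port->status) & FLAG": the snapshot can go
     * stale the moment it is taken, so callers only use it for
     * decisions that tolerate that race. */
    static bool port_is_open(void)
    {
            return atomic_load(&port_status) & STATUS_OPEN;
    }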
/linux/fs/afs/
cell.c
    298  trace_afs_cell(cell->debug_id, atomic_read(&cell->ref), atomic_read(&cell->active),  in afs_lookup_cell()
    497  u = atomic_read(&cell->ref);  in afs_cell_destroy()
    544  if (atomic_read(&cell->ref) <= 0)  in afs_get_cell()
    561  a = atomic_read(&cell->active);  in afs_put_cell()
    565  a = atomic_read(&cell->active);  in afs_put_cell()
    579  if (atomic_read(&cell->ref) <= 0)  in afs_use_cell()
    582  u = atomic_read(&cell->ref);  in afs_use_cell()
    610  u = atomic_read(&cell->ref);  in afs_unuse_cell()
    626  u = atomic_read(&cell->ref);  in afs_see_cell()
    627  a = atomic_read(&cell->active);  in afs_see_cell()
    [all …]
proc.c
    50  atomic_read(&cell->ref),  in afs_proc_cells_show()
    51  atomic_read(&cell->active),  in afs_proc_cells_show()
    391  atomic_read(&server->ref),  in afs_proc_servers_show()
    588  atomic_read(&net->n_lookup),  in afs_proc_stats_show()
    589  atomic_read(&net->n_reval),  in afs_proc_stats_show()
    590  atomic_read(&net->n_inval),  in afs_proc_stats_show()
    591  atomic_read(&net->n_relpg));  in afs_proc_stats_show()
    597  atomic_read(&net->n_dir_cr),  in afs_proc_stats_show()
    598  atomic_read(&net->n_dir_rm));  in afs_proc_stats_show()
    601  atomic_read(&net->n_fetches),  in afs_proc_stats_show()
    [all …]
/linux/sound/core/seq/
seq_lock.c
    16  if (atomic_read(lockp) < 0) {  in snd_use_lock_sync_helper()
    17  pr_warn("ALSA: seq_lock: lock trouble [counter = %d] in %s:%d\n", atomic_read(lockp), file, line);  in snd_use_lock_sync_helper()
    20  while (atomic_read(lockp) > 0) {  in snd_use_lock_sync_helper()
    22  pr_warn("ALSA: seq_lock: waiting [%d left] in %s:%d\n", atomic_read(lockp), file, line);  in snd_use_lock_sync_helper()
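snd_use_lock_sync_helper() drains a use count: readers increment it
while they hold a reference, and a destroyer spins until it drops to
zero, warning on a negative count (an underflow bug) and on long
waits. A userspace sketch; the warning threshold here is arbitrary,
and the kernel helper sleeps briefly between polls instead of
yielding:

    #include <sched.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int use_count;

    static void use_get(void) { atomic_fetch_add(&use_count, 1); }
    static void use_put(void) { atomic_fetch_sub(&use_count, 1); }

    /* Wait until every outstanding user has called use_put(). */
    static void use_sync(void)
    {
            long spins = 0;

            if (atomic_load(&use_count) < 0) {      /* underflow: a bug */
                    fprintf(stderr, "lock trouble [counter = %d]\n",
                            atomic_load(&use_count));
                    return;
            }
            while (atomic_load(&use_count) > 0) {
                    if (++spins % 1000000 == 0)     /* arbitrary */
                            fprintf(stderr, "waiting [%d left]\n",
                                    atomic_load(&use_count));
                    sched_yield();
            }
    }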
/linux/kernel/
cred.c
    76  return atomic_read(&cred->subscribers);  in read_cred_subscribers()
    102  atomic_read(&cred->usage) != 0 ||  in put_cred_rcu()
    107  atomic_read(&cred->usage),  in put_cred_rcu()
    110  if (atomic_read(&cred->usage) != 0)  in put_cred_rcu()
    138  atomic_read(&cred->usage),  in __put_cred()
    453  atomic_read(&new->usage),  in commit_creds()
    536  atomic_read(&new->usage),  in abort_creds()
    559  atomic_read(&new->usage),  in override_creds()
    582  atomic_read(&old->usage),  in override_creds()
    600  atomic_read(&old->usage),  in revert_creds()
    [all …]
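Most of the cred.c hits are debug output, but the put_cred_rcu() ones
are a sanity check: by the time the deferred free runs, the usage count
must already be zero. A simplified sketch of that release discipline
(no real RCU here; assert() stands in for the kernel's panic path):

    #include <assert.h>
    #include <stdatomic.h>
    #include <stdlib.h>

    struct cred {
            atomic_int usage;
    };

    /* Deferred destructor: a non-zero count here means someone still
     * holds a reference that was never dropped, i.e. a bug. */
    static void cred_free(struct cred *cred)
    {
            assert(atomic_load(&cred->usage) == 0);
            free(cred);
    }

    /* fetch_sub returns the old value, so the thread that takes the
     * count from 1 to 0 is the unique one that frees. */
    static void put_cred(struct cred *cred)
    {
            if (atomic_fetch_sub(&cred->usage, 1) == 1)
                    cred_free(cred);
    }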
/linux/fs/f2fs/
debug.c
    81  si->ext_tree = atomic_read(&sbi->total_ext_tree);  in update_general_status()
    83  si->ext_node = atomic_read(&sbi->total_ext_node);  in update_general_status()
    96  si->vw_cnt = atomic_read(&sbi->vw_cnt);  in update_general_status()
    97  si->max_aw_cnt = atomic_read(&sbi->max_aw_cnt);  in update_general_status()
    98  si->max_vw_cnt = atomic_read(&sbi->max_vw_cnt);  in update_general_status()
    108  atomic_read(&SM_I(sbi)->fcc_info->issued_flush);  in update_general_status()
    110  atomic_read(&SM_I(sbi)->fcc_info->queued_flush);  in update_general_status()
    137  si->inline_xattr = atomic_read(&sbi->inline_xattr);  in update_general_status()
    138  si->inline_inode = atomic_read(&sbi->inline_inode);  in update_general_status()
    139  si->inline_dir = atomic_read(&sbi->inline_dir);  in update_general_status()
    [all …]
/linux/arch/openrisc/kernel/
sync-timer.c
    53  while (atomic_read(&count_count_start) != 1)  in synchronise_count_master()
    74  while (atomic_read(&count_count_stop) != 1)  in synchronise_count_master()
    104  while (atomic_read(&count_count_start) != 2)  in synchronise_count_slave()
    114  while (atomic_read(&count_count_stop) != 2)  in synchronise_count_slave()
/linux/net/rxrpc/
conn_object.c
    107  if (!conn || atomic_read(&conn->usage) == 0)  in rxrpc_find_connection_rcu()
    117  if (!conn || atomic_read(&conn->usage) == 0) {  in rxrpc_find_connection_rcu()
    283  int n = atomic_read(&conn->usage);  in rxrpc_see_connection()
    354  _enter("{%d,u=%d}", conn->debug_id, atomic_read(&conn->usage));  in rxrpc_destroy_connection()
    356  ASSERTCMP(atomic_read(&conn->usage), ==, 0);  in rxrpc_destroy_connection()
    395  ASSERTCMP(atomic_read(&conn->usage), >, 0);  in rxrpc_service_connection_reaper()
    396  if (likely(atomic_read(&conn->usage) > 1))  in rxrpc_service_connection_reaper()
    408  conn->debug_id, atomic_read(&conn->usage),  in rxrpc_service_connection_reaper()
    445  ASSERTCMP(atomic_read(&conn->usage), ==, 0);  in rxrpc_service_connection_reaper()
    473  conn, atomic_read(&conn->usage));  in rxrpc_destroy_all_connections()
    [all …]
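The "usage == 0" tests in rxrpc_find_connection_rcu() encode a rule for
RCU-protected lookups: an object reached through an RCU-protected table
may already be dying, a zero usage count marks it dead, and taking a
new reference must be a conditional increment that fails on zero. A
sketch of that conditional increment (the kernel expresses it with
atomic_inc_not_zero()-style helpers):

    #include <stdatomic.h>
    #include <stdbool.h>

    struct conn {
            atomic_int usage;
    };

    /* Bump usage only if it is still non-zero; returns false when the
     * object is already dead and the lookup must say "not found". */
    static bool conn_get_not_zero(struct conn *conn)
    {
            int n = atomic_load(&conn->usage);

            while (n > 0) {
                    /* On failure the CAS reloads n and we retry. */
                    if (atomic_compare_exchange_weak(&conn->usage,
                                                     &n, n + 1))
                            return true;
            }
            return false;
    }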
/linux/arch/mips/kernel/
sync-r4k.c
    50  while (atomic_read(&count_count_start) != 1)  in synchronise_count_master()
    71  while (atomic_read(&count_count_stop) != 1)  in synchronise_count_master()
    104  while (atomic_read(&count_count_start) != 2)  in synchronise_count_slave()
    114  while (atomic_read(&count_count_stop) != 2)  in synchronise_count_slave()
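This file (which the OpenRISC sync-timer.c above mirrors) synchronises
cycle counters with a two-party rendezvous on an atomic counter: the
slave increments it and waits for the value 2, the master waits for 1
and then increments, releasing both sides at the same instant so they
write their counters together. A stripped-down sketch of one such
rendezvous; the real loop repeats it and uses the _stop counter the
same way on the way out:

    #include <stdatomic.h>

    static atomic_int count_start;

    /* Slave CPU: announce arrival, then spin until released. */
    static void slave_rendezvous(void)
    {
            atomic_fetch_add(&count_start, 1);      /* 0 -> 1 */
            while (atomic_load(&count_start) != 2)
                    ;                               /* busy-wait */
            /* ...both sides now write their cycle counters... */
    }

    /* Master CPU: wait for the slave, then release both at once. */
    static void master_rendezvous(void)
    {
            while (atomic_load(&count_start) != 1)
                    ;                               /* wait for slave */
            atomic_fetch_add(&count_start, 1);      /* 1 -> 2: go */
            /* ...both sides now write their cycle counters... */
    }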
/linux/fs/xfs/
xfs_trans_buf.c
    149  ASSERT(atomic_read(&bip->bli_refcount) > 0);  in xfs_trans_get_buf_map()
    185  ASSERT(atomic_read(&bip->bli_refcount) > 0);  in xfs_trans_getsb()
    282  ASSERT(atomic_read(&bip->bli_refcount) > 0);  in xfs_trans_read_buf_map()
    360  ASSERT(atomic_read(&bip->bli_refcount) > 0);  in xfs_trans_brelse()
    412  ASSERT(atomic_read(&bip->bli_refcount) > 0);  in xfs_trans_bhold()
    433  ASSERT(atomic_read(&bip->bli_refcount) > 0);  in xfs_trans_bhold_release()
    461  ASSERT(atomic_read(&bip->bli_refcount) > 0);  in xfs_trans_dirty_buf()
    548  ASSERT(atomic_read(&bip->bli_refcount) > 0);  in xfs_trans_binval()
    602  ASSERT(atomic_read(&bip->bli_refcount) > 0);  in xfs_trans_inode_buf()
    627  ASSERT(atomic_read(&bip->bli_refcount) > 0);  in xfs_trans_stale_inode_buf()
    [all …]
/linux/net/batman-adv/
gateway_common.c
    140  gw_mode = atomic_read(&bat_priv->gw.mode);  in batadv_gw_tvlv_container_update()
    148  down = atomic_read(&bat_priv->gw.bandwidth_down);  in batadv_gw_tvlv_container_update()
    149  up = atomic_read(&bat_priv->gw.bandwidth_up);  in batadv_gw_tvlv_container_update()
    177  down_curr = (unsigned int)atomic_read(&bat_priv->gw.bandwidth_down);  in batadv_gw_bandwidth_set()
    178  up_curr = (unsigned int)atomic_read(&bat_priv->gw.bandwidth_up);  in batadv_gw_bandwidth_set()
    246  atomic_read(&bat_priv->gw.mode) == BATADV_GW_MODE_CLIENT)  in batadv_gw_tvlv_ogm_handler_v1()
/linux/arch/x86/platform/uv/
uv_nmi.c
    524  nmi = atomic_read(&hub_nmi->in_nmi);  in uv_check_nmi()
    552  nmi = atomic_read(&hub_nmi->in_nmi);  in uv_check_nmi()
    562  nmi = atomic_read(&uv_in_nmi);  in uv_check_nmi()
    770  while (atomic_read(&uv_nmi_cpus_in_nmi) > 0)  in uv_nmi_sync_exit()
    774  while (atomic_read(&uv_nmi_slave_continue))  in uv_nmi_sync_exit()
    783  int in = atomic_read(&uv_nmi_cpus_in_nmi);  in uv_nmi_action_health()
    789  while (!atomic_read(&uv_nmi_slave_continue))  in uv_nmi_action_health()
    805  atomic_read(&uv_nmi_cpus_in_nmi), cpu);  in uv_nmi_dump_state()
    823  while (!atomic_read(&uv_nmi_slave_continue))  in uv_nmi_dump_state()
    917  sig = atomic_read(&uv_nmi_slave_continue);  in uv_call_kgdb_kdb()
    [all …]
/linux/include/asm-generic/
qspinlock.h
    28  return atomic_read(&lock->val);  in queued_spin_is_locked()
    44  return !atomic_read(&lock.val);  in queued_spin_value_unlocked()
    54  return atomic_read(&lock->val) & ~_Q_LOCKED_MASK;  in queued_spin_is_contended()
    63  int val = atomic_read(&lock->val);  in queued_spin_trylock()
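qspinlock packs the entire lock state into one atomic word, so every
predicate above is a single load plus a mask: non-zero means locked,
bits outside the locked byte mean waiters are queued, and trylock only
attempts its CAS after reading zero. A userspace sketch of those
predicates (the mask and layout here are illustrative, not the
kernel's actual bit assignment):

    #include <stdatomic.h>
    #include <stdbool.h>

    #define LOCKED_MASK 0xffu       /* low byte: lock holder present */

    typedef struct {
            atomic_uint val;        /* all lock state in one word */
    } sketch_spinlock_t;

    static bool spin_is_locked(sketch_spinlock_t *lock)
    {
            return atomic_load(&lock->val) != 0;
    }

    static bool spin_is_contended(sketch_spinlock_t *lock)
    {
            /* Anything above the locked byte encodes queued waiters. */
            return (atomic_load(&lock->val) & ~LOCKED_MASK) != 0;
    }

    static bool spin_trylock(sketch_spinlock_t *lock)
    {
            unsigned int val = atomic_load(&lock->val);

            /* Cheap read first; CAS only when the word looked free. */
            return val == 0 &&
                   atomic_compare_exchange_strong(&lock->val, &val, 1u);
    }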