/net/mptcp/

token.c
    111  struct token_bucket *bucket;  in mptcp_token_new_request() local
    122  bucket = token_bucket(token);  in mptcp_token_new_request()
    123  spin_lock_bh(&bucket->lock);  in mptcp_token_new_request()
    130  bucket->chain_len++;  in mptcp_token_new_request()
    157  struct token_bucket *bucket;  in mptcp_token_new_connect() local
    164  spin_lock_bh(&bucket->lock);  in mptcp_token_new_connect()
    177  bucket->chain_len++;  in mptcp_token_new_connect()
    196  struct token_bucket *bucket;  in mptcp_token_accept() local
    200  spin_lock_bh(&bucket->lock);  in mptcp_token_accept()
    364  bucket->chain_len--;  in mptcp_token_destroy_request()
    [all …]
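
The hits above show the classic BH-safe hash-bucket pattern: a token is hashed to a bucket, the per-bucket spinlock is taken with spin_lock_bh() because the table is also touched from softirq context, and chain_len tracks how long the collision chain has grown. A minimal kernel-style sketch of the same shape; my_bucket, my_token_bucket() and MY_TOKEN_BUCKETS are hypothetical names, not the mptcp ones:

    #include <linux/types.h>
    #include <linux/spinlock.h>
    #include <linux/list.h>

    #define MY_TOKEN_BUCKETS 1024   /* hypothetical table size */

    struct my_bucket {
            spinlock_t lock;
            struct hlist_head chain;
            unsigned int chain_len;  /* entries currently chained here */
    };

    static struct my_bucket my_table[MY_TOKEN_BUCKETS];

    static struct my_bucket *my_token_bucket(u32 token)
    {
            return &my_table[token % MY_TOKEN_BUCKETS];
    }

    static void my_token_insert(u32 token, struct hlist_node *node)
    {
            struct my_bucket *bucket = my_token_bucket(token);

            /* _bh variant: the chain is also walked from softirq context */
            spin_lock_bh(&bucket->lock);
            hlist_add_head(node, &bucket->chain);
            bucket->chain_len++;
            spin_unlock_bh(&bucket->lock);
    }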
|
/net/ceph/crush/

mapper.c
    88   s = crush_hash32_3(bucket->hash, x, bucket->id, 0) %  in bucket_perm_choose()
    89   bucket->size;  in bucket_perm_choose()
    113  i = crush_hash32_3(bucket->hash, x, bucket->id, p) %  in bucket_perm_choose()
    114  (bucket->size - p);  in bucket_perm_choose()
    131  return bucket->items[s];  in bucket_perm_choose()
    148  __u64 w = crush_hash32_4(bucket->h.hash, x, bucket->h.items[i],  in bucket_list_choose()
    149  r, bucket->h.id);  in bucket_list_choose()
    153  i, x, r, bucket->h.items[i], bucket->item_weights[i],  in bucket_list_choose()
    164  return bucket->h.items[0];  in bucket_list_choose()
    237  draw = crush_hash32_3(bucket->h.hash, x, bucket->h.items[i], r);  in bucket_straw_choose()
    [all …]
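
What these hits illustrate: CRUSH picks an item out of a bucket deterministically, by hashing the input x together with the bucket id and a permutation/retry counter and reducing modulo the bucket size, so every client computes the same placement without consulting a table. A simplified, self-contained illustration; hash32() is a stand-in mixer, not the real crush_hash32_3(), and the struct is pared down:

    #include <stdint.h>

    struct crush_like_bucket {
            int id;
            uint32_t size;   /* number of items */
            int *items;
    };

    /* Stand-in mixing function, not the real CRUSH hash. */
    static uint32_t hash32(uint32_t a, uint32_t b, uint32_t c)
    {
            uint32_t h = a * 2654435761u ^ b * 2246822519u ^ c * 3266489917u;
            h ^= h >> 15;
            return h;
    }

    /* The same x, bucket id and attempt always pick the same item. */
    static int bucket_choose(const struct crush_like_bucket *b,
                             uint32_t x, uint32_t attempt)
    {
            uint32_t s = hash32(x, (uint32_t)b->id, attempt) % b->size;
            return b->items[s];
    }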
|
/net/sched/

sch_hhf.c
    333  bucket->head = skb->next;  in dequeue_head()
    341  if (bucket->head == NULL)  in bucket_add()
    342  bucket->head = skb;  in bucket_add()
    344  bucket->tail->next = skb;  in bucket_add()
    345  bucket->tail = skb;  in bucket_add()
    352  struct wdrr_bucket *bucket;  in hhf_drop() local
    356  if (!bucket->head)  in hhf_drop()
    359  if (bucket->head) {  in hhf_drop()
    381  bucket = &q->buckets[idx];  in hhf_enqueue()
    382  bucket_add(bucket, skb);  in hhf_enqueue()
    [all …]
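
A wdrr_bucket in sch_hhf is essentially a singly linked FIFO of skbs with head/tail pointers: dequeue_head() pops from the front and bucket_add() appends at the back, so both operations are O(1). Roughly, as a kernel-style sketch reconstructed from the lines above (not the exact sch_hhf code):

    #include <linux/skbuff.h>

    struct wdrr_bucket_sketch {
            struct sk_buff *head;
            struct sk_buff *tail;
    };

    static struct sk_buff *dequeue_head(struct wdrr_bucket_sketch *bucket)
    {
            struct sk_buff *skb = bucket->head;

            if (skb) {
                    bucket->head = skb->next;  /* tail is left stale; it is
                                                * only read while head != NULL */
                    skb->next = NULL;
            }
            return skb;
    }

    static void bucket_add(struct wdrr_bucket_sketch *bucket,
                           struct sk_buff *skb)
    {
            if (bucket->head == NULL)          /* queue was empty */
                    bucket->head = skb;
            else
                    bucket->tail->next = skb;  /* append after old tail */
            bucket->tail = skb;
            skb->next = NULL;
    }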
|
/net/vmw_vsock/

diag.c
    52   unsigned int bucket;  in vsock_diag_dump() local
    63   bucket = cb->args[1];  in vsock_diag_dump()
    72   while (bucket < ARRAY_SIZE(vsock_bind_table)) {  in vsock_diag_dump()
    73   struct list_head *head = &vsock_bind_table[bucket];  in vsock_diag_dump()
    94   bucket++;  in vsock_diag_dump()
    98   bucket = 0;  in vsock_diag_dump()
    102  while (bucket < ARRAY_SIZE(vsock_connected_table)) {  in vsock_diag_dump()
    103  struct list_head *head = &vsock_connected_table[bucket];  in vsock_diag_dump()
    128  bucket++;  in vsock_diag_dump()
    135  cb->args[1] = bucket;  in vsock_diag_dump()
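
The shape of vsock_diag_dump() is the standard resumable netlink dump: the bucket cursor is loaded from cb->args[] on entry, the tables are walked until the reply skb fills up, and the cursor is written back so the next invocation resumes where this one stopped. A minimal sketch of that skeleton; my_table and its size are hypothetical:

    #include <linux/list.h>
    #include <net/netlink.h>

    #define MY_TABLE_SIZE 256
    static struct list_head my_table[MY_TABLE_SIZE];

    static int my_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
    {
            unsigned int bucket = cb->args[1];   /* resume point */

            while (bucket < MY_TABLE_SIZE) {
                    struct list_head *head = &my_table[bucket];

                    /* ... walk the entries on @head; if the skb is full,
                     * break out so the dump is called again ... */
                    bucket++;
            }

            cb->args[1] = bucket;                /* where to resume next time */
            return skb->len;
    }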
|
/net/rxrpc/

proc.c
    316  unsigned int bucket, n;  in rxrpc_peer_seq_start() local
    326  bucket = *_pos >> shift;  in rxrpc_peer_seq_start()
    328  if (bucket >= HASH_SIZE(rxnet->peer_hash)) {  in rxrpc_peer_seq_start()
    333  if (bucket == 0)  in rxrpc_peer_seq_start()
    342  bucket++;  in rxrpc_peer_seq_start()
    344  *_pos = (bucket << shift) | n;  in rxrpc_peer_seq_start()
    351  unsigned int bucket, n;  in rxrpc_peer_seq_next() local
    358  bucket = *_pos >> shift;  in rxrpc_peer_seq_next()
    365  bucket++;  in rxrpc_peer_seq_next()
    367  *_pos = (bucket << shift) | n;  in rxrpc_peer_seq_next()
    [all …]
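
rxrpc packs two cursors into the single loff_t position that seq_file hands it: the upper bits carry the hash bucket and the low `shift` bits carry the index inside that bucket, so `*_pos = (bucket << shift) | n` round-trips through `bucket = *_pos >> shift`. A tiny self-contained version of that encoding (here shift is just a parameter; rxrpc derives it from the per-bucket limit):

    #include <linux/types.h>

    /* Worked example with shift = 8, bucket 5, index 3:
     *   pos    = (5 << 8) | 3 = 0x503
     *   bucket = 0x503 >> 8   = 5
     *   index  = 0x503 & 0xff = 3
     */
    static loff_t pos_encode(unsigned int bucket, unsigned int n,
                             unsigned int shift)
    {
            return ((loff_t)bucket << shift) | n;
    }

    static void pos_decode(loff_t pos, unsigned int shift,
                           unsigned int *bucket, unsigned int *n)
    {
            *bucket = pos >> shift;
            *n = pos & ((1U << shift) - 1);
    }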
|
/net/ipv4/

nexthop.c
    1127  bucket->migrated_time = now;  in nh_res_bucket_set_idle()
    1468  struct nh_res_bucket *bucket;  in nexthop_select_path_res() local
    1475  nh_res_bucket_set_busy(bucket);  in nexthop_select_path_res()
    1699  if (bucket->occupied) {  in nh_res_bucket_unset_nh()
    1702  bucket->occupied = false;  in nh_res_bucket_unset_nh()
    1709  nh_res_bucket_unset_nh(bucket);  in nh_res_bucket_set_nh()
    1711  bucket->occupied = true;  in nh_res_bucket_set_nh()
    1724  if (!bucket->occupied) {  in nh_res_bucket_should_migrate()
    1866  bucket,  in nh_res_table_upkeep()
    3683  struct nh_res_bucket *bucket;  in rtm_dump_nexthop_bucket_nh() local
    [all …]
|
tcp_ipv4.c
    2623  for (; st->bucket <= hinfo->lhash2_mask; st->bucket++) {
    2668  ++st->bucket;
    2677  st->bucket = 0;
    2704  for (; st->bucket <= hinfo->ehash_mask; ++st->bucket) {
    2744  ++st->bucket;
    2753  st->bucket = 0;
    2783  int bucket = st->bucket;  local
    2793  while (offset-- && rc && bucket == st->bucket)
    2804  while (offset-- && rc && bucket == st->bucket)
    3117  ++st->bucket;
    [all …]
|
udp.c
    3285  for (state->bucket = start; state->bucket <= udptable->mask;  in udp_get_first()
    3286  ++state->bucket) {  in udp_get_first()
    3338  state->bucket = MAX_UDP_PORTS;  in udp_seq_start()
    3365  if (state->bucket <= udptable->mask)  in udp_seq_stop()
    3372  int bucket)  in udp4_format_sock() argument
    3413  int bucket __aligned(8);
    3461  resume_bucket = state->bucket;  in bpf_iter_udp_batch()
    3465  state->bucket++;  in bpf_iter_udp_batch()
    3483  for (; state->bucket <= udptable->mask; state->bucket++) {  in bpf_iter_udp_batch()
    3606  ctx.bucket = bucket;  in udp_prog_seq_show()
    [all …]
|
ping.c
    1021  for (state->bucket = start; state->bucket < PING_HTABLE_SIZE;  in ping_get_first()
    1022  ++state->bucket) {  in ping_get_first()
    1025  hslot = &ping_table.hash[state->bucket];  in ping_get_first()
    1051  return ping_get_first(seq, state->bucket + 1);  in ping_get_next()
    1069  state->bucket = 0;  in ping_seq_start()
    1105  int bucket)  in ping_v4_format_sock() argument
    1115  bucket, src, srcp, dest, destp, sp->sk_state,  in ping_v4_format_sock()
    1135  ping_v4_format_sock(v, seq, state->bucket);  in ping_v4_seq_show()
|
raw.c
    958   static struct sock *raw_get_first(struct seq_file *seq, int bucket)  in raw_get_first() argument
    965   for (state->bucket = bucket; state->bucket < RAW_HTABLE_SIZE;  in raw_get_first()
    966   ++state->bucket) {  in raw_get_first()
    967   hlist = &h->ht[state->bucket];  in raw_get_first()
    985   return raw_get_first(seq, state->bucket + 1);  in raw_get_next()
    1058  raw_sock_seq_show(seq, v, raw_seq_private(seq)->bucket);  in raw_seq_show()
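
raw_get_first()/raw_get_next() are the canonical two-function seq_file walk over a hash table (ping.c above follows the same scheme): get_first() scans forward from a starting bucket for the first populated chain, and get_next() steps along the chain, falling back to get_first(bucket + 1) when the chain ends. The skeleton, with hypothetical my_* types standing in for the raw socket ones:

    #include <linux/list.h>

    #define MY_HTABLE_SIZE 256

    struct my_entry {
            struct hlist_node node;
    };

    struct my_iter {
            int bucket;
    };

    static struct hlist_head my_ht[MY_HTABLE_SIZE];

    static struct my_entry *my_get_first(struct my_iter *state, int bucket)
    {
            for (state->bucket = bucket; state->bucket < MY_HTABLE_SIZE;
                 ++state->bucket) {
                    struct my_entry *e;

                    hlist_for_each_entry(e, &my_ht[state->bucket], node)
                            return e;       /* head of this non-empty chain */
            }
            return NULL;                    /* table exhausted */
    }

    static struct my_entry *my_get_next(struct my_iter *state,
                                        struct my_entry *e)
    {
            if (e->node.next)               /* more entries in this chain */
                    return hlist_entry(e->node.next, struct my_entry, node);
            return my_get_first(state, state->bucket + 1);
    }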
|
/net/atm/

proc.c
    69   int bucket;  member
    78   static int __vcc_walk(struct sock **sock, int family, int *bucket, loff_t l)  in __vcc_walk() argument
    83   for (*bucket = 0; *bucket < VCC_HTABLE_SIZE; ++*bucket) {  in __vcc_walk()
    84   struct hlist_head *head = &vcc_hash[*bucket];  in __vcc_walk()
    98   if (!sk && ++*bucket < VCC_HTABLE_SIZE) {  in __vcc_walk()
    99   sk = sk_head(&vcc_hash[*bucket]);  in __vcc_walk()
    113  return __vcc_walk(&state->sk, family, &state->bucket, l) ?  in vcc_walk()
|
/net/core/

sock_map.c
    897   struct bpf_shtab_bucket *bucket;  in __sock_hash_lookup_elem() local
    921   struct bpf_shtab_bucket *bucket;  in sock_hash_delete_from_link() local
    930   spin_lock_bh(&bucket->lock);  in sock_hash_delete_from_link()
    938   spin_unlock_bh(&bucket->lock);  in sock_hash_delete_from_link()
    952   spin_lock_bh(&bucket->lock);  in sock_hash_delete_elem()
    960   spin_unlock_bh(&bucket->lock);  in sock_hash_delete_elem()
    1020  spin_lock_bh(&bucket->lock);  in sock_hash_update_common()
    1046  spin_unlock_bh(&bucket->lock);  in sock_hash_update_common()
    1049  spin_unlock_bh(&bucket->lock);  in sock_hash_update_common()
    1167  spin_lock_bh(&bucket->lock);  in sock_hash_free()
    [all …]
|
/net/ipv6/

route.c
    1522  if (!bucket)  in rt6_exception_remove_oldest()
    1665  if (bucket) {  in fib6_nh_get_excptn_bucket()
    1792  if (!bucket)  in fib6_nh_flush_exceptions()
    1806  bucket++;  in fib6_nh_flush_exceptions()
    2068  if (!bucket)  in rt6_exceptions_update_pmtu()
    2083  bucket++;  in rt6_exceptions_update_pmtu()
    2102  if (bucket) {  in fib6_nh_exceptions_clean_tohost()
    2115  bucket++;  in fib6_nh_exceptions_clean_tohost()
    2179  if (bucket) {  in fib6_nh_age_exceptions()
    2186  bucket++;  in fib6_nh_age_exceptions()
    [all …]
|
/net/netfilter/

xt_hashlimit.c
    1058  unsigned int *bucket;  in dl_seq_start() local
    1065  if (!bucket)  in dl_seq_start()
    1068  *bucket = *pos;  in dl_seq_start()
    1069  return bucket;  in dl_seq_start()
    1075  unsigned int *bucket = v;  in dl_seq_next() local
    1077  *pos = ++(*bucket);  in dl_seq_next()
    1082  return bucket;  in dl_seq_next()
    1089  unsigned int *bucket = v;  in dl_seq_stop() local
    1091  if (!IS_ERR(bucket))  in dl_seq_stop()
    1092  kfree(bucket);  in dl_seq_stop()
    [all …]
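
One quirk worth noting in dl_seq_start(): the iterator state is not kept in a private struct but kmalloc()ed as a bare unsigned int holding the bucket number; ->next() increments it, and ->stop() frees it behind an IS_ERR() check, since ->start() may return an ERR_PTR. In outline (a sketch of the idiom; MY_HASH_SIZE is a hypothetical table size):

    #include <linux/seq_file.h>
    #include <linux/slab.h>
    #include <linux/err.h>

    #define MY_HASH_SIZE 64   /* hypothetical */

    static void *my_seq_start(struct seq_file *s, loff_t *pos)
    {
            unsigned int *bucket = kmalloc(sizeof(*bucket), GFP_ATOMIC);

            if (!bucket)
                    return ERR_PTR(-ENOMEM);
            *bucket = *pos;                 /* cursor lives on the heap */
            return bucket;
    }

    static void *my_seq_next(struct seq_file *s, void *v, loff_t *pos)
    {
            unsigned int *bucket = v;

            *pos = ++(*bucket);
            if (*bucket >= MY_HASH_SIZE) {
                    kfree(v);               /* done: ->stop() then sees NULL */
                    return NULL;
            }
            return bucket;
    }

    static void my_seq_stop(struct seq_file *s, void *v)
    {
            unsigned int *bucket = v;

            if (!IS_ERR(bucket))            /* ->start() may return ERR_PTR;
                                             * kfree(NULL) is a no-op */
                    kfree(bucket);
    }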
|
xt_recent.c
    476  unsigned int bucket;  member
    489  for (st->bucket = 0; st->bucket < ip_list_hash_size; st->bucket++)  in recent_seq_start()
    490  list_for_each_entry(e, &t->iphash[st->bucket], list)  in recent_seq_start()
    504  while (head == &t->iphash[st->bucket]) {  in recent_seq_next()
    505  if (++st->bucket >= ip_list_hash_size)  in recent_seq_next()
    507  head = t->iphash[st->bucket].next;  in recent_seq_next()
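
recent_seq_next() shows the "skip empty chains" step for circular list_head tables: when the iterator walks off the end of the current chain (head wraps around to the bucket head itself), the bucket index is bumped until a non-empty chain turns up or the table is exhausted. Condensed into a helper; st->bucket and the iphash array mirror the hits above, the type names are hypothetical:

    #include <linux/list.h>

    struct recent_iter_sketch {
            unsigned int bucket;
    };

    /* Returns the next node, or NULL once every bucket has been drained. */
    static struct list_head *next_nonempty(struct recent_iter_sketch *st,
                                           struct list_head *iphash,
                                           unsigned int hash_size,
                                           struct list_head *head)
    {
            while (head == &iphash[st->bucket]) {   /* end of this chain */
                    if (++st->bucket >= hash_size)
                            return NULL;            /* table exhausted */
                    head = iphash[st->bucket].next; /* may itself be empty */
            }
            return head;
    }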
|
nf_conntrack_expect.c
    567  unsigned int bucket;  member
    575  for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {  in ct_expect_get_first()
    576  n = rcu_dereference(hlist_first_rcu(&nf_ct_expect_hash[st->bucket]));  in ct_expect_get_first()
    590  if (++st->bucket >= nf_ct_expect_hsize)  in ct_expect_get_next()
    592  head = rcu_dereference(hlist_first_rcu(&nf_ct_expect_hash[st->bucket]));  in ct_expect_get_next()
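
ct_expect_get_first() does the same bucket scan locklessly: each chain head is loaded with rcu_dereference(hlist_first_rcu(...)), so the /proc dump can run while the table changes under it. Skeleton of that access pattern; the caller must hold rcu_read_lock(), and the my_* names are hypothetical:

    #include <linux/rculist.h>

    #define MY_HSIZE 256
    static struct hlist_head my_hash[MY_HSIZE];

    struct my_ct_iter {
            unsigned int bucket;
    };

    /* Caller holds rcu_read_lock(). */
    static struct hlist_node *my_get_first(struct my_ct_iter *st)
    {
            struct hlist_node *n;

            for (st->bucket = 0; st->bucket < MY_HSIZE; st->bucket++) {
                    n = rcu_dereference(hlist_first_rcu(&my_hash[st->bucket]));
                    if (n)                  /* first non-empty chain */
                            return n;
            }
            return NULL;
    }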
|
nf_conntrack_core.c
    741   unsigned int bucket, hsize;  in ____nf_conntrack_find() local
    745   bucket = reciprocal_scale(hash, hsize);  in ____nf_conntrack_find()
    764   if (get_nulls_value(n) != bucket) {  in ____nf_conntrack_find()
    1468  unsigned int i, bucket;  in early_drop() local
    1477  bucket = reciprocal_scale(hash, hsize);  in early_drop()
    1479  bucket = (bucket + 1) % hsize;  in early_drop()
    1481  drops = early_drop_list(net, &ct_hash[bucket]);  in early_drop()
    2317  for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {  in get_next_corpse()
    2365  unsigned int bucket = 0;  in nf_ct_iterate_cleanup() local
    2547  int i, bucket;  in nf_conntrack_hash_resize() local
    [all …]
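
Two details stand out in the conntrack hits: the hash is mapped to a bucket with reciprocal_scale() rather than a modulo, and the nulls-list end marker encodes the bucket number so a lookup can detect having been moved to another chain during a concurrent resize (the get_nulls_value(n) != bucket check). reciprocal_scale(val, n) computes val * n / 2^32, a multiply-and-shift that maps a full 32-bit hash onto [0, n) without a division. A self-contained illustration using the same definition as the kernel's helper:

    #include <stdint.h>
    #include <stdio.h>

    /* Same arithmetic as the kernel's reciprocal_scale(). */
    static uint32_t reciprocal_scale(uint32_t val, uint32_t ep_ro)
    {
            return (uint32_t)(((uint64_t)val * ep_ro) >> 32);
    }

    int main(void)
    {
            uint32_t hsize = 16384;   /* hash table size */

            /* 0xdeadbeef * 16384 / 2^32 = 14251 -> bucket index */
            printf("%u\n", reciprocal_scale(0xdeadbeefu, hsize));
            return 0;
    }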
|
/net/netfilter/ipvs/

ip_vs_lblc.c
    103  struct hlist_head bucket[IP_VS_LBLC_TAB_SIZE]; /* hash bucket */  member
    171  hlist_add_head_rcu(&en->list, &tbl->bucket[hash]);  in ip_vs_lblc_hash()
    184  hlist_for_each_entry_rcu(en, &tbl->bucket[hash], list)  in ip_vs_lblc_get()
    238  hlist_for_each_entry_safe(en, next, &tbl->bucket[i], list) {  in ip_vs_lblc_flush()
    267  hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {  in ip_vs_lblc_full_check()
    324  hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {  in ip_vs_lblc_check_expire()
    363  INIT_HLIST_HEAD(&tbl->bucket[i]);  in ip_vs_lblc_init_svc()
|
ip_vs_lblcr.c
    273  struct hlist_head bucket[IP_VS_LBLCR_TAB_SIZE]; /* hash bucket */  member
    334  hlist_add_head_rcu(&en->list, &tbl->bucket[hash]);  in ip_vs_lblcr_hash()
    347  hlist_for_each_entry_rcu(en, &tbl->bucket[hash], list)  in ip_vs_lblcr_get()
    404  hlist_for_each_entry_safe(en, next, &tbl->bucket[i], list) {  in ip_vs_lblcr_flush()
    432  hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {  in ip_vs_lblcr_full_check()
    488  hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {  in ip_vs_lblcr_check_expire()
    526  INIT_HLIST_HEAD(&tbl->bucket[i]);  in ip_vs_lblcr_init_svc()
|
/net/batman-adv/

bridge_loop_avoidance.c
    2238  int bucket = cb->args[0];  in batadv_bla_claim_dump() local
    2255  while (bucket < hash->size) {  in batadv_bla_claim_dump()
    2257  hash, bucket, &idx))  in batadv_bla_claim_dump()
    2259  bucket++;  in batadv_bla_claim_dump()
    2262  cb->args[0] = bucket;  in batadv_bla_claim_dump()
    2358  unsigned int bucket, int *idx_skip)  in batadv_bla_backbone_dump_bucket() argument
    2399  int bucket = cb->args[0];  in batadv_bla_backbone_dump() local
    2416  while (bucket < hash->size) {  in batadv_bla_backbone_dump()
    2418  hash, bucket, &idx))  in batadv_bla_backbone_dump()
    2420  bucket++;  in batadv_bla_backbone_dump()
    [all …]
|
distributed-arp-table.c
    900  struct batadv_hashtable *hash, unsigned int bucket,  in batadv_dat_cache_dump_bucket() argument
    906  spin_lock_bh(&hash->list_locks[bucket]);  in batadv_dat_cache_dump_bucket()
    909  hlist_for_each_entry(dat_entry, &hash->table[bucket], hash_entry) {  in batadv_dat_cache_dump_bucket()
    914  spin_unlock_bh(&hash->list_locks[bucket]);  in batadv_dat_cache_dump_bucket()
    923  spin_unlock_bh(&hash->list_locks[bucket]);  in batadv_dat_cache_dump_bucket()
    942  int bucket = cb->args[0];  in batadv_dat_cache_dump() local
    959  while (bucket < hash->size) {  in batadv_dat_cache_dump()
    960  if (batadv_dat_cache_dump_bucket(msg, portid, cb, hash, bucket,  in batadv_dat_cache_dump()
    964  bucket++;  in batadv_dat_cache_dump()
    968  cb->args[0] = bucket;  in batadv_dat_cache_dump()
|
multicast.c
    2025  unsigned int bucket, long *idx_skip)  in batadv_mcast_flags_dump_bucket() argument
    2030  spin_lock_bh(&hash->list_locks[bucket]);  in batadv_mcast_flags_dump_bucket()
    2033  hlist_for_each_entry(orig_node, &hash->table[bucket], hash_entry) {  in batadv_mcast_flags_dump_bucket()
    2042  spin_unlock_bh(&hash->list_locks[bucket]);  in batadv_mcast_flags_dump_bucket()
    2051  spin_unlock_bh(&hash->list_locks[bucket]);  in batadv_mcast_flags_dump_bucket()
    2070  struct batadv_priv *bat_priv, long *bucket, long *idx)  in __batadv_mcast_flags_dump() argument
    2073  long bucket_tmp = *bucket;  in __batadv_mcast_flags_dump()
    2085  *bucket = bucket_tmp;  in __batadv_mcast_flags_dump()
    2143  long *bucket = &cb->args[0];  in batadv_mcast_flags_dump() local
    2152  ret = __batadv_mcast_flags_dump(msg, portid, cb, bat_priv, bucket, idx);  in batadv_mcast_flags_dump()
|
/net/openvswitch/

vport.c
    97   struct hlist_head *bucket = hash_bucket(net, name);  in ovs_vport_locate() local
    100  hlist_for_each_entry_rcu(vport, bucket, hash_node,  in ovs_vport_locate()
    212  struct hlist_head *bucket;  in ovs_vport_add() local
    223  bucket = hash_bucket(ovs_dp_get_net(vport->dp),  in ovs_vport_add()
    225  hlist_add_head_rcu(&vport->hash_node, bucket);  in ovs_vport_add()
|
meter.c
    392  band->bucket = band->burst_size * 1000ULL;  in dp_meter_create()
    393  band_max_delta_t = div_u64(band->bucket, band->rate);  in dp_meter_create()
    653  band->bucket += delta_ms * band->rate;  in ovs_meter_execute()
    654  if (band->bucket > max_bucket_size)  in ovs_meter_execute()
    655  band->bucket = max_bucket_size;  in ovs_meter_execute()
    657  if (band->bucket >= cost) {  in ovs_meter_execute()
    658  band->bucket -= cost;  in ovs_meter_execute()
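
The meter.c lines are a textbook token bucket: the bucket is credited rate tokens per millisecond elapsed, clamped to the burst ceiling, and a packet conforms (bucket -= cost) only if enough credit has accumulated; otherwise the band's action fires. The arithmetic in miniature; field names echo the hits, while the exact units (the `* 1000ULL` initialisation suggests thousandths of a packet or kbit) are an assumption here:

    #include <stdint.h>
    #include <stdbool.h>

    struct band_sketch {
            uint64_t bucket;      /* current credit */
            uint64_t rate;        /* credit added per ms */
            uint64_t max_bucket;  /* burst ceiling */
    };

    /* Returns true if the packet conforms (credit was available). */
    static bool band_admit(struct band_sketch *band, uint64_t delta_ms,
                           uint64_t cost)
    {
            band->bucket += delta_ms * band->rate;   /* refill for elapsed time */
            if (band->bucket > band->max_bucket)
                    band->bucket = band->max_bucket; /* cap at burst size */

            if (band->bucket >= cost) {
                    band->bucket -= cost;            /* consume credit */
                    return true;
            }
            return false;                            /* over rate: band fires */
    }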
|
/net/llc/

llc_proc.c
    67  static struct sock *laddr_hash_next(struct llc_sap *sap, int bucket)  in laddr_hash_next() argument
    72  while (++bucket < LLC_SK_LADDR_HASH_ENTRIES)  in laddr_hash_next()
    73  sk_nulls_for_each(sk, node, &sap->sk_laddr_hash[bucket])  in laddr_hash_next()
|