| /net/netfilter/ipvs/ |
| A D | ip_vs_lblc.c |
      278  tbl->rover = j;    in ip_vs_lblc_full_check()
      295  struct ip_vs_lblc_table *tbl = timer_container_of(tbl, t,    in ip_vs_lblc_check_expire()  local
      311  if (atomic_read(&tbl->entries) <= tbl->max_size) {    in ip_vs_lblc_check_expire()
      312  tbl->counter++;    in ip_vs_lblc_check_expire()
      316  goal = (atomic_read(&tbl->entries) - tbl->max_size)*4/3;    in ip_vs_lblc_check_expire()
      336  tbl->rover = j;    in ip_vs_lblc_check_expire()
      351  tbl = kmalloc(sizeof(*tbl), GFP_KERNEL);    in ip_vs_lblc_init_svc()
      352  if (tbl == NULL)    in ip_vs_lblc_init_svc()
      366  tbl->rover = 0;    in ip_vs_lblc_init_svc()
      369  tbl->svc = svc;    in ip_vs_lblc_init_svc()
      [all …]
|
| A D | ip_vs_lblcr.c |
      442  tbl->rover = j;    in ip_vs_lblcr_full_check()
      459  struct ip_vs_lblcr_table *tbl = timer_container_of(tbl, t,    in ip_vs_lblcr_check_expire()  local
      475  if (atomic_read(&tbl->entries) <= tbl->max_size) {    in ip_vs_lblcr_check_expire()
      476  tbl->counter++;    in ip_vs_lblcr_check_expire()
      480  goal = (atomic_read(&tbl->entries) - tbl->max_size)*4/3;    in ip_vs_lblcr_check_expire()
      500  tbl->rover = j;    in ip_vs_lblcr_check_expire()
      514  tbl = kmalloc(sizeof(*tbl), GFP_KERNEL);    in ip_vs_lblcr_init_svc()
      515  if (tbl == NULL)    in ip_vs_lblcr_init_svc()
      529  tbl->rover = 0;    in ip_vs_lblcr_init_svc()
      532  tbl->svc = svc;    in ip_vs_lblcr_init_svc()
      [all …]
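Both LBLC and LBLCR trim their hash tables the same way: a periodic timer recovers the table with timer_container_of(), skips the sweep while the entry count is at or under max_size, otherwise works toward a goal of (entries - max_size) * 4/3 evictions, and stores the last bucket in tbl->rover so the next run resumes where this one stopped. A minimal sketch of that shape, with hypothetical my_* names standing in for the ip_vs_lblc types:

#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/timer.h>

#define MY_TAB_SIZE		256		/* hypothetical bucket count */
#define MY_CHECK_INTERVAL	(60 * HZ)

struct my_table {
	struct hlist_head buckets[MY_TAB_SIZE];
	struct timer_list periodic_timer;
	atomic_t entries;
	int max_size;
	int rover;	/* next bucket to sweep, spreads the work out */
	int counter;	/* sweeps skipped while under the size limit */
};

/* Hypothetical per-bucket eviction; returns the number of entries freed. */
static int my_expire_bucket(struct my_table *tbl, int bucket)
{
	/* walk tbl->buckets[bucket], drop entries idle for too long ... */
	return 0;
}

static void my_check_expire(struct timer_list *t)
{
	struct my_table *tbl = timer_container_of(tbl, t, periodic_timer);
	int goal, i, j;

	if (atomic_read(&tbl->entries) <= tbl->max_size) {
		tbl->counter++;		/* under the limit, nothing to do */
		goto out;
	}

	/* Aim for a third more than the overshoot, as the schedulers do. */
	goal = (atomic_read(&tbl->entries) - tbl->max_size) * 4 / 3;

	for (i = 0, j = tbl->rover; i < MY_TAB_SIZE && goal > 0; i++) {
		j = (j + 1) & (MY_TAB_SIZE - 1);
		goal -= my_expire_bucket(tbl, j);
	}
	tbl->rover = j;			/* resume here on the next tick */
out:
	mod_timer(&tbl->periodic_timer, jiffies + MY_CHECK_INTERVAL);
}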
|
| A D | ip_vs_ctl.c |
      4268  struct ctl_table *tbl;    in ip_vs_control_net_init_sysctl()  local
      4284  if (tbl == NULL)    in ip_vs_control_net_init_sysctl()
      4287  tbl = vs_vars;    in ip_vs_control_net_init_sysctl()
      4314  tbl[idx].mode = 0444;    in ip_vs_control_net_init_sysctl()
      4319  tbl[idx].mode = 0444;    in ip_vs_control_net_init_sysctl()
      4330  tbl[idx].extra2 = ipvs;    in ip_vs_control_net_init_sysctl()
      4347  tbl[idx].mode = 0444;    in ip_vs_control_net_init_sysctl()
      4348  tbl[idx].extra2 = ipvs;    in ip_vs_control_net_init_sysctl()
      4353  tbl[idx].mode = 0444;    in ip_vs_control_net_init_sysctl()
      4359  tbl[idx].mode = 0444;    in ip_vs_control_net_init_sysctl()
      [all …]
|
| /net/core/ |
| A D | neighbour.c |
      538  n->tbl = tbl;    in neigh_alloc()
      811  if (tbl->pconstructor && tbl->pconstructor(n)) {    in pneigh_create()
      1750  p->tbl = tbl;    in neigh_parms_alloc()
      1810  list_add(&tbl->parms.list, &tbl->parms_list);    in neigh_table_init()
      1832  if (!tbl->nht || !tbl->phash_buckets)    in neigh_table_init()
      2398  if (!tbl)    in neightbl_set()
      3319  struct neigh_table *tbl = state->tbl;    in pneigh_get_first()  local
      3343  struct neigh_table *tbl = state->tbl;    in pneigh_get_next()  local
      3401  state->tbl = tbl;    in neigh_seq_start()
      3445  struct neigh_table *tbl = state->tbl;    in neigh_seq_stop()  local
      [all …]
|
| A D | sysctl_net_core.c |
      292  struct ctl_table tbl = {    in set_default_qdisc()  local
      732  struct ctl_table *tbl;    in sysctl_core_net_init()  local
      734  tbl = netns_core_table;    in sysctl_core_net_init()
      737  tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);    in sysctl_core_net_init()
      738  if (tbl == NULL)    in sysctl_core_net_init()
      748  tbl[i].mode &= ~0222;    in sysctl_core_net_init()
      758  if (tbl != netns_core_table)    in sysctl_core_net_init()
      759  kfree(tbl);    in sysctl_core_net_init()
      766  const struct ctl_table *tbl;    in sysctl_core_net_exit()  local
      770  BUG_ON(tbl == netns_core_table);    in sysctl_core_net_exit()
      [all …]
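sysctl_core_net_init() is the canonical per-netns sysctl idiom, and the route.c, devinet.c and rds/tcp.c entries further down follow the same flow: start from the static template, kmemdup() a private copy for any namespace other than init_net, clear the write bits (mode &= ~0222) when the namespace is unprivileged, register via register_net_sysctl_sz(), and on failure or exit free only the copy, never the shared template (hence the BUG_ON). A condensed sketch under those assumptions, with hypothetical my_* names:

#include <linux/slab.h>
#include <linux/sysctl.h>
#include <net/net_namespace.h>

static int my_value;	/* real code repoints .data at per-netns storage,
			 * as devinet.c does at line 2777 */

/* Hypothetical template; the real ones are netns_core_table, vs_vars... */
static struct ctl_table my_netns_table[] = {
	{ .procname = "my_knob", .data = &my_value,
	  .maxlen = sizeof(int), .mode = 0644,
	  .proc_handler = proc_dointvec },
};

/* Hypothetical storage; real code keeps the header in pernet state. */
static struct ctl_table_header *my_hdr;

static int my_net_init(struct net *net)
{
	struct ctl_table *tbl = my_netns_table;
	size_t table_size = ARRAY_SIZE(my_netns_table);
	int i;

	if (!net_eq(net, &init_net)) {
		/* Every other namespace works on its own private copy. */
		tbl = kmemdup(tbl, sizeof(my_netns_table), GFP_KERNEL);
		if (tbl == NULL)
			return -ENOMEM;

		/* Unprivileged namespaces see the files read-only. */
		if (net->user_ns != &init_user_ns)
			for (i = 0; i < table_size; i++)
				tbl[i].mode &= ~0222;
	}

	my_hdr = register_net_sysctl_sz(net, "net/my", tbl, table_size);
	if (!my_hdr)
		goto err_reg;
	return 0;

err_reg:
	if (tbl != my_netns_table)	/* never free the shared template */
		kfree(tbl);
	return -ENOMEM;
}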
|
| A D | ieee8021q_helpers.c |
      11  #define TT_MAP_SIZE_OK(tbl) \    argument
      12  	compiletime_assert(ARRAY_SIZE(tbl) == IEEE8021Q_TT_MAX, \
      13  			   #tbl " size mismatch")
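TT_MAP_SIZE_OK() is a compile-time guard: compiletime_assert() turns a mis-sized traffic-type map into a build failure instead of an out-of-bounds access at runtime. Hypothetical usage (the map contents here are illustrative only):

#include <linux/ieee8021q.h>

/* Hypothetical priority map; must cover every traffic type. */
static const u8 my_tt_map[IEEE8021Q_TT_MAX] = { 0, 1, 2, 3, 4, 5, 6, 7 };

static void my_map_check(void)
{
	/* Becomes a build error ("my_tt_map size mismatch") if the
	 * array and IEEE8021Q_TT_MAX ever drift apart. */
	TT_MAP_SIZE_OK(my_tt_map);
}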
|
| /net/mac80211/ |
| A D | mesh_pathtbl.c |
      82  struct mesh_table *tbl = tblptr;    in mesh_path_rht_free()  local
      84  mesh_path_free_rcu(tbl, mpath);    in mesh_path_rht_free()
      91  atomic_set(&tbl->entries, 0);    in mesh_table_init()
      339  struct mesh_table *tbl;    in mesh_path_add_gate()  local
      354  spin_lock(&tbl->gates_lock);    in mesh_path_add_gate()
      683  struct mesh_table *tbl;    in mesh_path_add()  local
      725  struct mesh_table *tbl;    in mpp_path_add()  local
      802  mesh_gate_del(tbl, mpath);    in mesh_path_free_rcu()
      806  atomic_dec(&tbl->entries);    in mesh_path_free_rcu()
      966  struct mesh_table *tbl;    in mesh_path_send_to_gates()  local
      [all …]
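mesh_path_rht_free() has the shape of a rhashtable_free_and_destroy() callback: the element arrives as ptr, the owning table as the opaque second argument, and the callback drops the table's entry count before freeing. A hedged sketch of that teardown with hypothetical my_* types:

#include <linux/atomic.h>
#include <linux/rhashtable.h>
#include <linux/slab.h>

/* Hypothetical stand-ins for mesh_table / mesh_path. */
struct my_table {
	struct rhashtable rhead;
	atomic_t entries;
};

struct my_path {
	struct rhash_head rhash;
	/* ... payload ... */
};

/* Invoked once per element left in the table at teardown time. */
static void my_path_rht_free(void *ptr, void *tblptr)
{
	struct my_path *mpath = ptr;
	struct my_table *tbl = tblptr;

	atomic_dec(&tbl->entries);
	kfree(mpath);
}

static void my_table_free(struct my_table *tbl)
{
	rhashtable_free_and_destroy(&tbl->rhead, my_path_rht_free, tbl);
	kfree(tbl);
}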
|
| /net/netfilter/ |
| A D | xt_repldata.h |
      24  } *tbl; \
      28  tbl = kzalloc(term_offset + sizeof(*term), GFP_KERNEL); \
      29  if (tbl == NULL) \
      32  strscpy(tbl->repl.name, info->name); \
      34  tbl->repl.valid_hooks = hook_mask; \
      35  tbl->repl.num_entries = nhooks + 1; \
      36  tbl->repl.size = nhooks * sizeof(struct type##_standard) + \
      41  tbl->repl.hook_entry[hooknum] = bytes; \
      42  tbl->repl.underflow[hooknum] = bytes; \
      43  tbl->entries[i++] = (struct type##_standard) \
      [all …]
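The trailing backslashes mark this as a macro body: xt_alloc_initial_table() stamps out an initial replacement table with one struct type##_standard verdict per enabled hook, then a terminating entry. Each xtables family instantiates the template with its own type prefix; if memory serves (shape only, hedged), the IPv4 side wraps it roughly as:

/* net/ipv4/netfilter/ip_tables.c, approximate shape */
void *ipt_alloc_initial_table(const struct xt_table *info)
{
	return xt_alloc_initial_table(ipt, IPT);
}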
|
| /net/sctp/ |
| A D | sysctl.c |
      392  struct ctl_table tbl;    in proc_sctp_do_hmac_alg()  local
      401  tbl.data = tmp;    in proc_sctp_do_hmac_alg()
      405  tbl.maxlen = strlen(tbl.data);    in proc_sctp_do_hmac_alg()
      439  struct ctl_table tbl;    in proc_sctp_do_rto_min()  local
      446  tbl.data = &new_value;    in proc_sctp_do_rto_min()
      467  struct ctl_table tbl;    in proc_sctp_do_rto_max()  local
      474  tbl.data = &new_value;    in proc_sctp_do_rto_max()
      503  struct ctl_table tbl;    in proc_sctp_do_auth()  local
      510  tbl.data = &new_value;    in proc_sctp_do_auth()
      536  struct ctl_table tbl;    in proc_sctp_do_udp_port()  local
      [all …]
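Every one of these SCTP handlers uses the same trick: build a ctl_table on the stack, point tbl.data at a local scratch variable, hand the copy to the generic proc helper, and commit to the live setting only after validating the parsed value. The ipv4 and mptcp handlers below do the same with kmalloc()'d buffers for string output. A sketch of the integer variant, with hypothetical names and bounds:

#include <linux/string.h>
#include <linux/sysctl.h>

/* Hypothetical handler; the real ones are proc_sctp_do_rto_min() etc. */
static int proc_my_do_range(const struct ctl_table *ctl, int write,
			    void *buffer, size_t *lenp, loff_t *ppos)
{
	struct ctl_table tbl;
	int new_value;
	int ret;

	/* Run the generic helper against a stack copy whose .data
	 * points at local scratch, not at the live variable. */
	memset(&tbl, 0, sizeof(tbl));
	tbl.maxlen = sizeof(int);
	tbl.data = &new_value;
	new_value = *(int *)ctl->data;

	ret = proc_dointvec(&tbl, write, buffer, lenp, ppos);
	if (ret || !write)
		return ret;

	if (new_value < 1 || new_value > 86400)	/* hypothetical bounds */
		return -EINVAL;

	*(int *)ctl->data = new_value;	/* commit only after validation */
	return 0;
}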
|
| /net/ipv4/ |
| A D | sysctl_net_ipv4.c |
      242  tbl.data = kmalloc(tbl.maxlen, GFP_USER);    in proc_tcp_available_congestion_control()
      243  if (!tbl.data)    in proc_tcp_available_congestion_control()
      258  tbl.data = kmalloc(tbl.maxlen, GFP_USER);    in proc_allowed_congestion_control()
      259  if (!tbl.data)    in proc_allowed_congestion_control()
      262  tcp_get_allowed_congestion_control(tbl.data, tbl.maxlen);    in proc_allowed_congestion_control()
      304  tbl.data = kmalloc(tbl.maxlen, GFP_KERNEL);    in proc_tcp_fastopen_key()
      318  off += snprintf(tbl.data + off, tbl.maxlen - off,    in proc_tcp_fastopen_key()
      329  off += snprintf(tbl.data + off, tbl.maxlen - off, ",");    in proc_tcp_fastopen_key()
      381  tbl.data = kmalloc(tbl.maxlen, GFP_USER);    in proc_tcp_available_ulp()
      408  memset(&tbl, 0, sizeof(tbl));    in proc_tcp_ehash_entries()
      [all …]
|
| A D | udp_diag.c |
      28  static int udp_dump_one(struct udp_table *tbl,    in udp_dump_one()  argument
      44  req->id.idiag_if, 0, tbl, NULL);    in udp_dump_one()
      52  req->id.idiag_if, 0, tbl, NULL);    in udp_dump_one()
      169  struct udp_table *tbl)    in __udp_diag_destroy()  argument
      181  req->id.idiag_if, 0, tbl, NULL);    in __udp_diag_destroy()
      189  req->id.idiag_if, 0, tbl, NULL);    in __udp_diag_destroy()
      197  req->id.idiag_if, 0, tbl, NULL);    in __udp_diag_destroy()
|
| A D | fib_rules.c |
      115  struct fib_table *tbl;    in fib4_rule_action()  local
      136  tbl = fib_get_table(rule->fr_net, tb_id);    in fib4_rule_action()
      137  if (tbl)    in fib4_rule_action()
      138  err = fib_table_lookup(tbl, &flp->u.ip4,    in fib4_rule_action()
|
| A D | route.c |
      3600  struct ctl_table *tbl;    in sysctl_route_net_init()  local
      3603  tbl = ipv4_route_netns_table;    in sysctl_route_net_init()
      3607  tbl = kmemdup(tbl, sizeof(ipv4_route_netns_table), GFP_KERNEL);    in sysctl_route_net_init()
      3608  if (!tbl)    in sysctl_route_net_init()
      3623  tbl[0].extra1 = net;    in sysctl_route_net_init()
      3626  tbl, table_size);    in sysctl_route_net_init()
      3632  if (tbl != ipv4_route_netns_table)    in sysctl_route_net_init()
      3633  kfree(tbl);    in sysctl_route_net_init()
      3640  const struct ctl_table *tbl;    in sysctl_route_net_exit()  local
      3644  BUG_ON(tbl == ipv4_route_netns_table);    in sysctl_route_net_exit()
      [all …]
|
| A D | devinet.c |
      2751  struct ctl_table *tbl;    in devinet_init_net()  local
      2773  tbl = kmemdup(ctl_forward_entry, sizeof(ctl_forward_entry), GFP_KERNEL);    in devinet_init_net()
      2774  if (!tbl)    in devinet_init_net()
      2777  tbl[0].data = &all->data[IPV4_DEVCONF_FORWARDING - 1];    in devinet_init_net()
      2778  tbl[0].extra1 = all;    in devinet_init_net()
      2779  tbl[0].extra2 = net;    in devinet_init_net()
      2817  forw_hdr = register_net_sysctl_sz(net, "net/ipv4", tbl,    in devinet_init_net()
      2839  kfree(tbl);    in devinet_init_net()
      2854  const struct ctl_table *tbl;    in devinet_exit_net()  local
      2860  tbl = net->ipv4.forw_hdr->ctl_table_arg;    in devinet_exit_net()
      [all …]
|
| /net/openvswitch/ |
| A D | meter.c |
      110  rcu_assign_pointer(tbl->ti, new_ti);    in dp_meter_instance_realloc()
      149  tbl->count++;    in attach_meter()
      150  if (tbl->count >= tbl->max_meters_allowed) {    in attach_meter()
      155  if (tbl->count >= ti->n_meters &&    in attach_meter()
      165  tbl->count--;    in attach_meter()
      177  ti = rcu_dereference_ovsl(tbl->ti);    in detach_meter()
      180  tbl->count--;    in detach_meter()
      205  tbl->count++;    in detach_meter()
      743  if (!tbl->max_meters_allowed)    in ovs_meters_init()
      746  rcu_assign_pointer(tbl->ti, ti);    in ovs_meters_init()
      [all …]
|
| A D | flow_table.c |
      254  old = ovsl_dereference(tbl->mask_array);    in tbl_mask_array_realloc()
      265  rcu_assign_pointer(tbl->mask_array, new);    in tbl_mask_array_realloc()
      277  err = tbl_mask_array_realloc(tbl, ma->max +    in tbl_mask_array_add_mask()
      282  ma = ovsl_dereference(tbl->mask_array);    in tbl_mask_array_add_mask()
      324  tbl_mask_array_realloc(tbl, ma->max / 2);    in tbl_mask_array_del_mask()
      342  tbl_mask_array_del_mask(tbl, mask);    in flow_mask_remove()
      791  struct table_instance *ti = rcu_dereference(tbl->ti);    in ovs_flow_tbl_lookup_stats()
      824  flow = flow_lookup(tbl, ti, ma, key, n_mask_hit,    in ovs_flow_tbl_lookup_stats()
      993  ma = ovsl_dereference(tbl->mask_array);    in flow_mask_find()
      1011  mask = flow_mask_find(tbl, new);    in flow_mask_insert()
      [all …]
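tbl_mask_array_realloc() is a textbook RCU replace, and dp_meter_instance_realloc() above follows the same steps: allocate the new array, copy the live contents under the update-side lock (ovsl_dereference()), publish with rcu_assign_pointer(), and let the old array go after a grace period. A generic sketch of that swap, with hypothetical my_* types and a plain mutex standing in for ovs_lock:

#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Hypothetical stand-ins for the ovs mask_array / flow_table pair. */
struct my_array {
	struct rcu_head rcu;
	int max;
	int count;
	void *items[];		/* the array as a whole is RCU-protected */
};

struct my_table {
	struct mutex lock;		/* update side; ovs uses ovs_lock */
	struct my_array __rcu *arr;	/* readers use rcu_dereference() */
};

/* Caller holds tbl->lock, mirroring ovsl_dereference()'s contract. */
static int my_array_realloc(struct my_table *tbl, int size)
{
	struct my_array *old, *new;
	int i;

	new = kzalloc(struct_size(new, items, size), GFP_KERNEL);
	if (!new)
		return -ENOMEM;
	new->max = size;

	old = rcu_dereference_protected(tbl->arr,
					lockdep_is_held(&tbl->lock));
	if (old) {
		for (i = 0; i < old->max; i++)
			new->items[i] = old->items[i];
		new->count = old->count;
	}

	rcu_assign_pointer(tbl->arr, new);	/* publish the new array */
	if (old)
		kfree_rcu(old, rcu);	/* free after readers drain */
	return 0;
}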
|
| A D | flow_table.h |
      100  struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
|
| /net/mptcp/ |
| A D | ctrl.c |
      135  struct ctl_table tbl = {    in proc_scheduler()  local
      143  ret = proc_dostring(&tbl, write, buffer, lenp, ppos);    in proc_scheduler()
      157  tbl.data = kmalloc(tbl.maxlen, GFP_USER);    in proc_available_schedulers()
      158  if (!tbl.data)    in proc_available_schedulers()
      162  ret = proc_dostring(&tbl, write, buffer, lenp, ppos);    in proc_available_schedulers()
      163  kfree(tbl.data);    in proc_available_schedulers()
      208  const struct ctl_table tbl = {    in proc_path_manager()  local
      263  tbl.data = kmalloc(tbl.maxlen, GFP_USER);    in proc_available_path_managers()
      264  if (!tbl.data)    in proc_available_path_managers()
      267  mptcp_pm_get_available(tbl.data, MPTCP_PM_BUF_MAX);    in proc_available_path_managers()
      [all …]
|
| /net/rds/ |
| A D | tcp.c |
      544  struct ctl_table *tbl;    in rds_tcp_init_net()  local
      553  tbl = rds_tcp_sysctl_table;    in rds_tcp_init_net()
      555  tbl = kmemdup(rds_tcp_sysctl_table,    in rds_tcp_init_net()
      557  if (!tbl) {    in rds_tcp_init_net()
      561  rtn->ctl_table = tbl;    in rds_tcp_init_net()
      563  tbl[RDS_TCP_SNDBUF].data = &rtn->sndbuf_size;    in rds_tcp_init_net()
      564  tbl[RDS_TCP_RCVBUF].data = &rtn->rcvbuf_size;    in rds_tcp_init_net()
      565  rtn->rds_tcp_sysctl = register_net_sysctl_sz(net, "net/rds/tcp", tbl,    in rds_tcp_init_net()
      599  kfree(tbl);    in rds_tcp_init_net()
|
| /net/netlabel/ |
| A D | netlabel_domainhash.c |
      36  struct list_head *tbl;    member
      146  bkt_list = &netlbl_domhsh_rcu_deref(netlbl_domhsh)->tbl[bkt];    in netlbl_domhsh_search()
      374  hsh_tbl->tbl = kcalloc(hsh_tbl->size,    in netlbl_domhsh_init()
      377  if (hsh_tbl->tbl == NULL) {    in netlbl_domhsh_init()
      382  INIT_LIST_HEAD(&hsh_tbl->tbl[iter]);    in netlbl_domhsh_init()
      438  &rcu_dereference(netlbl_domhsh)->tbl[bkt]);    in netlbl_domhsh_add()
      954  iter_list = &rcu_dereference(netlbl_domhsh)->tbl[iter_bkt];    in netlbl_domhsh_walk()
|
| A D | netlabel_unlabeled.c |
      63  struct list_head *tbl;    member
      209  bkt_list = &netlbl_unlhsh_rcu_deref(netlbl_unlhsh)->tbl[bkt];    in netlbl_unlhsh_search_iface()
      332  &netlbl_unlhsh_rcu_deref(netlbl_unlhsh)->tbl[bkt]);    in netlbl_unlhsh_add_iface()
      1177  iter_list = &rcu_dereference(netlbl_unlhsh)->tbl[iter_bkt];    in netlbl_unlabel_staticlist()
      1420  hsh_tbl->tbl = kcalloc(hsh_tbl->size,    in netlbl_unlabel_init()
      1423  if (hsh_tbl->tbl == NULL) {    in netlbl_unlabel_init()
      1428  INIT_LIST_HEAD(&hsh_tbl->tbl[iter]);    in netlbl_unlabel_init()
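Both NetLabel tables are plain chained hashes built the same way: kcalloc() an array of list_head buckets, INIT_LIST_HEAD() each one, then publish the table for RCU readers that index tbl[bkt]. A minimal sketch of the init step, with hypothetical names:

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Hypothetical stand-in for netlbl_domhsh_tbl / netlbl_unlhsh_tbl. */
struct my_hsh_tbl {
	struct list_head *tbl;	/* kcalloc()'d array of hash buckets */
	u32 size;		/* bucket count */
};

static struct my_hsh_tbl __rcu *my_hsh;

static int my_hsh_init(u32 size)
{
	struct my_hsh_tbl *hsh_tbl;
	u32 iter;

	hsh_tbl = kmalloc(sizeof(*hsh_tbl), GFP_KERNEL);
	if (!hsh_tbl)
		return -ENOMEM;
	hsh_tbl->size = size;
	hsh_tbl->tbl = kcalloc(hsh_tbl->size, sizeof(struct list_head),
			       GFP_KERNEL);
	if (hsh_tbl->tbl == NULL) {
		kfree(hsh_tbl);
		return -ENOMEM;
	}
	for (iter = 0; iter < hsh_tbl->size; iter++)
		INIT_LIST_HEAD(&hsh_tbl->tbl[iter]);

	/* Publish; lookups then walk tbl[bkt] under the RCU read lock. */
	rcu_assign_pointer(my_hsh, hsh_tbl);
	return 0;
}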
|
| /net/netlink/ |
| A D | diag.c |
      93  struct netlink_table *tbl = &nl_table[protocol];    in __netlink_diag_dump()  local
      118  rhashtable_walk_enter(&tbl->hash, hti);    in __netlink_diag_dump()
      157  sk_for_each_bound(sk, &tbl->mc_list) {    in __netlink_diag_dump()
|
| A D | af_netlink.c |
      531  struct netlink_table *tbl = &nl_table[sk->sk_protocol];    in netlink_update_listeners()  local
      536  listeners = nl_deref_protected(tbl->listeners);    in netlink_update_listeners()
      540  for (i = 0; i < NLGRPLONGS(tbl->groups); i++) {    in netlink_update_listeners()
      542  sk_for_each_bound(sk, &tbl->mc_list) {    in netlink_update_listeners()
      2087  struct netlink_table *tbl = &nl_table[sk->sk_protocol];    in __netlink_change_ngroups()  local
      2092  if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) {    in __netlink_change_ngroups()
      2096  old = nl_deref_protected(tbl->listeners);    in __netlink_change_ngroups()
      2097  memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups));    in __netlink_change_ngroups()
      2098  rcu_assign_pointer(tbl->listeners, new);    in __netlink_change_ngroups()
      2102  tbl->groups = groups;    in __netlink_change_ngroups()
      [all …]
|
| /net/bridge/ |
| A D | br_vlan_tunnel.c |
      37  static struct net_bridge_vlan *br_vlan_tunnel_lookup(struct rhashtable *tbl,    in br_vlan_tunnel_lookup()  argument
      40  return rhashtable_lookup_fast(tbl, &tunnel_id,    in br_vlan_tunnel_lookup()
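br_vlan_tunnel_lookup() is just a typed wrapper around rhashtable_lookup_fast(). The general shape, assuming a hypothetical item keyed by a 64-bit tunnel id:

#include <linux/rhashtable.h>

/* Hypothetical item; the bridge keys net_bridge_vlan on its tunnel id. */
struct my_item {
	u64 id;				/* lookup key */
	struct rhash_head node;		/* hash linkage */
};

static const struct rhashtable_params my_params = {
	.key_offset = offsetof(struct my_item, id),
	.key_len = sizeof(u64),
	.head_offset = offsetof(struct my_item, node),
	.automatic_shrinking = true,
};

/* rhashtable_lookup_fast() takes the RCU read lock itself;
 * returns NULL when no entry matches. */
static struct my_item *my_lookup(struct rhashtable *tbl, u64 id)
{
	return rhashtable_lookup_fast(tbl, &id, my_params);
}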
|
| /net/ipv6/ |
| A D | ip6_fib.c |
      2629  lockdep_is_held(&iter->tbl->tb6_lock));    in ipv6_route_yield()
      2643  iter->w.root = &iter->tbl->tb6_root;    in ipv6_route_seq_setup_walk()
      2658  if (tbl) {    in ipv6_route_seq_next_table()
      2659  h = (tbl->tb6_id & (FIB6_TABLE_HASHSZ - 1)) + 1;    in ipv6_route_seq_next_table()
      2660  node = rcu_dereference(hlist_next_rcu(&tbl->tb6_hlist));    in ipv6_route_seq_next_table()
      2703  spin_lock_bh(&iter->tbl->tb6_lock);    in ipv6_route_seq_next()
      2705  spin_unlock_bh(&iter->tbl->tb6_lock);    in ipv6_route_seq_next()
      2714  iter->tbl = ipv6_route_seq_next_table(iter->tbl, net);    in ipv6_route_seq_next()
      2715  if (!iter->tbl)    in ipv6_route_seq_next()
      2729  iter->tbl = ipv6_route_seq_next_table(NULL, net);    in ipv6_route_seq_start()
      [all …]
|