/net/ipv4/

udp_tunnel_nic.c
    175  entry = &utn->entries[table][idx];  in __udp_tunnel_nic_get_port()
    220  entry = &utn->entries[table][idx];  in udp_tunnel_nic_device_sync_one()
    276  entry = &utn->entries[i][j];  in udp_tunnel_nic_device_sync_by_table()
    346  entry = &utn->entries[i][j];  in udp_tunnel_nic_has_collision()
    470  entry = &utn->entries[i][j];  in udp_tunnel_nic_add_new()
    560  entry = &utn->entries[i][j];  in __udp_tunnel_nic_reset_ntf()
    625  utn->entries[table][j].port) ||  in __udp_tunnel_nic_dump_write()
    698  sizeof(**utn->entries)));  in udp_tunnel_nic_flush()
    766  if (!utn->entries[i])  in udp_tunnel_nic_alloc()
    774  kfree(utn->entries[i]);  in udp_tunnel_nic_alloc()
    [all …]

/net/bridge/netfilter/

ebtables.c
    1148  newinfo->entries, tmp.entries, tmp.entries_size) != 0) {  in do_replace()
    1216  newinfo->entries = p;  in ebt_register_table()
    1552  char *entries;  in copy_everything_to_user()  (local)
    1557  entries = t->private->entries;  in copy_everything_to_user()
    1562  entries = t->table->entries;  in copy_everything_to_user()
    1586  ebt_entry_to_user, entries, tmp.entries);  in copy_everything_to_user()
    1839  const void *entries = info->entries;  in compat_table_info()  (local)
    1865  tinfo.entries = t->private->entries;  in compat_copy_everything_to_user()
    1870  tinfo.entries = t->table->entries;  in compat_copy_everything_to_user()
    2248  repl->entries = compat_ptr(tmp.entries);  in compat_copy_ebt_replace_from_user()
    [all …]

ebtable_filter.c
    43  .entries = (char *)initial_chains,

ebtable_nat.c
    43  .entries = (char *)initial_chains,

ebtable_broute.c
    36  .entries = (char *)&initial_chain,

/net/sched/

act_gate.c
    90  if (list_is_last(&next->list, &p->entries))  in gate_timer_func()
    91  next = list_first_entry(&p->entries,  in gate_timer_func()
    259  list_add_tail(&entry->list, &sched->entries);  in parse_gate_list()
    268  release_entry_list(&sched->entries);  in parse_gate_list()
    380  INIT_LIST_HEAD(&gact->param.entries);  in tcf_gate_init()
    402  list_for_each_entry(entry, &p->entries, list)  in tcf_gate_init()
    463  release_entry_list(&p->entries);  in tcf_gate_cleanup()
    545  list_for_each_entry(entry, &p->entries, list) {  in tcf_gate_dump()
    590  entry->gate.entries = tcf_gate_get_list(act);  in tcf_gate_get_entries()
    592  if (!entry->gate.entries)  in tcf_gate_get_entries()
    [all …]

sch_taprio.c
    80  struct list_head entries;  (member)
    127  list_for_each_entry(entry, &sched->entries, list) {  in taprio_calculate_gate_durations()
    355  list_for_each_entry(entry, &sched->entries, list) {  in find_entry_to_transmit()
    874  if (list_is_last(&entry->list, &oper->entries))  in should_restart_cycle()
    1117  list_add_tail(&entry->list, &sched->entries);  in parse_sched_list()
    1156  list_for_each_entry(entry, &new->entries, list)  in parse_taprio_schedule()
    1248  first = list_first_entry(&sched->entries,  in setup_first_end_time()
    1368  list_for_each_entry(entry, &sched->entries, list) {  in setup_txtime()
    1469  list_for_each_entry(entry, &sched->entries, list) {  in taprio_sched_to_offload()
    1877  INIT_LIST_HEAD(&new_admin->entries);  in taprio_change()
    [all …]

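Both act_gate.c and sch_taprio.c keep their schedule entries on an intrusive kernel list (struct list_head entries) and walk it with list_for_each_entry(); when a cycle reaches the last entry, list_is_last()/list_first_entry() wrap back to the start, as gate_timer_func() and should_restart_cycle() do above. A minimal userspace sketch of that pattern, with simplified re-implementations of the list helpers and an invented sched_entry type:

```c
#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))
#define list_entry(ptr, type, member) container_of(ptr, type, member)
#define list_first_entry(head, type, member) list_entry((head)->next, type, member)
#define list_for_each_entry(pos, head, member)                          \
        for (pos = list_first_entry(head, typeof(*pos), member);        \
             &pos->member != (head);                                    \
             pos = list_entry(pos->member.next, typeof(*pos), member))

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *new, struct list_head *head)
{
        new->prev = head->prev;
        new->next = head;
        head->prev->next = new;
        head->prev = new;
}

static int list_is_last(const struct list_head *node, const struct list_head *head)
{
        return node->next == head;
}

/* Hypothetical stand-in for the schedulers' per-interval entries. */
struct sched_entry {
        struct list_head list;
        unsigned int interval_us;
};

int main(void)
{
        struct list_head entries;
        struct sched_entry e[3] = {
                { .interval_us = 100 }, { .interval_us = 200 }, { .interval_us = 300 },
        };
        struct sched_entry *cur, *next;

        INIT_LIST_HEAD(&entries);
        for (int i = 0; i < 3; i++)
                list_add_tail(&e[i].list, &entries);

        list_for_each_entry(cur, &entries, list)
                printf("interval %u us\n", cur->interval_us);

        /* Wrap from the last entry back to the first, as gate_timer_func() does. */
        cur = &e[2];
        next = list_is_last(&cur->list, &entries)
                ? list_first_entry(&entries, struct sched_entry, list)
                : list_entry(cur->list.next, struct sched_entry, list);
        printf("after last: interval %u us\n", next->interval_us);
        return 0;
}
```

The intrusive layout means one allocation per entry and O(1) unlink, which is why both schedulers can release or replace a whole schedule list cheaply.
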
/net/netfilter/ipvs/

ip_vs_lblc.c
    106  atomic_t entries;  /* number of entries */  (member)
    172  atomic_inc(&tbl->entries);  in ip_vs_lblc_hash()
    240  atomic_dec(&tbl->entries);  in ip_vs_lblc_flush()
    274  atomic_dec(&tbl->entries);  in ip_vs_lblc_full_check()
    311  if (atomic_read(&tbl->entries) <= tbl->max_size) {  in ip_vs_lblc_check_expire()
    316  goal = (atomic_read(&tbl->entries) - tbl->max_size)*4/3;  in ip_vs_lblc_check_expire()
    329  atomic_dec(&tbl->entries);  in ip_vs_lblc_check_expire()
    370  atomic_set(&tbl->entries, 0);  in ip_vs_lblc_init_svc()

ip_vs_lblcr.c
    274  atomic_t entries;  /* number of entries */  (member)
    335  atomic_inc(&tbl->entries);  in ip_vs_lblcr_hash()
    438  atomic_dec(&tbl->entries);  in ip_vs_lblcr_full_check()
    475  if (atomic_read(&tbl->entries) <= tbl->max_size) {  in ip_vs_lblcr_check_expire()
    480  goal = (atomic_read(&tbl->entries) - tbl->max_size)*4/3;  in ip_vs_lblcr_check_expire()
    493  atomic_dec(&tbl->entries);  in ip_vs_lblcr_check_expire()
    533  atomic_set(&tbl->entries, 0);  in ip_vs_lblcr_init_svc()

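The LBLC and LBLCR tables count their population in an atomic_t entries member: atomic_inc() in the hash insert, atomic_dec() on each removal, and the periodic expiry only collects once the count exceeds max_size, aiming to free (entries - max_size) * 4/3 entries. A rough sketch of that accounting with C11 atomics (table and timer details omitted; lb_table is an invented stand-in):

```c
#include <stdatomic.h>
#include <stdio.h>

struct lb_table {
        atomic_int entries;   /* number of entries, like the kernel's atomic_t */
        int max_size;
};

static void tbl_insert(struct lb_table *t) { atomic_fetch_add(&t->entries, 1); }
static void tbl_remove(struct lb_table *t) { atomic_fetch_sub(&t->entries, 1); }

/* Mirror of the expiry heuristic: do nothing while within bounds, then
 * aim to free 4/3 of the overshoot so the table dips below the cap. */
static int expire_goal(struct lb_table *t)
{
        int n = atomic_load(&t->entries);

        if (n <= t->max_size)
                return 0;
        return (n - t->max_size) * 4 / 3;
}

int main(void)
{
        struct lb_table t = { .max_size = 8 };

        for (int i = 0; i < 14; i++)
                tbl_insert(&t);
        printf("entries=%d goal=%d\n", atomic_load(&t.entries), expire_goal(&t));  /* 14, 8 */

        for (int i = 0; i < 6; i++)
                tbl_remove(&t);
        printf("entries=%d goal=%d\n", atomic_load(&t.entries), expire_goal(&t));  /* 8, 0 */
        return 0;
}
```
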
/net/netfilter/

xt_repldata.h
    23  struct type##_standard entries[]; \
    26  size_t term_offset = (offsetof(typeof(*tbl), entries[nhooks]) + \
    43  tbl->entries[i++] = (struct type##_standard) \

xt_recent.c
    82  unsigned int entries;  (member)
    149  t->entries--;  in recent_entry_remove()
    185  if (t->entries >= ip_list_tot) {  in recent_entry_init()
    205  t->entries++;  in recent_entry_init()

nf_dup_netdev.c
    98  entry = &flow->rule->action.entries[ctx->num_actions++];  in nft_fwd_dup_netdev_offload()

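xt_repldata.h lays out a whole replacement table in one allocation: a header, a flexible array with one standard entry per hook, and an error terminator placed at offsetof(typeof(*tbl), entries[nhooks]). A simplified userspace sketch of that sizing trick (repl_table, std_entry, and err_entry are invented stand-ins for the token-pasted type##_* structs; a variable array index inside offsetof is a GNU extension the kernel also relies on):

```c
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

/* Stand-ins for the pasted type##_standard / type##_error types. */
struct std_entry { unsigned int verdict; };
struct err_entry { char msg[16]; };

struct repl_table {
        unsigned int nhooks;
        struct std_entry entries[];   /* flexible array member, one per hook */
};

int main(void)
{
        unsigned int nhooks = 3;
        /* The error target lives immediately after the last hook entry,
         * exactly where offsetof(typeof(*tbl), entries[nhooks]) points. */
        size_t term_offset = offsetof(struct repl_table, entries[nhooks]);
        struct repl_table *tbl = calloc(1, term_offset + sizeof(struct err_entry));

        if (!tbl)
                return 1;
        tbl->nhooks = nhooks;
        for (unsigned int i = 0; i < nhooks; i++)
                tbl->entries[i] = (struct std_entry){ .verdict = i };

        struct err_entry *term = (void *)((char *)tbl + term_offset);
        snprintf(term->msg, sizeof(term->msg), "ERROR");

        printf("term_offset=%zu, last verdict=%u, term=%s\n",
               term_offset, tbl->entries[nhooks - 1].verdict, term->msg);
        free(tbl);
        return 0;
}
```

Keeping the terminator in the same allocation keeps the table contiguous, so it can be handed around as a single blob rather than a header plus side allocations.
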
/net/xfrm/

xfrm_algo.c
    650  int entries;  (member)
    656  .entries = ARRAY_SIZE(aead_list),
    662  .entries = ARRAY_SIZE(aalg_list),
    668  .entries = ARRAY_SIZE(ealg_list),
    674  .entries = ARRAY_SIZE(calg_list),
    685  for (i = 0; i < algo_list->entries; i++) {  in xfrm_find_algo()

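xfrm_algo.c pairs each static algorithm table with .entries = ARRAY_SIZE(...), so xfrm_find_algo() can scan whichever list it is handed with one bounded loop. A condensed sketch of that table-plus-count idiom (algo_desc and the algorithm names are placeholders):

```c
#include <stdio.h>
#include <string.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct algo_desc { const char *name; };

static struct algo_desc aead_list[] = { { "gcm(aes)" }, { "ccm(aes)" } };
static struct algo_desc aalg_list[] = { { "hmac(sha1)" }, { "hmac(sha256)" } };

/* One descriptor per algorithm class: the table and how many entries it has. */
static const struct {
        struct algo_desc *list;
        int entries;
} algo_lists[] = {
        { .list = aead_list, .entries = ARRAY_SIZE(aead_list) },
        { .list = aalg_list, .entries = ARRAY_SIZE(aalg_list) },
};

static struct algo_desc *find_algo(const char *name)
{
        for (size_t t = 0; t < ARRAY_SIZE(algo_lists); t++)
                for (int i = 0; i < algo_lists[t].entries; i++)
                        if (!strcmp(algo_lists[t].list[i].name, name))
                                return &algo_lists[t].list[i];
        return NULL;
}

int main(void)
{
        struct algo_desc *a = find_algo("hmac(sha256)");
        printf("%s\n", a ? a->name : "not found");
        return 0;
}
```

Because ARRAY_SIZE is evaluated at compile time, adding an algorithm to a list updates its entries count automatically.
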
/net/ipv4/netfilter/

arp_tables.c
    209  table_base = private->entries;  in arpt_do_table()
    685  loc_cpu_entry = private->entries;  in copy_entries_to_user()
    778  loc_cpu_entry = info->entries;  in compat_table_info()
    929  loc_cpu_old_entry = oldinfo->entries;  in __do_replace()
    978  loc_cpu_entry = newinfo->entries;  in do_replace()
    1062  struct compat_arpt_entry entries[];  (member)
    1201  memset(newinfo->entries, 0, size);  in translate_compat_table()
    1208  entry1 = newinfo->entries;  in translate_compat_table()
    1280  loc_cpu_entry = newinfo->entries;  in compat_do_replace()
    1500  loc_cpu_entry = private->entries;  in __arpt_unregister_table()
    [all …]

ip_tables.c
    262  table_base = private->entries;  in ipt_do_table()
    750  xt_entry_foreach(iter, t->entries, t->size) {  in get_counters()
    825  loc_cpu_entry = private->entries;  in copy_entries_to_user()
    933  loc_cpu_entry = info->entries;  in compat_table_info()
    1130  loc_cpu_entry = newinfo->entries;  in do_replace()
    1214  struct compat_ipt_entry entries[];  (member)
    1433  memset(newinfo->entries, 0, size);  in translate_compat_table()
    1440  entry1 = newinfo->entries;  in translate_compat_table()
    1518  loc_cpu_entry = newinfo->entries;  in compat_do_replace()
    1715  loc_cpu_entry = private->entries;  in __ipt_unregister_table()
    [all …]

iptable_filter.c
    46  ((struct ipt_standard *)repl->entries)[1].target.verdict =  in iptable_filter_table_init()

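In arp_tables.c, ip_tables.c, and ip6_tables.c, private->entries is not an array of fixed-size elements but a packed blob of variable-length rules; xt_entry_foreach() advances by each entry's next_offset until t->size bytes have been covered. A toy sketch of that offset-chained traversal (the entry layout here is invented; real ipt_entry records carry matches and a target in the variable tail):

```c
#include <stdio.h>
#include <string.h>

/* Each record announces how big it is, like ipt_entry's next_offset. */
struct entry {
        unsigned short next_offset;  /* total size of this record in bytes */
        unsigned short verdict;
        /* variable-length match/target data follows in the real thing */
};

#define xt_entry_foreach(pos, ehead, esize)                        \
        for ((pos) = (struct entry *)(ehead);                      \
             (char *)(pos) < (char *)(ehead) + (esize);            \
             (pos) = (struct entry *)((char *)(pos) + (pos)->next_offset))

int main(void)
{
        _Alignas(struct entry) char blob[64];
        struct entry *e;
        size_t size = 0;

        /* Pack three records of different sizes into one flat buffer. */
        unsigned short lens[] = { 8, 16, 12 };
        for (int i = 0; i < 3; i++) {
                struct entry tmp = { .next_offset = lens[i],
                                     .verdict = (unsigned short)i };
                memcpy(blob + size, &tmp, sizeof(tmp));
                size += lens[i];
        }

        xt_entry_foreach(e, blob, size)
                printf("entry at +%td, verdict %u\n", (char *)e - blob, e->verdict);
        return 0;
}
```

The flat layout is what makes do_replace()/copy_entries_to_user() above possible: the whole ruleset is one buffer that can be copied to and from userspace in a single pass.
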
/net/xdp/

xsk_queue.h
    298  u32 entries = q->cached_prod - q->cached_cons;  in xskq_cons_nb_entries()  (local)
    300  if (entries >= max)  in xskq_cons_nb_entries()
    304  entries = q->cached_prod - q->cached_cons;  in xskq_cons_nb_entries()
    306  return entries >= max ? max : entries;  in xskq_cons_nb_entries()

xsk.c
    1018  static int xsk_init_queue(u32 entries, struct xsk_queue **queue,  in xsk_init_queue()  (argument)
    1023  if (entries == 0 || *queue || !is_power_of_2(entries))  in xsk_init_queue()
    1026  q = xskq_create(entries, umem_queue);  in xsk_init_queue()
    1358  int entries;  in xsk_setsockopt()  (local)
    1360  if (optlen < sizeof(entries))  in xsk_setsockopt()
    1362  if (copy_from_sockptr(&entries, optval, sizeof(entries)))  in xsk_setsockopt()
    1371  err = xsk_init_queue(entries, q, false);  in xsk_setsockopt()
    1425  int entries;  in xsk_setsockopt()  (local)
    1427  if (optlen < sizeof(entries))  in xsk_setsockopt()
    1429  if (copy_from_sockptr(&entries, optval, sizeof(entries)))  in xsk_setsockopt()
    [all …]

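xskq_cons_nb_entries() computes ring occupancy as cached_prod - cached_cons on free-running u32 counters, which stays correct across counter wraparound, and xsk_init_queue() rejects ring sizes that are zero or not a power of two so slot indexing can be a cheap mask. A userspace sketch of both checks (single-threaded, so the producer-refresh step between lines 300 and 304 above is elided):

```c
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

struct ring {
        uint32_t cached_prod;   /* free-running producer counter */
        uint32_t cached_cons;   /* free-running consumer counter */
};

static bool is_power_of_2(uint32_t n)
{
        return n != 0 && (n & (n - 1)) == 0;
}

/* How many entries can the consumer take, capped at max?
 * Unsigned subtraction is wraparound-safe: prod may have wrapped past
 * UINT32_MAX while cons has not, and the difference is still right. */
static uint32_t cons_nb_entries(const struct ring *q, uint32_t max)
{
        uint32_t entries = q->cached_prod - q->cached_cons;

        return entries >= max ? max : entries;
}

int main(void)
{
        /* Producer wrapped around 2^32, consumer just behind it. */
        struct ring q = { .cached_prod = 3, .cached_cons = UINT32_MAX - 1 };

        printf("available: %u\n", cons_nb_entries(&q, 16));  /* prints 5 */
        printf("1024 ok? %d, 1000 ok? %d\n",
               is_power_of_2(1024), is_power_of_2(1000));
        return 0;
}
```
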
/net/ipv6/netfilter/

ip6_tables.c
    284  table_base = private->entries;  in ip6t_do_table()
    767  xt_entry_foreach(iter, t->entries, t->size) {  in get_counters()
    841  loc_cpu_entry = private->entries;  in copy_entries_to_user()
    949  loc_cpu_entry = info->entries;  in compat_table_info()
    1147  loc_cpu_entry = newinfo->entries;  in do_replace()
    1230  struct compat_ip6t_entry entries[];  (member)
    1448  memset(newinfo->entries, 0, size);  in translate_compat_table()
    1455  entry1 = newinfo->entries;  in translate_compat_table()
    1527  loc_cpu_entry = newinfo->entries;  in compat_do_replace()
    1724  loc_cpu_entry = private->entries;  in __ip6t_unregister_table()
    [all …]

ip6table_filter.c
    45  ((struct ip6t_standard *)repl->entries)[1].target.verdict =  in ip6table_filter_table_init()

/net/core/

drop_monitor.c
    73  struct net_dm_hw_entry entries[];  (member)
    239  for (i = 0; i < msg->entries; i++) {  in trace_drop_common()
    246  if (msg->entries == dm_hit_limit)  in trace_drop_common()
    255  msg->entries++;  in trace_drop_common()
    309  hw_entries = kzalloc(struct_size(hw_entries, entries, dm_hit_limit),  in net_dm_hw_reset_per_cpu_data()
    363  rc = net_dm_hw_entry_put(msg, &hw_entries->entries[i]);  in net_dm_hw_entries_put()
    460  hw_entry = &hw_entries->entries[i];  in net_dm_hw_trap_summary_probe()
    470  hw_entry = &hw_entries->entries[hw_entries->num_entries];  in net_dm_hw_trap_summary_probe()

flow_offload.c
    15  rule = kzalloc(struct_size(rule, action.entries, num_actions),  in flow_rule_alloc()
    25  rule->action.entries[i].hw_stats = FLOW_ACTION_HW_STATS_DONT_CARE;  in flow_rule_alloc()
    36  fl_action = kzalloc(struct_size(fl_action, action.entries, num_actions),  in offload_action_alloc()
    46  fl_action->action.entries[i].hw_stats = FLOW_ACTION_HW_STATS_DONT_CARE;  in offload_action_alloc()

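Both drop_monitor.c and flow_offload.c size their flexible-array allocations with struct_size(ptr, entries, n), the overflow-checked form of sizeof(header) + n * sizeof(entry); the kernel helper saturates to SIZE_MAX on overflow so the kzalloc() fails cleanly. A rough userspace equivalent of the pattern (struct_size_like approximates the helper; flow_rule here is a cut-down, invented stand-in):

```c
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <stdint.h>

struct action_entry { int id; };

struct flow_rule {
        unsigned int num_entries;
        struct action_entry entries[];  /* flexible array member */
};

/* Overflow-checked header + n * element size, in the spirit of struct_size().
 * Returns SIZE_MAX on overflow, so the allocation below is refused instead
 * of silently shrinking. */
static size_t struct_size_like(size_t header, size_t elem, size_t n)
{
        if (elem && n > (SIZE_MAX - header) / elem)
                return SIZE_MAX;
        return header + n * elem;
}

int main(void)
{
        unsigned int n = 4;
        size_t sz = struct_size_like(offsetof(struct flow_rule, entries),
                                     sizeof(struct action_entry), n);
        struct flow_rule *rule = (sz == SIZE_MAX) ? NULL : calloc(1, sz);

        if (!rule)
                return 1;
        rule->num_entries = n;
        for (unsigned int i = 0; i < n; i++)
                rule->entries[i].id = (int)i;   /* like the hw_stats preset loop */
        printf("allocated %zu bytes for %u entries\n", sz, rule->num_entries);
        free(rule);
        return 0;
}
```
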
/net/can/

af_can.c
    479  dev_rcv_lists->entries++;  in can_rx_register()
    556  dev_rcv_lists->entries--;  in can_rx_unregister()
    586  if (dev_rcv_lists->entries == 0)  in can_rcv_filter()

/net/mpls/

Kconfig
    23  that have had MPLS stack entries pushed onto them and thus

/net/sunrpc/

cache.c
    89  cd->entries --;  in sunrpc_begin_cache_remove_entry()
    137  detail->entries++;  in sunrpc_cache_add_entry()
    236  detail->entries++;  in sunrpc_cache_update()
    404  cd->entries = 0;  in sunrpc_init_cache_detail()
    550  if (!detail->entries) {  in cache_purge()
    555  dprintk("RPC: %d entries in %s cache\n", detail->entries, detail->name);  in cache_purge()