Cross-reference of net_hotdata usage across the kernel tree. Line numbers refer to each listed file; "[all …]" marks hits elided by the search tool.

/linux/include/net/

hotdata.h
    10: struct net_hotdata {
    46: #define inet_ehash_secret    net_hotdata.tcp_protocol.secret
    47: #define udp_ehash_secret     net_hotdata.udp_protocol.secret
    48: #define inet6_ehash_secret   net_hotdata.tcpv6_protocol.secret
    49: #define tcp_ipv6_hash_secret net_hotdata.tcpv6_offload.secret
    50: #define udp6_ehash_secret    net_hotdata.udpv6_protocol.secret
    51: #define udp_ipv6_hash_secret net_hotdata.udpv6_offload.secret
    53: extern struct net_hotdata net_hotdata;
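
The listing above is the whole point of the file: one structure gathering the networking fast path's read-mostly globals (protocol and offload descriptors, list heads, skb slab caches, RPS state, tunables) so they share a few cache lines, plus macros that alias the hash secrets stored inside the embedded descriptors. An abridged sketch of the layout, restricted to fields that actually appear in this cross-reference (ordering and the remaining members vary by kernel version):

    struct net_hotdata {
            /* protocol / offload descriptors filled in at init time */
            struct packet_offload   ip_packet_offload;
            struct net_offload      tcpv4_offload;
            struct net_protocol     tcp_protocol;
            struct net_offload      udpv4_offload;
            struct net_protocol     udp_protocol;
            struct packet_offload   ipv6_packet_offload;
            struct net_offload      tcpv6_offload;
            struct inet6_protocol   tcpv6_protocol;
            struct inet6_protocol   udpv6_protocol;
            struct net_offload      udpv6_offload;
            struct list_head        offload_base;   /* GRO/GSO offloads */
            struct list_head        ptype_all;      /* taps, e.g. AF_PACKET */
            struct kmem_cache       *skbuff_cache;
            struct kmem_cache       *skbuff_fclone_cache;
            struct kmem_cache       *skb_small_head_cache;
            struct rps_sock_flow_table __rcu *rps_sock_flow_table;
            u32                     rps_cpu_mask;
            int                     gro_normal_batch;
            int                     netdev_budget;
            int                     netdev_budget_usecs;
            int                     tstamp_prequeue;
            int                     max_backlog;
            int                     dev_tx_weight;
            int                     dev_rx_weight;
            int                     sysctl_max_skb_frags;
            int                     sysctl_mem_pcpu_rsv;
    };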
|
proto_memory.h
    68: if (unlikely(val >= READ_ONCE(net_hotdata.sysctl_mem_pcpu_rsv)))   in sk_memory_allocated_add()
    79: if (unlikely(val <= -READ_ONCE(net_hotdata.sysctl_mem_pcpu_rsv)))  in sk_memory_allocated_sub()
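
The two hits are the overflow checks of a per-CPU batching scheme: protocol memory is accounted in a per-CPU counter and folded into the shared global only when the local delta drifts past the sysctl_mem_pcpu_rsv reserve in either direction. A minimal sketch of the add side, assuming the upstream proto_memory_pcpu_drain() helper and the per_cpu_fw_alloc counter in struct proto (reconstructed, not verbatim):

    static inline void sk_memory_allocated_add(struct sock *sk, int pages)
    {
            /* cheap per-CPU accounting on the fast path */
            int val = this_cpu_add_return(*sk->sk_prot->per_cpu_fw_alloc, pages);

            /* fold into the shared atomic only past the reserve */
            if (unlikely(val >= READ_ONCE(net_hotdata.sysctl_mem_pcpu_rsv)))
                    proto_memory_pcpu_drain(sk->sk_prot);
    }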
|
rps.h
    72: u32 val = hash & ~net_hotdata.rps_cpu_mask;                           in rps_record_sock_flow()
    94: sock_flow_table = rcu_dereference(net_hotdata.rps_sock_flow_table);  in sock_rps_record_flow_hash()
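
Line 72 is half of an encoding trick: each rps_sock_flow_table entry packs the upper bits of the flow hash together with the CPU that last consumed the flow, with rps_cpu_mask marking the split (the mask itself is derived from the CPU count in sysctl_net_core.c, line 166 below). A hedged reconstruction of rps_record_sock_flow():

    static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
                                            u32 hash)
    {
            unsigned int index = hash & table->mask;
            u32 val = hash & ~net_hotdata.rps_cpu_mask; /* keep the hash's high bits */

            val |= raw_smp_processor_id();              /* low bits: current CPU */

            /* avoid dirtying the cache line when the hint is unchanged */
            if (READ_ONCE(table->ents[index]) != val)
                    WRITE_ONCE(table->ents[index], val);
    }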
|
gro.h
    527: if (napi->rx_count >= READ_ONCE(net_hotdata.gro_normal_batch))  in gro_normal_one()
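
gro_normal_batch controls how many segments accumulate on a NAPI instance's rx_list before the batch is handed to the upper stack in one go. Sketch of gro_normal_one(), close to the upstream inline:

    static inline void gro_normal_one(struct napi_struct *napi,
                                      struct sk_buff *skb, int segs)
    {
            list_add_tail(&skb->list, &napi->rx_list);
            napi->rx_count += segs;
            if (napi->rx_count >= READ_ONCE(net_hotdata.gro_normal_batch))
                    gro_normal_list(napi);  /* flush the batch to the stack */
    }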
|
/linux/net/core/

hotdata.c
     8: struct net_hotdata net_hotdata __cacheline_aligned = {
     9:         .offload_base = LIST_HEAD_INIT(net_hotdata.offload_base),
    10:         .ptype_all = LIST_HEAD_INIT(net_hotdata.ptype_all),
    25: EXPORT_SYMBOL(net_hotdata);
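
The definition is a single __cacheline_aligned instance whose initializer seeds the two list heads and the tunables' defaults. A sketch, assuming the customary default values (these differ across kernel versions, so treat the numbers as illustrative):

    struct net_hotdata net_hotdata __cacheline_aligned = {
            .offload_base     = LIST_HEAD_INIT(net_hotdata.offload_base),
            .ptype_all        = LIST_HEAD_INIT(net_hotdata.ptype_all),
            .gro_normal_batch = 8,
            .netdev_budget    = 300,
            .tstamp_prequeue  = 1,
            .max_backlog      = 1000,
            .dev_tx_weight    = 64,
            .dev_rx_weight    = 64,
    };
    EXPORT_SYMBOL(net_hotdata);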
|
sysctl_net_core.c
    145: net_hotdata.rps_sock_flow_table,                     in rps_sock_flow_sysctl()
    166: net_hotdata.rps_cpu_mask =                           in rps_sock_flow_sysctl()
    178: rcu_assign_pointer(net_hotdata.rps_sock_flow_table,  in rps_sock_flow_sysctl()
    387: .data = &net_hotdata.sysctl_mem_pcpu_rsv,
    416: .data = &net_hotdata.max_backlog,
    475: .data = &net_hotdata.tstamp_prequeue,
    553: .data = &net_hotdata.netdev_budget,
    567: .data = &net_hotdata.sysctl_max_skb_frags,
    576: .data = &net_hotdata.netdev_budget_usecs,
    609: .data = &net_hotdata.gro_normal_batch,
    [all …]
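
The sysctl table points /proc/sys/net/core knobs directly at net_hotdata fields, so a write to e.g. netdev_max_backlog lands in net_hotdata.max_backlog with no extra indirection. A sketch of one such entry (table name and exact layout here are illustrative, not the file's):

    static struct ctl_table net_core_sketch[] = {
            {
                    .procname     = "netdev_max_backlog",
                    .data         = &net_hotdata.max_backlog,
                    .maxlen       = sizeof(int),
                    .mode         = 0644,
                    .proc_handler = proc_dointvec,
            },
    };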
|
gso.c
    20: list_for_each_entry_rcu(ptype, &net_hotdata.offload_base, list) {  in skb_eth_gso_segment()
    51: list_for_each_entry_rcu(ptype, &net_hotdata.offload_base, list) {  in skb_mac_gso_segment()
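
Both hits walk the same RCU-protected offload list to find the packet_offload whose EtherType matches the skb. Reconstructed sketch of skb_eth_gso_segment():

    struct sk_buff *skb_eth_gso_segment(struct sk_buff *skb,
                                        netdev_features_t features, __be16 type)
    {
            struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
            struct packet_offload *ptype;

            rcu_read_lock();
            list_for_each_entry_rcu(ptype, &net_hotdata.offload_base, list) {
                    if (ptype->type == type && ptype->callbacks.gso_segment) {
                            segs = ptype->callbacks.gso_segment(skb, features);
                            break;
                    }
            }
            rcu_read_unlock();

            return segs;
    }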
|
net-procfs.c
    188: list_for_each_entry_rcu(pt, &net_hotdata.ptype_all, list) {  in ptype_get_idx()
    236: nxt = net_hotdata.ptype_all.next;                            in ptype_seq_next()
    242: if (nxt != &net_hotdata.ptype_all)                           in ptype_seq_next()
|
gro.c
     32: list_for_each_entry(elem, &net_hotdata.offload_base, list) {  in dev_add_offload()
     56: struct list_head *head = &net_hotdata.offload_base;           in __dev_remove_offload()
    259: struct list_head *head = &net_hotdata.offload_base;           in napi_gro_complete()
    468: struct list_head *head = &net_hotdata.offload_base;           in dev_gro_receive()
    573: struct list_head *offload_head = &net_hotdata.offload_base;   in gro_find_receive_by_type()
    587: struct list_head *offload_head = &net_hotdata.offload_base;   in gro_find_complete_by_type()
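
dev_add_offload() (line 32) keeps offload_base sorted by ascending priority: writers insert under a spinlock while all the readers listed above walk the list under RCU. Reconstructed sketch, assuming the file-local offload_lock spinlock:

    void dev_add_offload(struct packet_offload *po)
    {
            struct packet_offload *elem;

            spin_lock(&offload_lock);
            list_for_each_entry(elem, &net_hotdata.offload_base, list) {
                    if (po->priority < elem->priority)
                            break;
            }
            /* insert before the first higher-priority element */
            list_add_rcu(&po->list, elem->list.prev);
            spin_unlock(&offload_lock);
    }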
|
gro_cells.c
    30: if (skb_queue_len(&cell->napi_skbs) > READ_ONCE(net_hotdata.max_backlog)) {  in gro_cells_receive()
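
gro_cells reuses the global backlog limit as backpressure on its private per-cell queue; espintcp.c and xfrm_input.c further down apply the identical guard to their own queues. Schematically (a sketch of the pattern, not the full function):

    /* in gro_cells_receive(): cap the private queue at the global limit */
    if (skb_queue_len(&cell->napi_skbs) > READ_ONCE(net_hotdata.max_backlog)) {
            kfree_skb(skb);         /* queue past the limit: account and drop */
            return NET_RX_DROP;
    }
    __skb_queue_tail(&cell->napi_skbs, skb);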
|
skbuff.c
     423: skb = kmem_cache_alloc(net_hotdata.skbuff_cache,               in slab_build_skb()
     475: skb = kmem_cache_alloc(net_hotdata.skbuff_cache,               in __build_skb()
     587: obj = kmem_cache_alloc_node(net_hotdata.skb_small_head_cache,  in kmalloc_reserve()
     658: ? net_hotdata.skbuff_fclone_cache : net_hotdata.skbuff_cache;  in __alloc_skb()
    1084: kmem_cache_free(net_hotdata.skb_small_head_cache, head);       in skb_kfree_head()
    1148: kmem_cache_free(net_hotdata.skbuff_cache, skb);                in kfree_skbmem()
    1169: kmem_cache_free(net_hotdata.skbuff_fclone_cache, fclones);     in kfree_skbmem()
    1469: kmem_cache_size(net_hotdata.skbuff_cache));                    in napi_skb_cache_put()
    2084: n = kmem_cache_alloc(net_hotdata.skbuff_cache, gfp_mask);      in skb_clone()
    5994: kmem_cache_free(net_hotdata.skbuff_cache, skb);                in kfree_skb_partial()
    [all …]
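
skbuff.c draws every sk_buff head from one of the slab caches owned by net_hotdata: skbuff_cache for ordinary skbs, skbuff_fclone_cache when a fast-clone pair is requested, and skb_small_head_cache for small head buffers. A sketch of the cache selection in __alloc_skb() (the helper name is hypothetical; SKB_ALLOC_FCLONE is the file's internal flag):

    static struct sk_buff *skb_head_alloc_sketch(gfp_t gfp_mask, int flags,
                                                 int node)
    {
            struct kmem_cache *cache;

            /* fast-clone requests come from the dedicated fclone cache */
            cache = (flags & SKB_ALLOC_FCLONE) ? net_hotdata.skbuff_fclone_cache
                                               : net_hotdata.skbuff_cache;

            return kmem_cache_alloc_node(cache, gfp_mask & ~GFP_DMA, node);
    }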
|
xdp.c
    589: n_skb = kmem_cache_alloc_bulk(net_hotdata.skbuff_cache, gfp, n_skb, skbs);  in xdp_alloc_skb_bulk()
    658: skb = kmem_cache_alloc(net_hotdata.skbuff_cache, GFP_ATOMIC);               in xdp_build_skb_from_frame()
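
XDP amortizes allocator overhead by pulling skbs out of the same skbuff_cache in bulk; cpu_map_kthread_run() in kernel/bpf/cpumap.c (further down) uses the identical call. Sketch of the pattern (the wrapper name is hypothetical):

    static int skb_bulk_alloc_sketch(void **skbs, int n)
    {
            /* returns how many objects were actually allocated, 0 on failure */
            return kmem_cache_alloc_bulk(net_hotdata.skbuff_cache, GFP_ATOMIC,
                                         n, skbs);
    }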
|
dev.c
     574: return pt->dev ? &pt->dev->ptype_all : &net_hotdata.ptype_all;  in ptype_head()
    2280: return !list_empty(&net_hotdata.ptype_all) ||                   in dev_nit_active()
    2292: struct list_head *ptype_list = &net_hotdata.ptype_all;          in dev_queue_xmit_nit()
    2340: if (ptype_list == &net_hotdata.ptype_all) {                     in dev_queue_xmit_nit()
    4650: if ((ident ^ hash) & ~net_hotdata.rps_cpu_mask)                 in get_rps_cpu()
    4653: next_cpu = ident & net_hotdata.rps_cpu_mask;                    in get_rps_cpu()
    4824: if (qlen < (READ_ONCE(net_hotdata.max_backlog) >> 1))           in skb_flow_limit()
    4875: max_backlog = READ_ONCE(net_hotdata.max_backlog);               in enqueue_to_backlog()
    5880: net_timestamp_check(READ_ONCE(net_hotdata.tstamp_prequeue),     in netif_receive_skb_list_internal()
    6107: napi->weight = READ_ONCE(net_hotdata.dev_rx_weight);            in process_backlog()
    [all …]
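
Lines 4650 and 4653 are the decode counterpart of rps_record_sock_flow() in rps.h above: the table entry's low bits name the desired CPU, and the high bits must still match the flow hash or the hint is stale. Sketch of the check (the helper shape is hypothetical; upstream this sits inline in get_rps_cpu()):

    static bool rps_flow_hint_valid(u32 ident, u32 hash, u32 *next_cpu)
    {
            /* high bits must still match the flow hash, else the hint is stale */
            if ((ident ^ hash) & ~net_hotdata.rps_cpu_mask)
                    return false;

            *next_cpu = ident & net_hotdata.rps_cpu_mask;  /* low bits: CPU id */
            return true;
    }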
|
/linux/net/ipv6/

udp_offload.c
    192: net_hotdata.udpv6_offload = (struct net_offload) {                  in udpv6_offload_init()
    199: return inet6_add_offload(&net_hotdata.udpv6_offload, IPPROTO_UDP);  in udpv6_offload_init()
    204: return inet6_del_offload(&net_hotdata.udpv6_offload, IPPROTO_UDP);  in udpv6_offload_exit()
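
This file and its siblings (tcpv6_offload.c below, plus tcp_offload.c and udp_offload.c under ipv4) share one shape since the net_hotdata conversion: a compound-literal assignment into the structure's slot at init time, then registration of that slot instead of a static object. A sketch using the UDPv6 callbacks (callback names believed to match upstream, but verify against the tree being browsed):

    int __init udpv6_offload_init(void)
    {
            net_hotdata.udpv6_offload = (struct net_offload) {
                    .callbacks = {
                            .gso_segment  = udp6_ufo_fragment,
                            .gro_receive  = udp6_gro_receive,
                            .gro_complete = udp6_gro_complete,
                    },
            };
            return inet6_add_offload(&net_hotdata.udpv6_offload, IPPROTO_UDP);
    }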
|
tcpv6_offload.c
    189: net_hotdata.tcpv6_offload = (struct net_offload) {                  in tcpv6_offload_init()
    196: return inet6_add_offload(&net_hotdata.tcpv6_offload, IPPROTO_TCP);  in tcpv6_offload_init()
|
ip6_offload.c
    470: net_hotdata.ipv6_packet_offload = (struct packet_offload) {  in ipv6_offload_init()
    478: dev_add_offload(&net_hotdata.ipv6_packet_offload);           in ipv6_offload_init()
|
udp.c
    1811: net_hotdata.udpv6_protocol = (struct inet6_protocol) {               in udpv6_init()
    1816: ret = inet6_add_protocol(&net_hotdata.udpv6_protocol, IPPROTO_UDP);  in udpv6_init()
    1827: inet6_del_protocol(&net_hotdata.udpv6_protocol, IPPROTO_UDP);        in udpv6_init()
    1834: inet6_del_protocol(&net_hotdata.udpv6_protocol, IPPROTO_UDP);        in udpv6_exit()
|
tcp_ipv6.c
    2405: net_hotdata.tcpv6_protocol = (struct inet6_protocol) {
    2410: ret = inet6_add_protocol(&net_hotdata.tcpv6_protocol, IPPROTO_TCP);
    2435: inet6_del_protocol(&net_hotdata.tcpv6_protocol, IPPROTO_TCP);
    2443: inet6_del_protocol(&net_hotdata.tcpv6_protocol, IPPROTO_TCP);
|
/linux/net/ipv4/

af_inet.c
    1882: net_hotdata.ip_packet_offload = (struct packet_offload) {           in ipv4_offload_init()
    1890: dev_add_offload(&net_hotdata.ip_packet_offload);                    in ipv4_offload_init()
    1945: net_hotdata.udp_protocol = (struct net_protocol) {                  in inet_init()
    1950: if (inet_add_protocol(&net_hotdata.udp_protocol, IPPROTO_UDP) < 0)  in inet_init()
    1953: net_hotdata.tcp_protocol = (struct net_protocol) {                  in inet_init()
    1959: if (inet_add_protocol(&net_hotdata.tcp_protocol, IPPROTO_TCP) < 0)  in inet_init()
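
inet_init() fills the net_protocol slots the same way before handing them to inet_add_protocol(); the inet6_add_protocol() calls in udp.c and tcp_ipv6.c above are analogous. Sketch of the TCP half (handler names believed to match upstream; the udp_protocol assignment at line 1945 follows suit with udp_rcv/udp_err):

    net_hotdata.tcp_protocol = (struct net_protocol) {
            .handler     = tcp_v4_rcv,
            .err_handler = tcp_v4_err,
            .no_policy   = 1,
            .icmp_strict_tag_validation = 1,
    };
    if (inet_add_protocol(&net_hotdata.tcp_protocol, IPPROTO_TCP) < 0)
            pr_crit("%s: Cannot add TCP protocol\n", __func__);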
|
tcp_offload.c
    489: net_hotdata.tcpv4_offload = (struct net_offload) {                 in tcpv4_offload_init()
    496: return inet_add_offload(&net_hotdata.tcpv4_offload, IPPROTO_TCP);  in tcpv4_offload_init()
|
udp_offload.c
    759: net_hotdata.udpv4_offload = (struct net_offload) {                 in udpv4_offload_init()
    766: return inet_add_offload(&net_hotdata.udpv4_offload, IPPROTO_UDP);  in udpv4_offload_init()
|
/linux/net/xfrm/

espintcp.c
    174: READ_ONCE(net_hotdata.max_backlog))  in espintcp_queue_out()
|
xfrm_input.c
    786: if (skb_queue_len(&trans->queue) >= READ_ONCE(net_hotdata.max_backlog))  in xfrm_trans_queue_net()
|
/linux/kernel/bpf/

cpumap.c
    334: m = kmem_cache_alloc_bulk(net_hotdata.skbuff_cache,  in cpu_map_kthread_run()
|
/linux/net/sched/

sch_generic.c
    413: int quota = READ_ONCE(net_hotdata.dev_tx_weight);  in __qdisc_run()
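
dev_tx_weight caps how many packets a single __qdisc_run() invocation may dequeue before the qdisc is rescheduled, bounding TX softirq latency. Reconstructed sketch (upstream also breaks out of the loop on need_resched()):

    void __qdisc_run(struct Qdisc *q)
    {
            int quota = READ_ONCE(net_hotdata.dev_tx_weight);
            int packets;

            while (qdisc_restart(q, &packets)) {
                    quota -= packets;
                    if (quota <= 0) {
                            __netif_schedule(q);  /* resume in a later softirq */
                            break;
                    }
            }
    }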
|