| /net/hsr/ |
| A D | hsr_framereg.c |
|   77 | return node; | in find_node_by_addr_A() |
|  135 | kfree(node); | in hsr_del_nodes() |
|  203 | return node; | in hsr_add_node() |
|  237 | return node; | in hsr_get_node() |
|  242 | return node; | in hsr_get_node() |
|  251 | return node; | in hsr_get_node() |
|  571 | node->seq_start[rcv_port] = node->seq_expected[rcv_port]; | in prp_register_frame_out() |
|  729 | if (node) | in hsr_get_next_node() |
|  731 | return node; | in hsr_get_next_node() |
|  734 | node = _pos; | in hsr_get_next_node() |
| [all …] |
|
| A D | prp_dup_discard_test.c |
|   11 | struct hsr_node node; | member |
|   20 | data->frame.node_src = &data->node; | in build_prp_test_data() |
|   23 | data->node.seq_start[HSR_PT_SLAVE_A] = 1; | in build_prp_test_data() |
|   25 | data->node.seq_start[HSR_PT_SLAVE_B] = 1; | in build_prp_test_data() |
|   27 | data->node.seq_out[HSR_PT_MASTER] = 0; | in build_prp_test_data() |
|   58 | data->node.seq_out[HSR_PT_MASTER]); | in prp_dup_discard_forward() |
|   72 | data->node.seq_out[HSR_PT_MASTER] = 2; | in prp_dup_discard_inside_dropwindow() |
|   93 | data->node.time_out[HSR_PT_MASTER] = | in prp_dup_discard_node_timeout() |
|   99 | data->node.seq_out[HSR_PT_MASTER]); | in prp_dup_discard_node_timeout() |
|  121 | data->node.seq_out[HSR_PT_MASTER]); | in prp_dup_discard_out_of_sequence() |
| [all …] |
|
| A D | hsr_debugfs.c |
|   22 | struct hsr_node *node; | in hsr_node_table_show() local |
|   34 | list_for_each_entry_rcu(node, &priv->node_db, mac_list) { | in hsr_node_table_show() |
|   36 | if (hsr_addr_is_self(priv, node->macaddress_A)) | in hsr_node_table_show() |
|   38 | seq_printf(sfp, "%pM ", &node->macaddress_A[0]); | in hsr_node_table_show() |
|   39 | seq_printf(sfp, "%pM ", &node->macaddress_B[0]); | in hsr_node_table_show() |
|   40 | seq_printf(sfp, "%10lx, ", node->time_in[HSR_PT_SLAVE_A]); | in hsr_node_table_show() |
|   41 | seq_printf(sfp, "%10lx, ", node->time_in[HSR_PT_SLAVE_B]); | in hsr_node_table_show() |
|   42 | seq_printf(sfp, "%14x, ", node->addr_B_port); | in hsr_node_table_show() |
|   46 | node->san_a, node->san_b, | in hsr_node_table_show() |
|   47 | (node->san_a == 0 && node->san_b == 0)); | in hsr_node_table_show() |
|
| /net/qrtr/ |
| A D | af_qrtr.c |
|  197 | kfree(node); | in __qrtr_node_release() |
|  203 | if (node) | in qrtr_node_acquire() |
|  205 | return node; | in qrtr_node_acquire() |
|  211 | if (!node) | in qrtr_node_release() |
|  376 | rc = node->ep->xmit(node->ep, skb); | in qrtr_node_enqueue() |
|  401 | node = qrtr_node_acquire(node); | in qrtr_node_lookup() |
|  437 | struct qrtr_node *node = ep->node; | in qrtr_endpoint_post() local |
|  592 | node = kzalloc(sizeof(*node), GFP_KERNEL); | in qrtr_endpoint_register() |
|  593 | if (!node) | in qrtr_endpoint_register() |
|  610 | ep->node = node; | in qrtr_endpoint_register() |
| [all …] |
|
| A D | ns.c |
|   77 | if (node) | in node_get() |
|   81 | node = kzalloc(sizeof(*node), GFP_KERNEL); | in node_get() |
|   82 | if (!node) | in node_get() |
|   93 | return node; | in node_get() |
|  126 | pkt.server.node = cpu_to_le32(srv->node); | in service_announce_new() |
|  153 | pkt.server.node = cpu_to_le32(srv->node); | in service_announce_del() |
|  183 | pkt.server.node = cpu_to_le32(srv->node); | in lookup_notify() |
|  203 | if (!node) | in announce_servers() |
|  242 | if (!node) | in server_add() |
|  350 | if (!node) | in ctrl_cmd_bye() |
| [all …] |
|
| /net/netfilter/ |
| A D | nft_set_rbtree.c |
|   27 | struct rb_node node; | member |
|  351 | for (node = first; node != NULL; node = next) { | in __nft_rbtree_insert() |
|  354 | rbe = rb_entry(node, struct nft_rbtree_elem, node); | in __nft_rbtree_insert() |
|  593 | struct rb_node *node; | in nft_rbtree_walk() local |
|  596 | for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) { | in nft_rbtree_walk() |
|  597 | rbe = rb_entry(node, struct nft_rbtree_elem, node); | in nft_rbtree_walk() |
|  637 | for (node = rb_first(&priv->root); node ; node = next) { | in nft_rbtree_gc() |
|  640 | rbe = rb_entry(node, struct nft_rbtree_elem, node); | in nft_rbtree_gc() |
|  713 | rbe = rb_entry(node, struct nft_rbtree_elem, node); | in nft_rbtree_destroy() |
|  776 | if (!node) | in nft_rbtree_adjust_maxsize() |
| [all …] |
|
| A D | nf_conncount.c |
|   42 | struct list_head node; | member |
|   50 | struct rb_node node; | member |
|   91 | list_del(&conn->node); | in conn_free() |
|  460 | struct rb_node *node; | in tree_gc_worker() local |
|  468 | for (node = rb_first(root); node != NULL; node = rb_next(node)) { | in tree_gc_worker() |
|  469 | rbconn = rb_entry(node, struct nf_conncount_rb, node); | in tree_gc_worker() |
|  485 | rbconn = rb_entry(node, struct nf_conncount_rb, node); | in tree_gc_worker() |
|  486 | node = rb_next(node); | in tree_gc_worker() |
|  566 | struct rb_node *node; | in destroy_tree() local |
|  569 | rbconn = rb_entry(node, struct nf_conncount_rb, node); | in destroy_tree() |
| [all …] |
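The nft_rbtree_walk(), nft_rbtree_gc() and tree_gc_worker() hits above all use the same kernel rbtree walk idiom: iterate rb_node pointers with rb_first()/rb_next() and recover the containing element with rb_entry(). A minimal sketch of that pattern, using a hypothetical struct my_elem in place of nft_rbtree_elem or nf_conncount_rb:

```c
/* Sketch of the rb_first()/rb_next()/rb_entry() walk seen above.
 * "struct my_elem" and "my_tree_walk" are hypothetical names, not
 * taken from the files listed here.
 */
#include <linux/rbtree.h>
#include <linux/types.h>

struct my_elem {
	struct rb_node	node;	/* embedded in the element, as in the hits above */
	u32		key;
};

static void my_tree_walk(struct rb_root *root)
{
	struct rb_node *node;

	for (node = rb_first(root); node != NULL; node = rb_next(node)) {
		struct my_elem *elem = rb_entry(node, struct my_elem, node);

		/* inspect or age out "elem" here */
		(void)elem;
	}
}
```

When elements may be erased during the walk (as in the gc paths above), the next pointer is fetched before acting on the current node, which is why those loops carry a separate "next" variable.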
|
| A D | nft_set_hash.c |
|   32 | struct rhash_head node; | member |
|   76 | .head_offset = offsetof(struct nft_rhash_elem, node), |
|  500 | struct hlist_node node; | member |
|  516 | hlist_for_each_entry_rcu(he, &priv->table[hash], node) { | in nft_hash_lookup() |
|  535 | hlist_for_each_entry_rcu(he, &priv->table[hash], node) { | in nft_hash_get() |
|  592 | hlist_for_each_entry(he, &priv->table[hash], node) { | in nft_hash_insert() |
|  600 | hlist_add_head_rcu(&this->node, &priv->table[hash]); | in nft_hash_insert() |
|  631 | hlist_for_each_entry(he, &priv->table[hash], node) { | in nft_hash_deactivate() |
|  648 | hlist_del_rcu(&he->node); | in nft_hash_remove() |
|  659 | hlist_for_each_entry_rcu(he, &priv->table[i], node, | in nft_hash_walk() |
| [all …] |
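The nft_hash_lookup()/nft_hash_insert() hits show the RCU-protected hlist bucket pattern: readers traverse a bucket with hlist_for_each_entry_rcu(), while writers add with hlist_add_head_rcu() and remove with hlist_del_rcu(). A rough, self-contained sketch under assumed names (struct my_he, my_table, my_lookup):

```c
/* Sketch of the RCU hlist bucket pattern visible in the hits above.
 * Struct and function names here are illustrative only.
 */
#include <linux/rculist.h>
#include <linux/jhash.h>

#define MY_BUCKETS 256

struct my_he {
	struct hlist_node	node;	/* bucket linkage, like nft_hash_elem */
	u32			key;
};

static struct hlist_head my_table[MY_BUCKETS];

/* Reader side: caller must be in an RCU read-side critical section. */
static struct my_he *my_lookup(u32 key)
{
	struct my_he *he;
	u32 hash = jhash_1word(key, 0) % MY_BUCKETS;

	hlist_for_each_entry_rcu(he, &my_table[hash], node) {
		if (he->key == key)
			return he;
	}
	return NULL;
}

/* Writer side: caller serializes updates (e.g. with a spinlock). */
static void my_insert(struct my_he *he)
{
	u32 hash = jhash_1word(he->key, 0) % MY_BUCKETS;

	hlist_add_head_rcu(&he->node, &my_table[hash]);
}
```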
|
| /net/xdp/ |
| A D | xskmap.c |
|   18 | struct xsk_map_node *node; | in xsk_map_node_alloc() local |
|   20 | node = bpf_map_kzalloc(&map->map, sizeof(*node), | in xsk_map_node_alloc() |
|   22 | if (!node) | in xsk_map_node_alloc() |
|   28 | node->map = map; | in xsk_map_node_alloc() |
|   29 | node->map_entry = map_entry; | in xsk_map_node_alloc() |
|   30 | return node; | in xsk_map_node_alloc() |
|   38 | kfree(node); | in xsk_map_node_free() |
|   45 | list_add_tail(&node->node, &xs->map_list); | in xsk_map_sock_add() |
|   57 | list_del(&n->node); | in xsk_map_sock_delete() |
|  190 | if (IS_ERR(node)) { | in xsk_map_update_elem() |
| [all …] |
|
| /net/tipc/ |
| A D | node.c |
|  266 | return (node) ? node->addr : 0; | in tipc_node_get_addr() |
|  338 | if (node->addr != addr || node->preliminary) | in tipc_node_find() |
|  341 | node = NULL; | in tipc_node_find() |
|  345 | return node; | in tipc_node_find() |
|  415 | node = n->addr; | in tipc_node_write_unlock() |
|  702 | if (!node) { | in tipc_node_add_conn() |
|  732 | if (!node) | in tipc_node_remove_conn() |
| 1575 | if (!node) | in tipc_node_get_linkname() |
| 2334 | if (!node) { | in tipc_nl_node_dump() |
| 2448 | if (!node) | in tipc_nl_node_set_link() |
| [all …] |
|
| A D | group.c |
|   69 | u32 node; | member |
|  233 | u32 node, u32 port) | in tipc_group_find_member() argument |
|  253 | u32 node, u32 port) | in tipc_group_find_dest() argument |
|  264 | u32 node) | in tipc_group_find_node() argument |
|  271 | if (m->node == node) | in tipc_group_find_node() |
|  303 | u32 node, u32 port, | in tipc_group_create_member() argument |
|  316 | m->node = node; | in tipc_group_create_member() |
|  504 | u32 node, port; | in tipc_group_filter_msg() local |
|  511 | node = msg_orignode(hdr); | in tipc_group_filter_msg() |
|  682 | evt.port.node = m->node; | in tipc_group_create_event() |
| [all …] |
|
| A D | name_table.c |
|  337 | u32 node = p->sk.node; | in tipc_service_insert_publ() local |
|  351 | if (_p->key == key && (!_p->sk.node || _p->sk.node == node)) { | in tipc_service_insert_publ() |
|  354 | node, p->sk.ref, key); | in tipc_service_insert_publ() |
|  388 | u32 node = sk->node; | in tipc_service_remove_publ() local |
|  391 | if (p->key != key || (node && node != p->sk.node)) | in tipc_service_remove_publ() |
|  543 | sk->node, sk->ref, key); | in tipc_nametbl_remove_publ() |
|  594 | if (sk->node == self) { | in tipc_nametbl_lookup_anycast() |
| 1146 | if (dst->node == node && dst->port == port) | in tipc_dest_find() |
| 1162 | dst->node = node; | in tipc_dest_push() |
| 1177 | if (node) | in tipc_dest_pop() |
| [all …] |
|
| A D | name_distr.c |
|  280 | u32 node, u32 dtype) | in tipc_update_nametbl() argument |
|  290 | sk.node = node; | in tipc_update_nametbl() |
|  295 | tipc_node_subscribe(net, &p->binding_node, node); | in tipc_update_nametbl() |
|  301 | tipc_node_unsubscribe(net, &p->binding_node, node); | in tipc_update_nametbl() |
|  306 | ua.sr.type, ua.sr.lower, node); | in tipc_update_nametbl() |
|  371 | u32 count, node; | in tipc_named_rcv() local |
|  376 | node = msg_orignode(hdr); | in tipc_named_rcv() |
|  380 | tipc_update_nametbl(net, item, node, msg_type(hdr)); | in tipc_named_rcv() |
|  406 | p->sk.node = self; | in tipc_named_reinit() |
|  408 | p->sk.node = self; | in tipc_named_reinit() |
|
| A D | name_table.h |
|  148 | u32 node; | member |
|  151 | struct tipc_dest *tipc_dest_find(struct list_head *l, u32 node, u32 port); |
|  152 | bool tipc_dest_push(struct list_head *l, u32 node, u32 port); |
|  153 | bool tipc_dest_pop(struct list_head *l, u32 *node, u32 *port); |
|  154 | bool tipc_dest_del(struct list_head *l, u32 node, u32 port); |
|
| /net/ceph/ |
| A D | string_table.c |
|   21 | exist = rb_entry(*p, struct ceph_string, node); | in ceph_find_or_create_string() |
|   32 | rb_erase(&exist->node, &string_tree); | in ceph_find_or_create_string() |
|   33 | RB_CLEAR_NODE(&exist->node); | in ceph_find_or_create_string() |
|   56 | exist = rb_entry(*p, struct ceph_string, node); | in ceph_find_or_create_string() |
|   68 | rb_link_node(&cs->node, parent, p); | in ceph_find_or_create_string() |
|   69 | rb_insert_color(&cs->node, &string_tree); | in ceph_find_or_create_string() |
|   71 | rb_erase(&exist->node, &string_tree); | in ceph_find_or_create_string() |
|   72 | RB_CLEAR_NODE(&exist->node); | in ceph_find_or_create_string() |
|   93 | if (!RB_EMPTY_NODE(&cs->node)) { | in ceph_release_string() |
|   94 | rb_erase(&cs->node, &string_tree); | in ceph_release_string() |
| [all …] |
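ceph_find_or_create_string() follows the standard manual rbtree insertion: walk down from the root remembering the parent and the link slot, then attach with rb_link_node() and rebalance with rb_insert_color(). A condensed sketch with placeholder names (struct my_str, my_insert), keyed on an int instead of a string for brevity:

```c
/* Sketch of the rb_link_node()/rb_insert_color() insert path seen above.
 * "struct my_str" and "my_insert" are placeholder names.
 */
#include <linux/rbtree.h>

struct my_str {
	struct rb_node	node;
	int		key;
};

static struct rb_root my_tree = RB_ROOT;

static void my_insert(struct my_str *new)
{
	struct rb_node **p = &my_tree.rb_node;
	struct rb_node *parent = NULL;

	while (*p) {
		struct my_str *exist = rb_entry(*p, struct my_str, node);

		parent = *p;
		if (new->key < exist->key)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&new->node, parent, p);	/* link at the found slot */
	rb_insert_color(&new->node, &my_tree);	/* rebalance */
}
```

The RB_CLEAR_NODE()/RB_EMPTY_NODE() hits above are the companion idiom for marking a node as unlinked so a later release path can tell whether rb_erase() is still needed.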
|
| /net/bridge/ |
| A D | br_multicast_eht.c |
|   49 | while (node) { | in br_multicast_eht_host_lookup() |
|   57 | node = node->rb_left; | in br_multicast_eht_host_lookup() |
|   59 | node = node->rb_right; | in br_multicast_eht_host_lookup() |
|   85 | while (node) { | in br_multicast_eht_set_entry_lookup() |
|   93 | node = node->rb_left; | in br_multicast_eht_set_entry_lookup() |
|   95 | node = node->rb_right; | in br_multicast_eht_set_entry_lookup() |
|  109 | while (node) { | in br_multicast_eht_set_lookup() |
|  117 | node = node->rb_left; | in br_multicast_eht_set_lookup() |
|  119 | node = node->rb_right; | in br_multicast_eht_set_lookup() |
|  182 | struct rb_node *node; | in br_multicast_del_eht_set() local |
| [all …] |
|
| /net/batman-adv/ |
| A D | hash.h |
|   84 | struct hlist_node *node; | in batadv_hash_add() local |
|   96 | hlist_for_each(node, head) { | in batadv_hash_add() |
|   97 | if (!compare(node, data)) | in batadv_hash_add() |
|  135 | struct hlist_node *node; | in batadv_hash_remove() local |
|  143 | hlist_for_each(node, head) { | in batadv_hash_remove() |
|  144 | if (!compare(node, data)) | in batadv_hash_remove() |
|  147 | data_save = node; | in batadv_hash_remove() |
|  148 | hlist_del_rcu(node); | in batadv_hash_remove() |
|
| A D | multicast.c |
| 1617 | WARN_ON(!hlist_unhashed(node)); | in batadv_mcast_want_unsnoop_update() |
| 1628 | WARN_ON(hlist_unhashed(node)); | in batadv_mcast_want_unsnoop_update() |
| 1630 | hlist_del_init_rcu(node); | in batadv_mcast_want_unsnoop_update() |
| 1673 | WARN_ON(hlist_unhashed(node)); | in batadv_mcast_want_ipv4_update() |
| 1675 | hlist_del_init_rcu(node); | in batadv_mcast_want_ipv4_update() |
| 1718 | WARN_ON(hlist_unhashed(node)); | in batadv_mcast_want_ipv6_update() |
| 1720 | hlist_del_init_rcu(node); | in batadv_mcast_want_ipv6_update() |
| 1763 | WARN_ON(hlist_unhashed(node)); | in batadv_mcast_want_rtr4_update() |
| 1765 | hlist_del_init_rcu(node); | in batadv_mcast_want_rtr4_update() |
| 1808 | WARN_ON(hlist_unhashed(node)); | in batadv_mcast_want_rtr6_update() |
| [all …] |
|
| /net/shaper/ |
| A D | shaper.c |
|  895 | struct net_shaper *node, | in net_shaper_parent_from_leaves() argument |
|  908 | node->parent = parent; | in net_shaper_parent_from_leaves() |
|  932 | node->handle.scope, node->handle.id); | in __net_shaper_group() |
|  941 | leaves, node, | in __net_shaper_group() |
|  948 | net_shaper_default_parent(&node->handle, &node->parent); | in __net_shaper_group() |
|  955 | node->parent.scope, node->parent.id); | in __net_shaper_group() |
|  987 | node->leaves++; | in __net_shaper_group() |
| 1023 | node.handle = shaper->parent; | in net_shaper_pre_del_node() |
| 1026 | node = *cur; | in net_shaper_pre_del_node() |
| 1060 | leaves, &node, extack); | in net_shaper_pre_del_node() |
| [all …] |
|
| /net/802/ |
| A D | garp.c |
|  203 | rb_erase(&attr->node, &app->gid); | in garp_attr_destroy() |
|  209 | struct rb_node *node, *next; | in garp_attr_destroy_all() local |
|  212 | for (node = rb_first(&app->gid); | in garp_attr_destroy_all() |
|  213 | next = node ? rb_next(node) : NULL, node != NULL; | in garp_attr_destroy_all() |
|  214 | node = next) { | in garp_attr_destroy_all() |
|  215 | attr = rb_entry(node, struct garp_attr, node); | in garp_attr_destroy_all() |
|  396 | struct rb_node *node, *next; | in garp_gid_event() local |
|  399 | for (node = rb_first(&app->gid); | in garp_gid_event() |
|  400 | next = node ? rb_next(node) : NULL, node != NULL; | in garp_gid_event() |
|  401 | node = next) { | in garp_gid_event() |
| [all …] |
|
| A D | mrp.c |
|  292 | rb_erase(&attr->node, &app->mad); | in mrp_attr_destroy() |
|  298 | struct rb_node *node, *next; | in mrp_attr_destroy_all() local |
|  301 | for (node = rb_first(&app->mad); | in mrp_attr_destroy_all() |
|  302 | next = node ? rb_next(node) : NULL, node != NULL; | in mrp_attr_destroy_all() |
|  303 | node = next) { | in mrp_attr_destroy_all() |
|  304 | attr = rb_entry(node, struct mrp_attr, node); | in mrp_attr_destroy_all() |
|  581 | struct rb_node *node, *next; | in mrp_mad_event() local |
|  584 | for (node = rb_first(&app->mad); | in mrp_mad_event() |
|  585 | next = node ? rb_next(node) : NULL, node != NULL; | in mrp_mad_event() |
|  586 | node = next) { | in mrp_mad_event() |
| [all …] |
|
| /net/x25/ |
| A D | x25_route.c |
|   34 | list_for_each_entry(rt, &x25_route_list, node) { | in x25_add_route() |
|   52 | list_add(&rt->node, &x25_route_list); | in x25_add_route() |
|   68 | if (rt->node.next) { | in __x25_remove_route() |
|   69 | list_del(&rt->node); | in __x25_remove_route() |
|   82 | list_for_each_entry(rt, &x25_route_list, node) { | in x25_del_route() |
|  106 | rt = list_entry(entry, struct x25_route, node); | in x25_route_device_down() |
|  141 | list_for_each_entry(rt, &x25_route_list, node) { | in x25_get_route() |
|  200 | rt = list_entry(entry, struct x25_route, node); | in x25_route_free() |
|
| A D | x25_forward.c |
|   48 | list_for_each_entry(x25_frwd, &x25_forward_list, node) { | in x25_forward_call() |
|   67 | list_add(&new_frwd->node, &x25_forward_list); | in x25_forward_call() |
|   99 | list_for_each_entry(frwd, &x25_forward_list, node) { | in x25_forward_data() |
|  134 | list_for_each_entry_safe(fwd, tmp, &x25_forward_list, node) { | in x25_clear_forward_by_lci() |
|  136 | list_del(&fwd->node); | in x25_clear_forward_by_lci() |
|  150 | list_for_each_entry_safe(fwd, tmp, &x25_forward_list, node) { | in x25_clear_forward_by_dev() |
|  152 | list_del(&fwd->node); | in x25_clear_forward_by_dev() |
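Both x25 files above use the plain list_head idiom: a struct list_head member named node, list_add() for insertion, list_for_each_entry() for scans, and list_for_each_entry_safe() plus list_del() when entries are removed (and freed) during traversal. A small sketch with made-up names (struct my_fwd, my_list, my_clear):

```c
/* Sketch of the list_head pattern used by the x25 route/forward code above.
 * Names (struct my_fwd, my_list, my_clear) are illustrative.
 */
#include <linux/list.h>
#include <linux/slab.h>

struct my_fwd {
	struct list_head	node;	/* linkage, like x25_forward.node */
	unsigned int		lci;
};

static LIST_HEAD(my_list);

static void my_add(struct my_fwd *fwd)
{
	list_add(&fwd->node, &my_list);
}

/* The _safe variant is required because entries are freed while iterating. */
static void my_clear(unsigned int lci)
{
	struct my_fwd *fwd, *tmp;

	list_for_each_entry_safe(fwd, tmp, &my_list, node) {
		if (fwd->lci == lci) {
			list_del(&fwd->node);
			kfree(fwd);
		}
	}
}
```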
|
| /net/core/ |
| A D | dev_addr_lists.c |
|   45 | rb_insert_color(&new->node, &list->tree); | in __hw_addr_insert() |
|  122 | rb_link_node(&ha->node, parent, ins_point); | in __hw_addr_add_ex() |
|  123 | rb_insert_color(&ha->node, &list->tree); | in __hw_addr_add_ex() |
|  158 | rb_erase(&ha->node, &list->tree); | in __hw_addr_del_entry() |
|  170 | struct rb_node *node; | in __hw_addr_lookup() local |
|  172 | node = list->tree.rb_node; | in __hw_addr_lookup() |
|  174 | while (node) { | in __hw_addr_lookup() |
|  175 | struct netdev_hw_addr *ha = rb_entry(node, struct netdev_hw_addr, node); | in __hw_addr_lookup() |
|  182 | node = node->rb_left; | in __hw_addr_lookup() |
|  184 | node = node->rb_right; | in __hw_addr_lookup() |
| [all …] |
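__hw_addr_lookup(), like the br_multicast_eht lookups earlier, descends an rbtree by comparing the search key and following rb_left or rb_right. A minimal sketch of that descent, again with placeholder names and a plain u32 key instead of a MAC address:

```c
/* Sketch of the key-based rbtree descent seen in __hw_addr_lookup().
 * "struct my_addr" and "my_lookup" are illustrative names.
 */
#include <linux/rbtree.h>
#include <linux/types.h>

struct my_addr {
	struct rb_node	node;
	u32		key;
};

static struct my_addr *my_lookup(struct rb_root *tree, u32 key)
{
	struct rb_node *node = tree->rb_node;

	while (node) {
		struct my_addr *a = rb_entry(node, struct my_addr, node);

		if (key == a->key)
			return a;
		if (key < a->key)
			node = node->rb_left;
		else
			node = node->rb_right;
	}
	return NULL;
}
```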
|
| /net/ipv4/ |
| A D | udp_tunnel_nic.c |
|  707 | struct udp_tunnel_nic_shared_node *node; | in udp_tunnel_nic_replay() local |
|  723 | udp_tunnel_get_rx_info(node->dev); | in udp_tunnel_nic_replay() |
|  791 | struct udp_tunnel_nic_shared_node *node = NULL; | in udp_tunnel_nic_register() local |
|  823 | node = kzalloc(sizeof(*node), GFP_KERNEL); | in udp_tunnel_nic_register() |
|  824 | if (!node) | in udp_tunnel_nic_register() |
|  827 | node->dev = dev; | in udp_tunnel_nic_register() |
|  835 | kfree(node); | in udp_tunnel_nic_register() |
|  873 | struct udp_tunnel_nic_shared_node *node, *first; | in udp_tunnel_nic_unregister() local |
|  876 | if (node->dev == dev) | in udp_tunnel_nic_unregister() |
|  883 | list_del(&node->list); | in udp_tunnel_nic_unregister() |
| [all …] |
|