| /net/netfilter/ipvs/ |
| A D | ip_vs_sh.c |
|    110  return (!dest || is_unavailable(dest)) ? NULL : dest;  /* in ip_vs_sh_get() */
|    130  dest = rcu_dereference(s->buckets[ihash].dest);  /* in ip_vs_sh_get_fallback() */
|    131  if (!dest)  /* in ip_vs_sh_get_fallback() */
|    137  IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port));  /* in ip_vs_sh_get_fallback() */
|    145  dest = rcu_dereference(s->buckets[hash].dest);  /* in ip_vs_sh_get_fallback() */
|    152  IP_VS_DBG_ADDR(dest->af, &dest->addr),  /* in ip_vs_sh_get_fallback() */
|    177  dest = rcu_dereference_protected(b->dest, 1);  /* in ip_vs_sh_reassign() */
|    188  RCU_INIT_POINTER(b->dest, dest);  /* in ip_vs_sh_reassign() */
|    191  i, IP_VS_DBG_ADDR(dest->af, &dest->addr),  /* in ip_vs_sh_reassign() */
|    218  dest = rcu_dereference_protected(b->dest, 1);  /* in ip_vs_sh_flush() */
|    [all …]
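The ip_vs_sh.c hits above are the source-hashing scheduler's bucket table: readers resolve a bucket with rcu_dereference(), the rebuild path uses rcu_dereference_protected()/RCU_INIT_POINTER(), and ip_vs_sh_get_fallback() probes further buckets when the hashed one is empty or unavailable. The sketch below models only the lookup-with-fallback shape in user-space C: plain pointers replace the RCU accessors, SH_TAB_SIZE and the struct fields are illustrative assumptions, and linear probing stands in for the kernel's offset-based rehash.

```c
#include <stddef.h>
#include <stdbool.h>

#define SH_TAB_SIZE 256                         /* assumed table size (power of two) */

struct dest {
    bool unavailable;                           /* models is_unavailable(dest) */
};

struct bucket {
    struct dest *dest;                          /* RCU-protected pointer in the kernel */
};

/* Try the hashed bucket first; on a miss, probe the following buckets. */
static struct dest *sh_get_fallback(struct bucket *tab, unsigned int ihash)
{
    for (unsigned int off = 0; off < SH_TAB_SIZE; off++) {
        unsigned int hash = (ihash + off) & (SH_TAB_SIZE - 1);
        struct dest *d = tab[hash].dest;        /* rcu_dereference() in the kernel */

        if (d && !d->unavailable)
            return d;                           /* first usable server wins */
    }
    return NULL;                                /* no usable destination at all */
}
```

The `1` passed to rcu_dereference_protected() in the excerpt is the lockdep condition asserting that the caller already excludes concurrent updates (the table is rebuilt under the service lock), which is why the update side does not need the read-side accessor.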
|
| A D | ip_vs_mh.c |
|    113  dest = rcu_dereference_protected(l->dest, 1);  /* in ip_vs_mh_reset() */
|    114  if (dest) {  /* in ip_vs_mh_reset() */
|    211  if (dest)  /* in ip_vs_mh_populate() */
|    242  return (!dest || is_unavailable(dest)) ? NULL : dest;  /* in ip_vs_mh_get() */
|    257  dest = rcu_dereference(s->lookup[ihash].dest);  /* in ip_vs_mh_get_fallback() */
|    258  if (!dest)  /* in ip_vs_mh_get_fallback() */
|    264  IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port));  /* in ip_vs_mh_get_fallback() */
|    273  dest = rcu_dereference(s->lookup[hash].dest);  /* in ip_vs_mh_get_fallback() */
|    274  if (!dest)  /* in ip_vs_mh_get_fallback() */
|    280  IP_VS_DBG_ADDR(dest->af, &dest->addr),  /* in ip_vs_mh_get_fallback() */
|    [all …]
|
| A D | ip_vs_dh.c |
|    103  struct ip_vs_dest *dest;  /* in ip_vs_dh_reassign(), local */
|    110  dest = rcu_dereference_protected(b->dest, 1);  /* in ip_vs_dh_reassign() */
|    111  if (dest)  /* in ip_vs_dh_reassign() */
|    112  ip_vs_dest_put(dest);  /* in ip_vs_dh_reassign() */
|    121  RCU_INIT_POINTER(b->dest, dest);  /* in ip_vs_dh_reassign() */
|    142  dest = rcu_dereference_protected(b->dest, 1);  /* in ip_vs_dh_flush() */
|    143  if (dest) {  /* in ip_vs_dh_flush() */
|    144  ip_vs_dest_put(dest);  /* in ip_vs_dh_flush() */
|    223  if (!dest  /* in ip_vs_dh_schedule() */
|    233  IP_VS_DBG_ADDR(dest->af, &dest->addr),  /* in ip_vs_dh_schedule() */
|    [all …]
|
| A D | ip_vs_lblcr.c |
|    106  if (e->dest == dest)  /* in ip_vs_dest_set_insert() */
|    116  e->dest = dest;  /* in ip_vs_dest_set_insert() */
|    139  if (e->dest == dest) {  /* in ip_vs_dest_set_erase() */
|    184  dest = e->dest;  /* in ip_vs_dest_set_min() */
|    232  dest = e->dest;  /* in ip_vs_dest_set_max() */
|    238  most = dest;  /* in ip_vs_dest_set_max() */
|    678  if (dest && !is_overloaded(dest, svc))  /* in ip_vs_lblcr_schedule() */
|    683  if (!dest) {  /* in ip_vs_lblcr_schedule() */
|    698  if (!dest) {  /* in ip_vs_lblcr_schedule() */
|    712  IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port));  /* in ip_vs_lblcr_schedule() */
|    [all …]
|
| A D | ip_vs_wrr.c |
|     71  struct ip_vs_dest *dest;  /* in ip_vs_wrr_gcd_weight(), local */
|    170  dest = mark->cl;  /* in ip_vs_wrr_schedule() */
|    174  last = dest;  /* in ip_vs_wrr_schedule() */
|    183  if (dest == stop)  /* in ip_vs_wrr_schedule() */
|    214  IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port),  /* in ip_vs_wrr_schedule() */
|    218  mark->cl = dest;  /* in ip_vs_wrr_schedule() */
|    222  return dest;  /* in ip_vs_wrr_schedule() */
|    225  mark->cl = dest;  /* in ip_vs_wrr_schedule() */
|    226  dest = NULL;  /* in ip_vs_wrr_schedule() */
|    231  mark->cl = dest;  /* in ip_vs_wrr_schedule() */
|    [all …]
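ip_vs_wrr.c keeps a cursor (mark->cl) plus a current weight, and steps through the destinations using the greatest common divisor of their weights (ip_vs_wrr_gcd_weight() above). Below is a small, self-contained model of the textbook LVS weighted round-robin loop; the state layout and names are illustrative, and the kernel's handling of weight changes and empty services is omitted.

```c
#include <stdio.h>
#include <stddef.h>

struct server { const char *name; int weight; };

/* greatest common divisor of two non-negative weights */
static int gcd(int a, int b) { while (b) { int t = a % b; a = b; b = t; } return a; }

struct wrr_state { int i; int cw; };    /* last served index, current weight */

static const struct server *wrr_next(const struct server *srv, int n,
                                     struct wrr_state *st, int maxw, int g)
{
    for (;;) {
        st->i = (st->i + 1) % n;
        if (st->i == 0) {
            st->cw -= g;                /* finished a pass: lower the bar */
            if (st->cw <= 0) {
                st->cw = maxw;          /* start a new round */
                if (st->cw == 0)
                    return NULL;        /* all weights are zero */
            }
        }
        if (srv[st->i].weight >= st->cw)
            return &srv[st->i];         /* heavy servers pass the bar more often */
    }
}

int main(void)
{
    const struct server srv[] = { { "a", 4 }, { "b", 2 }, { "c", 1 } };
    int g = gcd(gcd(4, 2), 1), maxw = 4;
    struct wrr_state st = { .i = -1, .cw = 0 };

    for (int k = 0; k < 7; k++)         /* one cycle: a picked 4x, b 2x, c 1x */
        printf("%s ", wrr_next(srv, 3, &st, maxw, g)->name);
    printf("\n");
    return 0;
}
```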
|
| A D | ip_vs_ctl.c |
|    592  hash = ip_vs_rs_hashkey(dest->af, &dest->addr, port);  /* in ip_vs_rs_hash() */
|    624  (dest->protocol == protocol || dest->vfwmark) &&  /* in ip_vs_has_real_service() */
|    655  (dest->protocol == protocol || dest->vfwmark) &&  /* in ip_vs_find_real_service() */
|    743  if (!dest)  /* in ip_vs_find_dest() */
|    745  return dest;  /* in ip_vs_find_dest() */
|    795  IP_VS_DBG_ADDR(dest->af, &dest->addr),  /* in ip_vs_trash_get_dest() */
|    812  dest = NULL;  /* in ip_vs_trash_get_dest() */
|    817  return dest;  /* in ip_vs_trash_get_dest() */
|   1254  IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port),  /* in __ip_vs_del_dest() */
|   1353  IP_VS_DBG_ADDR(dest->af, &dest->addr),  /* in ip_vs_dest_trash_expire() */
|    [all …]
|
| A D | ip_vs_rr.c |
|     42  if (p == &dest->n_list)  /* in ip_vs_rr_del_dest() */
|     57  struct ip_vs_dest *dest, *last;  /* in ip_vs_rr_schedule(), local */
|     67  list_for_each_entry_continue_rcu(dest,  /* in ip_vs_rr_schedule() */
|     70  if (!(dest->flags & IP_VS_DEST_F_OVERLOAD) &&  /* in ip_vs_rr_schedule() */
|     71  atomic_read(&dest->weight) > 0)  /* in ip_vs_rr_schedule() */
|     74  if (dest == last)  /* in ip_vs_rr_schedule() */
|     89  svc->sched_data = &dest->n_list;  /* in ip_vs_rr_schedule() */
|     93  IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port),  /* in ip_vs_rr_schedule() */
|     94  atomic_read(&dest->activeconns),  /* in ip_vs_rr_schedule() */
|     95  refcount_read(&dest->refcnt), atomic_read(&dest->weight));  /* in ip_vs_rr_schedule() */
|    [all …]
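The ip_vs_rr.c matches show plain round-robin: resume from the last served destination (svc->sched_data), skip entries that are overloaded or have weight 0, and give up after one full pass. A minimal array-based sketch of that walk, with indices standing in for the kernel's RCU list cursor:

```c
#include <stdbool.h>

struct rr_dest { int weight; bool overloaded; };

/* *last is the index served previously (-1 before the first call). */
static int rr_schedule(const struct rr_dest *d, int n, int *last)
{
    for (int tried = 0; tried < n; tried++) {
        int i = (*last + 1 + tried) % n;

        if (!d[i].overloaded && d[i].weight > 0) {
            *last = i;          /* remember the position for the next call */
            return i;
        }
    }
    return -1;                  /* one full lap, no available destination */
}
```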
|
| A D | ip_vs_lblc.c |
|    204  if (en->dest == dest)  /* in ip_vs_lblc_new() */
|    216  ip_vs_dest_hold(dest);  /* in ip_vs_lblc_new() */
|    217  en->dest = dest;  /* in ip_vs_lblc_new() */
|    421  least = dest;  /* in __ip_vs_lblc_schedule() */
|    439  least = dest;  /* in __ip_vs_lblc_schedule() */
|    463  if (atomic_read(&dest->activeconns) > atomic_read(&dest->weight)) {  /* in is_overloaded() */
|    505  dest = en->dest;  /* in ip_vs_lblc_schedule() */
|    507  atomic_read(&dest->weight) > 0 && !is_overloaded(dest, svc))  /* in ip_vs_lblc_schedule() */
|    513  if (!dest) {  /* in ip_vs_lblc_schedule() */
|    527  IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port));  /* in ip_vs_lblc_schedule() */
|    [all …]
|
| A D | ip_vs_sed.c |
|     43  ip_vs_sed_dest_overhead(struct ip_vs_dest *dest)  /* in ip_vs_sed_dest_overhead(), argument */
|     49  return atomic_read(&dest->activeconns) + 1;  /* in ip_vs_sed_dest_overhead() */
|     60  struct ip_vs_dest *dest, *least;  /* in ip_vs_sed_schedule(), local */
|     78  list_for_each_entry_rcu(dest, &svc->destinations, n_list) {  /* in ip_vs_sed_schedule() */
|     79  if (!(dest->flags & IP_VS_DEST_F_OVERLOAD) &&  /* in ip_vs_sed_schedule() */
|     80  atomic_read(&dest->weight) > 0) {  /* in ip_vs_sed_schedule() */
|     81  least = dest;  /* in ip_vs_sed_schedule() */
|     94  if (dest->flags & IP_VS_DEST_F_OVERLOAD)  /* in ip_vs_sed_schedule() */
|     96  doh = ip_vs_sed_dest_overhead(dest);  /* in ip_vs_sed_schedule() */
|     97  if ((__s64)loh * atomic_read(&dest->weight) >  /* in ip_vs_sed_schedule() */
|    [all …]
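ip_vs_sed.c is the shortest-expected-delay scheduler: the per-server overhead is activeconns + 1 (line 49 above) and the server minimising overhead/weight wins, with the division replaced by a 64-bit cross-multiplication (line 97). A user-space model of that selection, under the assumption that weights are positive for eligible servers; struct and function names are illustrative:

```c
#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>

struct sed_dest { int activeconns; int weight; bool overloaded; };

/* "Expected delay" numerator: the job being scheduled counts as +1. */
static int64_t sed_overhead(const struct sed_dest *d)
{
    return (int64_t)d->activeconns + 1;
}

static const struct sed_dest *sed_pick(const struct sed_dest *d, size_t n)
{
    const struct sed_dest *least = NULL;
    int64_t loh = 0;

    for (size_t i = 0; i < n; i++) {
        if (d[i].overloaded || d[i].weight <= 0)
            continue;

        int64_t doh = sed_overhead(&d[i]);
        /* doh / w(d[i]) < loh / w(least)  <=>  loh * w(d[i]) > doh * w(least) */
        if (!least || loh * d[i].weight > doh * least->weight) {
            least = &d[i];
            loh = doh;
        }
    }
    return least;               /* NULL if every server is overloaded or weight 0 */
}
```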
|
| A D | ip_vs_twos.c |
|     48  struct ip_vs_dest *dest, *choice1 = NULL, *choice2 = NULL;  /* in ip_vs_twos_schedule(), local */
|     55  list_for_each_entry_rcu(dest, &svc->destinations, n_list) {  /* in ip_vs_twos_schedule() */
|     56  if (!(dest->flags & IP_VS_DEST_F_OVERLOAD)) {  /* in ip_vs_twos_schedule() */
|     57  weight = atomic_read(&dest->weight);  /* in ip_vs_twos_schedule() */
|     60  choice1 = dest;  /* in ip_vs_twos_schedule() */
|     79  if (dest->flags & IP_VS_DEST_F_OVERLOAD)  /* in ip_vs_twos_schedule() */
|     82  weight = atomic_read(&dest->weight);  /* in ip_vs_twos_schedule() */
|     90  choice1 = dest;  /* in ip_vs_twos_schedule() */
|     92  overhead1 = ip_vs_dest_conn_overhead(dest);  /* in ip_vs_twos_schedule() */
|     96  choice2 = dest;  /* in ip_vs_twos_schedule() */
|    [all …]
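ip_vs_twos.c implements power-of-two-choices: draw two candidate destinations and keep the one with the lower connection overhead. The sketch below samples the candidates uniformly for brevity, whereas the matches above show the kernel drawing them in proportion to weight in a single pass; the overhead metric (active connections weighted 256x over inactive ones) is an assumption here.

```c
#include <stdlib.h>
#include <stdbool.h>
#include <stddef.h>

struct twos_dest { int activeconns; int inactconns; bool overloaded; };

/* Assumed overhead metric: active connections dominate inactive ones. */
static long conn_overhead(const struct twos_dest *d)
{
    return ((long)d->activeconns << 8) + d->inactconns;
}

static const struct twos_dest *twos_pick(const struct twos_dest *d, int n)
{
    if (n <= 0)
        return NULL;

    /* Two independent (here: uniform) draws; duplicates are harmless. */
    const struct twos_dest *c1 = &d[rand() % n];
    const struct twos_dest *c2 = &d[rand() % n];

    if (c1->overloaded)
        c1 = NULL;
    if (c2->overloaded)
        c2 = NULL;
    if (c1 && c2)
        return conn_overhead(c2) < conn_overhead(c1) ? c2 : c1;
    return c1 ? c1 : c2;        /* whichever candidate survived, if any */
}
```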
|
| A D | ip_vs_conn.c |
|    590  if (!dest)  /* in ip_vs_bind_dest() */
|    612  cp->dest = dest;  /* in ip_vs_bind_dest() */
|    642  ip_vs_dest_totalconns(dest) >= dest->u_threshold)  /* in ip_vs_bind_dest() */
|    665  if (dest) {  /* in ip_vs_try_bind_dest() */
|    706  struct ip_vs_dest *dest = cp->dest;  /* in ip_vs_unbind_dest(), local */
|    708  if (!dest)  /* in ip_vs_unbind_dest() */
|    738  if (ip_vs_dest_totalconns(dest) < dest->l_threshold)  /* in ip_vs_unbind_dest() */
|    741  if (ip_vs_dest_totalconns(dest) * 4 < dest->u_threshold * 3)  /* in ip_vs_unbind_dest() */
|    769  struct ip_vs_dest *dest = ct->dest;  /* in ip_vs_check_template(), local */
|   1424  dest = cp->dest;  /* in ip_vs_expire_nodest_conn_flush() */
|    [all …]
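The ip_vs_conn.c hits around lines 642, 738 and 741 are the connection-threshold hysteresis: a destination becomes overloaded when its total connections reach u_threshold, and the flag is cleared once the count drops below l_threshold or, if no lower threshold is configured, below three quarters of u_threshold (totalconns * 4 < u_threshold * 3). A compact model of that rule, ignoring where exactly the kernel performs each check (bind vs. unbind):

```c
#include <stdbool.h>

struct thresh_dest {
    int totalconns;
    int l_threshold;            /* 0 means "not configured" */
    int u_threshold;            /* 0 means "not configured" */
    bool overloaded;
};

static void update_overload(struct thresh_dest *d)
{
    /* Reaching the upper threshold marks the server overloaded. */
    if (d->u_threshold && d->totalconns >= d->u_threshold) {
        d->overloaded = true;
        return;
    }

    if (!d->overloaded)
        return;

    if (d->l_threshold) {
        if (d->totalconns < d->l_threshold)
            d->overloaded = false;          /* fell below the explicit low mark */
    } else if (d->u_threshold &&
               d->totalconns * 4 < d->u_threshold * 3) {
        d->overloaded = false;              /* fell below 3/4 of the high mark */
    }
}
```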
|
| A D | ip_vs_nq.c |
|     39  ip_vs_nq_dest_overhead(struct ip_vs_dest *dest)  /* in ip_vs_nq_dest_overhead(), argument */
|     45  return atomic_read(&dest->activeconns) + 1;  /* in ip_vs_nq_dest_overhead() */
|     56  struct ip_vs_dest *dest, *least = NULL;  /* in ip_vs_nq_schedule(), local */
|     74  list_for_each_entry_rcu(dest, &svc->destinations, n_list) {  /* in ip_vs_nq_schedule() */
|     76  if (dest->flags & IP_VS_DEST_F_OVERLOAD ||  /* in ip_vs_nq_schedule() */
|     77  !atomic_read(&dest->weight))  /* in ip_vs_nq_schedule() */
|     80  doh = ip_vs_nq_dest_overhead(dest);  /* in ip_vs_nq_schedule() */
|     83  if (atomic_read(&dest->activeconns) == 0) {  /* in ip_vs_nq_schedule() */
|     84  least = dest;  /* in ip_vs_nq_schedule() */
|     90  ((__s64)loh * atomic_read(&dest->weight) >  /* in ip_vs_nq_schedule() */
|    [all …]
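ip_vs_nq.c ("never queue") is SED with a shortcut: an idle server with zero active connections is taken immediately (line 83 above); otherwise the same overhead/weight cross-multiplication decides (line 90). Sketch under the same assumptions as the SED example above:

```c
#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>

struct nq_dest { int activeconns; int weight; bool overloaded; };

static const struct nq_dest *nq_pick(const struct nq_dest *d, size_t n)
{
    const struct nq_dest *least = NULL;
    int64_t loh = 0;

    for (size_t i = 0; i < n; i++) {
        if (d[i].overloaded || d[i].weight == 0)
            continue;

        if (d[i].activeconns == 0)
            return &d[i];                   /* idle server: never queue behind others */

        int64_t doh = (int64_t)d[i].activeconns + 1;
        if (!least || loh * d[i].weight > doh * least->weight) {
            least = &d[i];
            loh = doh;
        }
    }
    return least;
}
```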
|
| A D | ip_vs_wlc.c |
|     32  struct ip_vs_dest *dest, *least;  /* in ip_vs_wlc_schedule(), local */
|     50  list_for_each_entry_rcu(dest, &svc->destinations, n_list) {  /* in ip_vs_wlc_schedule() */
|     51  if (!(dest->flags & IP_VS_DEST_F_OVERLOAD) &&  /* in ip_vs_wlc_schedule() */
|     52  atomic_read(&dest->weight) > 0) {  /* in ip_vs_wlc_schedule() */
|     53  least = dest;  /* in ip_vs_wlc_schedule() */
|     65  list_for_each_entry_continue_rcu(dest, &svc->destinations, n_list) {  /* in ip_vs_wlc_schedule() */
|     66  if (dest->flags & IP_VS_DEST_F_OVERLOAD)  /* in ip_vs_wlc_schedule() */
|     68  doh = ip_vs_dest_conn_overhead(dest);  /* in ip_vs_wlc_schedule() */
|     69  if ((__s64)loh * atomic_read(&dest->weight) >  /* in ip_vs_wlc_schedule() */
|     71  least = dest;  /* in ip_vs_wlc_schedule() */
|
| A D | ip_vs_xmit.c |
|    301  struct ip_vs_dest *dest,  /* in __ip_vs_get_out_rt(), argument */
|    311  if (dest) {  /* in __ip_vs_get_out_rt() */
|    373  if (!dest)  /* in __ip_vs_get_out_rt() */
|    377  if ((dest->tun_flags &  /* in __ip_vs_get_out_rt() */
|    459  struct ip_vs_dest *dest,  /* in __ip_vs_get_out_rt_v6(), argument */
|    470  if (dest) {  /* in __ip_vs_get_out_rt_v6() */
|    536  if (!dest)  /* in __ip_vs_get_out_rt_v6() */
|    540  if ((dest->tun_flags &  /* in __ip_vs_get_out_rt_v6() */
|   1057  dport = cp->dest->tun_port;  /* in ipvs_gue_encap() */
|   1058  udph->dest = dport;  /* in ipvs_gue_encap() */
|    [all …]
|
| A D | ip_vs_core.c |
|    124  struct ip_vs_dest *dest = cp->dest;  /* in ip_vs_in_stats(), local */
|    127  if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {  /* in ip_vs_in_stats() */
|    160  struct ip_vs_dest *dest = cp->dest;  /* in ip_vs_out_stats(), local */
|    163  if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {  /* in ip_vs_out_stats() */
|    382  dest = ct->dest;  /* in ip_vs_sched_persist() */
|    400  cp = ip_vs_conn_new(&param, dest->af, &dest->addr, dport, flags, dest,  /* in ip_vs_sched_persist() */
|    549  cp = ip_vs_conn_new(&p, dest->af, &dest->addr,  /* in ip_vs_schedule() */
|    550  dest->port ? dest->port : vport,  /* in ip_vs_schedule() */
|   1516  if (!dest)  /* in ipvs_udp_decap() */
|   1550  if (!dest)  /* in ipvs_gre_decap() */
|    [all …]
|
| A D | ip_vs_fo.c |
|     24  struct ip_vs_dest *dest, *hweight = NULL;  /* in ip_vs_fo_schedule(), local */
|     32  list_for_each_entry_rcu(dest, &svc->destinations, n_list) {  /* in ip_vs_fo_schedule() */
|     33  if (!(dest->flags & IP_VS_DEST_F_OVERLOAD) &&  /* in ip_vs_fo_schedule() */
|     34  atomic_read(&dest->weight) > hw) {  /* in ip_vs_fo_schedule() */
|     35  hweight = dest;  /* in ip_vs_fo_schedule() */
|     36  hw = atomic_read(&dest->weight);  /* in ip_vs_fo_schedule() */
|
| A D | ip_vs_lc.c |
|     27  struct ip_vs_dest *dest, *least = NULL;  /* in ip_vs_lc_schedule(), local */
|     41  list_for_each_entry_rcu(dest, &svc->destinations, n_list) {  /* in ip_vs_lc_schedule() */
|     42  if ((dest->flags & IP_VS_DEST_F_OVERLOAD) ||  /* in ip_vs_lc_schedule() */
|     43  atomic_read(&dest->weight) == 0)  /* in ip_vs_lc_schedule() */
|     45  doh = ip_vs_dest_conn_overhead(dest);  /* in ip_vs_lc_schedule() */
|     47  least = dest;  /* in ip_vs_lc_schedule() */
|
| A D | ip_vs_ovf.c |
|     28  struct ip_vs_dest *dest, *h = NULL;  /* in ip_vs_ovf_schedule(), local */
|     35  list_for_each_entry_rcu(dest, &svc->destinations, n_list) {  /* in ip_vs_ovf_schedule() */
|     36  w = atomic_read(&dest->weight);  /* in ip_vs_ovf_schedule() */
|     37  if ((dest->flags & IP_VS_DEST_F_OVERLOAD) ||  /* in ip_vs_ovf_schedule() */
|     38  atomic_read(&dest->activeconns) > w ||  /* in ip_vs_ovf_schedule() */
|     42  h = dest;  /* in ip_vs_ovf_schedule() */
|
| /net/netfilter/ |
| A D | nft_meta.c |
|     62  u32 *dest)  /* in nft_meta_get_eval_time(), argument */
|     81  u32 *dest)  /* in nft_meta_get_eval_pkttype_lo(), argument */
|    131  u32 *dest,  /* in nft_meta_get_eval_skugid(), argument */
|    179  u32 *dest,  /* in nft_meta_get_eval_kind(), argument */
|    226  *dest = dev->group;  /* in nft_meta_store_ifgroup() */
|    282  *dest = dst->tclassid;  /* in nft_meta_get_eval_rtclassid() */
|    319  *dest = skb->len;  /* in nft_meta_get_eval() */
|    333  *dest = skb->priority;  /* in nft_meta_get_eval() */
|    336  *dest = skb->mark;  /* in nft_meta_get_eval() */
|    362  *dest = skb->secmark;  /* in nft_meta_get_eval() */
|    [all …]
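The nft_meta.c matches are the evaluation path that copies one piece of packet metadata into a 32-bit destination register (*dest): skb->len, skb->priority, skb->mark, skb->secmark and so on. A reduced user-space model of that dispatch, with a stand-in packet structure and only the keys visible above; the real helper also handles keys wider than 32 bits and breaks the rule when a key cannot be resolved.

```c
#include <stdint.h>
#include <stdbool.h>

enum meta_key { META_LEN, META_PRIORITY, META_MARK, META_SECMARK };

struct pkt { uint32_t len, priority, mark, secmark; };  /* stand-in for sk_buff */

/* Copy the requested key into the destination register slot. */
static bool meta_get_eval(enum meta_key key, const struct pkt *skb, uint32_t *dest)
{
    switch (key) {
    case META_LEN:      *dest = skb->len;      return true;
    case META_PRIORITY: *dest = skb->priority; return true;
    case META_MARK:     *dest = skb->mark;     return true;
    case META_SECMARK:  *dest = skb->secmark;  return true;
    }
    return false;               /* unknown key: the kernel breaks the rule here */
}
```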
|
| A D | nft_ct_fast.c |
|     12  u32 *dest = &regs->data[priv->dreg];  /* in nft_ct_get_fast_eval(), local */
|     27  *dest = state;  /* in nft_ct_get_fast_eval() */
|     40  nft_reg_store8(dest, CTINFO2DIR(ctinfo));  /* in nft_ct_get_fast_eval() */
|     43  *dest = ct->status;  /* in nft_ct_get_fast_eval() */
|     47  *dest = ct->mark;  /* in nft_ct_get_fast_eval() */
|     52  *dest = ct->secmark;  /* in nft_ct_get_fast_eval() */
|
| /net/6lowpan/ |
| A D | nhc_udp.c |
|     49  fail |= lowpan_fetch_skb(skb, &uh.dest, sizeof(uh.dest));  /* in udp_uncompress() */
|     54  uh.dest = htons(val + LOWPAN_NHC_UDP_8BIT_PORT);  /* in udp_uncompress() */
|     59  fail |= lowpan_fetch_skb(skb, &uh.dest, sizeof(uh.dest));  /* in udp_uncompress() */
|     64  uh.dest = htons(LOWPAN_NHC_UDP_4BIT_PORT + (val & 0x0f));  /* in udp_uncompress() */
|     71  ntohs(uh.source), ntohs(uh.dest));  /* in udp_uncompress() */
|    122  ((ntohs(uh->dest) & LOWPAN_NHC_UDP_4BIT_MASK) ==  /* in udp_compress() */
|    129  tmp = ntohs(uh->dest) - LOWPAN_NHC_UDP_4BIT_PORT +  /* in udp_compress() */
|    132  } else if ((ntohs(uh->dest) & LOWPAN_NHC_UDP_8BIT_MASK) ==  /* in udp_compress() */
|    141  tmp = ntohs(uh->dest) - LOWPAN_NHC_UDP_8BIT_PORT;  /* in udp_compress() */
|    153  lowpan_push_hc_data(hc_ptr, &uh->dest, sizeof(uh->dest));  /* in udp_compress() */
|    [all …]
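nhc_udp.c implements RFC 6282 UDP port compression: ports in the 0xF0Bx range are carried in 4 bits each, ports in the 0xF0xx range in 8 bits, anything else inline. The base and mask constants below follow RFC 6282 and are assumed to correspond to the LOWPAN_NHC_UDP_* macros in the excerpt; the classifier is simplified in that the real encoder handles the two 8-bit cases (source vs. dest) separately and works on network-byte-order fields.

```c
#include <stdint.h>
#include <stdio.h>

/* RFC 6282 well-known port ranges (assumed equal to the LOWPAN_NHC_UDP_* macros). */
#define UDP_4BIT_PORT 0xF0B0u
#define UDP_4BIT_MASK 0xFFF0u
#define UDP_8BIT_PORT 0xF000u
#define UDP_8BIT_MASK 0xFF00u

/* How many bits per port the NHC encoding needs: 4, 8 or 16 (host byte order). */
static int udp_port_mode(uint16_t source, uint16_t dest)
{
    if ((source & UDP_4BIT_MASK) == UDP_4BIT_PORT &&
        (dest & UDP_4BIT_MASK) == UDP_4BIT_PORT)
        return 4;               /* both ports: one nibble each on top of 0xF0B0 */
    if ((source & UDP_8BIT_MASK) == UDP_8BIT_PORT ||
        (dest & UDP_8BIT_MASK) == UDP_8BIT_PORT)
        return 8;               /* at least one port fits the 0xF0xx single-byte form */
    return 16;                  /* carried inline, uncompressed */
}

int main(void)
{
    printf("0xF0B3/0xF0B5 -> %d-bit ports\n", udp_port_mode(0xF0B3, 0xF0B5));
    printf("0xF012/0x1234 -> %d-bit ports\n", udp_port_mode(0xF012, 0x1234));
    printf("0x1234/0x5678 -> %d-bit ports\n", udp_port_mode(0x1234, 0x5678));
    return 0;
}
```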
|
| /net/mac802154/ |
| A D | scan.c |
|    539  frame.mhr.dest.mode = coord->mode;  /* in mac802154_perform_association() */
|    540  frame.mhr.dest.pan_id = coord->pan_id;  /* in mac802154_perform_association() */
|    544  frame.mhr.dest.short_addr = coord->short_addr;  /* in mac802154_perform_association() */
|    613  struct ieee802154_addr *dest = &mac_cb(skb)->dest;  /* in mac802154_process_association_resp(), local */
|    666  frame.mhr.dest.mode = target->mode;  /* in mac802154_send_disassociation_notif() */
|    667  frame.mhr.dest.pan_id = wpan_dev->pan_id;  /* in mac802154_send_disassociation_notif() */
|    725  frame.mhr.dest.mode = IEEE802154_ADDR_LONG;  /* in mac802154_send_association_resp_locked() */
|    726  frame.mhr.dest.pan_id = wpan_dev->pan_id;  /* in mac802154_send_association_resp_locked() */
|    762  struct ieee802154_addr *dest = &mac_cb(skb)->dest;  /* in mac802154_process_association_req(), local */
|    868  struct ieee802154_addr *dest = &mac_cb(skb)->dest;  /* in mac802154_process_disassociation_notif(), local */
|    [all …]
|
| /net/batman-adv/ |
| A D | multicast_forw.c |
|    664  eth_zero_addr(dest);  /* in batadv_mcast_forw_scrub_dests() */
|    665  dest += ETH_ALEN;  /* in batadv_mcast_forw_scrub_dests() */
|    675  eth_zero_addr(dest);  /* in batadv_mcast_forw_scrub_dests() */
|    695  eth_zero_addr(dest);  /* in batadv_mcast_forw_scrub_dests() */
|    936  u8 *dest, *next_dest;  /* in batadv_mcast_forw_packet(), local */
|    963  if (is_zero_ether_addr(dest))  /* in batadv_mcast_forw_packet() */
|    967  if (is_multicast_ether_addr(dest)) {  /* in batadv_mcast_forw_packet() */
|    968  eth_zero_addr(dest);  /* in batadv_mcast_forw_packet() */
|    973  eth_zero_addr(dest);  /* in batadv_mcast_forw_packet() */
|    980  eth_zero_addr(dest);  /* in batadv_mcast_forw_packet() */
|    [all …]
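The multicast_forw.c matches walk a packed array of Ethernet destination addresses inside a batman-adv multicast tracker TVLV and blank out (eth_zero_addr()) the entries a given next hop should not serve. A minimal sketch of that scrubbing walk; keep() is a caller-supplied predicate standing in for batman-adv's actual next-hop decision, and the surrounding TVLV parsing is omitted.

```c
#include <stdbool.h>
#include <string.h>

#define ETH_ALEN 6

static bool is_zero_ether_addr(const unsigned char *a)
{
    static const unsigned char zero[ETH_ALEN];
    return memcmp(a, zero, ETH_ALEN) == 0;
}

static bool is_multicast_ether_addr(const unsigned char *a)
{
    return a[0] & 0x01;         /* group bit set in the first octet */
}

/* Blank every entry that is empty, multicast, or filtered out by keep(). */
static void scrub_dests(unsigned char *dest, unsigned int num_dests,
                        bool (*keep)(const unsigned char *addr))
{
    for (unsigned int i = 0; i < num_dests; i++, dest += ETH_ALEN) {
        if (is_zero_ether_addr(dest) || is_multicast_ether_addr(dest) ||
            !keep(dest))
            memset(dest, 0, ETH_ALEN);      /* eth_zero_addr() equivalent */
    }
}
```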
|
| /net/ax25/ |
| A D | ax25_in.c |
|    186  ax25_address src, dest, *next_digi = NULL;  /* in ax25_rcv(), local */
|    221  if (ax25cmp(&dest, dev_addr) == 0 && dp.lastrepeat + 1 == dp.ndigi)  /* in ax25_rcv() */
|    232  ax25_send_to_raw(&dest, skb, skb->data[1]);  /* in ax25_rcv() */
|    234  if (!mine && ax25cmp(&dest, (ax25_address *)dev->broadcast) != 0)  /* in ax25_rcv() */
|    260  sk = ax25_get_socket(&dest, &src, SOCK_DGRAM);  /* in ax25_rcv() */
|    303  if ((ax25 = ax25_find_cb(&dest, &src, &reverse_dp, dev)) != NULL) {  /* in ax25_rcv() */
|    328  ax25_return_dm(dev, &src, &dest, &dp);  /* in ax25_rcv() */
|    336  sk = ax25_find_listener(&dest, 0, dev, SOCK_SEQPACKET);  /* in ax25_rcv() */
|    345  ax25_return_dm(dev, &src, &dest, &dp);  /* in ax25_rcv() */
|    366  ax25_return_dm(dev, &src, &dest, &dp);  /* in ax25_rcv() */
|    [all …]
|
| /net/sctp/ |
| A D | bind_addr.c |
|     43  int sctp_bind_addr_copy(struct net *net, struct sctp_bind_addr *dest,  /* in sctp_bind_addr_copy(), argument */
|     52  dest->port = src->port;  /* in sctp_bind_addr_copy() */
|     56  error = sctp_copy_one_addr(net, dest, &addr->a, scope,  /* in sctp_bind_addr_copy() */
|     68  error = sctp_copy_one_addr(net, dest, &addr->a,  /* in sctp_bind_addr_copy() */
|     79  if (list_empty(&dest->address_list))  /* in sctp_bind_addr_copy() */
|     84  sctp_bind_addr_clean(dest);  /* in sctp_bind_addr_copy() */
|     94  int sctp_bind_addr_dup(struct sctp_bind_addr *dest,  /* in sctp_bind_addr_dup(), argument */
|    102  dest->port = src->port;  /* in sctp_bind_addr_dup() */
|    105  error = sctp_add_bind_addr(dest, &addr->a, sizeof(addr->a),  /* in sctp_bind_addr_dup() */
|    466  error = sctp_copy_local_addr_list(net, dest, scope, gfp, flags);  /* in sctp_copy_one_addr() */
|    [all …]
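bind_addr.c copies one SCTP bind-address list into another: the port is copied first, each source address is appended (subject to scope filtering in the kernel), and a partially filled destination list is cleaned up if any step fails. A user-space sketch of that copy-with-rollback shape, using a simplified singly linked address type; note the sketch prepends entries, so it does not preserve order the way sctp_bind_addr_copy() does.

```c
#include <stdlib.h>
#include <string.h>

struct addr_entry { char addr[64]; struct addr_entry *next; };

struct bind_addr_list { unsigned short port; struct addr_entry *head; };

static void bind_addr_clean(struct bind_addr_list *dest)
{
    while (dest->head) {
        struct addr_entry *e = dest->head;
        dest->head = e->next;
        free(e);
    }
}

/* dest is assumed to start empty (head == NULL), as after bind_addr_clean(). */
static int bind_addr_copy(struct bind_addr_list *dest,
                          const struct bind_addr_list *src)
{
    dest->port = src->port;                 /* mirrors "dest->port = src->port" */

    for (const struct addr_entry *a = src->head; a; a = a->next) {
        struct addr_entry *e = malloc(sizeof(*e));

        if (!e) {
            bind_addr_clean(dest);          /* roll back the partial copy */
            return -1;
        }
        memcpy(e->addr, a->addr, sizeof(e->addr));
        e->next = dest->head;               /* prepends: order not preserved (sketch only) */
        dest->head = e;
    }
    return 0;
}
```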
|