| /net/l2tp/ |
| A D | l2tp_debugfs.c |
    49     pd->tunnel = l2tp_tunnel_get_next(pd->net, &pd->tkey);    in l2tp_dfs_next_tunnel()
    50     pd->tkey++;    in l2tp_dfs_next_tunnel()
    59     pd->session = l2tp_session_get_next(pd->net, pd->tunnel->sock,    in l2tp_dfs_next_session()
    61     pd->tunnel->tunnel_id, &pd->skey);    in l2tp_dfs_next_session()
    90     if (!pd->tunnel && !pd->session)    in l2tp_dfs_seq_start()
    94     return pd;    in l2tp_dfs_seq_start()
    107    if (!pd || pd == SEQ_START_TOKEN)    in l2tp_dfs_seq_stop()
    272    pd = kzalloc(sizeof(*pd), GFP_KERNEL);    in l2tp_dfs_seq_open()
    273    if (!pd)    in l2tp_dfs_seq_open()
    296    put_net_track(pd->net, &pd->ns_tracker);    in l2tp_dfs_seq_open()
    [all …]
|
| A D | l2tp_ppp.c |
    1414    pd->tunnel = l2tp_tunnel_get_next(net, &pd->tkey);    in pppol2tp_next_tunnel()
    1415    pd->tkey++;    in pppol2tp_next_tunnel()
    1418    if (!pd->tunnel || pd->tunnel->version == 2)    in pppol2tp_next_tunnel()
    1431    pd->session = l2tp_session_get_next(net, pd->tunnel->sock,    in pppol2tp_next_session()
    1433    pd->tunnel->tunnel_id, &pd->skey);    in pppol2tp_next_session()
    1434    pd->skey++;    in pppol2tp_next_session()
    1452    pd = NULL;    in pppol2tp_seq_start()
    1465    if (!pd->tunnel && !pd->session)    in pppol2tp_seq_start()
    1466    pd = NULL;    in pppol2tp_seq_start()
    1469    return pd;    in pppol2tp_seq_start()
    [all …]
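Both l2tp files above drive their seq_file iterators with the same two-level cursor: pd->tkey resumes the tunnel walk and pd->skey resumes the session walk inside the current tunnel, with each get_next() helper returning the next live object at or after the key and the caller bumping the key past it. A stripped-down user-space model of that resumable iteration (the array sizes and helper names are illustrative stand-ins, not the kernel APIs):

```c
#include <stdio.h>

#define NTUNNELS  2
#define NSESSIONS 3

/* Stand-ins for l2tp_tunnel_get_next()/l2tp_session_get_next(): return the
 * next live object at or after *key, or a negative value when exhausted. */
static int tunnel_get_next(unsigned long *tkey)
{
	return *tkey < NTUNNELS ? (int)*tkey : -1;
}

static int session_get_next(int tunnel, unsigned long *skey)
{
	(void)tunnel;			/* one flat session list per tunnel here */
	return *skey < NSESSIONS ? (int)*skey : -1;
}

int main(void)
{
	unsigned long tkey = 0;
	int tunnel;

	while ((tunnel = tunnel_get_next(&tkey)) >= 0) {
		unsigned long skey = 0;
		int session;

		tkey++;				/* like pd->tkey++ after each hit */
		while ((session = session_get_next(tunnel, &skey)) >= 0) {
			skey++;			/* like pd->skey++ */
			printf("tunnel %d session %d\n", tunnel, session);
		}
	}
	return 0;
}
```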
|
| /net/netfilter/ipvs/ |
| A D | ip_vs_proto.c |
    69     struct ip_vs_proto_data *pd =    in register_ip_vs_proto_netns()    local
    72     if (!pd)    in register_ip_vs_proto_netns()
    85     kfree(pd);    in register_ip_vs_proto_netns()
    125    if (*pd_p == pd) {    in unregister_ip_vs_proto_netns()
    126    *pd_p = pd->next;    in unregister_ip_vs_proto_netns()
    128    pd->pp->exit_netns(ipvs, pd);    in unregister_ip_vs_proto_netns()
    129    kfree(pd);    in unregister_ip_vs_proto_netns()
    163    for (pd = ipvs->proto_data_table[hash]; pd; pd = pd->next) {    in ip_vs_proto_data_get()
    165    return pd;    in ip_vs_proto_data_get()
    181    for (pd = ipvs->proto_data_table[i]; pd; pd = pd->next) {    in ip_vs_protocol_timeout_change()
    [all …]
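The ip_vs_proto.c hits are the per-netns protocol-data table: register_ip_vs_proto_netns() sets up a struct ip_vs_proto_data for the netns, and ip_vs_proto_data_get() finds it again by hashing the IP protocol number and walking the pd->next collision chain. A minimal user-space sketch of that chained lookup; the table size and hash below are simplified stand-ins, not the kernel's exact definitions:

```c
#include <stddef.h>
#include <stdio.h>

#define PROTO_TAB_SIZE	32			/* small power-of-two table */
#define proto_hash(p)	((p) & (PROTO_TAB_SIZE - 1))

struct proto_data {				/* stand-in for struct ip_vs_proto_data */
	unsigned short protocol;		/* IPPROTO_* number this entry covers */
	struct proto_data *next;		/* collision chain, like pd->next */
};

static struct proto_data *proto_tab[PROTO_TAB_SIZE];

/* Same shape as ip_vs_proto_data_get(): hash, then walk the chain. */
static struct proto_data *proto_data_get(unsigned short proto)
{
	struct proto_data *pd;

	for (pd = proto_tab[proto_hash(proto)]; pd; pd = pd->next)
		if (pd->protocol == proto)
			return pd;
	return NULL;
}

int main(void)
{
	static struct proto_data tcp = { .protocol = 6 };	/* IPPROTO_TCP */

	proto_tab[proto_hash(tcp.protocol)] = &tcp;
	printf("lookup(6): %s\n", proto_data_get(6) ? "found" : "missing");
	return 0;
}
```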
|
| A D | ip_vs_proto_tcp.c |
    36     struct ip_vs_proto_data *pd,    in tcp_conn_schedule()    argument
    544    pd->pp->name,    in set_tcp_state()
    578    if (likely(pd))    in set_tcp_state()
    590    struct ip_vs_proto_data *pd)    in tcp_state_transition()    argument
    605    set_tcp_state(pd, cp, direction, th);    in tcp_state_transition()
    633    atomic_inc(&pd->appcnt);    in tcp_register_app()
    645    atomic_dec(&pd->appcnt);    in tcp_unregister_app()
    699    cp->timeout = (pd ? pd->timeout_table[IP_VS_TCP_S_LISTEN]    in ip_vs_tcp_conn_listen()
    713    if (!pd->timeout_table)    in __ip_vs_tcp_init()
    715    pd->tcp_state_table = tcp_states;    in __ip_vs_tcp_init()
    [all …]
|
| A D | ip_vs_proto_udp.c |
    32     struct ip_vs_proto_data *pd,    in udp_conn_schedule()    argument
    78     *cpp = ip_vs_schedule(svc, skb, pd, &ignored, iph);    in udp_conn_schedule()
    81     *verdict = ip_vs_leave(svc, skb, pd, iph);    in udp_conn_schedule()
    378    atomic_inc(&pd->appcnt);    in udp_register_app()
    390    atomic_dec(&pd->appcnt);    in udp_unregister_app()
    454    struct ip_vs_proto_data *pd)    in udp_state_transition()    argument
    456    if (unlikely(!pd)) {    in udp_state_transition()
    461    cp->timeout = pd->timeout_table[IP_VS_UDP_S_NORMAL];    in udp_state_transition()
    469    pd->timeout_table = ip_vs_create_timeout_table((int *)udp_timeouts,    in __udp_init()
    471    if (!pd->timeout_table)    in __udp_init()
    [all …]
|
| A D | ip_vs_proto_sctp.c |
    17     struct ip_vs_proto_data *pd,    in sctp_conn_schedule()    argument
    70     *cpp = ip_vs_schedule(svc, skb, pd, &ignored, iph);    in sctp_conn_schedule()
    73     *verdict = ip_vs_leave(svc, skb, pd, iph);    in sctp_conn_schedule()
    443    pd->pp->name,    in set_sctp_state()
    469    if (likely(pd))    in set_sctp_state()
    477    const struct sk_buff *skb, struct ip_vs_proto_data *pd)    in sctp_state_transition()    argument
    480    set_sctp_state(pd, cp, direction, skb);    in sctp_state_transition()
    507    atomic_inc(&pd->appcnt);    in sctp_register_app()
    517    atomic_dec(&pd->appcnt);    in sctp_unregister_app()
    566    if (!pd->timeout_table)    in __ip_vs_sctp_init()
    [all …]
|
| A D | ip_vs_core.c |
    223     struct ip_vs_proto_data *pd)    in ip_vs_set_state()    argument
    226     pd->pp->state_transition(cp, direction, skb, pd);    in ip_vs_set_state()
    1335    struct ip_vs_proto_data *pd;    in ip_vs_out_hook()    local
    1381    if (unlikely(!pd))    in ip_vs_out_hook()
    1383    pp = pd->pp;    in ip_vs_out_hook()
    1689    if (!pd)    in ip_vs_in_icmp()
    1691    pp = pd->pp;    in ip_vs_in_icmp()
    1848    if (!pd)    in ip_vs_in_icmp_v6()
    1850    pp = pd->pp;    in ip_vs_in_icmp_v6()
    1980    if (unlikely(!pd)) {    in ip_vs_in_hook()
    [all …]
|
| A D | ip_vs_conn.c |
    666     struct ip_vs_proto_data *pd;    in ip_vs_try_bind_dest()    local
    692     pd = ip_vs_proto_data_get(cp->ipvs, cp->protocol);    in ip_vs_try_bind_dest()
    693     if (pd && atomic_read(&pd->appcnt))    in ip_vs_try_bind_dest()
    694     ip_vs_bind_app(cp, pd->pp);    in ip_vs_try_bind_dest()
    947     struct ip_vs_proto_data *pd = ip_vs_proto_data_get(p->ipvs,    in ip_vs_conn_new()    local
    1024    if (unlikely(pd && atomic_read(&pd->appcnt)))    in ip_vs_conn_new()
    1025    ip_vs_bind_app(cp, pd->pp);    in ip_vs_conn_new()
|
| A D | ip_vs_proto_ah_esp.c | 104 struct ip_vs_proto_data *pd, in ah_esp_conn_schedule() argument
|
| A D | ip_vs_ctl.c |
    2578    struct ip_vs_proto_data *pd;    in ip_vs_set_timeout()    local
    2600    pd = ip_vs_proto_data_get(ipvs, IPPROTO_TCP);    in ip_vs_set_timeout()
    2601    pd->timeout_table[IP_VS_TCP_S_ESTABLISHED]    in ip_vs_set_timeout()
    2606    pd = ip_vs_proto_data_get(ipvs, IPPROTO_TCP);    in ip_vs_set_timeout()
    2607    pd->timeout_table[IP_VS_TCP_S_FIN_WAIT]    in ip_vs_set_timeout()
    2614    pd = ip_vs_proto_data_get(ipvs, IPPROTO_UDP);    in ip_vs_set_timeout()
    2615    pd->timeout_table[IP_VS_UDP_S_NORMAL]    in ip_vs_set_timeout()
    2975    struct ip_vs_proto_data *pd;    in __ip_vs_get_timeouts()    local
    2981    pd = ip_vs_proto_data_get(ipvs, IPPROTO_TCP);    in __ip_vs_get_timeouts()
    2986    pd = ip_vs_proto_data_get(ipvs, IPPROTO_UDP);    in __ip_vs_get_timeouts()
    [all …]
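ip_vs_set_timeout() and __ip_vs_get_timeouts() only touch entries in the per-protocol timeout tables; each table is a flat array indexed by connection state, as the IP_VS_TCP_S_*/IP_VS_UDP_S_* hits here and in the proto_tcp/proto_udp entries above suggest. A tiny model of that indexing, with illustrative states and plain seconds instead of the jiffies tables that ip_vs_create_timeout_table() builds:

```c
#include <stdio.h>

enum tcp_state { S_ESTABLISHED, S_FIN_WAIT, S_LISTEN, S_LAST };

/* One timeout per state; the values here are examples, not kernel defaults. */
static int tcp_timeouts[S_LAST] = {
	[S_ESTABLISHED]	= 15 * 60,
	[S_FIN_WAIT]	= 2 * 60,
	[S_LISTEN]	= 2 * 60,
};

int main(void)
{
	/* The "set" path writes one slot; connections later read it by state. */
	tcp_timeouts[S_ESTABLISHED] = 30 * 60;
	printf("established timeout: %d s\n", tcp_timeouts[S_ESTABLISHED]);
	return 0;
}
```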
|
| A D | ip_vs_xmit.c |
    1029    __be16 *pd;    in ipvs_gue_encap()    local
    1039    pd = data;    in ipvs_gue_encap()
    1040    pd[0] = htons(csum_start);    in ipvs_gue_encap()
    1041    pd[1] = htons(csum_start + skb->csum_offset);    in ipvs_gue_encap()
|
| A D | ip_vs_sync.c |
    945    struct ip_vs_proto_data *pd;    in ip_vs_proc_conn()    local
    947    pd = ip_vs_proto_data_get(ipvs, protocol);    in ip_vs_proc_conn()
    948    if (!(flags & IP_VS_CONN_F_TEMPLATE) && pd && pd->timeout_table)    in ip_vs_proc_conn()
    949    cp->timeout = pd->timeout_table[state];    in ip_vs_proc_conn()
|
| /net/9p/ |
| A D | trans_rdma.c |
    77     struct ib_pd *pd;    member
    370    if (rdma->pd && !IS_ERR(rdma->pd))    in rdma_destroy_trans()
    371    ib_dealloc_pd(rdma->pd);    in rdma_destroy_trans()
    400    sge.lkey = rdma->pd->local_dma_lkey;    in post_recv()
    497    sge.lkey = rdma->pd->local_dma_lkey;    in rdma_request()
    704    rdma->pd = ib_alloc_pd(rdma->cm_id->device, 0);    in rdma_create_trans()
    705    if (IS_ERR(rdma->pd))    in rdma_create_trans()
    720    err = rdma_create_qp(rdma->cm_id, rdma->pd, &qp_attr);    in rdma_create_trans()
|
| /net/rds/ |
| A D | ib.c |
    108    if (rds_ibdev->pd)    in rds_ib_dev_free()
    109    ib_dealloc_pd(rds_ibdev->pd);    in rds_ib_dev_free()
    186    rds_ibdev->pd = ib_alloc_pd(device, 0);    in rds_ib_add_one()
    187    if (IS_ERR(rds_ibdev->pd)) {    in rds_ib_add_one()
    188    ret = PTR_ERR(rds_ibdev->pd);    in rds_ib_add_one()
    189    rds_ibdev->pd = NULL;    in rds_ib_add_one()
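Both trans_rdma.c and rds/ib.c obtain an RDMA protection domain with ib_alloc_pd() and lean on the kernel's error-pointer convention: the returned pointer is either valid or encodes a negative errno, tested with IS_ERR() and decoded with PTR_ERR(). A self-contained sketch of that convention using simplified copies of the macros (the real ones live in include/linux/err.h, and alloc_pd() below is a stand-in, not the verbs API):

```c
#include <errno.h>
#include <stdio.h>

/* Simplified copies of the kernel's error-pointer helpers. */
#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

struct pd { int unused; };

/* Stand-in for ib_alloc_pd(): a valid pointer on success, ERR_PTR(-errno) on failure. */
static struct pd *alloc_pd(int fail)
{
	static struct pd the_pd;

	return fail ? ERR_PTR(-ENOMEM) : &the_pd;
}

int main(void)
{
	struct pd *pd = alloc_pd(1);

	if (IS_ERR(pd)) {		/* same shape as the rds_ib_add_one() hits above */
		long ret = PTR_ERR(pd);

		pd = NULL;
		printf("alloc failed: %ld\n", ret);
		return 1;
	}
	printf("pd allocated\n");
	return 0;
}
```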
|
| A D | ib_rdma.c |
    576    ib_mr = ib_reg_user_mr(rds_ibdev->pd, start, length, virt_addr,    in rds_ib_get_mr()
    601    ib_advise_mr(rds_ibdev->pd,    in rds_ib_get_mr()
|
| A D | ib.h | 248 struct ib_pd *pd; member
|
| A D | ib_frmr.c | 78 frmr->mr = ib_alloc_mr(rds_ibdev->pd, IB_MR_TYPE_MEM_REG, in rds_ib_alloc_frmr()
|
| A D | ib_cm.c | 540 ic->i_pd = rds_ibdev->pd; in rds_ib_setup_qp()
|
| /net/sctp/ |
| A D | ulpevent.c |
    777    struct sctp_pdapi_event *pd;    in sctp_ulpevent_make_pdapi()    local
    786    pd = skb_put(skb, sizeof(struct sctp_pdapi_event));    in sctp_ulpevent_make_pdapi()
    794    pd->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;    in sctp_ulpevent_make_pdapi()
    795    pd->pdapi_flags = flags;    in sctp_ulpevent_make_pdapi()
    796    pd->pdapi_stream = sid;    in sctp_ulpevent_make_pdapi()
    797    pd->pdapi_seq = seq;    in sctp_ulpevent_make_pdapi()
    805    pd->pdapi_length = sizeof(struct sctp_pdapi_event);    in sctp_ulpevent_make_pdapi()
    811    pd->pdapi_indication = indication;    in sctp_ulpevent_make_pdapi()
    818    pd->pdapi_assoc_id = sctp_assoc2id(asoc);    in sctp_ulpevent_make_pdapi()
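ulpevent.c is where the kernel builds the SCTP_PARTIAL_DELIVERY_EVENT notification; the pd-> assignments above fill the struct sctp_pdapi_event that user space reads back as part of a union sctp_notification. A hedged user-space sketch of consuming that event, assuming the usual <netinet/sctp.h> definitions (the socket plumbing that actually delivers the notification is elided, so main() just fabricates one):

```c
#include <netinet/sctp.h>
#include <stdio.h>

/* Inspect one notification read from an SCTP socket with MSG_NOTIFICATION set. */
static void handle_notification(const union sctp_notification *sn)
{
	if (sn->sn_header.sn_type != SCTP_PARTIAL_DELIVERY_EVENT)
		return;

	const struct sctp_pdapi_event *pd = &sn->sn_pdapi_event;

	/* pdapi_indication says why partial delivery was interrupted. */
	printf("pdapi event: assoc %d, indication %u\n",
	       (int)pd->pdapi_assoc_id, (unsigned int)pd->pdapi_indication);
}

int main(void)
{
	union sctp_notification sn = { 0 };

	sn.sn_pdapi_event.pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
	sn.sn_pdapi_event.pdapi_indication = SCTP_PARTIAL_DELIVERY_ABORTED;
	handle_notification(&sn);
	return 0;
}
```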
|
| /net/ipv4/ |
| A D | fou_core.c |
    94      __be16 *pd = data;    in gue_remcsum()    local
    95      size_t start = ntohs(pd[0]);    in gue_remcsum()
    96      size_t offset = ntohs(pd[1]);    in gue_remcsum()
    304     __be16 *pd = data;    in gue_gro_remcsum()    local
    305     size_t start = ntohs(pd[0]);    in gue_gro_remcsum()
    306     size_t offset = ntohs(pd[1]);    in gue_gro_remcsum()
    1009    __be16 *pd = data;    in __gue_build_header()    local
    1015    pd[0] = htons(csum_start);    in __gue_build_header()
    1016    pd[1] = htons(csum_start + skb->csum_offset);    in __gue_build_header()
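The fou_core.c hits and the ipvs_gue_encap() entry above read and write the same two-word GUE remote-checksum-offload option: pd[0] carries the offset where checksumming starts and pd[1] the offset of the checksum field itself, both as 16-bit big-endian values. A self-contained sketch of that encode/decode; the numeric offsets are made up, where the kernel derives them from skb->csum_start/skb->csum_offset:

```c
#include <arpa/inet.h>	/* htons()/ntohs() */
#include <stdint.h>
#include <stdio.h>

/* Write the option the way __gue_build_header()/ipvs_gue_encap() do. */
static void remcsum_encode(uint16_t *pd, uint16_t csum_start, uint16_t csum_offset)
{
	pd[0] = htons(csum_start);			/* where checksumming starts */
	pd[1] = htons(csum_start + csum_offset);	/* where the checksum is stored */
}

int main(void)
{
	uint16_t pd[2];

	remcsum_encode(pd, 42, 16);

	/* Receive side, same shape as the gue_remcsum() hits above. */
	size_t start  = ntohs(pd[0]);
	size_t offset = ntohs(pd[1]);

	printf("csum start=%zu, csum field at=%zu\n", start, offset);
	return 0;
}
```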
|
| /net/ethtool/ |
| A D | Makefile | 11 module.o cmis_fw_update.o cmis_cdb.o pse-pd.o plca.o mm.o \
|
| /net/smc/ |
| A D | smc_ib.h | 101 int smc_ib_get_memory_region(struct ib_pd *pd, int access_flags,
|
| A D | smc_ib.c |
    715    int smc_ib_get_memory_region(struct ib_pd *pd, int access_flags,    in smc_ib_get_memory_region()    argument
    722    ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 1 << buf_slot->order);    in smc_ib_get_memory_region()
|