| /net/ipv4/ |
| inet_fragment.c |
    471  if (!fp) {    in inet_frag_reasm_prepare()
    561  struct sk_buff *fp;    in inet_frag_reasm_finish()  (local)
    572  while (rbn || fp) {    in inet_frag_reasm_finish()
    577  while (fp) {    in inet_frag_reasm_finish()
    592  fp->prev = NULL;    in inet_frag_reasm_finish()
    593  memset(&fp->rbnode, 0, sizeof(fp->rbnode));    in inet_frag_reasm_finish()
    594  fp->sk = NULL;    in inet_frag_reasm_finish()
    600  *nextp = fp;    in inet_frag_reasm_finish()
    601  nextp = &fp->next;    in inet_frag_reasm_finish()
    604  fp = next_frag;    in inet_frag_reasm_finish()
    [all …]
|
| /net/sched/ |
| cls_route.c |
    318  struct route4_filter __rcu **fp;    in route4_delete()  (local)
    332  fp = &nf->next, nf = rtnl_dereference(*fp)) {    in route4_delete()
    390  struct route4_filter *fp;    in route4_set_parms()  (local)
    450  fp;    in route4_set_parms()
    451  fp = rtnl_dereference(fp->next))    in route4_set_parms()
    452  if (fp->handle == f->handle)    in route4_set_parms()
    534  fp = &f->bkt->ht[h];    in route4_change()
    537  fp = &f1->next)    in route4_change()
    543  rcu_assign_pointer(*fp, f);    in route4_change()
    550  fp = &b->ht[h];    in route4_change()
    [all …]
|
| cls_fw.c |
    161  struct fw_filter __rcu **fp;    in fw_delete()  (local)
    169  fp = &head->ht[fw_hash(f->id)];    in fw_delete()
    171  for (pfp = rtnl_dereference(*fp); pfp;    in fw_delete()
    172  fp = &pfp->next, pfp = rtnl_dereference(*fp)) {    in fw_delete()
    174  RCU_INIT_POINTER(*fp, rtnl_dereference(f->next));    in fw_delete()
    260  struct fw_filter __rcu **fp;    in fw_change()  (local)
    287  fp = &head->ht[fw_hash(fnew->id)];    in fw_change()
    288  for (pfp = rtnl_dereference(*fp); pfp;    in fw_change()
    289  fp = &pfp->next, pfp = rtnl_dereference(*fp))    in fw_change()
    294  rcu_assign_pointer(*fp, fnew);    in fw_change()
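
The cls_route.c and cls_fw.c hits above are the same idiom: the filter chain is walked through a pointer to the next-pointer slot itself (fp = &pfp->next), so a matching entry can be unlinked, or a new one spliced in, with a single store through *fp (done via RCU_INIT_POINTER()/rcu_assign_pointer() in the kernel so concurrent readers stay safe). Below is a minimal userspace sketch of that double-pointer unlink; the struct filter type is illustrative and the RCU accessors are deliberately left out.

    #include <stdio.h>

    struct filter {
            unsigned int id;
            struct filter *next;
    };

    /* Walk the list through the address of each "next" slot; when the
     * target is found, one store through *fp unlinks it, no separate
     * "prev" pointer needed.  In cls_fw.c/cls_route.c the store would
     * go through RCU_INIT_POINTER(*fp, ...). */
    static struct filter *filter_unlink(struct filter **head, unsigned int id)
    {
            struct filter **fp, *f;

            for (fp = head; (f = *fp) != NULL; fp = &f->next) {
                    if (f->id == id) {
                            *fp = f->next;      /* splice out */
                            return f;           /* caller owns it now */
                    }
            }
            return NULL;
    }

    int main(void)
    {
            struct filter c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
            struct filter *head = &a;

            filter_unlink(&head, 2);
            for (struct filter *f = head; f; f = f->next)
                    printf("%u\n", f->id);      /* prints 1 then 3 */
            return 0;
    }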
|
| act_bpf.c |
    191  struct bpf_prog *fp;    in tcf_bpf_init_from_ops()  (local)
    210  ret = bpf_prog_create(&fp, &fprog_tmp);    in tcf_bpf_init_from_ops()
    218  cfg->filter = fp;    in tcf_bpf_init_from_ops()
    226  struct bpf_prog *fp;    in tcf_bpf_init_from_efd()  (local)
    232  fp = bpf_prog_get_type(bpf_fd, BPF_PROG_TYPE_SCHED_ACT);    in tcf_bpf_init_from_efd()
    233  if (IS_ERR(fp))    in tcf_bpf_init_from_efd()
    234  return PTR_ERR(fp);    in tcf_bpf_init_from_efd()
    239  bpf_prog_put(fp);    in tcf_bpf_init_from_efd()
    245  cfg->filter = fp;    in tcf_bpf_init_from_efd()
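
The act_bpf.c (and cls_bpf.c) hits show the kernel's error-pointer convention: bpf_prog_get_type() hands back either a usable struct bpf_prog * or an errno encoded into the pointer, which IS_ERR() detects and PTR_ERR() decodes. The sketch below re-implements simplified versions of those macros in userspace purely to illustrate the convention; prog_get() is a hypothetical stand-in, not a kernel or libbpf API.

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Simplified userspace copies of the kernel helpers: errno values in
     * the range [-4095, -1] are stashed in the top of the address space,
     * so one return value carries either a pointer or an error code. */
    #define MAX_ERRNO       4095
    #define ERR_PTR(err)    ((void *)(long)(err))
    #define PTR_ERR(ptr)    ((long)(ptr))
    #define IS_ERR(ptr)     ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

    /* Hypothetical lookup that either returns an object or an error. */
    static void *prog_get(int fd)
    {
            if (fd < 0)
                    return ERR_PTR(-EBADF);
            return malloc(16);              /* stands in for a bpf_prog */
    }

    int main(void)
    {
            void *fp = prog_get(-1);

            if (IS_ERR(fp)) {
                    printf("error: %ld\n", PTR_ERR(fp));    /* -9 (EBADF) */
                    return 1;
            }
            free(fp);
            return 0;
    }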
|
| sch_mqprio.c |
    31  u32 fp[TC_QOPT_MAX_QUEUE];    (member)
    68  mqprio_fp_to_offload(priv->fp, &mqprio);    in mqprio_enable_offload()
    169  static int mqprio_parse_tc_entry(u32 fp[TC_QOPT_MAX_QUEUE],    in mqprio_parse_tc_entry()
    197  fp[tc] = nla_get_u32(tb[TCA_MQPRIO_TC_ENTRY_FP]);    in mqprio_parse_tc_entry()
    210  u32 fp[TC_QOPT_MAX_QUEUE];    in mqprio_parse_tc_entries()  (local)
    216  fp[tc] = priv->fp[tc];    in mqprio_parse_tc_entries()
    220  err = mqprio_parse_tc_entry(fp, n, &seen_tcs, extack);    in mqprio_parse_tc_entries()
    226  priv->fp[tc] = fp[tc];    in mqprio_parse_tc_entries()
    227  if (fp[tc] == TC_FP_PREEMPTIBLE)    in mqprio_parse_tc_entries()
    374  priv->fp[tc] = TC_FP_EXPRESS;    in mqprio_init()
    [all …]
|
| cls_bpf.c |
    343  struct bpf_prog *fp;    in cls_bpf_prog_from_ops()  (local)
    362  ret = bpf_prog_create(&fp, &fprog_tmp);    in cls_bpf_prog_from_ops()
    371  prog->filter = fp;    in cls_bpf_prog_from_ops()
    379  struct bpf_prog *fp;    in cls_bpf_prog_from_efd()  (local)
    387  fp = bpf_prog_get_type_dev(bpf_fd, BPF_PROG_TYPE_SCHED_CLS, skip_sw);    in cls_bpf_prog_from_efd()
    388  if (IS_ERR(fp))    in cls_bpf_prog_from_efd()
    389  return PTR_ERR(fp);    in cls_bpf_prog_from_efd()
    394  bpf_prog_put(fp);    in cls_bpf_prog_from_efd()
    401  prog->filter = fp;    in cls_bpf_prog_from_efd()
    403  if (fp->dst_needed)    in cls_bpf_prog_from_efd()
|
| sch_mqprio_lib.c |
    117  void mqprio_fp_to_offload(u32 fp[TC_QOPT_MAX_QUEUE],    in mqprio_fp_to_offload()
    124  if (fp[tc] == TC_FP_PREEMPTIBLE)    in mqprio_fp_to_offload()
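
Going by the two visible lines, mqprio_fp_to_offload() scans the per-traffic-class fp[] table and flags the classes marked TC_FP_PREEMPTIBLE for the frame-preemption offload that sch_mqprio.c and sch_taprio.c hand to drivers. The sketch below shows that conversion as a standalone function; the constant values and the idea of a preemptible-TC bitmask are assumptions drawn from the snippets, not the exact kernel structures.

    #include <stdint.h>
    #include <stdio.h>

    #define TC_QOPT_MAX_QUEUE 16

    /* Assumed values mirroring the enum seen in the snippets: every TC
     * defaults to express, preemptible must be opted into. */
    enum { TC_FP_EXPRESS = 1, TC_FP_PREEMPTIBLE = 2 };

    /* Collapse the per-TC fp[] table into a bitmask of preemptible
     * traffic classes, the shape a hardware offload typically wants. */
    static uint32_t fp_to_preemptible_mask(const uint32_t fp[TC_QOPT_MAX_QUEUE],
                                           int num_tc)
    {
            uint32_t mask = 0;

            for (int tc = 0; tc < num_tc; tc++)
                    if (fp[tc] == TC_FP_PREEMPTIBLE)
                            mask |= 1u << tc;
            return mask;
    }

    int main(void)
    {
            uint32_t fp[TC_QOPT_MAX_QUEUE] = {
                    TC_FP_EXPRESS, TC_FP_PREEMPTIBLE,
                    TC_FP_PREEMPTIBLE, TC_FP_EXPRESS,
            };

            printf("preemptible_tcs = 0x%x\n",
                   fp_to_preemptible_mask(fp, 4));  /* prints 0x6 */
            return 0;
    }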
|
| sch_taprio.c |
    110  u32 fp[TC_QOPT_MAX_QUEUE]; /* only for dump and offloading */    (member)
    1554  mqprio_fp_to_offload(q->fp, &offload->mqprio);    in taprio_enable_offload()
    1702  u32 fp[TC_QOPT_MAX_QUEUE],    in taprio_parse_tc_entry()
    1741  fp[tc] = nla_get_u32(tb[TCA_TAPRIO_TC_ENTRY_FP]);    in taprio_parse_tc_entry()
    1755  u32 fp[TC_QOPT_MAX_QUEUE];    in taprio_parse_tc_entries()  (local)
    1762  fp[tc] = q->fp[tc];    in taprio_parse_tc_entries()
    1766  err = taprio_parse_tc_entry(sch, n, max_sdu, fp, &seen_tcs,    in taprio_parse_tc_entries()
    1774  q->fp[tc] = fp[tc];    in taprio_parse_tc_entries()
    1775  if (fp[tc] != TC_FP_EXPRESS)    in taprio_parse_tc_entries()
    2121  q->fp[tc] = TC_FP_EXPRESS;    in taprio_init()
    [all …]
|
| sch_mqprio_lib.h |
    17  void mqprio_fp_to_offload(u32 fp[TC_QOPT_MAX_QUEUE],
|
| /net/core/ |
| scm.c |
    101  fpp = &fpl->fp[fpl->count];    in scm_fp_copy()
    141  scm->fp = NULL;    in __scm_destroy()
    143  fput(fpl->fp[i]);    in __scm_destroy()
    194  err=scm_fp_copy(cmsg, &p->fp);    in __scm_send()
    242  if (p->fp && !p->fp->count)    in __scm_send()
    244  kfree(p->fp);    in __scm_send()
    245  p->fp = NULL;    in __scm_send()
    373  err = scm_recv_one_fd(scm->fp->fp[i], cmsg_data + i, o_flags);    in scm_detach_fds()
    395  if (i < scm->fp->count || (scm->fp->count && fdmax <= 0))    in scm_detach_fds()
    418  get_file(fpl->fp[i]);    in scm_fp_dup()
    [all …]
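
scm_fp_copy() and scm_detach_fds() are the kernel halves of SCM_RIGHTS descriptor passing: the former collects descriptors from a sender's control message into scm->fp, the latter installs them into the receiver. The userspace counterpart of the receive side is a recvmsg() cmsg walk, sketched below under the assumption that the peer sends exactly one descriptor per message (error handling kept minimal).

    #include <string.h>
    #include <sys/socket.h>
    #include <sys/uio.h>

    /* Receive one file descriptor passed over an AF_UNIX socket via
     * SCM_RIGHTS; returns the newly installed fd or -1 on failure. */
    static int recv_fd(int sock)
    {
            char data;                      /* at least one data byte travels along */
            union {                         /* aligned cmsg buffer, per cmsg(3) */
                    char buf[CMSG_SPACE(sizeof(int))];
                    struct cmsghdr align;
            } ctrl;
            struct iovec iov = { .iov_base = &data, .iov_len = 1 };
            struct msghdr msg = {
                    .msg_iov = &iov, .msg_iovlen = 1,
                    .msg_control = ctrl.buf, .msg_controllen = sizeof(ctrl.buf),
            };
            struct cmsghdr *cmsg;
            int fd = -1;

            if (recvmsg(sock, &msg, 0) < 0)
                    return -1;

            for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg))
                    if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS)
                            memcpy(&fd, CMSG_DATA(cmsg), sizeof(fd));

            return fd;
    }

A send-side companion appears after the af_unix.c entry below.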
|
| filter.c |
    611  fp = prog;    in bpf_convert_filter()
    1324  fp = bpf_prog_select_runtime(fp, &err);    in bpf_migrate_filter()
    1346  err = bpf_check_classic(fp->insns, fp->len);    in bpf_prepare_filter()
    1356  err = trans(fp->insns, fp->len);    in bpf_prepare_filter()
    1372  fp = bpf_migrate_filter(fp);    in bpf_prepare_filter()
    1397  if (!fp)    in bpf_prog_create()
    1412  fp = bpf_prepare_filter(fp, NULL);    in bpf_prog_create()
    1444  if (!fp)    in bpf_prog_create_from_user()
    1466  fp = bpf_prepare_filter(fp, trans);    in bpf_prog_create_from_user()
    1485  fp = kmalloc(sizeof(*fp), GFP_KERNEL);    in __sk_attach_prog()
    [all …]
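
The filter.c hits trace the classic-BPF ingest path: bpf_check_classic() validates the program, bpf_migrate_filter() converts it to eBPF, and bpf_prog_select_runtime() picks the interpreter or JIT. From userspace, the usual way to reach this path is attaching a struct sock_fprog with SO_ATTACH_FILTER, as in this minimal example (the one-instruction filter simply accepts every packet, so it exercises the setup path without doing useful filtering).

    #include <linux/filter.h>
    #include <netinet/in.h>
    #include <stdio.h>
    #include <sys/socket.h>

    int main(void)
    {
            /* Single classic BPF instruction: return 0xffffffff, i.e.
             * accept the whole packet.  The kernel validates this in
             * bpf_check_classic() and may migrate it to eBPF internally. */
            struct sock_filter insns[] = {
                    BPF_STMT(BPF_RET | BPF_K, 0xffffffff),
            };
            struct sock_fprog fprog = {
                    .len = sizeof(insns) / sizeof(insns[0]),
                    .filter = insns,
            };
            int sock = socket(AF_INET, SOCK_DGRAM, 0);

            if (sock < 0 ||
                setsockopt(sock, SOL_SOCKET, SO_ATTACH_FILTER,
                           &fprog, sizeof(fprog)) < 0) {
                    perror("SO_ATTACH_FILTER");
                    return 1;
            }
            printf("classic BPF filter attached\n");
            return 0;
    }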
|
| dev.c |
    4321  const struct bpf_mprog_fp *fp;    in tcx_run()  (local)
    4327  bpf_mprog_foreach_prog(entry, fp, prog) {    in tcx_run()
|
| /net/ipv6/ |
| exthdrs_core.c |
    89  __be16 _frag_off, *fp;    in ipv6_skip_exthdr()  (local)
    90  fp = skb_header_pointer(skb,    in ipv6_skip_exthdr()
    95  if (!fp)    in ipv6_skip_exthdr()
    98  *frag_offp = *fp;    in ipv6_skip_exthdr()
    238  __be16 *fp;    in ipv6_find_hdr()  (local)
    242  fp = skb_header_pointer(skb,    in ipv6_find_hdr()
    247  if (!fp)    in ipv6_find_hdr()
    250  _frag_off = ntohs(*fp) & ~0x7;    in ipv6_find_hdr()
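
Both ipv6_skip_exthdr() and ipv6_find_hdr() read the Fragment header's 16-bit offset field through skb_header_pointer() (which returns a pointer into linear skb data or copies the bytes into the caller's stack variable) and then mask it, as in ntohs(*fp) & ~0x7 at line 250. The sketch below covers only the decode step on a raw two-byte field: the upper 13 bits are the offset in 8-byte units, the lowest bit is the "more fragments" flag.

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Decode the IPv6 Fragment header offset field: masking with ~0x7
     * keeps the 13-bit offset expressed directly in bytes (it counts
     * 8-byte units), bit 0 is the M ("more fragments") flag. */
    static void parse_frag_off(const uint8_t *field, uint16_t *byte_off, int *more)
    {
            uint16_t raw;

            memcpy(&raw, field, sizeof(raw));   /* may be unaligned in a packet */
            raw = ntohs(raw);
            *byte_off = raw & ~0x7;
            *more = raw & 0x1;
    }

    int main(void)
    {
            /* Offset 1504 bytes (188 * 8) with the M flag set: 0x05e1. */
            uint8_t field[2] = { 0x05, 0xe1 };
            uint16_t off;
            int more;

            parse_frag_off(field, &off, &more);
            printf("offset=%u more=%d\n", off, more);   /* offset=1504 more=1 */
            return 0;
    }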
|
| /net/bridge/netfilter/ |
| ebt_vlan.c |
    40  const struct vlan_hdr *fp;    in ebt_vlan_mt()  (local)
    43  fp = skb_header_pointer(skb, 0, sizeof(_frame), &_frame);    in ebt_vlan_mt()
    44  if (fp == NULL)    in ebt_vlan_mt()
    47  TCI = ntohs(fp->h_vlan_TCI);    in ebt_vlan_mt()
    48  encap = fp->h_vlan_encapsulated_proto;    in ebt_vlan_mt()
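
ebt_vlan_mt() pulls the 802.1Q header with skb_header_pointer() and then takes the tag apart: the TCI is byte-swapped with ntohs() and the encapsulated ethertype is kept as-is for comparison. A small standalone sketch of that decode follows; struct vlan_tag is a plain re-declaration for illustration rather than the kernel's struct vlan_hdr.

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Minimal stand-in for struct vlan_hdr: a 16-bit TCI followed by
     * the encapsulated protocol, both in network byte order. */
    struct vlan_tag {
            uint16_t tci;       /* PCP(3 bits) | DEI(1 bit) | VID(12 bits) */
            uint16_t encap;     /* encapsulated ethertype */
    };

    int main(void)
    {
            /* TCI 0x6064: priority 3, DEI 0, VLAN id 100; IPv4 inside. */
            struct vlan_tag tag = { .tci = htons(0x6064), .encap = htons(0x0800) };
            uint16_t tci = ntohs(tag.tci);

            printf("prio=%u dei=%u vid=%u encap=0x%04x\n",
                   tci >> 13, (tci >> 12) & 1, tci & 0x0fff, ntohs(tag.encap));
            return 0;
    }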
|
| /net/unix/ |
| af_unix.c |
    1967  UNIXCB(skb).fp = scm->fp;    in unix_attach_fds()
    1968  scm->fp = NULL;    in unix_attach_fds()
    1978  scm->fp = UNIXCB(skb).fp;    in unix_detach_fds()
    1986  scm->fp = scm_fp_dup(UNIXCB(skb).fp);    in unix_peek_fds()
    1995  if (UNIXCB(skb).fp)    in unix_destruct_scm()
    2073  struct scm_fp_list *fp = UNIXCB(skb).fp;    in scm_stat_add()  (local)
    2076  if (unlikely(fp && fp->count)) {    in scm_stat_add()
    2084  struct scm_fp_list *fp = UNIXCB(skb).fp;    in scm_stat_del()  (local)
    2087  if (unlikely(fp && fp->count)) {    in scm_stat_del()
    2089  unix_del_edges(fp);    in scm_stat_del()
    [all …]
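
unix_attach_fds() parks the sender's scm_fp_list on the skb and unix_detach_fds()/unix_peek_fds() hand it back (or a duplicate) on the receive side; the descriptors themselves arrive from userspace in a sendmsg() SCM_RIGHTS control message. A minimal send-side sketch, pairing with the receive sketch after the scm.c entry above (single descriptor, trimmed error handling):

    #include <string.h>
    #include <sys/socket.h>
    #include <sys/uio.h>

    /* Pass one file descriptor over an AF_UNIX socket with SCM_RIGHTS;
     * on the kernel side the fd lands in a struct scm_fp_list that
     * unix_attach_fds() attaches to the queued skb. */
    static int send_fd(int sock, int fd_to_pass)
    {
            char data = 'x';                /* a byte of payload accompanies the fd */
            union {                         /* aligned cmsg buffer, per cmsg(3) */
                    char buf[CMSG_SPACE(sizeof(int))];
                    struct cmsghdr align;
            } ctrl;
            struct iovec iov = { .iov_base = &data, .iov_len = 1 };
            struct msghdr msg = {
                    .msg_iov = &iov, .msg_iovlen = 1,
                    .msg_control = ctrl.buf, .msg_controllen = sizeof(ctrl.buf),
            };
            struct cmsghdr *cmsg;

            memset(ctrl.buf, 0, sizeof(ctrl.buf));
            cmsg = CMSG_FIRSTHDR(&msg);
            cmsg->cmsg_level = SOL_SOCKET;
            cmsg->cmsg_type = SCM_RIGHTS;
            cmsg->cmsg_len = CMSG_LEN(sizeof(int));
            memcpy(CMSG_DATA(cmsg), &fd_to_pass, sizeof(int));

            return sendmsg(sock, &msg, 0) < 0 ? -1 : 0;
    }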
|
| garbage.c |
    209  struct unix_sock *inflight = unix_get_socket(fpl->fp[j++]);    in unix_add_edges()
    581  if (UNIXCB(skb).fp)    in __unix_gc()
    582  UNIXCB(skb).fp->dead = true;    in __unix_gc()
|
| af_unix.h |
    17  struct scm_fp_list *fp;  /* Passed files */    (member)
|
| /net/ |
| compat.c |
    295  int fdmax = min_t(int, scm_max_fds_compat(msg), scm->fp->count);    in scm_detach_fds_compat()
    300  err = scm_recv_one_fd(scm->fp->fp[i], cmsg_data + i, o_flags);    in scm_detach_fds_compat()
    322  if (i < scm->fp->count || (scm->fp->count && fdmax <= 0))    in scm_detach_fds_compat()
|
| /net/802/ |
| hippi.c |
    58  hip->fp.fixed = htonl(0x04800018);    in hippi_header()
    59  hip->fp.d2_size = htonl(len + 8);    in hippi_header()
|
| /net/6lowpan/ |
| debugfs.c |
    123  static ssize_t lowpan_ctx_pfx_write(struct file *fp,    in lowpan_ctx_pfx_write()  (argument)
    128  struct seq_file *file = fp->private_data;    in lowpan_ctx_pfx_write()
|
| /net/bluetooth/ |
| 6lowpan.c |
    1088  static ssize_t lowpan_control_write(struct file *fp,    in lowpan_control_write()  (argument)
|
| /net/openvswitch/ |
| flow_netlink.c |
    463  static bool is_all_zero(const u8 *fp, size_t size)    in is_all_zero()  (argument)
    467  if (!fp)    in is_all_zero()
    471  if (fp[i])    in is_all_zero()
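
The is_all_zero() helper is small enough that the listing almost shows it whole: a NULL check followed by a byte-wise scan. The standalone version below fills in the loop from the visible lines; what the kernel helper returns for a NULL pointer is not visible here, so the false return in that branch is an assumption.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Reconstruction from the visible lines: every byte must be zero.
     * The NULL branch returning false is assumed, not shown above. */
    static bool is_all_zero(const uint8_t *fp, size_t size)
    {
            if (!fp)
                    return false;

            for (size_t i = 0; i < size; i++)
                    if (fp[i])
                            return false;

            return true;
    }

    int main(void)
    {
            uint8_t a[4] = { 0, 0, 0, 0 };
            uint8_t b[4] = { 0, 0, 1, 0 };

            printf("%d %d %d\n", is_all_zero(a, 4), is_all_zero(b, 4),
                   is_all_zero(NULL, 4));   /* prints 1 0 0 */
            return 0;
    }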
|