
Searched refs:flows (Results 1 – 19 of 19) sorted by relevance

/net/sched/
sch_fq_codel.c
164 flow = &q->flows[idx]; in fq_codel_drop()
205 flow = &q->flows[idx]; in fq_codel_enqueue()
342 struct fq_codel_flow *flow = q->flows + i; in fq_codel_reset()
379 if (q->flows) in fq_codel_change()
464 kvfree(q->flows); in fq_codel_destroy()
496 if (!q->flows) { in fq_codel_init()
497 q->flows = kvcalloc(q->flows_cnt, in fq_codel_init()
500 if (!q->flows) { in fq_codel_init()
523 kvfree(q->flows); in fq_codel_init()
524 q->flows = NULL; in fq_codel_init()
[all …]
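
The fq_codel hits above trace the whole lifecycle of the per-flow array: kvcalloc() in fq_codel_init(), a NULL check with kvfree() on the error path, indexing by a hashed flow id in the drop/enqueue paths, and kvfree() again in fq_codel_destroy(). A minimal userspace sketch of that pattern, with calloc()/free() standing in for kvcalloc()/kvfree() and all fqc_* names hypothetical:

/* Userspace sketch of the flow-table lifecycle seen above: allocate an
 * array of per-flow state at init, index it by a hashed flow id, free it
 * on destroy.  calloc()/free() stand in for kvcalloc()/kvfree(). */
#include <stdint.h>
#include <stdlib.h>

struct fqc_flow {
        uint32_t backlog;               /* bytes queued on this flow */
};

struct fqc_sched {
        struct fqc_flow *flows;         /* one slot per hash bucket */
        uint32_t flows_cnt;
};

int fqc_init(struct fqc_sched *q, uint32_t flows_cnt)
{
        q->flows_cnt = flows_cnt;
        q->flows = calloc(flows_cnt, sizeof(*q->flows));
        if (!q->flows)
                return -1;              /* the kernel code returns -ENOMEM here */
        return 0;
}

struct fqc_flow *fqc_classify(struct fqc_sched *q, uint32_t hash)
{
        return &q->flows[hash % q->flows_cnt];  /* flow = &q->flows[idx] */
}

void fqc_destroy(struct fqc_sched *q)
{
        free(q->flows);
        q->flows = NULL;                /* matches q->flows = NULL after kvfree() */
}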
sch_fq_pie.c
58 struct fq_pie_flow *flows; member
152 sel_flow = &q->flows[idx]; in fq_pie_qdisc_enqueue()
308 if (q->flows) { in fq_pie_change()
401 &q->flows[q->flows_cursor].vars, in fq_pie_timer()
402 q->flows[q->flows_cursor].backlog); in fq_pie_timer()
449 q->flows = kvcalloc(q->flows_cnt, sizeof(struct fq_pie_flow), in fq_pie_init()
451 if (!q->flows) { in fq_pie_init()
456 struct fq_pie_flow *flow = q->flows + idx; in fq_pie_init()
541 struct fq_pie_flow *flow = q->flows + idx; in fq_pie_reset()
559 kvfree(q->flows); in fq_pie_destroy()
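
The fq_pie_timer() hits show a flows_cursor walking the table so that periodic per-flow work is spread across successive timer runs. A rough sketch of that round-robin cursor, assuming a hypothetical fixed per-tick budget and pie_demo_* names:

/* Each timer run services a bounded batch of flows and remembers where
 * it stopped; the cursor wraps once the whole table has been visited. */
#include <stdint.h>

#define PIE_DEMO_BATCH 64

struct pie_demo_flow {
        uint32_t backlog;
};

struct pie_demo_sched {
        struct pie_demo_flow *flows;
        uint32_t flows_cnt;
        uint32_t flows_cursor;          /* next flow to service */
};

void pie_demo_timer(struct pie_demo_sched *q)
{
        uint32_t budget = PIE_DEMO_BATCH;

        while (budget-- && q->flows_cursor < q->flows_cnt) {
                struct pie_demo_flow *flow = &q->flows[q->flows_cursor];

                /* per-flow AQM bookkeeping would go here, fed by
                 * flow->backlog as in the lines above */
                (void)flow;
                q->flows_cursor++;
        }
        if (q->flows_cursor >= q->flows_cnt)
                q->flows_cursor = 0;    /* wrap; the next run starts over */
}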
sch_cake.c
151 struct cake_flow flows[CAKE_QUEUES]; member
787 q->flows[reduced_hash].set)) { in cake_hash()
805 if (!q->flows[outer_hash + k].set) { in cake_hash()
820 if (!q->flows[outer_hash + k].set) { in cake_hash()
862 q->flows[reduced_hash].srchost = srchost_idx; in cake_hash()
1575 flow = &b->flows[idx]; in cake_drop()
1764 flow = &b->flows[idx]; in cake_enqueue()
2082 q->cur_flow = flow - b->flows; in cake_dequeue()
2768 struct cake_flow *flow = b->flows + j; in cake_init()
3015 flow = &b->flows[idx % CAKE_QUEUES]; in cake_dump_class_stats()
[all …]
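
The cake_hash() hits show set-associative placement into the fixed flows[CAKE_QUEUES] array: a reduced hash picks a home slot, and if it is occupied a small window starting at outer_hash is probed for a slot whose .set flag is clear. A simplified, illustrative sketch (real CAKE also checks whether the occupant is the same flow; the cake_demo_* names and constants are hypothetical):

#include <stdbool.h>
#include <stdint.h>

#define CAKE_DEMO_QUEUES   1024         /* stands in for CAKE_QUEUES */
#define CAKE_DEMO_SET_SIZE 16           /* probe window per hash set */

struct cake_demo_flow {
        bool set;                       /* slot currently hosts a flow */
};

static struct cake_demo_flow cake_demo_flows[CAKE_DEMO_QUEUES];

uint32_t cake_demo_place(uint32_t hash)
{
        uint32_t reduced_hash = hash % CAKE_DEMO_QUEUES;
        uint32_t outer_hash = reduced_hash - (reduced_hash % CAKE_DEMO_SET_SIZE);
        uint32_t k;

        if (cake_demo_flows[reduced_hash].set) {
                /* home slot taken: look for a free slot in the same set */
                for (k = 0; k < CAKE_DEMO_SET_SIZE; k++) {
                        if (!cake_demo_flows[outer_hash + k].set) {
                                reduced_hash = outer_hash + k;
                                break;
                        }
                }
        }
        cake_demo_flows[reduced_hash].set = true;
        return reduced_hash;
}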
sch_fq.c
143 u32 flows; member
295 q->flows -= fcnt; in fq_gc()
327 if (q->flows != q->inactive_flows + q->throttled_flows) in fq_fastpath_check()
458 q->flows++; in fq_classify()
831 q->flows = 0; in fq_reset()
876 q->flows -= fcnt; in fq_rehash()
1301 st.flows = q->flows; in fq_dump_stats()
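
In sch_fq, flows is a plain counter of allocated flow structures, bumped in fq_classify() and dropped in fq_gc()/fq_rehash(); fq_fastpath_check() compares it against the inactive and throttled counts. A tiny sketch of that accounting check, with hypothetical fqd_* names:

#include <stdbool.h>
#include <stdint.h>

struct fqd_sched {
        uint32_t flows;                 /* total flow structs allocated */
        uint32_t inactive_flows;
        uint32_t throttled_flows;
};

bool fqd_fastpath_possible(const struct fqd_sched *q)
{
        /* mirrors: if (q->flows != q->inactive_flows + q->throttled_flows) */
        return q->flows == q->inactive_flows + q->throttled_flows;
}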
sch_sfq.c
689 if (ctl->flows) in sfq_change()
690 maxflows = min_t(u32, ctl->flows, SFQ_MAX_FLOWS); in sfq_change()
842 opt.v0.flows = q->maxflows; in sfq_dump()
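
sfq_change() only honours a non-zero user-supplied flow count and clamps it to SFQ_MAX_FLOWS with min_t(). A minimal sketch of that clamp, with SFQ_DEMO_MAX_FLOWS as an arbitrary stand-in for the real limit:

#include <stdint.h>

#define SFQ_DEMO_MAX_FLOWS 65536        /* value chosen arbitrarily here */

uint32_t sfq_demo_clamp_flows(uint32_t requested, uint32_t current_max)
{
        if (!requested)
                return current_max;     /* 0: keep the existing maxflows */
        return requested < SFQ_DEMO_MAX_FLOWS ? requested : SFQ_DEMO_MAX_FLOWS;
}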
Kconfig
248 and Keep for responsive flows, CHOose and Kill for unresponsive
249 flows). This is a variation of RED which tries to penalize flows
/net/core/
pktgen.c
417 struct flow_state *flows; member
2381 pkt_dev->flows[flow].count = 0; in f_pick()
2382 pkt_dev->flows[flow].flags = 0; in f_pick()
2392 pkt_dev->flows[flow].count = 0; in f_pick()
2393 pkt_dev->flows[flow].flags = 0; in f_pick()
2428 pkt_dev->flows[flow].x = x; in get_ipsec_sa()
2661 pkt_dev->flows[flow].count++; in mod_cur_headers()
2748 pkt_dev->flows[i].x = NULL; in free_SAs()
3849 if (pkt_dev->flows == NULL) { in pktgen_add_device()
3910 vfree(pkt_dev->flows); in pktgen_add_device()
[all …]
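
The pktgen hits show per-device flow_state entries whose count and flags are cleared in f_pick() when a flow is recycled and whose count grows in mod_cur_headers() as packets are emitted. An illustrative sketch of that pick-and-reset bookkeeping (the pg_* names and the simple "advance when exhausted" policy are hypothetical):

#include <stdint.h>

struct pg_flow_state {
        uint32_t count;                 /* packets sent on this flow */
        uint32_t flags;
};

struct pg_dev {
        struct pg_flow_state *flows;
        uint32_t nflows;
        uint32_t flowlen;               /* packets per flow before reuse */
        uint32_t curfl;                 /* current flow index */
};

uint32_t pg_pick_flow(struct pg_dev *pkt_dev)
{
        uint32_t flow = pkt_dev->curfl;

        if (pkt_dev->flows[flow].count >= pkt_dev->flowlen) {
                /* flow exhausted: reset its state and move on */
                pkt_dev->flows[flow].count = 0;
                pkt_dev->flows[flow].flags = 0;
                pkt_dev->curfl = (pkt_dev->curfl + 1) % pkt_dev->nflows;
                flow = pkt_dev->curfl;
        }
        pkt_dev->flows[flow].count++;   /* one more packet on this flow */
        return flow;
}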
net-sysfs.c
1124 table->flows[count].cpu = RPS_NO_CPU; in store_rps_dev_flow_table_cnt()
dev.c
4872 rflow = &flow_table->flows[flow_id]; in set_rps_cpu()
4945 rflow = &flow_table->flows[rfs_slot(hash, flow_table)]; in get_rps_cpu()
5013 rflow = &flow_table->flows[flow_id]; in rps_may_expire_flow()
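
The dev.c hits show the RPS/RFS device flow table: entries are selected by a slot derived from the packet hash and record the CPU that last handled the flow, with RPS_NO_CPU marking unused slots (as net-sysfs.c initialises them). A sketch of that lookup, modelling the slot computation as a plain mask; the rfs_demo_* names and RFS_DEMO_NO_CPU are stand-ins:

#include <stdint.h>

#define RFS_DEMO_NO_CPU 0xffff

struct rfs_demo_flow {
        uint16_t cpu;
};

struct rfs_demo_table {
        unsigned int mask;              /* table size minus one, power of two */
        struct rfs_demo_flow flows[];   /* flexible array, as in the kernel */
};

static inline unsigned int rfs_demo_slot(uint32_t hash,
                                         const struct rfs_demo_table *t)
{
        return hash & t->mask;
}

uint16_t rfs_demo_flow_cpu(const struct rfs_demo_table *t, uint32_t hash)
{
        const struct rfs_demo_flow *rflow = &t->flows[rfs_demo_slot(hash, t)];

        return rflow->cpu;              /* RFS_DEMO_NO_CPU if never recorded */
}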
/net/mctp/test/
route-test.c
1012 struct mctp_flow *flows[2]; in mctp_test_fragment_flow() local
1036 flows[0] = skb_ext_find(tx_skbs[0], SKB_EXT_MCTP); in mctp_test_fragment_flow()
1037 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, flows[0]); in mctp_test_fragment_flow()
1038 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, flows[0]->key); in mctp_test_fragment_flow()
1039 KUNIT_ASSERT_PTR_EQ(test, flows[0]->key->sk, sock->sk); in mctp_test_fragment_flow()
1041 flows[1] = skb_ext_find(tx_skbs[1], SKB_EXT_MCTP); in mctp_test_fragment_flow()
1042 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, flows[1]); in mctp_test_fragment_flow()
1043 KUNIT_ASSERT_PTR_EQ(test, flows[1]->key, flows[0]->key); in mctp_test_fragment_flow()
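
The KUnit test above fetches the SKB_EXT_MCTP extension from each transmitted fragment and asserts that both fragments reference the same flow key, bound to the sending socket. The same property restated as a plain-C sketch, with assert() in place of the KUNIT_ASSERT_* macros and mctp_demo_* types standing in for the kernel structures:

#include <assert.h>
#include <stddef.h>

struct mctp_demo_sock { int id; };
struct mctp_demo_key  { struct mctp_demo_sock *sk; };
struct mctp_demo_flow { struct mctp_demo_key *key; };

void mctp_demo_check_fragments(struct mctp_demo_flow *flows[2],
                               struct mctp_demo_sock *sock)
{
        assert(flows[0] && flows[0]->key);
        assert(flows[0]->key->sk == sock);                   /* key bound to the sender */
        assert(flows[1] && flows[1]->key == flows[0]->key);  /* shared across fragments */
}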
/net/ethtool/
rss.c
755 struct nlattr *flows[ETHTOOL_A_FLOW_MAX + 1]; in ethnl_set_rss_fields() local
768 ret = nla_parse_nested(flows, ARRAY_SIZE(ethnl_rss_flows_policy) - 1, in ethnl_set_rss_fields()
779 if (!flows[i]) in ethnl_set_rss_fields()
782 fields.data = nla_get_u32(flows[i]); in ethnl_set_rss_fields()
787 NL_SET_ERR_MSG_ATTR(info->extack, flows[i], in ethnl_set_rss_fields()
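
ethnl_set_rss_fields() parses the nested flow attributes into an array indexed by attribute type, skips entries that were not supplied, reads each present one with nla_get_u32(), and reports errors against the offending attribute. A stripped-down sketch of that walk, with rss_demo_* stand-ins for struct nlattr and the nla_* helpers:

#include <stddef.h>
#include <stdint.h>

#define RSS_DEMO_FLOW_MAX 32            /* stands in for ETHTOOL_A_FLOW_MAX */

struct rss_demo_attr {
        uint32_t value;                 /* what nla_get_u32() would return */
};

void rss_demo_apply_fields(struct rss_demo_attr *flows[RSS_DEMO_FLOW_MAX + 1],
                           uint32_t fields[RSS_DEMO_FLOW_MAX + 1])
{
        for (int i = 1; i <= RSS_DEMO_FLOW_MAX; i++) {
                if (!flows[i])          /* attribute not supplied: skip */
                        continue;
                fields[i] = flows[i]->value;
        }
}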
/net/ipv4/
Kconfig
527 other Reno and H-TCP flows.
570 can coexist safely is when the CA flows have RTTs << CC flows RTTs.
637 - Low latency (short flows, queries),
659 o Coexist with flows that use loss-based congestion control.
677 coexist with flows that use loss-based congestion control, and can
/net/
Kconfig
377 backlog reaches netdev_max_backlog. If a few out of many active flows
379 maintain capacity for the other flows. This feature provides servers
/net/mac80211/
debugfs_netdev.c
637 txqi->tin.flows, in ieee80211_if_fmt_aqm()
debugfs_sta.c
167 txqi->tin.flows, in sta_aqm_read()
cfg.c
4785 txqstats->flows = txqi->tin.flows; in ieee80211_fill_txq_stats()
tx.c
1371 flow = &fq->flows[cvars - local->cvars]; in codel_dequeue_func()
1408 cvars = &local->cvars[flow - fq->flows]; in fq_tin_dequeue_func()
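
The tx.c hits rely on parallel arrays: per-flow CoDel state lives in local->cvars while the flows live in fq->flows, and pointer subtraction converts an element pointer in one array into the index used for the other. A small illustrative sketch of that mapping, with mac_demo_* types as stand-ins:

#include <stddef.h>

struct mac_demo_flow  { int backlog; };
struct mac_demo_cvars { int count; };

struct mac_demo_ctx {
        struct mac_demo_flow  *flows;   /* fq->flows in mac80211 */
        struct mac_demo_cvars *cvars;   /* local->cvars in mac80211 */
};

/* given per-flow CoDel state, find the matching flow */
struct mac_demo_flow *mac_demo_flow_of(struct mac_demo_ctx *c,
                                       struct mac_demo_cvars *v)
{
        return &c->flows[v - c->cvars];
}

/* given a flow, find its per-flow CoDel state */
struct mac_demo_cvars *mac_demo_cvars_of(struct mac_demo_ctx *c,
                                         struct mac_demo_flow *f)
{
        return &c->cvars[f - c->flows];
}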
/net/netfilter/
Kconfig
392 policies to flows, instead of using the global timeout policy.
520 choose what flows are placed into the hardware.
/net/wireless/
nl80211.c
1302 PUT_TXQVAL_U32(FLOWS, flows); in nl80211_put_txq_stats()
