/linux-6.3-rc2/include/net/

fq_impl.h
    36   idx = flow - fq->flows;                                    in __fq_adjust_removal()
   152   flow = &fq->flows[idx];                                    in fq_flow_classify()
   160   tin->flows++;                                              in fq_flow_classify()
   173   struct fq_flow *cur = &fq->flows[i];                       in fq_find_fattest_flow()
   361   fq->flows = kvcalloc(fq->flows_cnt, sizeof(fq->flows[0]), GFP_KERNEL);    in fq_init()
   362   if (!fq->flows)                                            in fq_init()
   367   kvfree(fq->flows);                                         in fq_init()
   368   fq->flows = NULL;                                          in fq_init()
   373   fq_flow_init(&fq->flows[i]);                               in fq_init()
   386   kvfree(fq->flows);                                         in fq_reset()
   [all …]

fq.h
    57   u32 flows;                                                 member
    69   struct fq_flow *flows;                                     member

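The fq_impl.h hits above trace one recurring shape: a flow table is a flat array of per-flow state, sized by flows_cnt, zero-allocated in fq_init(), initialised entry by entry, and released in fq_reset(). Below is a minimal userspace C sketch of that allocate/initialise/teardown cycle, with calloc()/free() standing in for the kernel's kvcalloc()/kvfree(); the struct fields and the initial credit value are illustrative, not the kernel's.

    /* Minimal sketch of the flow-table lifecycle seen in fq_impl.h.
     * calloc()/free() replace kvcalloc()/kvfree(); names are illustrative. */
    #include <stdio.h>
    #include <stdlib.h>

    struct flow {
        unsigned int backlog;   /* bytes currently queued on this flow */
        unsigned int credit;    /* scheduling credit, set at init time */
    };

    struct fq {
        struct flow *flows;     /* array of per-flow state */
        unsigned int flows_cnt; /* number of entries in flows[] */
    };

    static int fq_init(struct fq *fq, unsigned int flows_cnt)
    {
        fq->flows_cnt = flows_cnt;
        fq->flows = calloc(flows_cnt, sizeof(fq->flows[0]));
        if (!fq->flows)
            return -1;
        for (unsigned int i = 0; i < flows_cnt; i++)
            fq->flows[i].credit = 1500;   /* arbitrary initial quantum */
        return 0;
    }

    static void fq_reset(struct fq *fq)
    {
        free(fq->flows);
        fq->flows = NULL;
    }

    int main(void)
    {
        struct fq fq;

        if (fq_init(&fq, 1024))
            return 1;
        printf("allocated %u flows\n", fq.flows_cnt);
        fq_reset(&fq);
        return 0;
    }
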
/linux-6.3-rc2/drivers/crypto/allwinner/sun8i-ss/

sun8i-ss-core.c
    75   ss->flows[flow].stat_req++;                                in sun8i_ss_run_task()
   132   ss->flows[flow].status = 0;                                in sun8i_ss_run_task()
   139   if (ss->flows[flow].status == 0) {                         in sun8i_ss_run_task()
   158   ss->flows[flow].status = 1;                                in ss_irq_handler()
   524   if (!ss->flows)                                            in allocate_flows()
   532   if (!ss->flows[i].biv) {                                   in allocate_flows()
   540   if (!ss->flows[i].iv[j]) {                                 in allocate_flows()
   549   if (!ss->flows[i].pad) {                                   in allocate_flows()
   553   ss->flows[i].result =                                      in allocate_flows()
   557   if (!ss->flows[i].result) {                                in allocate_flows()
   [all …]

sun8i-ss-prng.c
   134   reinit_completion(&ss->flows[flow].complete);              in sun8i_ss_prng_generate()
   135   ss->flows[flow].status = 0;                                in sun8i_ss_prng_generate()
   141   wait_for_completion_interruptible_timeout(&ss->flows[flow].complete,    in sun8i_ss_prng_generate()
   143   if (ss->flows[flow].status == 0) {                         in sun8i_ss_prng_generate()

sun8i-ss-hash.c
   299   ss->flows[flow].stat_req++;                                in sun8i_ss_run_hash_task()
   332   reinit_completion(&ss->flows[flow].complete);              in sun8i_ss_run_hash_task()
   333   ss->flows[flow].status = 0;                                in sun8i_ss_run_hash_task()
   338   wait_for_completion_interruptible_timeout(&ss->flows[flow].complete,    in sun8i_ss_run_hash_task()
   340   if (ss->flows[flow].status == 0) {                         in sun8i_ss_run_hash_task()
   416   engine = ss->flows[e].engine;                              in sun8i_ss_hash_digest()
   514   result = ss->flows[rctx->flow].result;                     in sun8i_ss_hash_run()
   515   pad = ss->flows[rctx->flow].pad;                           in sun8i_ss_hash_run()

sun8i-ss-cipher.c
   125   struct sun8i_ss_flow *sf = &ss->flows[rctx->flow];         in sun8i_ss_setup_ivs()
   186   struct sun8i_ss_flow *sf = &ss->flows[rctx->flow];         in sun8i_ss_cipher()
   355   engine = op->ss->flows[e].engine;                          in sun8i_ss_skdecrypt()
   376   engine = op->ss->flows[e].engine;                          in sun8i_ss_skencrypt()

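Across the sun8i-ss hits the per-flow synchronisation pattern repeats: the submitter clears flows[flow].status, rearms the flow's completion, starts the job, waits with a timeout, and then checks the status the interrupt handler set. The following userspace C sketch mirrors that handshake with a POSIX mutex/condvar pair in place of the kernel completion API (build with -pthread); struct names and the fake IRQ thread are illustrative, not the driver's.

    /* Sketch of the "rearm completion, wait with timeout, check status"
     * pattern from sun8i-ss-prng.c / sun8i-ss-hash.c, in userspace terms. */
    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>
    #include <time.h>

    struct completion {
        pthread_mutex_t lock;
        pthread_cond_t cond;
        int done;
    };

    struct flow {
        struct completion complete;
        int status;                 /* set to 1 by the "IRQ handler" */
    };

    static void reinit_completion(struct completion *c)
    {
        pthread_mutex_lock(&c->lock);
        c->done = 0;
        pthread_mutex_unlock(&c->lock);
    }

    static void complete(struct completion *c)
    {
        pthread_mutex_lock(&c->lock);
        c->done = 1;
        pthread_cond_signal(&c->cond);
        pthread_mutex_unlock(&c->lock);
    }

    /* Wait until complete() fires or the timeout expires; returns 0 on timeout. */
    static int wait_for_completion_timeout(struct completion *c, int secs)
    {
        struct timespec ts;
        int done;

        clock_gettime(CLOCK_REALTIME, &ts);
        ts.tv_sec += secs;
        pthread_mutex_lock(&c->lock);
        while (!c->done) {
            if (pthread_cond_timedwait(&c->cond, &c->lock, &ts) == ETIMEDOUT)
                break;
        }
        done = c->done;
        pthread_mutex_unlock(&c->lock);
        return done;
    }

    /* Stand-in for ss_irq_handler(): mark the flow done and complete it. */
    static void *fake_irq(void *arg)
    {
        struct flow *f = arg;

        f->status = 1;
        complete(&f->complete);
        return NULL;
    }

    int main(void)
    {
        struct flow f = {
            .complete = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0 },
        };
        pthread_t irq;

        reinit_completion(&f.complete);
        f.status = 0;                       /* cleared before the job is kicked off */
        pthread_create(&irq, NULL, fake_irq, &f);
        if (!wait_for_completion_timeout(&f.complete, 2) || f.status == 0)
            fprintf(stderr, "flow timed out\n");
        else
            printf("flow completed, status=%d\n", f.status);
        pthread_join(irq, NULL);
        return 0;
    }
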
/linux-6.3-rc2/samples/bpf/

do_hbm_test.sh
    78   flows=1
   150   -f=*|--flows=*)
   151   flows="${i#*=}"
   278   while [ $flow_cnt -le $flows ] ; do
   320   while [ $flow_cnt -le $flows ] ; do
   346   iperf3 -c $host -p $port -i 0 -P $flows -f m -t $dur > iperf.$id
   366   while [ $flow_cnt -le $flows ] ; do
   386   while [ $flow_cnt -le $flows ] ; do

/linux-6.3-rc2/net/sched/

sch_fq_codel.c
   164   flow = &q->flows[idx];                                     in fq_codel_drop()
   204   flow = &q->flows[idx];                                     in fq_codel_enqueue()
   343   struct fq_codel_flow *flow = q->flows + i;                 in fq_codel_reset()
   380   if (q->flows)                                              in fq_codel_change()
   456   kvfree(q->flows);                                          in fq_codel_destroy()
   488   if (!q->flows) {                                           in fq_codel_init()
   489   q->flows = kvcalloc(q->flows_cnt,                          in fq_codel_init()
   492   if (!q->flows) {                                           in fq_codel_init()
   515   kvfree(q->flows);                                          in fq_codel_init()
   516   q->flows = NULL;                                           in fq_codel_init()
   [all …]

sch_fq_pie.c
    57   struct fq_pie_flow *flows;                                 member
   149   sel_flow = &q->flows[idx];                                 in fq_pie_qdisc_enqueue()
   298   if (q->flows) {                                            in fq_pie_change()
   380   pie_calculate_probability(&q->p_params, &q->flows[idx].vars,    in fq_pie_timer()
   381   q->flows[idx].backlog);                                    in fq_pie_timer()
   421   q->flows = kvcalloc(q->flows_cnt, sizeof(struct fq_pie_flow),    in fq_pie_init()
   423   if (!q->flows) {                                           in fq_pie_init()
   428   struct fq_pie_flow *flow = q->flows + idx;                 in fq_pie_init()
   512   struct fq_pie_flow *flow = q->flows + idx;                 in fq_pie_reset()
   530   kvfree(q->flows);                                          in fq_pie_destroy()

sch_fq.c
   120   u32 flows;                                                 member
   254   q->flows -= fcnt;                                          in fq_gc()
   304   if (q->flows >= (2U << q->fq_trees_log) &&                 in fq_classify()
   305   q->inactive_flows > q->flows/2)                            in fq_classify()
   359   q->flows++;                                                in fq_classify()
   692   q->flows = 0;                                              in fq_reset()
   737   q->flows -= fcnt;                                          in fq_rehash()
  1020   st.flows = q->flows;                                       in fq_dump_stats()

sch_cake.c
   150   struct cake_flow flows[CAKE_QUEUES];                       member
   743   q->flows[reduced_hash].set)) {                             in cake_hash()
   761   if (!q->flows[outer_hash + k].set) {                       in cake_hash()
   776   if (!q->flows[outer_hash + k].set) {                       in cake_hash()
   819   q->flows[reduced_hash].srchost = srchost_idx;              in cake_hash()
  1525   flow = &b->flows[idx];                                     in cake_drop()
  1715   flow = &b->flows[idx];                                     in cake_enqueue()
  2052   q->cur_flow = flow - b->flows;                             in cake_dequeue()
  2753   struct cake_flow *flow = b->flows + j;                     in cake_init()
  2996   flow = &b->flows[idx % CAKE_QUEUES];                       in cake_dump_class_stats()
   [all …]

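The sch_fq_codel.c and sch_cake.c hits circle around the classification step of fair queueing: hash the packet's flow key, reduce the hash to an index into a fixed flows[] array, and enqueue onto that per-flow state ("flow = &q->flows[idx]"). A minimal userspace C sketch of that step follows; the hash function, addresses, and struct layout are toy stand-ins (the kernel uses skb_get_hash() with a per-qdisc perturbation), shown only to make the indexing pattern concrete.

    /* Sketch of hash-based flow classification: key -> hash -> flows[idx]. */
    #include <stdint.h>
    #include <stdio.h>

    #define FLOWS_CNT 1024

    struct flow {
        unsigned int qlen;      /* packets currently queued on this flow */
    };

    static struct flow flows[FLOWS_CNT];

    /* Toy 5-tuple hash; any reasonable mixing function works for the sketch. */
    static uint32_t flow_hash(uint32_t saddr, uint32_t daddr,
                              uint16_t sport, uint16_t dport)
    {
        uint32_t h = saddr * 2654435761u;

        h ^= daddr * 2246822519u;
        h ^= ((uint32_t)sport << 16 | dport) * 3266489917u;
        return h;
    }

    static struct flow *classify(uint32_t saddr, uint32_t daddr,
                                 uint16_t sport, uint16_t dport)
    {
        uint32_t idx = flow_hash(saddr, daddr, sport, dport) % FLOWS_CNT;

        return &flows[idx];     /* mirrors "flow = &q->flows[idx]" */
    }

    int main(void)
    {
        struct flow *f = classify(0x0a000001, 0x0a000002, 12345, 443);

        f->qlen++;
        printf("packet queued, flow backlog now %u\n", f->qlen);
        return 0;
    }
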
/linux-6.3-rc2/drivers/dma/ti/

k3-udma-glue.c
    84   struct k3_udma_glue_rx_flow *flows;                        member
   959   rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,        in k3_udma_glue_request_rx_chn_priv()
   960   sizeof(*rx_chn->flows), GFP_KERNEL);                       in k3_udma_glue_request_rx_chn_priv()
   961   if (!rx_chn->flows) {                                      in k3_udma_glue_request_rx_chn_priv()
   971   rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;    in k3_udma_glue_request_rx_chn_priv()
  1040   rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,        in k3_udma_glue_request_remote_rx_chn()
  1041   sizeof(*rx_chn->flows), GFP_KERNEL);                       in k3_udma_glue_request_remote_rx_chn()
  1042   if (!rx_chn->flows) {                                      in k3_udma_glue_request_remote_rx_chn()
  1071   rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;    in k3_udma_glue_request_remote_rx_chn()
  1145   flow = &rx_chn->flows[flow_idx];                           in k3_udma_glue_rx_flow_get_fdq_id()
   [all …]

/linux-6.3-rc2/drivers/media/platform/amphion/

vpu_dbg.c
   186   for (i = 0; i < ARRAY_SIZE(inst->flows); i++) {            in vpu_dbg_instance()
   187   u32 idx = (inst->flow_idx + i) % (ARRAY_SIZE(inst->flows));    in vpu_dbg_instance()
   189   if (!inst->flows[idx])                                     in vpu_dbg_instance()
   192   inst->flows[idx] >= VPU_MSG_ID_NOOP ? "M" : "C",           in vpu_dbg_instance()
   193   inst->flows[idx]);                                         in vpu_dbg_instance()
   485   inst->flows[inst->flow_idx] = flow;                        in vpu_inst_record_flow()
   486   inst->flow_idx = (inst->flow_idx + 1) % (ARRAY_SIZE(inst->flows));    in vpu_inst_record_flow()

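Here flows[] is not a flow table at all but a small ring buffer of debug events: vpu_inst_record_flow() writes at flow_idx and wraps modulo the array size, and vpu_dbg_instance() dumps the buffer starting at flow_idx so the oldest entry comes out first. A self-contained C sketch of that ring, with illustrative sizes and event values:

    /* Sketch of the vpu_dbg.c event ring: write at flow_idx, wrap modulo the
     * array size, dump starting from the oldest slot. */
    #include <stdio.h>

    #define N_FLOWS 16
    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    static unsigned int flows[N_FLOWS];   /* 0 means "slot unused" */
    static unsigned int flow_idx;

    static void record_flow(unsigned int flow)
    {
        flows[flow_idx] = flow;
        flow_idx = (flow_idx + 1) % ARRAY_SIZE(flows);
    }

    static void dump_flows(void)
    {
        /* Start at flow_idx so the oldest recorded event is printed first. */
        for (unsigned int i = 0; i < ARRAY_SIZE(flows); i++) {
            unsigned int idx = (flow_idx + i) % ARRAY_SIZE(flows);

            if (!flows[idx])
                continue;
            printf("event %u\n", flows[idx]);
        }
    }

    int main(void)
    {
        for (unsigned int e = 1; e <= 20; e++)
            record_flow(e);               /* older entries are overwritten */
        dump_flows();                     /* prints events 5..20 in order */
        return 0;
    }
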
/linux-6.3-rc2/drivers/infiniband/hw/hfi1/

tid_rdma.c
  1612   kfree(req->flows);                                         in hfi1_kern_exp_rcv_free_flows()
  1613   req->flows = NULL;                                         in hfi1_kern_exp_rcv_free_flows()
  1637   if (likely(req->flows))                                    in hfi1_kern_exp_rcv_alloc_flows()
  1639   flows = kmalloc_node(MAX_FLOWS * sizeof(*flows), gfp,      in hfi1_kern_exp_rcv_alloc_flows()
  1641   if (!flows)                                                in hfi1_kern_exp_rcv_alloc_flows()
  1645   flows[i].req = req;                                        in hfi1_kern_exp_rcv_alloc_flows()
  1646   flows[i].npagesets = 0;                                    in hfi1_kern_exp_rcv_alloc_flows()
  1650   req->flows = flows;                                        in hfi1_kern_exp_rcv_alloc_flows()
  1691   flow = &req->flows[tail];                                  in find_flow_ib()
  3070   flow = &req->flows[fidx];                                  in hfi1_tid_rdma_restart_req()
   [all …]

/linux-6.3-rc2/Documentation/networking/

nf_flowtable.rst
    33   specifies what flows are placed into the flowtable. Hence, packets follow the
    34   classic IP forwarding path unless the user explicitly instruct flows to use this
   111   You can identify offloaded flows through the [OFFLOAD] tag when listing your
   130   instead the real device is sufficient for the flowtable to track your flows.
   198   There is a workqueue that adds the flows to the hardware. Note that a few
   202   You can identify hardware offloaded flows through the [HW_OFFLOAD] tag when

scaling.rst
    31   of logical flows. Packets for each flow are steered to a separate receive
   188   to the same CPU is CPU load imbalance if flows vary in packet rate.
   194   Flow Limit is an optional RPS feature that prioritizes small flows
   195   during CPU contention by dropping packets from large flows slightly
   196   ahead of those from small flows. It is active only when an RPS or RFS
   202   new packet is dropped. Packets from other flows are still only
   206   even large flows maintain connectivity.
   224   identification of large flows and fewer false positives. The default
   261   flows to the CPUs where those flows are being processed. The flow hash
   266   same CPU. Indeed, with many flows and few CPUs, it is very likely that
   [all …]

openvswitch.rst
    16   table" that userspace populates with "flows" that map from keys based
   104   A wildcarded flow can represent a group of exact match flows. Each '1' bit
   108   by reduce the number of new flows need to be processed by the user space program.
   120   two possible approaches: reactively install flows as they miss the kernel
   130   The behavior when using overlapping wildcarded flows is undefined. It is the
   133   performs best-effort detection of overlapping wildcarded flows and may reject
   146   future operations. The kernel is not required to index flows by the original

pktgen.rst
    97   flows: 0 flowlen: 0
   112   flows: 0
   285   pgset "flows 1"
   379   flows

/linux-6.3-rc2/Documentation/admin-guide/pm/

system-wide.rst
    11   suspend-flows

/linux-6.3-rc2/drivers/net/ethernet/mellanox/mlx5/core/

en_rep.h
   182   struct list_head flows;                                    member
   207   struct list_head flows;                                    member

eswitch_offloads.c
  1047   struct mlx5_flow_handle **flows;                           in esw_add_fdb_peer_miss_rules() local
  1063   flows = kvcalloc(nvports, sizeof(*flows), GFP_KERNEL);     in esw_add_fdb_peer_miss_rules()
  1064   if (!flows) {                                              in esw_add_fdb_peer_miss_rules()
  1084   flows[vport->index] = flow;                                in esw_add_fdb_peer_miss_rules()
  1096   flows[vport->index] = flow;                                in esw_add_fdb_peer_miss_rules()
  1110   flows[vport->index] = flow;                                in esw_add_fdb_peer_miss_rules()
  1120   if (!flows[vport->index])                                  in esw_add_fdb_peer_miss_rules()
  1122   mlx5_del_flow_rules(flows[vport->index]);                  in esw_add_fdb_peer_miss_rules()
  1135   kvfree(flows);                                             in esw_add_fdb_peer_miss_rules()
  1143   struct mlx5_flow_handle **flows;                           in esw_del_fdb_peer_miss_rules() local
   [all …]

/linux-6.3-rc2/Documentation/userspace-api/media/mediactl/

media-controller-model.rst
    26   by an entity flows from the entity's output to one or more entity
    31   pads, either on the same entity or on different entities. Data flows

/linux-6.3-rc2/net/core/

pktgen.c
   415   struct flow_state *flows;                                  member
  2320   pkt_dev->flows[flow].count = 0;                            in f_pick()
  2321   pkt_dev->flows[flow].flags = 0;                            in f_pick()
  2331   pkt_dev->flows[flow].count = 0;                            in f_pick()
  2332   pkt_dev->flows[flow].flags = 0;                            in f_pick()
  2366   pkt_dev->flows[flow].x = x;                                in get_ipsec_sa()
  2597   pkt_dev->flows[flow].count++;                              in mod_cur_headers()
  2681   pkt_dev->flows[i].x = NULL;                                in free_SAs()
  3749   if (pkt_dev->flows == NULL) {                              in pktgen_add_device()
  3809   vfree(pkt_dev->flows);                                     in pktgen_add_device()
   [all …]

/linux-6.3-rc2/Documentation/admin-guide/blockdev/drbd/

figures.rst
     5   Data flows that Relate some functions, and write packets

/linux-6.3-rc2/drivers/net/ethernet/mellanox/mlx5/core/en/

tc_tun_encap.c
   291   list_for_each_entry(efi, &e->flows, list) {                in mlx5e_take_all_encap_flows()
   412   list_for_each_entry_safe(efi, tmp, &e->flows, list) {      in mlx5e_tc_update_neigh_used_value()
   457   WARN_ON(!list_empty(&e->flows));                           in mlx5e_encap_dealloc()
   474   WARN_ON(!list_empty(&d->flows));                           in mlx5e_decap_dealloc()
   846   INIT_LIST_HEAD(&e->flows);                                 in mlx5e_attach_encap()
   883   list_add(&flow->encaps[out_index].list, &e->flows);        in mlx5e_attach_encap()
   951   INIT_LIST_HEAD(&d->flows);                                 in mlx5e_attach_decap()
   974   list_add(&flow->l3_to_l2_reformat, &d->flows);             in mlx5e_attach_decap()

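In the mlx5 hits, "flows" is a list head rather than an array: an encap/decap entry owns a flows list, each flow embeds a list node and is linked onto it with list_add(), and callers iterate with list_for_each_entry(). The sketch below is a stripped-down userspace rendition of that intrusive-list idiom (a single-linked stand-in for <linux/list.h>, with a container_of() macro to recover the flow from its embedded node); the structure names are illustrative only.

    /* Minimal intrusive list: an owner holds a 'flows' head, each flow embeds
     * its linkage, and iteration recovers the flow via container_of(). */
    #include <stddef.h>
    #include <stdio.h>

    struct list_node {
        struct list_node *next;
    };

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct encap_entry {
        struct list_node flows;     /* head of the list of attached flows */
    };

    struct flow {
        int id;
        struct list_node list;      /* linkage onto encap_entry.flows */
    };

    static void list_add(struct list_node *node, struct list_node *head)
    {
        node->next = head->next;
        head->next = node;
    }

    int main(void)
    {
        struct encap_entry e = { .flows = { NULL } };
        struct flow f1 = { .id = 1 }, f2 = { .id = 2 };

        list_add(&f1.list, &e.flows);   /* like list_add(&flow->...list, &e->flows) */
        list_add(&f2.list, &e.flows);

        /* Walk the flows attached to this encap entry. */
        for (struct list_node *n = e.flows.next; n; n = n->next) {
            struct flow *f = container_of(n, struct flow, list);
            printf("flow %d attached\n", f->id);
        }
        return 0;
    }
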