
Searched refs:fq (Results 1 – 25 of 60) sorted by relevance

/linux/include/net/
fq_impl.h
40 static void fq_adjust_removal(struct fq *fq, in fq_adjust_removal() argument
197 static void fq_tin_enqueue(struct fq *fq, in fq_tin_enqueue() argument
230 oom = (fq->memory_usage > fq->memory_limit); in fq_tin_enqueue()
231 while (fq->backlog > fq->limit || oom) { in fq_tin_enqueue()
248 static void fq_flow_filter(struct fq *fq, in fq_flow_filter() argument
269 static void fq_tin_filter(struct fq *fq, in fq_tin_filter() argument
285 static void fq_flow_reset(struct fq *fq, in fq_flow_reset() argument
307 static void fq_tin_reset(struct fq *fq, in fq_tin_reset() argument
349 memset(fq, 0, sizeof(fq[0])); in fq_init()
357 fq->flows = kvcalloc(fq->flows_cnt, sizeof(fq->flows[0]), GFP_KERNEL); in fq_init()
[all …]
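
The hits above outline the admission control in fq_tin_enqueue(): the packet is accounted first, then flows are trimmed while the queue is over its byte backlog (fq->limit) or its memory budget (fq->memory_limit). A minimal paraphrase of that loop, assuming fq_find_fattest_flow() and fq_flow_drop() as names for the helpers not shown in the hits:

        struct fq_flow *flow;
        bool oom = (fq->memory_usage > fq->memory_limit);

        while (fq->backlog > fq->limit || oom) {
                flow = fq_find_fattest_flow(fq);        /* assumed helper */
                if (!flow)
                        break;
                if (!fq_flow_drop(fq, flow, free_func)) /* assumed helper */
                        break;
                flow->tin->overlimit++;
                fq->overlimit++;
                if (oom) {
                        fq->overmemory++;
                        oom = (fq->memory_usage > fq->memory_limit);
                }
        }

Always dropping from the fattest flow is what makes this fq_codel-like: bulky flows absorb the losses while thin flows keep their packets.
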
ipv6_frag.h
37 fq->ecn = 0; in ip6frag_init()
50 return jhash2((const u32 *)&fq->key.v6, in ip6frag_obj_hashfn()
58 const struct inet_frag_queue *fq = ptr; in ip6frag_obj_cmpfn() local
70 if (fq->q.fqdir->dead) in ip6frag_expire_frag_queue()
72 spin_lock(&fq->q.lock); in ip6frag_expire_frag_queue()
74 if (fq->q.flags & INET_FRAG_COMPLETE) in ip6frag_expire_frag_queue()
77 inet_frag_kill(&fq->q); in ip6frag_expire_frag_queue()
94 head = inet_frag_pull_head(&fq->q); in ip6frag_expire_frag_queue()
99 spin_unlock(&fq->q.lock); in ip6frag_expire_frag_queue()
106 spin_unlock(&fq->q.lock); in ip6frag_expire_frag_queue()
[all …]
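
The ip6frag_expire_frag_queue() hits sketch the standard expiry path for a fragment queue: bail out if the namespace is being torn down, take the queue lock, skip queues that already completed, kill the queue, and only answer with an ICMP error if a head fragment can be pulled. A condensed sketch under those assumptions (the ICMPv6 reply and RCU details are elided):

        if (fq->q.fqdir->dead)                  /* netns is going away */
                return;
        spin_lock(&fq->q.lock);
        if (fq->q.flags & INET_FRAG_COMPLETE)
                goto out;                       /* already reassembled */
        inet_frag_kill(&fq->q);                 /* unhash, stop the timer */
        head = inet_frag_pull_head(&fq->q);
        if (!head)
                goto out;
        spin_unlock(&fq->q.lock);
        /* send ICMPV6_TIME_EXCEED based on head, then free it */
        return;
out:
        spin_unlock(&fq->q.lock);
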
fq.h
64 struct fq { struct
81 typedef struct sk_buff *fq_tin_dequeue_t(struct fq *, argument
85 typedef void fq_skb_free_t(struct fq *,
91 typedef bool fq_skb_filter_t(struct fq *,
97 typedef struct fq_flow *fq_flow_get_default_t(struct fq *,
/linux/block/
blk-flush.c
168 struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx]; in blk_flush_complete_seq()
248 running = &fq->flush_queue[fq->flush_running_idx]; in flush_end_io()
249 BUG_ON(fq->flush_pending_idx == fq->flush_running_idx); in flush_end_io()
286 struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx]; in blk_kick_flush()
468 fq = kzalloc_node(sizeof(*fq), flags, node); in blk_alloc_flush_queue()
469 if (!fq) in blk_alloc_flush_queue()
476 if (!fq->flush_rq) in blk_alloc_flush_queue()
483 return fq; in blk_alloc_flush_queue()
486 kfree(fq); in blk_alloc_flush_queue()
494 if (!fq) in blk_free_flush_queue()
[all …]
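
Two things stand out in the blk-flush.c hits: the flush machinery double-buffers requests across two lists indexed by flush_pending_idx and flush_running_idx (which must differ while a flush is in flight, hence the BUG_ON), and blk_alloc_flush_queue() follows the usual two-stage allocation with a single error label. A sketch of that allocation shape, with the flush request allocation reduced to an assumed helper:

        struct blk_flush_queue *fq;

        fq = kzalloc_node(sizeof(*fq), flags, node);
        if (!fq)
                return NULL;

        fq->flush_rq = alloc_flush_rq(node, flags);     /* assumed helper */
        if (!fq->flush_rq)
                goto fail;

        INIT_LIST_HEAD(&fq->flush_queue[0]);    /* pending buffer */
        INIT_LIST_HEAD(&fq->flush_queue[1]);    /* running buffer */
        return fq;

fail:
        kfree(fq);
        return NULL;
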
/linux/net/ipv6/netfilter/
nf_conntrack_reasm.c
137 struct frag_queue *fq; in nf_ct_frag6_expire() local
141 ip6frag_expire_frag_queue(fq->q.fqdir->net, fq); in nf_ct_frag6_expire()
205 if (end < fq->q.len || in nf_ct_frag6_queue()
206 ((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len)) { in nf_ct_frag6_queue()
211 fq->q.len = end; in nf_ct_frag6_queue()
224 if (end > fq->q.len) { in nf_ct_frag6_queue()
230 fq->q.len = end; in nf_ct_frag6_queue()
268 fq->ecn |= ecn; in nf_ct_frag6_queue()
277 fq->nhoffset = nhoff; in nf_ct_frag6_queue()
282 fq->q.meat == fq->q.len) { in nf_ct_frag6_queue()
[all …]
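
The length checks in nf_ct_frag6_queue() recur nearly verbatim in the 6lowpan and ipv6 reassembly hits below, so they are worth spelling out once. With end denoting the offset just past this fragment's payload: the last fragment fixes the total length and may neither shrink it nor disagree with a previously seen last fragment, while a middle fragment may only grow the length as long as no last fragment has arrived. Paraphrased:

        if (last_fragment) {
                if (end < fq->q.len ||
                    ((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len))
                        goto err;               /* inconsistent total length */
                fq->q.flags |= INET_FRAG_LAST_IN;
                fq->q.len = end;
        } else {
                if (end > fq->q.len) {
                        if (fq->q.flags & INET_FRAG_LAST_IN)
                                goto err;       /* grows past the known end */
                        fq->q.len = end;
                }
        }
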
/linux/net/ieee802154/6lowpan/
reassembly.c
47 struct frag_queue *fq; in lowpan_frag_expire() local
51 spin_lock(&fq->q.lock); in lowpan_frag_expire()
56 inet_frag_kill(&fq->q); in lowpan_frag_expire()
59 inet_frag_put(&fq->q); in lowpan_frag_expire()
108 if (end < fq->q.len || in lowpan_frag_queue()
109 ((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len)) in lowpan_frag_queue()
112 fq->q.len = end; in lowpan_frag_queue()
114 if (end > fq->q.len) { in lowpan_frag_queue()
118 fq->q.len = end; in lowpan_frag_queue()
140 fq->q.meat == fq->q.len) { in lowpan_frag_queue()
[all …]
/linux/net/ipv6/
reassembly.c
76 struct frag_queue *fq; in ip6_frag_expire() local
80 ip6frag_expire_frag_queue(fq->q.fqdir->net, fq); in ip6_frag_expire()
148 ((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len)) in ip6_frag_queue()
151 fq->q.len = end; in ip6_frag_queue()
167 fq->q.len = end; in ip6_frag_queue()
198 fq->ecn |= ecn; in ip6_frag_queue()
209 fq->nhoffset = nhoff; in ip6_frag_queue()
214 fq->q.meat == fq->q.len) { in ip6_frag_queue()
277 nhoff = fq->nhoffset; in ip6_frag_reasm()
375 if (fq) { in ipv6_frag_rcv()
[all …]
/linux/drivers/soc/fsl/qbman/
qman.c
1514 fq->cb.fqs(p, fq, msg); in qm_mr_process_task()
1521 fq->cb.fqs(p, fq, msg); in qm_mr_process_task()
1533 fq->cb.ern(p, fq, msg); in qm_mr_process_task()
1819 fq_table[fq->idx] = fq; in qman_create_fq()
1887 qm_fqid_set(&mcc->fq, fq->fqid); in qman_init_fq()
1987 qm_fqid_set(&mcc->fq, fq->fqid); in qman_schedule_fq()
2030 qm_fqid_set(&mcc->fq, fq->fqid); in qman_retire_fq()
2075 fq->cb.fqs(p, fq, &msg); in qman_retire_fq()
2109 qm_fqid_set(&mcc->fq, fq->fqid); in qman_oos_fq()
2135 qm_fqid_set(&mcc->fq, fq->fqid); in qman_query_fq()
[all …]
qman_test_api.c
112 if (qman_enqueue(fq, &fd)) { in do_enqueues()
126 struct qman_fq *fq = &fq_base; in qman_test_api() local
133 err = qman_create_fq(0, FQ_FLAGS, fq); in qman_test_api()
144 err = do_enqueues(fq); in qman_test_api()
154 err = do_enqueues(fq); in qman_test_api()
173 err = do_enqueues(fq); in qman_test_api()
177 err = qman_schedule_fq(fq); in qman_test_api()
185 err = qman_retire_fq(fq, &flags); in qman_test_api()
196 err = qman_oos_fq(fq); in qman_test_api()
201 qman_destroy_fq(fq); in qman_test_api()
[all …]
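
qman_test_api.c walks a frame queue through its full life cycle, which makes it a compact reference for the QMan API declared in qman.h further down. A compressed sketch of that walk, with error checks and the qman_init_fq() options elided (the create flag shown here is an assumption; the test uses its own FQ_FLAGS):

        struct qman_fq fq;
        struct qm_fd fd = {};
        u32 flags;

        qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, &fq); /* fqid chosen by QMan */
        qman_init_fq(&fq, 0, NULL);     /* options elided */
        qman_enqueue(&fq, &fd);         /* push a frame descriptor */
        qman_schedule_fq(&fq);          /* parked -> scheduled */
        qman_retire_fq(&fq, &flags);    /* scheduled -> retired */
        qman_oos_fq(&fq);               /* retired -> out of service */
        qman_destroy_fq(&fq);           /* release the software FQ */

The caam/qi.c kill_fq() hits below follow the same tail of this sequence (retire, out-of-service, destroy, free) when tearing down a live queue.
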
/linux/drivers/iommu/
iova.c
67 iovad->fq = NULL; in init_iova_domain()
78 return !!iovad->fq; in has_iova_flush_queue()
115 struct iova_fq *fq; in init_iova_flush_queue() local
118 fq->head = 0; in init_iova_flush_queue()
119 fq->tail = 0; in init_iova_flush_queue()
124 iovad->fq = queue; in init_iova_flush_queue()
548 return (((fq->tail + 1) % IOVA_FQ_SIZE) == fq->head); in fq_full()
581 fq->head = (fq->head + 1) % IOVA_FQ_SIZE; in fq_ring_free()
605 struct iova_fq *fq = per_cpu_ptr(iovad->fq, cpu); in fq_destroy_all_entries() local
625 fq = per_cpu_ptr(iovad->fq, cpu); in fq_flush_timeout()
[all …]
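
The iova.c hits show that the IOVA flush queue is a fixed-size ring with classic head/tail arithmetic: one slot stays empty so a full ring and an empty ring are distinguishable. The two expressions from the hits, plus the matching empty test (assumed here as the counterpart), make the invariant explicit:

        static bool fq_full(struct iova_fq *fq)
        {
                /* one slot is sacrificed so that full != empty */
                return ((fq->tail + 1) % IOVA_FQ_SIZE) == fq->head;
        }

        static bool fq_empty(struct iova_fq *fq)        /* assumed counterpart */
        {
                return fq->head == fq->tail;
        }

        /* fq_ring_free() retires entries by advancing head toward tail: */
        fq->head = (fq->head + 1) % IOVA_FQ_SIZE;
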
/linux/drivers/crypto/caam/
qi.c
246 ret = qman_retire_fq(fq, &flags); in kill_fq()
271 fq->fqid); in kill_fq()
276 ret = qman_oos_fq(fq); in kill_fq()
280 qman_destroy_fq(fq); in kill_fq()
281 kfree(fq); in kill_fq()
294 ret = qman_query_fq_np(fq, &np); in empty_caam_fq()
614 struct qman_fq *fq; in alloc_rsp_fq_cpu() local
617 fq = kzalloc(sizeof(*fq), GFP_KERNEL | GFP_DMA); in alloc_rsp_fq_cpu()
618 if (!fq) in alloc_rsp_fq_cpu()
627 kfree(fq); in alloc_rsp_fq_cpu()
[all …]
/linux/drivers/net/ethernet/freescale/dpaa2/
dpaa2-eth-debugfs.c
47 static char *fq_type_to_str(struct dpaa2_eth_fq *fq) in fq_type_to_str() argument
49 switch (fq->type) { in fq_type_to_str()
62 struct dpaa2_eth_fq *fq; in dpaa2_dbg_fqs_show() local
71 fq = &priv->fq[i]; in dpaa2_dbg_fqs_show()
72 err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt); in dpaa2_dbg_fqs_show()
77 if (!fq->stats.frames && !fcnt) in dpaa2_dbg_fqs_show()
81 fq->fqid, in dpaa2_dbg_fqs_show()
82 fq->target_cpu, in dpaa2_dbg_fqs_show()
83 fq->tc, in dpaa2_dbg_fqs_show()
84 fq_type_to_str(fq), in dpaa2_dbg_fqs_show()
[all …]
dpaa2-eth.c
334 fq = &priv->fq[queue_id]; in dpaa2_eth_xdp_enqueue()
635 fq->consume(priv, ch, fd, fq); in dpaa2_eth_consume_frames()
1177 fq = &priv->fq[queue_mapping]; in __dpaa2_eth_tx()
1687 fq = &priv->fq[i]; in dpaa2_eth_set_rx_taildrop()
1692 fq->tc, fq->flowid, &td); in dpaa2_eth_set_rx_taildrop()
1833 fq = &priv->fq[i]; in dpaa2_eth_ingress_fq_count()
2451 fq = &priv->fq[smp_processor_id()]; in dpaa2_eth_xdp_xmit()
2491 fq = &priv->fq[i % num_queues]; in update_xps()
2878 fq = &priv->fq[i]; in dpaa2_eth_set_fq_affinity()
3172 fq = &priv->fq[i]; in dpaa2_eth_update_tx_fqids()
[all …]
dpaa2-switch.c
2469 ethsw->fq[i].ethsw = ethsw; in dpaa2_switch_setup_fqs()
2473 ethsw->fq[i].ethsw = ethsw; in dpaa2_switch_setup_fqs()
2701 ethsw->fq[i].store = in dpaa2_switch_alloc_rings()
2704 if (!ethsw->fq[i].store) { in dpaa2_switch_alloc_rings()
2731 err = dpaa2_io_service_pull_fq(NULL, fq->fqid, fq->store); in dpaa2_switch_pull_fq()
2776 struct dpaa2_switch_fq *fq; in dpaa2_switch_poll() local
2816 struct dpaa2_switch_fq *fq; in dpaa2_switch_fqdan_cb() local
2820 napi_schedule(&fq->napi); in dpaa2_switch_fqdan_cb()
2830 nctx = &ethsw->fq[i].nctx; in dpaa2_switch_setup_dpio()
2837 nctx->id = ethsw->fq[i].fqid; in dpaa2_switch_setup_dpio()
[all …]
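
The dpaa2-switch hits illustrate how a frame-queue data-availability notification (FQDAN) becomes deferred processing: the callback receives only the notification context, recovers the owning FQ because the context is embedded in it, and hands the work to NAPI. A sketch of that callback shape, inferred from the fq local and napi_schedule() hits (the exact signature is an assumption):

        static void fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
        {
                struct dpaa2_switch_fq *fq;

                /* nctx is embedded in the FQ, so convert back */
                fq = container_of(nctx, struct dpaa2_switch_fq, nctx);
                napi_schedule(&fq->napi);
        }

The poll function then drains frames with dpaa2_io_service_pull_fq() into the per-FQ store, as the dpaa2_switch_pull_fq() hit shows.
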
/linux/drivers/net/ethernet/freescale/dpaa/
dpaa_eth_trace.h
58 struct qman_fq *fq,
62 TP_ARGS(netdev, fq, fd),
80 __entry->fqid = fq->fqid;
104 struct qman_fq *fq,
107 TP_ARGS(netdev, fq, fd)
114 struct qman_fq *fq,
117 TP_ARGS(netdev, fq, fd)
124 struct qman_fq *fq,
127 TP_ARGS(netdev, fq, fd)
dpaa_eth_sysfs.c
59 struct dpaa_fq *fq; in dpaa_eth_show_fqids() local
66 list_for_each_entry_safe(fq, tmp, &priv->dpaa_fq_list, list) { in dpaa_eth_show_fqids()
67 switch (fq->fq_type) { in dpaa_eth_show_fqids()
93 if (prev && (abs(fq->fqid - prev->fqid) != 1 || in dpaa_eth_show_fqids()
104 if (prev && abs(fq->fqid - prev->fqid) == 1 && in dpaa_eth_show_fqids()
106 last_fqid = fq->fqid; in dpaa_eth_show_fqids()
108 first_fqid = fq->fqid; in dpaa_eth_show_fqids()
109 last_fqid = fq->fqid; in dpaa_eth_show_fqids()
112 prev = fq; in dpaa_eth_show_fqids()
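
The interesting part of dpaa_eth_show_fqids() is the run-length coalescing: FQIDs are printed as "first-last" ranges, and a new range starts whenever the next FQID is not exactly one above the previous (or the FQ type changes). Setting the list walk and type switch aside, the core idea reduces to this sketch over a plain sorted array (names hypothetical):

        u32 first = fqids[0], last = fqids[0];

        for (i = 1; i < n; i++) {
                if (fqids[i] == last + 1) {     /* extends the current run */
                        last = fqids[i];
                        continue;
                }
                pr_info("%u-%u\n", first, last);        /* flush finished run */
                first = last = fqids[i];
        }
        pr_info("%u-%u\n", first, last);        /* flush the final run */
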
dpaa_eth.c
645 fq->wq = 1; in dpaa_assign_wq()
649 fq->wq = 5; in dpaa_assign_wq()
653 fq->wq = 6; in dpaa_assign_wq()
659 fq->wq = 6; in dpaa_assign_wq()
663 fq->wq = 2; in dpaa_assign_wq()
667 fq->wq = 1; in dpaa_assign_wq()
671 fq->wq = 0; in dpaa_assign_wq()
680 fq->fq_type, fq->fqid); in dpaa_assign_wq()
920 struct dpaa_fq *fq; in dpaa_fq_setup() local
999 struct qman_fq *fq; in dpaa_fq_init() local
[all …]
/linux/net/ipv4/
inet_fragment.c
130 struct inet_frag_queue *fq = ptr; in inet_frags_free_cb() local
135 spin_lock_bh(&fq->lock); in inet_frags_free_cb()
137 fq->flags |= INET_FRAG_COMPLETE; in inet_frags_free_cb()
142 spin_unlock_bh(&fq->lock); in inet_frags_free_cb()
145 inet_frag_destroy(fq); in inet_frags_free_cb()
227 if (del_timer(&fq->timer)) in inet_frag_kill()
228 refcount_dec(&fq->refcnt); in inet_frag_kill()
243 refcount_dec(&fq->refcnt); in inet_frag_kill()
366 fq = prev; in inet_frag_find()
368 fq = NULL; in inet_frag_find()
[all …]
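
The inet_frag_kill() hits encode a small but easy-to-miss refcount rule: the pending expiry timer holds its own reference to the queue, so that reference is dropped only when del_timer() reports the timer was still pending, i.e. this caller actually cancelled it. A condensed view, with the rhashtable removal reduced to a comment:

        if (del_timer(&fq->timer))
                refcount_dec(&fq->refcnt);      /* timer's reference */

        if (!(fq->flags & INET_FRAG_COMPLETE)) {
                fq->flags |= INET_FRAG_COMPLETE;
                /* unhash from the fqdir rhashtable, then ... */
                refcount_dec(&fq->refcnt);      /* ... drop the table's ref */
        }
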
/linux/net/xdp/
xsk_buff_pool.c
79 pool->fq = xs->fq_tmp; in xp_create_and_assign_umem()
206 if (!pool->fq || !pool->cq) in xp_assign_dev_shared()
236 if (pool->fq) { in xp_release_deferred()
237 xskq_destroy(pool->fq); in xp_release_deferred()
238 pool->fq = NULL; in xp_release_deferred()
465 pool->fq->queue_empty_descs++; in __xp_alloc()
472 pool->fq->invalid_descs++; in __xp_alloc()
473 xskq_cons_release(pool->fq); in __xp_alloc()
488 xskq_cons_release(pool->fq); in __xp_alloc()
540 pool->fq->invalid_descs++; in xp_alloc_new_from_fq()
[all …]
xsk_diag.c
70 if (!err && pool && pool->fq) in xsk_diag_put_umem()
71 err = xsk_diag_put_ring(pool->fq, in xsk_diag_put_umem()
86 du.n_fill_ring_empty = xs->pool ? xskq_nb_queue_empty_descs(xs->pool->fq) : 0; in xsk_diag_put_stats()
/linux/net/mac80211/
debugfs.c
81 struct fq *fq = &local->fq; in aqm_read() local
85 spin_lock_bh(&local->fq.lock); in aqm_read()
99 fq->flows_cnt, in aqm_read()
100 fq->backlog, in aqm_read()
101 fq->overmemory, in aqm_read()
102 fq->overlimit, in aqm_read()
103 fq->collisions, in aqm_read()
104 fq->memory_usage, in aqm_read()
105 fq->memory_limit, in aqm_read()
106 fq->limit, in aqm_read()
[all …]
tx.c
1336 struct fq *fq; in codel_dequeue_func() local
1341 fq = &local->fq; in codel_dequeue_func()
1375 local = container_of(fq, struct ieee80211_local, fq); in fq_tin_dequeue_func()
1403 static void fq_skb_free_func(struct fq *fq, in fq_skb_free_func() argument
1418 struct fq *fq = &local->fq; in ieee80211_txq_enqueue() local
1454 struct fq *fq = &local->fq; in ieee80211_txq_remove_vlan() local
1520 struct fq *fq = &local->fq; in ieee80211_txq_purge() local
1551 struct fq *fq = &local->fq; in ieee80211_txq_setup_flows() local
1605 struct fq *fq = &local->fq; in ieee80211_txq_teardown_flows() local
3296 struct fq *fq = &local->fq; in ieee80211_amsdu_aggregate() local
[all …]
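
The tx.c hits show how mac80211 plugs into the callback typedefs from fq.h above: struct fq is embedded in struct ieee80211_local, so every callback that receives a bare struct fq * recovers its driver context with container_of(). A sketch of that pattern, modeled on fq_tin_dequeue_func (the dequeue helper name is assumed):

        static struct sk_buff *fq_tin_dequeue_func(struct fq *fq,
                                                   struct fq_tin *tin,
                                                   struct fq_flow *flow)
        {
                struct ieee80211_local *local =
                        container_of(fq, struct ieee80211_local, fq);

                /* CoDel bookkeeping against local would go here */
                return fq_flow_dequeue(fq, flow);       /* assumed helper */
        }
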
/linux/include/soc/fsl/
qman.h
300 } __packed fq; /* FQRN/FQRNI/FQRL/FQPN */ member
691 struct qman_fq *fq,
699 typedef void (*qman_cb_mr)(struct qman_portal *qm, struct qman_fq *fq,
969 int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq);
979 void qman_destroy_fq(struct qman_fq *fq);
985 u32 qman_fq_fqid(struct qman_fq *fq);
1030 int qman_schedule_fq(struct qman_fq *fq);
1050 int qman_retire_fq(struct qman_fq *fq, u32 *flags);
1059 int qman_oos_fq(struct qman_fq *fq);
1078 int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr);
[all …]
/linux/samples/bpf/
xdpsock_user.c
139 struct xsk_ring_prod fq; member
840 ret = xsk_ring_prod__reserve(&umem->fq, in xsk_populate_fill_ring()
845 *xsk_ring_prod__fill_addr(&umem->fq, idx++) = in xsk_populate_fill_ring()
1154 ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq); in complete_tx_l2fwd()
1163 ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq); in complete_tx_l2fwd()
1167 *xsk_ring_prod__fill_addr(&umem->fq, idx_fq++) = in complete_tx_l2fwd()
1170 xsk_ring_prod__submit(&xsk->umem->fq, rcvd); in complete_tx_l2fwd()
1212 ret = xsk_ring_prod__reserve(&xsk->umem->fq, rcvd, &idx_fq); in rx_drop()
1220 ret = xsk_ring_prod__reserve(&xsk->umem->fq, rcvd, &idx_fq); in rx_drop()
1232 *xsk_ring_prod__fill_addr(&xsk->umem->fq, idx_fq++) = orig; in rx_drop()
[all …]
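
The xdpsock_user.c hits all revolve around the AF_XDP fill ring (the struct xsk_ring_prod fq member of the umem): the pattern is reserve slots, write frame addresses into them, then submit to publish. A minimal replenish sketch using the same libbpf calls, with addrs[] standing in for the frame addresses being recycled:

        u32 idx_fq;
        unsigned int i;
        int ret;

        ret = xsk_ring_prod__reserve(&umem->fq, ndescs, &idx_fq);
        while (ret != ndescs)           /* ring full: retry (or poll/kick) */
                ret = xsk_ring_prod__reserve(&umem->fq, ndescs, &idx_fq);

        for (i = 0; i < ndescs; i++)
                *xsk_ring_prod__fill_addr(&umem->fq, idx_fq++) = addrs[i];

        xsk_ring_prod__submit(&umem->fq, ndescs);       /* publish to kernel */
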
/linux/tools/testing/selftests/netfilter/
nft_trans_stress.sh
56 ip netns exec "$testns" taskset $mask ping -4 127.0.0.1 -fq > /dev/null &
57 ip netns exec "$testns" taskset $mask ping -6 ::1 -fq > /dev/null &
