/linux-6.3-rc2/include/net/

fq_impl.h
     40  static void fq_adjust_removal(struct fq *fq,  in fq_adjust_removal() argument
    197  static void fq_tin_enqueue(struct fq *fq,  in fq_tin_enqueue() argument
    234  oom = (fq->memory_usage > fq->memory_limit);  in fq_tin_enqueue()
    235  while (fq->backlog > fq->limit || oom) {  in fq_tin_enqueue()
    252  static void fq_flow_filter(struct fq *fq,  in fq_flow_filter() argument
    273  static void fq_tin_filter(struct fq *fq,  in fq_tin_filter() argument
    289  static void fq_flow_reset(struct fq *fq,  in fq_flow_reset() argument
    311  static void fq_tin_reset(struct fq *fq,  in fq_tin_reset() argument
    353  memset(fq, 0, sizeof(fq[0]));  in fq_init()
    361  fq->flows = kvcalloc(fq->flows_cnt, sizeof(fq->flows[0]), GFP_KERNEL);  in fq_init()
    [all …]

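The enqueue path above (lines 234-235) keeps dropping queued packets until the queue is back under both its packet budget (fq->limit) and its memory budget (fq->memory_limit). A minimal user-space model of that drop loop, assuming simplified fields that mirror struct fq in fq.h; fq_drop_one() is a hypothetical stand-in for dropping one packet from the fattest flow:

    #include <stdbool.h>

    /* Simplified counterparts of the accounting fields in include/net/fq.h. */
    struct fq_model {
        unsigned int backlog;      /* packets queued across all flows */
        unsigned int limit;        /* packet budget */
        unsigned int memory_usage; /* bytes queued */
        unsigned int memory_limit; /* byte budget */
        unsigned int overlimit;    /* drop statistics, as in struct fq */
        unsigned int overmemory;
    };

    /* Hypothetical stand-in: drop one packet from the longest flow. */
    static void fq_drop_one(struct fq_model *fq, unsigned int pkt_bytes)
    {
        fq->backlog--;
        fq->memory_usage -= pkt_bytes;
    }

    static void fq_enqueue_model(struct fq_model *fq, unsigned int pkt_bytes)
    {
        bool oom;

        fq->backlog++;
        fq->memory_usage += pkt_bytes;

        /* Same shape as the loop in fq_tin_enqueue(): drop until under budget. */
        oom = (fq->memory_usage > fq->memory_limit);
        while (fq->backlog > fq->limit || oom) {
            if (oom)
                fq->overmemory++;
            else
                fq->overlimit++;
            fq_drop_one(fq, pkt_bytes);
            oom = (fq->memory_usage > fq->memory_limit);
        }
    }
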
ipv6_frag.h
     38  fq->ecn = 0;  in ip6frag_init()
     51  return jhash2((const u32 *)&fq->key.v6,  in ip6frag_obj_hashfn()
     72  if (READ_ONCE(fq->q.fqdir->dead))  in ip6frag_expire_frag_queue()
     74  spin_lock(&fq->q.lock);  in ip6frag_expire_frag_queue()
     76  if (fq->q.flags & INET_FRAG_COMPLETE)  in ip6frag_expire_frag_queue()
     79  fq->q.flags |= INET_FRAG_DROP;  in ip6frag_expire_frag_queue()
     80  inet_frag_kill(&fq->q);  in ip6frag_expire_frag_queue()
     97  head = inet_frag_pull_head(&fq->q);  in ip6frag_expire_frag_queue()
    102  spin_unlock(&fq->q.lock);  in ip6frag_expire_frag_queue()
    109  spin_unlock(&fq->q.lock);  in ip6frag_expire_frag_queue()
    [all …]

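Read top to bottom, the ip6frag_expire_frag_queue() excerpts trace the canonical teardown order for a fragment queue: skip a dying netns, take the queue lock, bail out if reassembly already completed, mark the queue dropped, unhash it with inet_frag_kill(), and only then pull the head skb for the ICMP time-exceeded report. A kernel-style sketch of that shape (not standalone; it assumes the usual <net/inet_frag.h> environment and elides the ICMP send):

    /* Kernel-style sketch mirroring ip6frag_expire_frag_queue(); not a
     * standalone program. */
    static void frag_expire_sketch(struct net *net, struct frag_queue *fq)
    {
        struct sk_buff *head;

        if (READ_ONCE(fq->q.fqdir->dead)) /* netns teardown: nothing to do */
            return;

        spin_lock(&fq->q.lock);

        if (fq->q.flags & INET_FRAG_COMPLETE) /* reassembly won the race */
            goto out;

        fq->q.flags |= INET_FRAG_DROP;
        inet_frag_kill(&fq->q);             /* unhash; no new fragments join */

        head = inet_frag_pull_head(&fq->q); /* first fragment, if present */
        if (!head)
            goto out;

        spin_unlock(&fq->q.lock);
        /* ... send ICMPV6_TIME_EXCEED based on head, then consume it ... */
        return;

    out:
        spin_unlock(&fq->q.lock);
    }
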
fq.h
     68  struct fq {  struct
     85  typedef struct sk_buff *fq_tin_dequeue_t(struct fq *,  argument
     89  typedef void fq_skb_free_t(struct fq *,
     95  typedef bool fq_skb_filter_t(struct fq *,
    101  typedef struct fq_flow *fq_flow_get_default_t(struct fq *,

/linux-6.3-rc2/block/

blk-flush.c
    175  struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];  in blk_flush_complete_seq()
    257  running = &fq->flush_queue[fq->flush_running_idx];  in flush_end_io()
    258  BUG_ON(fq->flush_pending_idx == fq->flush_running_idx);  in flush_end_io()
    296  struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];  in blk_kick_flush()
    477  fq = kzalloc_node(sizeof(*fq), flags, node);  in blk_alloc_flush_queue()
    478  if (!fq)  in blk_alloc_flush_queue()
    485  if (!fq->flush_rq)  in blk_alloc_flush_queue()
    492  return fq;  in blk_alloc_flush_queue()
    495  kfree(fq);  in blk_alloc_flush_queue()
    503  if (!fq)  in blk_free_flush_queue()
    [all …]

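blk-flush.c double-buffers flush requests: fq->flush_queue[] holds two lists, with flush_pending_idx collecting new requests while flush_running_idx names the list the hardware is draining, and the BUG_ON at line 258 asserts that a completion only arrives while the two indices differ. A compilable toy model of the index flip (all names hypothetical):

    #include <assert.h>
    #include <stdio.h>

    /* Toy model: two sides, one filling (pending) while the other drains. */
    struct flush_queue_model {
        int nr_queued[2];          /* stand-in for the two request lists */
        unsigned int pending_idx;  /* side where new flush requests land */
        unsigned int running_idx;  /* side the hardware is working on */
    };

    static void kick_flush(struct flush_queue_model *fq)
    {
        fq->pending_idx ^= 1; /* pending side becomes the running side */
    }

    static void flush_done(struct flush_queue_model *fq)
    {
        /* Mirrors the BUG_ON() in flush_end_io(): while a flush is in
         * flight the two indices must differ. */
        assert(fq->pending_idx != fq->running_idx);
        fq->nr_queued[fq->running_idx] = 0;
        fq->running_idx ^= 1; /* catch up with the pending side */
    }

    int main(void)
    {
        struct flush_queue_model fq = { {0, 0}, 0, 0 };

        fq.nr_queued[fq.pending_idx]++; /* queue one flush request */
        kick_flush(&fq);                /* start draining it */
        flush_done(&fq);                /* completion flips running_idx */
        printf("pending=%u running=%u\n", fq.pending_idx, fq.running_idx);
        return 0;
    }
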
/linux-6.3-rc2/net/ipv6/netfilter/

nf_conntrack_reasm.c
    136  struct frag_queue *fq;  in nf_ct_frag6_expire() local
    140  ip6frag_expire_frag_queue(fq->q.fqdir->net, fq);  in nf_ct_frag6_expire()
    204  if (end < fq->q.len ||  in nf_ct_frag6_queue()
    205  ((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len)) {  in nf_ct_frag6_queue()
    210  fq->q.len = end;  in nf_ct_frag6_queue()
    223  if (end > fq->q.len) {  in nf_ct_frag6_queue()
    229  fq->q.len = end;  in nf_ct_frag6_queue()
    268  fq->ecn |= ecn;  in nf_ct_frag6_queue()
    277  fq->nhoffset = nhoff;  in nf_ct_frag6_queue()
    282  fq->q.meat == fq->q.len) {  in nf_ct_frag6_queue()
    [all …]

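The checks at lines 204-229 enforce fragment consistency: a last fragment pins the total datagram length, no fragment may extend past a pinned length, and reassembly fires (line 282) once the byte count received (fq->q.meat) equals that length with both first and last fragments present. A compilable model of those rules with a simplified queue struct:

    #include <stdbool.h>

    #define FRAG_FIRST_IN 0x01 /* simplified INET_FRAG_FIRST_IN */
    #define FRAG_LAST_IN  0x02 /* simplified INET_FRAG_LAST_IN */

    /* Simplified stand-in for struct inet_frag_queue. */
    struct frag_queue_model {
        unsigned int flags;
        unsigned int len;  /* total datagram length, once known */
        unsigned int meat; /* bytes received so far */
    };

    /* Returns false if the fragment contradicts what we already know. */
    static bool frag_track(struct frag_queue_model *q, unsigned int offset,
                           unsigned int frag_len, bool last)
    {
        unsigned int end = offset + frag_len;

        if (last) {
            /* A last fragment may not shrink the datagram, and two
             * last fragments must agree on its length. */
            if (end < q->len ||
                ((q->flags & FRAG_LAST_IN) && end != q->len))
                return false;
            q->flags |= FRAG_LAST_IN;
            q->len = end;
        } else if (end > q->len) {
            /* Middle fragments may only extend len while no last
             * fragment has pinned it. */
            if (q->flags & FRAG_LAST_IN)
                return false;
            q->len = end;
        }

        if (offset == 0)
            q->flags |= FRAG_FIRST_IN;
        q->meat += frag_len; /* the real code also handles overlaps */
        return true;
    }

    /* Completion test corresponding to line 282 above. */
    static bool frag_complete(const struct frag_queue_model *q)
    {
        return (q->flags & FRAG_FIRST_IN) && (q->flags & FRAG_LAST_IN) &&
               q->meat == q->len;
    }

    int main(void)
    {
        struct frag_queue_model q = { 0, 0, 0 };

        frag_track(&q, 0, 1000, false);  /* first fragment */
        frag_track(&q, 1000, 200, true); /* last fragment: len = 1200 */
        return frag_complete(&q) ? 0 : 1;
    }
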
/linux-6.3-rc2/net/ieee802154/6lowpan/

reassembly.c
     47  struct frag_queue *fq;  in lowpan_frag_expire() local
     51  spin_lock(&fq->q.lock);  in lowpan_frag_expire()
     56  inet_frag_kill(&fq->q);  in lowpan_frag_expire()
     59  inet_frag_put(&fq->q);  in lowpan_frag_expire()
    108  if (end < fq->q.len ||  in lowpan_frag_queue()
    109  ((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len))  in lowpan_frag_queue()
    112  fq->q.len = end;  in lowpan_frag_queue()
    114  if (end > fq->q.len) {  in lowpan_frag_queue()
    118  fq->q.len = end;  in lowpan_frag_queue()
    141  fq->q.meat == fq->q.len) {  in lowpan_frag_queue()
    [all …]

/linux-6.3-rc2/net/ipv6/

reassembly.c
     76  struct frag_queue *fq;  in ip6_frag_expire() local
     80  ip6frag_expire_frag_queue(fq->q.fqdir->net, fq);  in ip6_frag_expire()
    152  ((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len))  in ip6_frag_queue()
    155  fq->q.len = end;  in ip6_frag_queue()
    171  fq->q.len = end;  in ip6_frag_queue()
    203  fq->ecn |= ecn;  in ip6_frag_queue()
    214  fq->nhoffset = nhoff;  in ip6_frag_queue()
    219  fq->q.meat == fq->q.len) {  in ip6_frag_queue()
    283  nhoff = fq->nhoffset;  in ip6_frag_reasm()
    381  if (fq) {  in ipv6_frag_rcv()
    [all …]

/linux-6.3-rc2/drivers/soc/fsl/qbman/

qman.c
   1514  fq->cb.fqs(p, fq, msg);  in qm_mr_process_task()
   1521  fq->cb.fqs(p, fq, msg);  in qm_mr_process_task()
   1533  fq->cb.ern(p, fq, msg);  in qm_mr_process_task()
   1819  fq_table[fq->idx] = fq;  in qman_create_fq()
   1887  qm_fqid_set(&mcc->fq, fq->fqid);  in qman_init_fq()
   1987  qm_fqid_set(&mcc->fq, fq->fqid);  in qman_schedule_fq()
   2030  qm_fqid_set(&mcc->fq, fq->fqid);  in qman_retire_fq()
   2075  fq->cb.fqs(p, fq, &msg);  in qman_retire_fq()
   2109  qm_fqid_set(&mcc->fq, fq->fqid);  in qman_oos_fq()
   2135  qm_fqid_set(&mcc->fq, fq->fqid);  in qman_query_fq()
   [all …]

qman_test_api.c
    112  if (qman_enqueue(fq, &fd)) {  in do_enqueues()
    126  struct qman_fq *fq = &fq_base;  in qman_test_api() local
    133  err = qman_create_fq(0, FQ_FLAGS, fq);  in qman_test_api()
    144  err = do_enqueues(fq);  in qman_test_api()
    154  err = do_enqueues(fq);  in qman_test_api()
    173  err = do_enqueues(fq);  in qman_test_api()
    177  err = qman_schedule_fq(fq);  in qman_test_api()
    185  err = qman_retire_fq(fq, &flags);  in qman_test_api()
    196  err = qman_oos_fq(fq);  in qman_test_api()
    201  qman_destroy_fq(fq);  in qman_test_api()
    [all …]

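qman_test_api() walks the whole frame-queue state machine: create, initialize, enqueue, schedule, retire, place out-of-service, destroy. A kernel-style sketch of that order, using the declarations visible in include/soc/fsl/qman.h at the bottom of this listing (not standalone; QMAN_FQ_FLAG_DYNAMIC_FQID is assumed to be what the test's FQ_FLAGS requests, and the qman_init_fq() options and frame-descriptor setup are elided):

    /* Kernel-style sketch of the FQ lifecycle driven by qman_test_api.c;
     * assumes <soc/fsl/qman.h>. Not a standalone program. */
    static int fq_lifecycle_sketch(void)
    {
        struct qman_fq fq = {};
        struct qm_fd fd = {}; /* frame descriptor; payload setup elided */
        u32 flags;
        int err;

        /* fqid 0 plus the dynamic flag asks QMan for a free FQID. */
        err = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, &fq);
        if (err)
            return err;

        /* ... qman_init_fq() with scheduling options goes here ... */

        err = qman_enqueue(&fq, &fd); /* hand one frame to the hardware */
        if (err)
            goto out;

        err = qman_schedule_fq(&fq); /* parked -> scheduled */
        if (err)
            goto out;

        err = qman_retire_fq(&fq, &flags); /* drain and retire */
        if (err < 0)
            goto out;

        err = qman_oos_fq(&fq); /* retired -> out of service */
    out:
        qman_destroy_fq(&fq); /* release the software-side FQ object */
        return err;
    }
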
/linux-6.3-rc2/drivers/net/ethernet/freescale/dpaa/

dpaa_eth_trace.h
     32  struct qman_fq *fq,
     36  TP_ARGS(netdev, fq, fd),
     54  __entry->fqid = fq->fqid;
     78  struct qman_fq *fq,
     81  TP_ARGS(netdev, fq, fd)
     88  struct qman_fq *fq,
     91  TP_ARGS(netdev, fq, fd)
     98  struct qman_fq *fq,
    101  TP_ARGS(netdev, fq, fd)

dpaa_eth_sysfs.c
     33  struct dpaa_fq *fq;  in dpaa_eth_show_fqids() local
     40  list_for_each_entry_safe(fq, tmp, &priv->dpaa_fq_list, list) {  in dpaa_eth_show_fqids()
     41  switch (fq->fq_type) {  in dpaa_eth_show_fqids()
     67  if (prev && (abs(fq->fqid - prev->fqid) != 1 ||  in dpaa_eth_show_fqids()
     78  if (prev && abs(fq->fqid - prev->fqid) == 1 &&  in dpaa_eth_show_fqids()
     80  last_fqid = fq->fqid;  in dpaa_eth_show_fqids()
     82  first_fqid = fq->fqid;  in dpaa_eth_show_fqids()
     83  last_fqid = fq->fqid;  in dpaa_eth_show_fqids()
     86  prev = fq;  in dpaa_eth_show_fqids()

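dpaa_eth_show_fqids() compresses the FQ list into ranges: while successive entries have adjacent FQIDs (lines 67 and 78 test abs(fq->fqid - prev->fqid) == 1, alongside a same-type check not shown here), it only widens [first_fqid, last_fqid], and prints the accumulated range when the run breaks. A compilable model of that coalescing over a plain ascending array (names hypothetical):

    #include <stdio.h>

    static void print_range(unsigned int first, unsigned int last)
    {
        if (first == last)
            printf("%u\n", first);
        else
            printf("%u - %u\n", first, last);
    }

    /* Coalesce runs of consecutive FQIDs, as dpaa_eth_show_fqids() does. */
    static void show_fqid_ranges(const unsigned int *fqid, unsigned int n)
    {
        unsigned int first = 0, last = 0, i;
        int have_run = 0;

        for (i = 0; i < n; i++) {
            if (have_run && fqid[i] == last + 1) {
                last = fqid[i]; /* extend the current run */
                continue;
            }
            if (have_run)
                print_range(first, last); /* run broken: flush it */
            first = last = fqid[i];
            have_run = 1;
        }
        if (have_run)
            print_range(first, last); /* flush the final run */
    }

    int main(void)
    {
        const unsigned int ids[] = { 256, 257, 258, 260, 261, 400 };

        /* Prints "256 - 258", "260 - 261", "400", one per line. */
        show_fqid_ranges(ids, sizeof(ids) / sizeof(ids[0]));
        return 0;
    }
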
dpaa_eth.c
    639  fq->wq = 1;  in dpaa_assign_wq()
    643  fq->wq = 5;  in dpaa_assign_wq()
    647  fq->wq = 6;  in dpaa_assign_wq()
    653  fq->wq = 6;  in dpaa_assign_wq()
    657  fq->wq = 2;  in dpaa_assign_wq()
    661  fq->wq = 1;  in dpaa_assign_wq()
    665  fq->wq = 0;  in dpaa_assign_wq()
    674  fq->fq_type, fq->fqid);  in dpaa_assign_wq()
    939  struct dpaa_fq *fq;  in dpaa_fq_setup() local
   1018  struct qman_fq *fq;  in dpaa_fq_init() local
   [all …]

/linux-6.3-rc2/drivers/crypto/caam/

qi.c
    252  ret = qman_retire_fq(fq, &flags);  in kill_fq()
    277  fq->fqid);  in kill_fq()
    282  ret = qman_oos_fq(fq);  in kill_fq()
    286  qman_destroy_fq(fq);  in kill_fq()
    287  kfree(fq);  in kill_fq()
    300  ret = qman_query_fq_np(fq, &np);  in empty_caam_fq()
    620  struct qman_fq *fq;  in alloc_rsp_fq_cpu() local
    623  fq = kzalloc(sizeof(*fq), GFP_KERNEL);  in alloc_rsp_fq_cpu()
    624  if (!fq)  in alloc_rsp_fq_cpu()
    633  kfree(fq);  in alloc_rsp_fq_cpu()
    [all …]

/linux-6.3-rc2/drivers/net/ethernet/freescale/dpaa2/

dpaa2-eth-debugfs.c
     47  static char *fq_type_to_str(struct dpaa2_eth_fq *fq)  in fq_type_to_str() argument
     49  switch (fq->type) {  in fq_type_to_str()
     62  struct dpaa2_eth_fq *fq;  in dpaa2_dbg_fqs_show() local
     71  fq = &priv->fq[i];  in dpaa2_dbg_fqs_show()
     72  err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt);  in dpaa2_dbg_fqs_show()
     77  if (!fq->stats.frames && !fcnt)  in dpaa2_dbg_fqs_show()
     81  fq->fqid,  in dpaa2_dbg_fqs_show()
     82  fq->target_cpu,  in dpaa2_dbg_fqs_show()
     83  fq->tc,  in dpaa2_dbg_fqs_show()
     84  fq_type_to_str(fq),  in dpaa2_dbg_fqs_show()
     [all …]

dpaa2-eth.c
    415  fq = &priv->fq[queue_id];  in dpaa2_eth_xdp_enqueue()
    735  fq->consume(priv, ch, fd, fq);  in dpaa2_eth_consume_frames()
   1481  fq = &priv->fq[queue_mapping];  in __dpaa2_eth_tx()
   2088  fq = &priv->fq[i];  in dpaa2_eth_set_rx_taildrop()
   2093  fq->tc, fq->flowid, &td);  in dpaa2_eth_set_rx_taildrop()
   2234  fq = &priv->fq[i];  in dpaa2_eth_ingress_fq_count()
   2869  fq = &priv->fq[smp_processor_id()];  in dpaa2_eth_xdp_xmit()
   2909  fq = &priv->fq[i % num_queues];  in update_xps()
   3305  fq = &priv->fq[i];  in dpaa2_eth_set_fq_affinity()
   3647  fq = &priv->fq[i];  in dpaa2_eth_update_tx_fqids()
   [all …]

dpaa2-xsk.c
     17  struct dpaa2_eth_fq *fq;  in dpaa2_eth_setup_consume_func() local
     21  fq = &priv->fq[i];  in dpaa2_eth_setup_consume_func()
     23  if (fq->type != type)  in dpaa2_eth_setup_consume_func()
     25  if (fq->channel != ch)  in dpaa2_eth_setup_consume_func()
     28  fq->consume = consume;  in dpaa2_eth_setup_consume_func()
    106  struct dpaa2_eth_fq *fq)  in dpaa2_xsk_rx() argument
    143  dpaa2_eth_receive_skb(priv, ch, fd, vaddr, fq, percpu_stats, skb);  in dpaa2_xsk_rx()
    399  struct dpaa2_eth_fq *fq;  in dpaa2_xsk_tx() local
    408  fq = &priv->fq[ch->nctx.desired_cpu];  in dpaa2_xsk_tx()
    431  err = priv->enqueue(priv, fq, &fds[total_enqueued], 0,  in dpaa2_xsk_tx()
    [all …]

dpaa2-switch.c
   2494  ethsw->fq[i].ethsw = ethsw;  in dpaa2_switch_setup_fqs()
   2498  ethsw->fq[i].ethsw = ethsw;  in dpaa2_switch_setup_fqs()
   2726  ethsw->fq[i].store =  in dpaa2_switch_alloc_rings()
   2729  if (!ethsw->fq[i].store) {  in dpaa2_switch_alloc_rings()
   2756  err = dpaa2_io_service_pull_fq(NULL, fq->fqid, fq->store);  in dpaa2_switch_pull_fq()
   2801  struct dpaa2_switch_fq *fq;  in dpaa2_switch_poll() local
   2841  struct dpaa2_switch_fq *fq;  in dpaa2_switch_fqdan_cb() local
   2845  napi_schedule(&fq->napi);  in dpaa2_switch_fqdan_cb()
   2855  nctx = &ethsw->fq[i].nctx;  in dpaa2_switch_setup_dpio()
   2862  nctx->id = ethsw->fq[i].fqid;  in dpaa2_switch_setup_dpio()
   [all …]

dpaa2-eth.h
    454  struct dpaa2_eth_fq *fq);
    552  struct dpaa2_eth_fq fq[DPAA2_ETH_MAX_QUEUES];  member
    554  struct dpaa2_eth_fq *fq,
    822  struct dpaa2_eth_fq *fq,
    829  struct dpaa2_eth_fq *fq);
    850  struct dpaa2_eth_fq *fq,

/linux-6.3-rc2/net/ipv4/

inet_fragment.c
    130  struct inet_frag_queue *fq = ptr;  in inet_frags_free_cb() local
    135  spin_lock_bh(&fq->lock);  in inet_frags_free_cb()
    136  fq->flags |= INET_FRAG_DROP;  in inet_frags_free_cb()
    143  spin_unlock_bh(&fq->lock);  in inet_frags_free_cb()
    146  inet_frag_destroy(fq);  in inet_frags_free_cb()
    228  if (del_timer(&fq->timer))  in inet_frag_kill()
    229  refcount_dec(&fq->refcnt);  in inet_frag_kill()
    244  refcount_dec(&fq->refcnt);  in inet_frag_kill()
    374  fq = prev;  in inet_frag_find()
    376  fq = NULL;  in inet_frag_find()
    [all …]

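The inet_frag_kill() excerpts (lines 228-244) show the timer-owns-a-reference idiom: the pending expiry timer holds one refcount on the queue, and del_timer() returns nonzero for exactly one caller, the one that cancels the still-pending timer, so only that caller drops the timer's reference; unhashing then drops the hash table's reference. A kernel-style sketch of the idiom (not standalone; the INET_FRAG_COMPLETE guard and the rhashtable removal are reconstructed from context, not shown in the excerpt):

    /* Kernel-style sketch of the refcount discipline in inet_frag_kill():
     * the expiry timer and the hash table each own one reference. */
    static void frag_kill_sketch(struct inet_frag_queue *fq)
    {
        /* Nonzero only if the timer was still pending, so exactly one
         * caller wins the race and drops the timer's reference. */
        if (del_timer(&fq->timer))
            refcount_dec(&fq->refcnt);

        if (!(fq->flags & INET_FRAG_COMPLETE)) {
            fq->flags |= INET_FRAG_COMPLETE;
            /* ... remove fq from the rhashtable ... */
            refcount_dec(&fq->refcnt); /* drop the table's reference */
        }
    }
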
/linux-6.3-rc2/drivers/iommu/

dma-iommu.c
    113  return (((fq->tail + 1) % IOVA_FQ_SIZE) == fq->head);  in fq_full()
    144  fq->head = (fq->head + 1) % IOVA_FQ_SIZE;  in fq_ring_free()
    167  fq = per_cpu_ptr(cookie->fq, cpu);  in fq_flush_timeout()
    178  struct iova_fq *fq;  in queue_iova() local
    191  fq = raw_cpu_ptr(cookie->fq);  in queue_iova()
    201  if (fq_full(fq)) {  in queue_iova()
    226  if (!cookie->fq)  in iommu_dma_free_fq()
    232  struct iova_fq *fq = per_cpu_ptr(cookie->fq, cpu);  in iommu_dma_free_fq() local
    263  fq->head = 0;  in iommu_dma_init_fq()
    264  fq->tail = 0;  in iommu_dma_init_fq()
    [all …]

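The per-CPU flush queue in dma-iommu.c is a fixed-size ring: line 113 is the classic one-slot-left-open full test, and line 144 advances head as entries whose IOTLB flush has completed are freed. A compilable model of the ring arithmetic (the IOVA_FQ_SIZE value and the entry payload are assumptions of this model):

    #include <stdbool.h>

    #define IOVA_FQ_SIZE 256 /* ring capacity; value assumed for the model */

    /* Toy model of struct iova_fq: head is the oldest entry, tail the next
     * free slot; one slot stays open so full and empty are distinguishable. */
    struct iova_fq_model {
        unsigned int head;
        unsigned int tail;
        unsigned long entries[IOVA_FQ_SIZE];
    };

    static bool fq_full(const struct iova_fq_model *fq)
    {
        return ((fq->tail + 1) % IOVA_FQ_SIZE) == fq->head;
    }

    /* Counterpart of fq_ring_free(): retire entries from the head once the
     * IOTLB flush covering them has completed. */
    static void fq_ring_free(struct iova_fq_model *fq)
    {
        while (fq->head != fq->tail) {
            /* ... return entries[fq->head] to the IOVA allocator ... */
            fq->head = (fq->head + 1) % IOVA_FQ_SIZE;
        }
    }

    /* Counterpart of queue_iova(): defer an IOVA instead of freeing it now. */
    static void queue_iova_model(struct iova_fq_model *fq, unsigned long iova)
    {
        if (fq_full(fq))
            fq_ring_free(fq); /* the real code flushes the IOTLB first */

        fq->entries[fq->tail] = iova;
        fq->tail = (fq->tail + 1) % IOVA_FQ_SIZE;
    }

    int main(void)
    {
        struct iova_fq_model fq = { 0, 0, { 0 } };
        unsigned long iova;

        for (iova = 0; iova < 300; iova++) /* wraps past IOVA_FQ_SIZE */
            queue_iova_model(&fq, iova);
        return 0;
    }
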
/linux-6.3-rc2/net/mac80211/

debugfs.c
     81  struct fq *fq = &local->fq;  in aqm_read() local
     85  spin_lock_bh(&local->fq.lock);  in aqm_read()
     99  fq->flows_cnt,  in aqm_read()
    100  fq->backlog,  in aqm_read()
    101  fq->overmemory,  in aqm_read()
    102  fq->overlimit,  in aqm_read()
    103  fq->collisions,  in aqm_read()
    104  fq->memory_usage,  in aqm_read()
    105  fq->memory_limit,  in aqm_read()
    106  fq->limit,  in aqm_read()
    [all …]

tx.c
   1381  struct fq *fq;  in codel_dequeue_func() local
   1386  fq = &local->fq;  in codel_dequeue_func()
   1420  local = container_of(fq, struct ieee80211_local, fq);  in fq_tin_dequeue_func()
   1448  static void fq_skb_free_func(struct fq *fq,  in fq_skb_free_func() argument
   1463  struct fq *fq = &local->fq;  in ieee80211_txq_enqueue() local
   1499  struct fq *fq = &local->fq;  in ieee80211_txq_remove_vlan() local
   1565  struct fq *fq = &local->fq;  in ieee80211_txq_purge() local
   1598  struct fq *fq = &local->fq;  in ieee80211_txq_setup_flows() local
   1649  struct fq *fq = &local->fq;  in ieee80211_txq_teardown_flows() local
   3377  struct fq *fq = &local->fq;  in ieee80211_amsdu_aggregate() local
   [all …]

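mac80211 embeds the generic struct fq inside struct ieee80211_local (note how most tx.c excerpts take &local->fq), so callbacks that receive only a struct fq * climb back to the enclosing driver context with container_of(), as at line 1420. A compilable user-space model of that recovery (types simplified; container_of defined locally, following the kernel's definition):

    #include <stddef.h>
    #include <stdio.h>

    /* Minimal container_of, following the kernel's definition. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct fq {
        unsigned int backlog;
    };

    /* Simplified stand-in for struct ieee80211_local embedding a fq. */
    struct local_model {
        int hw_id;
        struct fq fq; /* callbacks receive a pointer to this member only */
    };

    /* Shape of fq_tin_dequeue_func(): recover the driver context from the
     * embedded member, as tx.c does at line 1420. */
    static void dequeue_cb(struct fq *fq)
    {
        struct local_model *local = container_of(fq, struct local_model, fq);

        printf("dequeue on hw %d, backlog %u\n", local->hw_id, fq->backlog);
    }

    int main(void)
    {
        struct local_model local = { 7, { 3 } };

        dequeue_cb(&local.fq);
        return 0;
    }
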
/linux-6.3-rc2/net/xdp/

xsk_buff_pool.c
     94  pool->fq = xs->fq_tmp;  in xp_create_and_assign_umem()
    225  if (!pool->fq || !pool->cq)  in xp_assign_dev_shared()
    255  if (pool->fq) {  in xp_release_deferred()
    256  xskq_destroy(pool->fq);  in xp_release_deferred()
    257  pool->fq = NULL;  in xp_release_deferred()
    489  pool->fq->queue_empty_descs++;  in __xp_alloc()
    496  pool->fq->invalid_descs++;  in __xp_alloc()
    497  xskq_cons_release(pool->fq);  in __xp_alloc()
    512  xskq_cons_release(pool->fq);  in __xp_alloc()
    564  pool->fq->invalid_descs++;  in xp_alloc_new_from_fq()
    [all …]

xsk_diag.c
     70  if (!err && pool && pool->fq)  in xsk_diag_put_umem()
     71  err = xsk_diag_put_ring(pool->fq,  in xsk_diag_put_umem()
     86  du.n_fill_ring_empty = xs->pool ? xskq_nb_queue_empty_descs(xs->pool->fq) : 0;  in xsk_diag_put_stats()

/linux-6.3-rc2/include/soc/fsl/

qman.h
    300  } __packed fq; /* FQRN/FQRNI/FQRL/FQPN */  member
    691  struct qman_fq *fq,
    699  typedef void (*qman_cb_mr)(struct qman_portal *qm, struct qman_fq *fq,
    969  int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq);
    979  void qman_destroy_fq(struct qman_fq *fq);
    985  u32 qman_fq_fqid(struct qman_fq *fq);
   1030  int qman_schedule_fq(struct qman_fq *fq);
   1050  int qman_retire_fq(struct qman_fq *fq, u32 *flags);
   1059  int qman_oos_fq(struct qman_fq *fq);
   1078  int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr);
   [all …]
