| /linux/include/net/ |
| fq_impl.h |
|    40  static void fq_adjust_removal(struct fq *fq,  in fq_adjust_removal()  (argument)
|   197  static void fq_tin_enqueue(struct fq *fq,  in fq_tin_enqueue()  (argument)
|   234  oom = (fq->memory_usage > fq->memory_limit);  in fq_tin_enqueue()
|   235  while (fq->backlog > fq->limit || oom) {  in fq_tin_enqueue()
|   252  static void fq_flow_filter(struct fq *fq,  in fq_flow_filter()  (argument)
|   273  static void fq_tin_filter(struct fq *fq,  in fq_tin_filter()  (argument)
|   289  static void fq_flow_reset(struct fq *fq,  in fq_flow_reset()  (argument)
|   311  static void fq_tin_reset(struct fq *fq,  in fq_tin_reset()  (argument)
|   353  memset(fq, 0, sizeof(fq[0]));  in fq_init()
|   361  fq->flows = kvcalloc(fq->flows_cnt, sizeof(fq->flows[0]), GFP_KERNEL);  in fq_init()
|   [all …]
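
Lines 234-235 are the heart of fq_tin_enqueue()'s backpressure: after a packet is queued, flows are trimmed until both the packet backlog and the memory footprint drop back under their limits. A minimal self-contained sketch of that loop, with illustrative `toy_` types and a stubbed drop function, not the kernel's code:

```c
#include <stdbool.h>
#include <stddef.h>

struct toy_fq {
	unsigned int backlog;	/* packets queued across all flows */
	unsigned int limit;	/* packet limit */
	size_t memory_usage;	/* bytes held by queued packets */
	size_t memory_limit;	/* byte limit */
};

/* Stub: drop one packet from the fattest flow, return its size. */
static size_t toy_fq_drop_one(struct toy_fq *fq)
{
	fq->backlog--;
	return 1500;	/* stand-in for the dropped packet's truesize */
}

static void toy_fq_enforce_limits(struct toy_fq *fq)
{
	bool oom = fq->memory_usage > fq->memory_limit;

	/* mirrors fq_impl.h lines 234-235 */
	while (fq->backlog > fq->limit || oom) {
		fq->memory_usage -= toy_fq_drop_one(fq);
		oom = fq->memory_usage > fq->memory_limit;
	}
}
```
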
|
| ipv6_frag.h |
|    38  fq->ecn = 0;  in ip6frag_init()
|    51  return jhash2((const u32 *)&fq->key.v6,  in ip6frag_obj_hashfn()
|    72  if (READ_ONCE(fq->q.fqdir->dead))  in ip6frag_expire_frag_queue()
|    74  spin_lock(&fq->q.lock);  in ip6frag_expire_frag_queue()
|    76  if (fq->q.flags & INET_FRAG_COMPLETE)  in ip6frag_expire_frag_queue()
|    79  fq->q.flags |= INET_FRAG_DROP;  in ip6frag_expire_frag_queue()
|    80  inet_frag_kill(&fq->q);  in ip6frag_expire_frag_queue()
|    97  head = inet_frag_pull_head(&fq->q);  in ip6frag_expire_frag_queue()
|   102  spin_unlock(&fq->q.lock);  in ip6frag_expire_frag_queue()
|   109  spin_unlock(&fq->q.lock);  in ip6frag_expire_frag_queue()
|   [all …]
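
ip6frag_expire_frag_queue() shows the usual expiry shape: take the queue lock, bail out if reassembly already completed, otherwise mark the queue dropped and unlink it with inet_frag_kill(). A schematic userspace rendering, with a pthread mutex standing in for the spinlock and simplified flags:

```c
#include <pthread.h>

#define TOY_FRAG_COMPLETE 0x1	/* reassembly finished */
#define TOY_FRAG_DROP     0x2	/* freed fragments count as drops */

struct toy_frag_queue {
	pthread_mutex_t lock;	/* stands in for fq->q.lock */
	unsigned int flags;
};

static void toy_frag_unlink(struct toy_frag_queue *q)
{
	/* mirrors inet_frag_kill(): remove from the hash, stop the timer */
}

static void toy_frag_expire(struct toy_frag_queue *q)
{
	pthread_mutex_lock(&q->lock);

	if (q->flags & TOY_FRAG_COMPLETE)
		goto out;	/* reassembly won the race; nothing to do */

	q->flags |= TOY_FRAG_DROP;
	toy_frag_unlink(q);
out:
	pthread_mutex_unlock(&q->lock);
}
```
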
|
| fq.h |
|    68  struct fq {  (struct)
|    85  typedef struct sk_buff *fq_tin_dequeue_t(struct fq *,  (argument)
|    89  typedef void fq_skb_free_t(struct fq *,
|    95  typedef bool fq_skb_filter_t(struct fq *,
|
| /linux/block/ |
| blk-flush.c |
|   153  struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];  in blk_flush_complete_seq()
|   238  running = &fq->flush_queue[fq->flush_running_idx];  in flush_end_io()
|   239  BUG_ON(fq->flush_pending_idx == fq->flush_running_idx);  in flush_end_io()
|   278  struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];  in blk_kick_flush()
|   483  fq = kzalloc_node(sizeof(*fq), flags, node);  in blk_alloc_flush_queue()
|   484  if (!fq)  in blk_alloc_flush_queue()
|   491  if (!fq->flush_rq)  in blk_alloc_flush_queue()
|   497  return fq;  in blk_alloc_flush_queue()
|   500  kfree(fq);  in blk_alloc_flush_queue()
|   508  if (!fq)  in blk_free_flush_queue()
|   [all …]
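
blk-flush.c keeps two flush lists and toggles between them: new requests park on flush_queue[flush_pending_idx] while flush_queue[flush_running_idx] is in flight, and the BUG_ON() at line 239 asserts the indices differ once a flush completes. A toy model of that double buffering, with illustrative types rather than the block layer's:

```c
#include <assert.h>

struct toy_flush_queue {
	int pending_count[2];		/* stand-in for the two request lists */
	unsigned int pending_idx;	/* list collecting new requests */
	unsigned int running_idx;	/* list whose flush is in flight */
};

/* Issue a flush: only done when no flush is already running. */
static void toy_kick_flush(struct toy_flush_queue *fq)
{
	assert(fq->pending_idx == fq->running_idx);
	fq->pending_idx ^= 1;	/* new arrivals go to the other list */
}

/* Flush completed: mirrors the BUG_ON() in flush_end_io(). */
static void toy_flush_done(struct toy_flush_queue *fq)
{
	assert(fq->pending_idx != fq->running_idx);
	fq->pending_count[fq->running_idx] = 0;	/* batch is done */
	fq->running_idx ^= 1;	/* catch up with the pending side */
}
```
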
|
| /linux/drivers/net/ethernet/intel/libeth/ |
| rx.c |
|    76  switch (fq->type) {  in libeth_rx_page_pool_params()
|    78  fq->buf_len = libeth_rx_hw_len_mtu(pp, fq->buf_len);  in libeth_rx_page_pool_params()
|    81  fq->buf_len = libeth_rx_hw_len_truesize(pp, fq->buf_len,  in libeth_rx_page_pool_params()
|   120  switch (fq->type) {  in libeth_rx_page_pool_params_zc()
|   136  fq->truesize = fq->buf_len;  in libeth_rx_page_pool_params_zc()
|   154  .nid = fq->nid,  in libeth_rx_fq_create()
|   164  if (!fq->hsplit)  in libeth_rx_fq_create()
|   175  fqes = kvcalloc_node(fq->count, sizeof(*fqes), GFP_KERNEL, fq->nid);  in libeth_rx_fq_create()
|   179  fq->fqes = fqes;  in libeth_rx_fq_create()
|   180  fq->pp = pool;  in libeth_rx_fq_create()
|   [all …]
|
| /linux/net/ipv6/netfilter/ |
| nf_conntrack_reasm.c |
|   136  struct frag_queue *fq;  in nf_ct_frag6_expire()  (local)
|   140  ip6frag_expire_frag_queue(fq->q.fqdir->net, fq);  in nf_ct_frag6_expire()
|   208  if (end < fq->q.len ||  in nf_ct_frag6_queue()
|   209  ((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len)) {  in nf_ct_frag6_queue()
|   214  fq->q.len = end;  in nf_ct_frag6_queue()
|   227  if (end > fq->q.len) {  in nf_ct_frag6_queue()
|   233  fq->q.len = end;  in nf_ct_frag6_queue()
|   272  fq->ecn |= ecn;  in nf_ct_frag6_queue()
|   281  fq->nhoffset = nhoff;  in nf_ct_frag6_queue()
|   286  fq->q.meat == fq->q.len) {  in nf_ct_frag6_queue()
|   [all …]
|
| /linux/net/ieee802154/6lowpan/ |
| reassembly.c |
|    47  struct frag_queue *fq;  in lowpan_frag_expire()  (local)
|    51  spin_lock(&fq->q.lock);  in lowpan_frag_expire()
|    56  inet_frag_kill(&fq->q);  in lowpan_frag_expire()
|    59  inet_frag_put(&fq->q);  in lowpan_frag_expire()
|   108  if (end < fq->q.len ||  in lowpan_frag_queue()
|   109  ((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len))  in lowpan_frag_queue()
|   112  fq->q.len = end;  in lowpan_frag_queue()
|   114  if (end > fq->q.len) {  in lowpan_frag_queue()
|   118  fq->q.len = end;  in lowpan_frag_queue()
|   141  fq->q.meat == fq->q.len) {  in lowpan_frag_queue()
|   [all …]
|
| /linux/net/ipv6/ |
| reassembly.c |
|    76  struct frag_queue *fq;  in ip6_frag_expire()  (local)
|    80  ip6frag_expire_frag_queue(fq->q.fqdir->net, fq);  in ip6_frag_expire()
|   152  ((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len))  in ip6_frag_queue()
|   155  fq->q.len = end;  in ip6_frag_queue()
|   171  fq->q.len = end;  in ip6_frag_queue()
|   203  fq->ecn |= ecn;  in ip6_frag_queue()
|   214  fq->nhoffset = nhoff;  in ip6_frag_queue()
|   219  fq->q.meat == fq->q.len) {  in ip6_frag_queue()
|   283  nhoff = fq->nhoffset;  in ip6_frag_reasm()
|   381  if (fq) {  in ipv6_frag_rcv()
|   [all …]
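
The same length bookkeeping recurs in all three reassemblers above (nf_conntrack_reasm.c, 6lowpan/reassembly.c, ipv6/reassembly.c): a last fragment fixes the datagram's total length and may not contradict data already received, while a middle fragment may only grow the provisional length; reassembly finishes once `meat` (bytes received) equals `len`. A self-contained rendering with simplified types:

```c
#include <stdbool.h>

struct toy_frag_queue {
	unsigned int len;	/* total datagram length, once known */
	unsigned int meat;	/* bytes received so far; done when == len */
	bool last_in;		/* final fragment already seen */
};

/* Returns false when the fragment is inconsistent and must be dropped. */
static bool toy_frag_account(struct toy_frag_queue *q,
			     unsigned int end, bool is_last)
{
	if (is_last) {
		/* the last fragment fixes the total; it may not fall short
		 * of data we hold or contradict an earlier last fragment */
		if (end < q->len || (q->last_in && end != q->len))
			return false;
		q->last_in = true;
		q->len = end;
	} else if (end > q->len) {
		/* a middle fragment may only grow the provisional length */
		if (q->last_in)
			return false;	/* would extend past the fixed total */
		q->len = end;
	}
	return true;
}
```
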
|
| /linux/drivers/soc/fsl/qbman/ |
| qman.c |
|  1517  fq->cb.fqs(p, fq, msg);  in qm_mr_process_task()
|  1524  fq->cb.fqs(p, fq, msg);  in qm_mr_process_task()
|  1536  fq->cb.ern(p, fq, msg);  in qm_mr_process_task()
|  1822  fq_table[fq->idx] = fq;  in qman_create_fq()
|  1890  qm_fqid_set(&mcc->fq, fq->fqid);  in qman_init_fq()
|  1990  qm_fqid_set(&mcc->fq, fq->fqid);  in qman_schedule_fq()
|  2033  qm_fqid_set(&mcc->fq, fq->fqid);  in qman_retire_fq()
|  2078  fq->cb.fqs(p, fq, &msg);  in qman_retire_fq()
|  2112  qm_fqid_set(&mcc->fq, fq->fqid);  in qman_oos_fq()
|  2138  qm_fqid_set(&mcc->fq, fq->fqid);  in qman_query_fq()
|   [all …]
|
| qman_test_api.c |
|   112  if (qman_enqueue(fq, &fd)) {  in do_enqueues()
|   126  struct qman_fq *fq = &fq_base;  in qman_test_api()  (local)
|   133  err = qman_create_fq(0, FQ_FLAGS, fq);  in qman_test_api()
|   144  err = do_enqueues(fq);  in qman_test_api()
|   154  err = do_enqueues(fq);  in qman_test_api()
|   173  err = do_enqueues(fq);  in qman_test_api()
|   177  err = qman_schedule_fq(fq);  in qman_test_api()
|   185  err = qman_retire_fq(fq, &flags);  in qman_test_api()
|   196  err = qman_oos_fq(fq);  in qman_test_api()
|   201  qman_destroy_fq(fq);  in qman_test_api()
|   [all …]
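
The test above exercises a frame queue's full lifecycle: create, enqueue, schedule, retire, take out of service, destroy. A condensed sketch of the same sequence against the qman.h API listed further down; QMAN_FQ_FLAG_DYNAMIC_FQID is assumed in place of the test's FQ_FLAGS, and the qman_init_fq()/qman_enqueue() steps are only outlined, so treat this as a sketch rather than a verified driver:

```c
#include <soc/fsl/qman.h>

static int toy_fq_lifecycle(struct qman_fq *fq)
{
	u32 flags;
	int err;

	/* fqid 0 plus the dynamic flag asks QMan to allocate an FQID */
	err = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, fq);
	if (err)
		return err;

	/*
	 * ... qman_init_fq() would configure and schedule the queue,
	 * and qman_enqueue(fq, &fd) would push frame descriptors ...
	 */

	err = qman_schedule_fq(fq);	/* make it eligible for dequeue */
	if (err)
		goto out_destroy;

	err = qman_retire_fq(fq, &flags); /* begin draining/retirement */
	if (err < 0)
		goto out_destroy;

	err = qman_oos_fq(fq);		/* take it fully out of service */
out_destroy:
	qman_destroy_fq(fq);		/* release the software state */
	return err;
}
```
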
|
| /linux/drivers/net/ethernet/freescale/dpaa/ |
| dpaa_eth_trace.h |
|    32  struct qman_fq *fq,
|    36  TP_ARGS(netdev, fq, fd),
|    54  __entry->fqid = fq->fqid;
|    78  struct qman_fq *fq,
|    81  TP_ARGS(netdev, fq, fd)
|    88  struct qman_fq *fq,
|    91  TP_ARGS(netdev, fq, fd)
|    98  struct qman_fq *fq,
|   101  TP_ARGS(netdev, fq, fd)
|
| dpaa_eth_sysfs.c |
|    33  struct dpaa_fq *fq;  in dpaa_eth_show_fqids()  (local)
|    39  list_for_each_entry_safe(fq, tmp, &priv->dpaa_fq_list, list) {  in dpaa_eth_show_fqids()
|    40  switch (fq->fq_type) {  in dpaa_eth_show_fqids()
|    66  if (prev && (abs(fq->fqid - prev->fqid) != 1 ||  in dpaa_eth_show_fqids()
|    77  if (prev && abs(fq->fqid - prev->fqid) == 1 &&  in dpaa_eth_show_fqids()
|    79  last_fqid = fq->fqid;  in dpaa_eth_show_fqids()
|    81  first_fqid = fq->fqid;  in dpaa_eth_show_fqids()
|    82  last_fqid = fq->fqid;  in dpaa_eth_show_fqids()
|    85  prev = fq;  in dpaa_eth_show_fqids()
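
dpaa_eth_show_fqids() compresses runs of consecutive FQIDs into ranges instead of printing every ID, which is what the `abs(fq->fqid - prev->fqid) == 1` tests above implement. The same coalescing in a self-contained form, assuming an ascending input list (names are illustrative):

```c
#include <stdio.h>
#include <stddef.h>

/* Print "first - last" for each run of consecutive IDs. */
static void print_fqid_ranges(const unsigned int *fqids, size_t n)
{
	unsigned int first, last;
	size_t i;

	if (!n)
		return;

	first = last = fqids[0];
	for (i = 1; i <= n; i++) {
		if (i < n && fqids[i] == last + 1) {
			last = fqids[i];	/* extend the current run */
			continue;
		}
		if (first == last)
			printf("%u\n", first);
		else
			printf("%u - %u\n", first, last);
		if (i < n)
			first = last = fqids[i];	/* start a new run */
	}
}
```
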
|
| dpaa_eth.c |
|   643  fq->wq = 1;  in dpaa_assign_wq()
|   647  fq->wq = 5;  in dpaa_assign_wq()
|   651  fq->wq = 6;  in dpaa_assign_wq()
|   657  fq->wq = 6;  in dpaa_assign_wq()
|   661  fq->wq = 2;  in dpaa_assign_wq()
|   665  fq->wq = 1;  in dpaa_assign_wq()
|   669  fq->wq = 0;  in dpaa_assign_wq()
|   678  fq->fq_type, fq->fqid);  in dpaa_assign_wq()
|   943  struct dpaa_fq *fq;  in dpaa_fq_setup()  (local)
|  1016  struct qman_fq *fq;  in dpaa_fq_init()  (local)
|   [all …]
|
| /linux/drivers/net/ethernet/freescale/dpaa2/ |
| dpaa2-eth-debugfs.c |
|    47  static char *fq_type_to_str(struct dpaa2_eth_fq *fq)  in fq_type_to_str()  (argument)
|    49  switch (fq->type) {  in fq_type_to_str()
|    62  struct dpaa2_eth_fq *fq;  in dpaa2_dbg_fqs_show()  (local)
|    71  fq = &priv->fq[i];  in dpaa2_dbg_fqs_show()
|    72  err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt);  in dpaa2_dbg_fqs_show()
|    77  if (!fq->stats.frames && !fcnt)  in dpaa2_dbg_fqs_show()
|    81  fq->fqid,  in dpaa2_dbg_fqs_show()
|    82  fq->target_cpu,  in dpaa2_dbg_fqs_show()
|    83  fq->tc,  in dpaa2_dbg_fqs_show()
|    84  fq_type_to_str(fq),  in dpaa2_dbg_fqs_show()
|   [all …]
|
| dpaa2-eth.c |
|   415  fq = &priv->fq[queue_id];  in dpaa2_eth_xdp_enqueue()
|   739  fq->consume(priv, ch, fd, fq);  in dpaa2_eth_consume_frames()
|  1483  fq = &priv->fq[queue_mapping];  in __dpaa2_eth_tx()
|  2090  fq = &priv->fq[i];  in dpaa2_eth_set_rx_taildrop()
|  2095  fq->tc, fq->flowid, &td);  in dpaa2_eth_set_rx_taildrop()
|  2236  fq = &priv->fq[i];  in dpaa2_eth_ingress_fq_count()
|  2871  fq = &priv->fq[smp_processor_id()];  in dpaa2_eth_xdp_xmit()
|  2914  fq = &priv->fq[i % num_queues];  in update_xps()
|  3311  fq = &priv->fq[i];  in dpaa2_eth_set_fq_affinity()
|  3653  fq = &priv->fq[i];  in dpaa2_eth_update_tx_fqids()
|   [all …]
|
| dpaa2-xsk.c |
|    17  struct dpaa2_eth_fq *fq;  in dpaa2_eth_setup_consume_func()  (local)
|    21  fq = &priv->fq[i];  in dpaa2_eth_setup_consume_func()
|    23  if (fq->type != type)  in dpaa2_eth_setup_consume_func()
|    25  if (fq->channel != ch)  in dpaa2_eth_setup_consume_func()
|    28  fq->consume = consume;  in dpaa2_eth_setup_consume_func()
|   106  struct dpaa2_eth_fq *fq)  in dpaa2_xsk_rx()  (argument)
|   143  dpaa2_eth_receive_skb(priv, ch, fd, vaddr, fq, percpu_stats, skb);  in dpaa2_xsk_rx()
|   399  struct dpaa2_eth_fq *fq;  in dpaa2_xsk_tx()  (local)
|   408  fq = &priv->fq[ch->nctx.desired_cpu];  in dpaa2_xsk_tx()
|   431  err = priv->enqueue(priv, fq, &fds[total_enqueued], 0,  in dpaa2_xsk_tx()
|   [all …]
|
| dpaa2-switch.c |
|  2522  ethsw->fq[i].ethsw = ethsw;  in dpaa2_switch_setup_fqs()
|  2526  ethsw->fq[i].ethsw = ethsw;  in dpaa2_switch_setup_fqs()
|  2755  ethsw->fq[i].store =  in dpaa2_switch_alloc_rings()
|  2758  if (!ethsw->fq[i].store) {  in dpaa2_switch_alloc_rings()
|  2785  err = dpaa2_io_service_pull_fq(NULL, fq->fqid, fq->store);  in dpaa2_switch_pull_fq()
|  2830  struct dpaa2_switch_fq *fq;  in dpaa2_switch_poll()  (local)
|  2870  struct dpaa2_switch_fq *fq;  in dpaa2_switch_fqdan_cb()  (local)
|  2874  napi_schedule(&fq->napi);  in dpaa2_switch_fqdan_cb()
|  2884  nctx = &ethsw->fq[i].nctx;  in dpaa2_switch_setup_dpio()
|  2891  nctx->id = ethsw->fq[i].fqid;  in dpaa2_switch_setup_dpio()
|   [all …]
|
| /linux/drivers/crypto/caam/ |
| qi.c |
|   253  ret = qman_retire_fq(fq, &flags);  in kill_fq()
|   278  fq->fqid);  in kill_fq()
|   283  ret = qman_oos_fq(fq);  in kill_fq()
|   287  qman_destroy_fq(fq);  in kill_fq()
|   288  kfree(fq);  in kill_fq()
|   301  ret = qman_query_fq_np(fq, &np);  in empty_caam_fq()
|   622  struct qman_fq *fq;  in alloc_rsp_fq_cpu()  (local)
|   625  fq = kzalloc(sizeof(*fq), GFP_KERNEL);  in alloc_rsp_fq_cpu()
|   626  if (!fq)  in alloc_rsp_fq_cpu()
|   635  kfree(fq);  in alloc_rsp_fq_cpu()
|   [all …]
|
| /linux/net/ipv4/ |
| inet_fragment.c |
|   133  struct inet_frag_queue *fq = ptr;  in inet_frags_free_cb()  (local)
|   138  spin_lock_bh(&fq->lock);  in inet_frags_free_cb()
|   139  fq->flags |= INET_FRAG_DROP;  in inet_frags_free_cb()
|   146  spin_unlock_bh(&fq->lock);  in inet_frags_free_cb()
|   149  inet_frag_destroy(fq);  in inet_frags_free_cb()
|   231  if (del_timer(&fq->timer))  in inet_frag_kill()
|   232  refcount_dec(&fq->refcnt);  in inet_frag_kill()
|   247  refcount_dec(&fq->refcnt);  in inet_frag_kill()
|   377  fq = prev;  in inet_frag_find()
|   379  fq = NULL;  in inet_frag_find()
|   [all …]
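
Lines 231-232 capture a common kernel ownership rule: an armed timer holds a reference on the queue, so the path that successfully deletes the timer must also drop that reference. A toy model with C11 atomics standing in for the kernel's timer and refcount primitives:

```c
#include <stdatomic.h>
#include <stdbool.h>

struct toy_frag_queue {
	atomic_int refcnt;
	atomic_bool timer_pending;	/* stands in for a pending timer */
};

/* Stand-in for del_timer(): true iff we deactivated a pending timer. */
static bool toy_del_timer(struct toy_frag_queue *q)
{
	return atomic_exchange(&q->timer_pending, false);
}

static void toy_frag_kill(struct toy_frag_queue *q)
{
	/*
	 * The armed timer owns one reference on the queue, so the path
	 * that wins the cancellation also drops that reference, just as
	 * inet_frag_kill() pairs del_timer() with refcount_dec().
	 */
	if (toy_del_timer(q))
		atomic_fetch_sub(&q->refcnt, 1);
}
```
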
|
| /linux/drivers/iommu/ |
| dma-iommu.c |
|   130  for ((i) = (fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) & (fq)->mod_mask)
|   135  return (((fq->tail + 1) & fq->mod_mask) == fq->head);  in fq_full()
|   144  fq->tail = (idx + 1) & fq->mod_mask;  in fq_ring_add()
|   166  fq->head = (fq->head + 1) & fq->mod_mask;  in fq_ring_free_locked()
|   206  struct iova_fq *fq;  in queue_iova()  (local)
|   220  fq = cookie->single_fq;  in queue_iova()
|   233  if (fq_full(fq)) {  in queue_iova()
|   238  idx = fq_ring_add(fq);  in queue_iova()
|   260  vfree(fq);  in iommu_dma_free_fq_single()
|   294  fq->head = 0;  in iommu_dma_init_one_fq()
|   [all …]
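
The IOVA flush queue is a power-of-two ring indexed with `& mod_mask`, and one slot stays empty so that full and empty states are distinguishable. The ring arithmetic from the lines above, extracted into a self-contained form with the entry type reduced to an int:

```c
#include <stdbool.h>

struct toy_fq {
	int *entries;		/* ring storage; size is a power of two */
	unsigned int head;	/* oldest occupied slot */
	unsigned int tail;	/* next free slot */
	unsigned int mod_mask;	/* ring size - 1 */
};

/* walk the occupied slots, mirroring the for-macro at line 130 */
#define toy_fq_for_each(fq, i) \
	for ((i) = (fq)->head; (i) != (fq)->tail; \
	     (i) = ((i) + 1) & (fq)->mod_mask)

static bool toy_fq_full(const struct toy_fq *fq)
{
	/* one slot is sacrificed so full and empty differ */
	return ((fq->tail + 1) & fq->mod_mask) == fq->head;
}

static unsigned int toy_fq_add(struct toy_fq *fq, int value)
{
	unsigned int idx = fq->tail;

	fq->entries[idx] = value;
	fq->tail = (idx + 1) & fq->mod_mask;
	return idx;
}

static void toy_fq_pop(struct toy_fq *fq)
{
	fq->head = (fq->head + 1) & fq->mod_mask;
}
```
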
|
| /linux/include/net/libeth/ |
| rx.h |
|    89  int libeth_rx_fq_create(struct libeth_fq *fq, struct napi_struct *napi);
|    90  void libeth_rx_fq_destroy(struct libeth_fq *fq);
|   100  static inline dma_addr_t libeth_rx_alloc(const struct libeth_fq_fp *fq, u32 i)  in libeth_rx_alloc()  (argument)
|   102  struct libeth_fqe *buf = &fq->fqes[i];  in libeth_rx_alloc()
|   104  buf->truesize = fq->truesize;  in libeth_rx_alloc()
|   105  buf->page = page_pool_dev_alloc(fq->pp, &buf->offset, &buf->truesize);  in libeth_rx_alloc()
|   110  fq->pp->p.offset;  in libeth_rx_alloc()
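
Putting the libeth API together: a driver fills in a struct libeth_fq, creates it against its NAPI context, then calls libeth_rx_alloc() per descriptor slot. The sketch below infers the libeth_fq field set (count, nid, truesize, fqes, pp) and the libeth_fq_fp layout from the excerpts above; treat both, and the helper name, as assumptions rather than verified driver code:

```c
#include <net/libeth/rx.h>

static int toy_setup_rx_fill_queue(struct libeth_fq *fq,
				   struct napi_struct *napi, u32 ndesc)
{
	struct libeth_fq_fp fp;
	int err;
	u32 i;

	fq->count = ndesc;		/* buffers to manage */
	fq->nid = NUMA_NO_NODE;		/* let the allocator pick a node */
	/* fq->type / fq->buf_len selection omitted; see rx.c above */

	err = libeth_rx_fq_create(fq, napi);	/* fills fq->fqes, fq->pp */
	if (err)
		return err;

	/* the alloc fast path works on a trimmed-down view of the queue */
	fp = (struct libeth_fq_fp) {
		.pp		= fq->pp,
		.fqes		= fq->fqes,
		.truesize	= fq->truesize,
		.count		= fq->count,
	};

	for (i = 0; i < ndesc; i++) {
		dma_addr_t dma = libeth_rx_alloc(&fp, i);

		if (dma == DMA_MAPPING_ERROR) {
			libeth_rx_fq_destroy(fq);
			return -ENOMEM;
		}
		/* program dma into the i-th hardware Rx descriptor here */
	}

	return 0;
}
```
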
|
| /linux/net/xdp/ |
| xsk_buff_pool.c |
|    97  pool->fq = xs->fq_tmp;  in xp_create_and_assign_umem()
|   254  if (!pool->fq || !pool->cq)  in xp_assign_dev_shared()
|   284  if (pool->fq) {  in xp_release_deferred()
|   285  xskq_destroy(pool->fq);  in xp_release_deferred()
|   286  pool->fq = NULL;  in xp_release_deferred()
|   522  pool->fq->invalid_descs++;  in __xp_alloc()
|   523  xskq_cons_release(pool->fq);  in __xp_alloc()
|   538  xskq_cons_release(pool->fq);  in __xp_alloc()
|   589  pool->fq->invalid_descs++;  in xp_alloc_new_from_fq()
|   667  pool->fq->queue_empty_descs++;  in xp_alloc_batch()
|   [all …]
|
| /linux/net/mac80211/ |
| debugfs.c |
|    81  struct fq *fq = &local->fq;  in aqm_read()  (local)
|    85  spin_lock_bh(&local->fq.lock);  in aqm_read()
|    99  fq->flows_cnt,  in aqm_read()
|   100  fq->backlog,  in aqm_read()
|   101  fq->overmemory,  in aqm_read()
|   102  fq->overlimit,  in aqm_read()
|   103  fq->collisions,  in aqm_read()
|   104  fq->memory_usage,  in aqm_read()
|   105  fq->memory_limit,  in aqm_read()
|   106  fq->limit,  in aqm_read()
|   [all …]
|
| tx.c |
|  1363  struct fq *fq;  in codel_dequeue_func()  (local)
|  1368  fq = &local->fq;  in codel_dequeue_func()
|  1402  local = container_of(fq, struct ieee80211_local, fq);  in fq_tin_dequeue_func()
|  1430  static void fq_skb_free_func(struct fq *fq,  in fq_skb_free_func()  (argument)
|  1445  struct fq *fq = &local->fq;  in ieee80211_txq_enqueue()  (local)
|  1481  struct fq *fq = &local->fq;  in ieee80211_txq_remove_vlan()  (local)
|  1547  struct fq *fq = &local->fq;  in ieee80211_txq_purge()  (local)
|  1580  struct fq *fq = &local->fq;  in ieee80211_txq_setup_flows()  (local)
|  1631  struct fq *fq = &local->fq;  in ieee80211_txq_teardown_flows()  (local)
|  3404  struct fq *fq = &local->fq;  in ieee80211_amsdu_aggregate()  (local)
|   [all …]
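
mac80211 embeds its struct fq inside ieee80211_local, so callbacks that receive only the fq pointer recover the owning structure with container_of(), as at line 1402 above. The pattern in isolation, with an illustrative stand-in for ieee80211_local:

```c
#include <stddef.h>

struct fq {
	int backlog;		/* trimmed-down stand-in */
};

struct toy_local {		/* plays the role of ieee80211_local */
	int other_state;
	struct fq fq;		/* embedded, not pointed to */
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* A dequeue callback receives only the embedded fq... */
static struct toy_local *local_from_fq(struct fq *fq)
{
	/* ...and steps back to the owner, as tx.c line 1402 does */
	return container_of(fq, struct toy_local, fq);
}
```
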
|
| /linux/include/soc/fsl/ |
| qman.h |
|   300  } __packed fq; /* FQRN/FQRNI/FQRL/FQPN */  (member)
|   691  struct qman_fq *fq,
|   699  typedef void (*qman_cb_mr)(struct qman_portal *qm, struct qman_fq *fq,
|   969  int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq);
|   979  void qman_destroy_fq(struct qman_fq *fq);
|   985  u32 qman_fq_fqid(struct qman_fq *fq);
|  1030  int qman_schedule_fq(struct qman_fq *fq);
|  1050  int qman_retire_fq(struct qman_fq *fq, u32 *flags);
|  1059  int qman_oos_fq(struct qman_fq *fq);
|  1078  int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr);
|   [all …]
|