
Searched refs:q (Results 1 – 25 of 103) sorted by relevance


/net/sched/
sch_choke.c
77 return (q->tail - q->head) & q->tab_mask; in choke_len()
96 q->head = (q->head + 1) & q->tab_mask; in choke_zap_head_holes()
97 if (q->head == q->tail) in choke_zap_head_holes()
106 q->tail = (q->tail - 1) & q->tab_mask; in choke_zap_tail_holes()
107 if (q->head == q->tail) in choke_zap_tail_holes()
205 if (q->head == q->tail) in choke_match_random()
269 q->tail = (q->tail + 1) & q->tab_mask; in choke_enqueue()
294 skb = q->tab[q->head]; in choke_dequeue()
311 q->head = (q->head + 1) & q->tab_mask; in choke_reset()
386 q->head = (q->head + 1) & q->tab_mask; in choke_change()
[all …]
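All of the sch_choke.c hits above lean on one idiom: a ring buffer whose table size is a power of two, so head/tail arithmetic reduces modulo the size with a single AND against tab_mask. A minimal standalone sketch of that idiom, with names mirroring the snippets (the demo itself is an assumption, not kernel code):

#include <stdio.h>

#define TAB_SIZE 8			/* must be a power of two */

struct ring {
	unsigned int head;
	unsigned int tail;
	unsigned int tab_mask;		/* TAB_SIZE - 1 */
};

/* Occupancy in slots; the AND keeps the subtraction modulo TAB_SIZE,
 * exactly as in the choke_len() hit. */
static unsigned int ring_len(const struct ring *q)
{
	return (q->tail - q->head) & q->tab_mask;
}

int main(void)
{
	struct ring q = { .head = 0, .tail = 0, .tab_mask = TAB_SIZE - 1 };

	q.tail = (q.tail + 5) & q.tab_mask;	/* enqueue five slots */
	q.head = (q.head + 2) & q.tab_mask;	/* release two from the head */
	printf("len = %u\n", ring_len(&q));	/* prints 3 */
	return 0;
}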
sch_netem.c
302 return q->loss && q->loss >= get_crandom(&q->loss_cor, &q->prng); in loss_event()
385 rtnl_kfree_skbs(q->t_head, q->t_tail); in tfifo_reset()
463 if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor, &q->prng)) in netem_enqueue()
482 if (q->latency || q->jitter || q->rate) in netem_enqueue()
498 if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor, &q->prng)) { in netem_enqueue()
554 q->reorder < get_crandom(&q->reorder_cor, &q->prng)) { in netem_enqueue()
558 delay = tabledist(q->latency, q->jitter, in netem_enqueue()
559 &q->delay_cor, &q->prng, q->delay_dist); in netem_enqueue()
666 NULL, &q->prng, q->slot_dist); in get_slot_next()
776 skb = q->qdisc->ops->dequeue(q->qdisc); in netem_dequeue()
[all …]
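loss_event() in the first sch_netem.c hit is the usual "fire with probability p" test: a probability scaled to the full u32 range is compared against a fresh random draw, and a zero setting disables the event. A hedged standalone version, with plain rand() standing in for the kernel's correlated get_crandom() and percent_to_u32() as an assumed helper:

#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

/* Assumed helper: map a percentage onto the u32 comparison range. */
static uint32_t percent_to_u32(double pct)
{
	return (uint32_t)(pct / 100.0 * UINT32_MAX);
}

/* Fires when the scaled probability beats a uniform draw; zero
 * disables the event, matching the shape of the loss_event() hit. */
static bool loss_event(uint32_t loss)
{
	uint32_t r = ((uint32_t)rand() << 16) ^ (uint32_t)rand();

	return loss && loss >= r;
}

int main(void)
{
	int hits = 0;

	for (int i = 0; i < 100000; i++)
		hits += loss_event(percent_to_u32(10.0));
	return hits > 5000 && hits < 15000 ? 0 : 1;	/* ~10% expected */
}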
sch_dualpi2.c
201 q->c_protection_credit = q->c_protection_init; in dualpi2_reset_c_protection()
214 ((int)q->c_protection_wc - (int)q->c_protection_wl); in dualpi2_calculate_c_protection()
400 if (q->drop_early && must_drop(sch, q, skb)) { in dualpi2_enqueue_skb()
409 if (q->memory_used > q->max_memory_used) in dualpi2_enqueue_skb()
410 q->max_memory_used = q->memory_used; in dualpi2_enqueue_skb()
527 skb = __qdisc_dequeue_head(&q->l_queue->q); in dequeue_packet()
593 if (!q->drop_early && must_drop(sch, q, skb)) { in dualpi2_qdisc_dequeue()
720 hrtimer_set_expires(&q->pi2_timer, next_pi2_timeout(q)); in dualpi2_timer()
873 q->memory_used > q->memory_limit) { in dualpi2_change()
893 q->memory_limit = get_memory_limit(sch, q->sch->limit); in dualpi2_reset_default()
[all …]
sch_sfq.c
231 sfq_link(q, x); in sfq_dec()
244 sfq_link(q, x); in sfq_inc()
441 sfq_inc(q, x); in sfq_enqueue()
457 if (++sch->q.qlen <= q->limit) in sfq_enqueue()
496 sfq_dec(q, a); in sfq_dequeue()
614 if (!q->filter_list && q->tail) in sfq_perturbation()
733 while (sch->q.qlen > q->limit) { in sfq_change()
781 q->sch = sch; in sfq_init()
809 q->ht = sfq_alloc(sizeof(q->ht[0]) * q->divisor); in sfq_init()
810 q->slots = sfq_alloc(sizeof(q->slots[0]) * q->maxflows); in sfq_init()
[all …]
sch_sfb.c
192 memset(&q->bins, 0, sizeof(q->bins)); in sfb_zero_all_buckets()
202 const struct sfb_bucket *b = &q->bins[q->slot].bins[0][0]; in sfb_compute_qlen()
226 sfb_init_perturbation(q->slot, q); in sfb_swap_slot()
227 q->slot ^= 1; in sfb_swap_slot()
236 if (q->penalty_rate == 0 || q->penalty_burst == 0) in sfb_rate_limit()
242 q->tokens_avail = (age * q->penalty_rate) / HZ; in sfb_rate_limit()
243 if (q->tokens_avail > q->penalty_burst) in sfb_rate_limit()
244 q->tokens_avail = q->penalty_burst; in sfb_rate_limit()
296 if (unlikely(sch->q.qlen >= q->limit)) { in sfb_enqueue()
462 q->slot = 0; in sfb_reset()
[all …]
sch_fq_pie.c
188 sch->q.qlen++; in fq_pie_qdisc_enqueue()
266 sch->q.qlen--; in fq_pie_qdisc_dequeue()
272 if (head == &q->new_flows && !list_empty(&q->old_flows)) in fq_pie_qdisc_dequeue()
314 if (!q->flows_cnt || q->flows_cnt > 65536) { in fq_pie_change()
398 max_cnt = min_t(int, q->flows_cnt - q->flows_cursor, 2048); in fq_pie_timer()
401 &q->flows[q->flows_cursor].vars, in fq_pie_timer()
402 q->flows[q->flows_cursor].backlog); in fq_pie_timer()
408 if (q->flows_cursor >= q->flows_cnt) { in fq_pie_timer()
429 q->sch = sch; in fq_pie_init()
445 err = tcf_block_get(&q->block, &q->filter_list, sch, extack); in fq_pie_init()
[all …]
sch_fq_codel.c
181 sch->q.qlen -= i; in fq_codel_drop()
217 memory_limited = q->memory_usage > q->memory_limit; in fq_codel_enqueue()
266 q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb); in dequeue_func()
268 sch->q.qlen--; in dequeue_func()
310 if ((head == &q->new_flows) && !list_empty(&q->old_flows)) in fq_codel_dequeue()
348 memset(q->backlogs, 0, q->flows_cnt * sizeof(u32)); in fq_codel_reset()
379 if (q->flows) in fq_codel_change()
443 q->memory_usage > q->memory_limit) { in fq_codel_change()
492 err = tcf_block_get(&q->block, &q->filter_list, sch, extack); in fq_codel_init()
497 q->flows = kvcalloc(q->flows_cnt, in fq_codel_init()
[all …]
sch_red.c
79 q->vars.qavg = red_calc_qavg(&q->parms, in red_enqueue()
86 switch (red_action(&q->parms, &q->vars, q->vars.qavg)) { in red_enqueue()
113 if (red_use_harddrop(q) || !red_use_ecn(q)) { in red_enqueue()
200 opt.set.min = q->parms.qth_min >> q->parms.Wlog; in red_offload()
201 opt.set.max = q->parms.qth_max >> q->parms.Wlog; in red_offload()
304 if (!q->qdisc->q.qlen) in __red_change()
331 red_adaptative_algo(&q->parms, &q->vars); in red_adaptative_timer()
419 .qth_min = q->parms.qth_min >> q->parms.Wlog, in red_dump()
420 .qth_max = q->parms.qth_max >> q->parms.Wlog, in red_dump()
466 st.early = q->stats.prob_drop + q->stats.forced_drop; in red_dump_stats()
[all …]
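The Wlog shifts in the sch_red.c hits come from RED's fixed-point EWMA: the average queue size qavg is stored scaled up by 2^Wlog, which is why red_dump() shifts the thresholds right by Wlog before reporting them. A small model of the update rule (illustrative only; the kernel's red_calc_qavg() additionally accounts for idle periods):

#include <stdio.h>

#define WLOG 9	/* EWMA weight w = 2^-9, the classic RED default */

/* avg <- (1 - w)*avg + w*backlog, done in integer arithmetic by
 * keeping qavg scaled up by 2^WLOG. */
static unsigned long ewma(unsigned long qavg_scaled, unsigned int backlog)
{
	return qavg_scaled + backlog - (qavg_scaled >> WLOG);
}

int main(void)
{
	unsigned long qavg = 0;

	for (int i = 0; i < 5000; i++)
		qavg = ewma(qavg, 100);		/* steady 100-packet backlog */
	printf("qavg ~= %lu\n", qavg >> WLOG);	/* converges toward 100 */
	return 0;
}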
sch_fq.c
327 if (q->flows != q->inactive_flows + q->throttled_flows) in fq_fastpath_check()
339 if (q->time_next_delayed_flow <= now + q->offload_horizon) in fq_fastpath_check()
403 root = &q->fq_root[hash_ptr(sk, q->fq_trees_log)]; in fq_classify()
458 q->flows++; in fq_classify()
499 sch->q.qlen--; in fq_dequeue_skb()
612 if (q->time_next_delayed_flow > now + q->offload_horizon) in fq_check_throttled()
620 q->unthrottle_latency_ns -= q->unthrottle_latency_ns >> 3; in fq_check_throttled()
673 pband = &q->band_flows[q->band_nr]; in fq_dequeue()
680 pband = &q->band_flows[q->band_nr]; in fq_dequeue()
893 if (q->fq_root && log == q->fq_trees_log) in fq_resize()
[all …]
sch_cbs.c
100 sch->q.qlen++; in cbs_child_enqueue()
120 if (sch->q.qlen == 0 && q->credits > 0) { in cbs_enqueue_soft()
171 sch->q.qlen--; in cbs_child_dequeue()
187 qdisc_watchdog_schedule_ns(&q->watchdog, q->last); in cbs_dequeue_soft()
191 credits = timediff_to_credits(now - q->last, q->idleslope); in cbs_dequeue_soft()
194 q->credits = min_t(s64, credits, q->hicredit); in cbs_dequeue_soft()
199 delay = delay_from_credits(q->credits, q->idleslope); in cbs_dequeue_soft()
220 q->credits = max_t(s64, credits, q->locredit); in cbs_dequeue_soft()
223 q->last = now; in cbs_dequeue_soft()
414 if (!q->qdisc) in cbs_init()
[all …]
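The sch_cbs.c hits trace the 802.1Qav credit-based shaper: credit accrues at idleslope while the queue waits, clamped to hicredit, drains at sendslope while sending, clamped to locredit, and a negative balance is turned back into a watchdog delay. A simplified model with assumed units (credit in bits, nanosecond timestamps), not the kernel's exact fixed-point scaling:

#include <stdint.h>

struct cbs {
	int64_t credits;		/* current balance, bits */
	int64_t hicredit, locredit;	/* clamp band */
	int64_t idleslope;		/* credit gained per second, idle */
	int64_t sendslope;		/* credit lost per second, sending */
	int64_t last;			/* previous update timestamp, ns */
};

static int64_t clamp64(int64_t v, int64_t lo, int64_t hi)
{
	return v < lo ? lo : v > hi ? hi : v;
}

/* Mirrors the shape of the cbs_dequeue_soft() hits: convert elapsed
 * time into credit at the active slope, then clamp to the band. */
void cbs_update(struct cbs *q, int64_t now, int sending)
{
	int64_t slope = sending ? q->sendslope : q->idleslope;

	q->credits += (now - q->last) * slope / 1000000000LL;
	q->credits = clamp64(q->credits, q->locredit, q->hicredit);
	q->last = now;
}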
sch_skbprio.c
44 for (prio = q->highest_prio - 1; prio >= q->lowest_prio; prio--) { in calc_new_high_prio()
96 sch->q.qlen++; in skbprio_enqueue()
125 if (q->lowest_prio == q->highest_prio) { in skbprio_enqueue()
129 q->lowest_prio = calc_new_low_prio(q); in skbprio_enqueue()
142 struct sk_buff_head *hpq = &q->qdiscs[q->highest_prio]; in skbprio_dequeue()
148 sch->q.qlen--; in skbprio_dequeue()
152 q->qstats[q->highest_prio].backlog -= qdisc_pkt_len(skb); in skbprio_dequeue()
156 if (q->lowest_prio == q->highest_prio) { in skbprio_dequeue()
160 q->highest_prio = calc_new_high_prio(q); in skbprio_dequeue()
188 memset(&q->qstats, 0, sizeof(q->qstats)); in skbprio_init()
[all …]
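The sch_skbprio.c hits maintain cached bounds on the occupied priorities: highest_prio and lowest_prio are only rescanned when the queue at a boundary drains, as calc_new_high_prio() in the first hit shows. A fixed-size sketch of that bookkeeping, with assumed names:

#include <stdint.h>

#define SKBPRIO_MAX 64

struct skbprio {
	uint32_t qlen[SKBPRIO_MAX];	/* packets queued per priority */
	int highest_prio;
	int lowest_prio;
};

/* Mirrors the calc_new_high_prio() hit: walk down from the old
 * maximum toward the known minimum; everything in between is empty
 * by construction, so the first occupied slot is the new maximum. */
int calc_new_high_prio(const struct skbprio *q)
{
	int prio;

	for (prio = q->highest_prio - 1; prio >= q->lowest_prio; prio--)
		if (q->qlen[prio])
			return prio;
	return q->lowest_prio;	/* range collapses to a single slot */
}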
sch_hhf.c
228 if (q->hh_flows_current_cnt >= q->hh_flows_limit) { in alloc_new_hh()
259 prev = q->hhf_arrays_reset_timestamp + q->hhf_reset_timeout; in hhf_classify()
271 flow = seek_list(hash, &q->hh_flows[flow_pos], q); in hhf_classify()
307 flow = alloc_new_hh(&q->hh_flows[flow_pos], q); in hhf_classify()
362 sch->q.qlen--; in hhf_drop()
445 sch->q.qlen--; in hhf_dequeue()
451 if ((head == &q->new_buckets) && !list_empty(&q->old_buckets)) in hhf_dequeue()
481 if (!q->hh_flows) in hhf_destroy()
564 qlen = sch->q.qlen; in hhf_change()
586 get_random_bytes(&q->perturbation, sizeof(q->perturbation)); in hhf_init()
[all …]
sch_cake.c
1926 if (q->buffer_used > q->buffer_max_used) in cake_enqueue()
1927 q->buffer_max_used = q->buffer_used; in cake_enqueue()
1929 if (q->buffer_used > q->buffer_limit) { in cake_enqueue()
1971 for (q->cur_flow = 0; q->cur_flow < CAKE_QUEUES; q->cur_flow++) in cake_clear_tin()
2020 if (q->cur_tin >= q->tin_cnt) { in cake_dequeue()
2551 q->rate_ns = q->tins[ft].tin_rate_ns; in cake_reconfigure()
2552 q->rate_shft = q->tins[ft].tin_rate_shft; in cake_reconfigure()
2555 q->buffer_limit = q->buffer_config_limit; in cake_reconfigure()
2557 u64 t = q->rate_bps * q->interval; in cake_reconfigure()
2567 q->buffer_limit = min(q->buffer_limit, in cake_reconfigure()
[all …]
sch_qfq.c
314 q->iwsum = ONE_FP / q->wsum; in qfq_update_agg()
342 q->iwsum = ONE_FP / q->wsum; in qfq_destroy_agg()
345 q->in_serv_agg = qfq_choose_next_agg(q); in qfq_destroy_agg()
788 q->bitmaps[dst] |= q->bitmaps[src] & mask; in qfq_move_groups()
820 unsigned long vslot = q->V >> q->min_slot_shift; in qfq_make_eligible()
978 ineligible = q->bitmaps[IR] | q->bitmaps[IB]; in qfq_update_eligible()
1174 q->V += (u64)len * q->iwsum; in qfq_dequeue()
1189 q->oldV = q->V; in qfq_choose_next_agg()
1194 grp = qfq_ffs(q, q->bitmaps[ER]); in qfq_choose_next_agg()
1351 q->oldV = q->V = agg->S; in qfq_activate_agg()
[all …]
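q->V += (u64)len * q->iwsum in the sch_qfq.c dequeue hit is the fair-queueing virtual clock: V should advance by len/wsum per dequeued packet, and the division is kept off the fast path by caching the fixed-point inverse iwsum = ONE_FP / wsum whenever the weight sum changes (the first two hits). A sketch with illustrative constants:

#include <stdint.h>

#define FRAC_BITS 30			/* illustrative fixed-point width */
#define ONE_FP (1ULL << FRAC_BITS)

struct qfq {
	uint64_t V;	/* system virtual time, fixed point */
	uint32_t wsum;	/* sum of the weights of active aggregates */
	uint64_t iwsum;	/* cached ONE_FP / wsum */
};

/* Recomputed only when wsum changes, as in the first two hits. */
void qfq_update_iwsum(struct qfq *q)
{
	q->iwsum = ONE_FP / q->wsum;
}

/* V += len / wsum, as a single multiply on the dequeue fast path. */
void qfq_advance_v(struct qfq *q, unsigned int len)
{
	q->V += (uint64_t)len * q->iwsum;
}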
sch_multiq.c
79 sch->q.qlen++; in multiq_enqueue()
96 q->curband++; in multiq_dequeue()
97 if (q->curband >= q->bands) in multiq_dequeue()
98 q->curband = 0; in multiq_dequeue()
105 qdisc = q->queues[q->curband]; in multiq_dequeue()
109 sch->q.qlen--; in multiq_dequeue()
155 q->curband = 0; in multiq_reset()
168 kfree(q->queues); in multiq_destroy()
195 for (i = q->bands; i < q->max_bands; i++) { in multiq_tune()
246 err = tcf_block_get(&q->block, &q->filter_list, sch, extack); in multiq_init()
[all …]
sch_plug.c
105 if (q->throttled) in plug_dequeue()
113 q->throttled = true; in plug_dequeue()
116 q->pkts_to_release--; in plug_dequeue()
128 q->pkts_last_epoch = 0; in plug_init()
129 q->pkts_to_release = 0; in plug_init()
144 q->throttled = true; in plug_init()
171 q->pkts_last_epoch = q->pkts_current_epoch; in plug_change()
174 q->throttled = true; in plug_change()
181 q->pkts_to_release += q->pkts_last_epoch; in plug_change()
183 q->throttled = false; in plug_change()
[all …]
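The sch_plug.c hits outline the plug/unplug protocol: packets arriving in the current epoch are held, a buffer command seals the epoch, and a release command makes the sealed epoch's packets eligible for dequeue. A rough model of the state transitions suggested by the plug_change() and plug_dequeue() lines above; the function names and exact semantics here are assumptions:

#include <stdbool.h>

struct plug {
	unsigned int pkts_current_epoch;	/* buffered since last plug */
	unsigned int pkts_last_epoch;		/* sealed by the last plug */
	unsigned int pkts_to_release;		/* dequeue budget */
	bool throttled;
};

/* "Plug": seal the running epoch and start buffering a new one. */
void plug_buffer(struct plug *q)
{
	q->pkts_last_epoch = q->pkts_current_epoch;
	q->pkts_current_epoch = 0;
	q->throttled = true;
}

/* "Release one": the sealed epoch becomes eligible for dequeue. */
void plug_release_one(struct plug *q)
{
	q->pkts_to_release += q->pkts_last_epoch;
	q->pkts_last_epoch = 0;
	q->throttled = false;
}

/* Dequeue side, per the plug_dequeue() hits: spend the budget, then
 * re-throttle when it runs out. Returns true if a packet may leave. */
bool plug_may_dequeue(struct plug *q)
{
	if (q->throttled)
		return false;
	if (!q->pkts_to_release) {
		q->throttled = true;
		return false;
	}
	q->pkts_to_release--;
	return true;
}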
sch_tbf.c
266 sch->q.qlen++; in tbf_enqueue()
280 skb = q->qdisc->ops->peek(q->qdisc); in tbf_dequeue()
289 toks = min_t(s64, now - q->t_c, q->buffer); in tbf_dequeue()
307 q->t_c = now; in tbf_dequeue()
311 sch->q.qlen--; in tbf_dequeue()
341 q->tokens = q->buffer; in tbf_reset()
342 q->ptokens = q->mtu; in tbf_reset()
456 old = q->qdisc; in tbf_change()
461 q->mtu = mtu; in tbf_change()
469 q->tokens = q->buffer; in tbf_change()
[all …]
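The sch_tbf.c hits show a token bucket whose tokens are denominated in time: min_t(s64, now - q->t_c, q->buffer) caps the credit earned since the last checkpoint at one burst worth of nanoseconds, and a packet may leave once the balance covers its serialization time. A standalone sketch under those assumptions (single rate, no peakrate/ptokens path):

#include <stdbool.h>
#include <stdint.h>

struct tbf {
	int64_t tokens;		/* banked transmit time, ns */
	int64_t buffer;		/* burst cap, ns */
	int64_t t_c;		/* last checkpoint, ns */
	int64_t rate;		/* bytes per second */
};

bool tbf_try_send(struct tbf *q, int64_t now, unsigned int len)
{
	/* Credit earned since the checkpoint, capped at one burst. */
	int64_t toks = now - q->t_c;

	if (toks > q->buffer)
		toks = q->buffer;
	toks += q->tokens;

	/* Spend the serialization time of len bytes at the shaped rate. */
	toks -= (int64_t)len * 1000000000LL / q->rate;
	if (toks < 0)
		return false;	/* caller would arm a watchdog for -toks */

	q->t_c = now;
	q->tokens = toks < q->buffer ? toks : q->buffer;
	return true;
}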
sch_ingress.c
53 return q->block; in ingress_tcf_block()
101 q->block_info.chain_head_change_priv = &q->miniqp; in ingress_init()
103 err = tcf_block_get_ext(&q->block, sch, &q->block_info, extack); in ingress_init()
107 mini_qdisc_pair_block_init(&q->miniqp, q->block); in ingress_init()
121 tcf_block_put_ext(q->block, sch, &q->block_info); in ingress_destroy()
267 q->ingress_block_info.chain_head_change_priv = &q->miniqp_ingress; in clsact_init()
269 err = tcf_block_get_ext(&q->ingress_block, sch, &q->ingress_block_info, in clsact_init()
274 mini_qdisc_pair_block_init(&q->miniqp_ingress, q->ingress_block); in clsact_init()
286 q->egress_block_info.chain_head_change_priv = &q->miniqp_egress; in clsact_init()
301 tcf_block_put_ext(q->ingress_block, sch, &q->ingress_block_info); in clsact_destroy()
[all …]
sch_gred.c
177 if (!q) { in gred_enqueue()
212 q->vars.qavg = red_calc_qavg(&q->parms, in gred_enqueue()
222 switch (red_action(&q->parms, &q->vars, q->vars.qavg + qavg)) { in gred_enqueue()
238 if (gred_use_harddrop(q) || !gred_use_ecn(q) || in gred_enqueue()
303 if (!q) in gred_reset()
336 if (!q) in gred_offload()
405 kfree(q); in gred_destroy_vq()
498 if (!q) in gred_change_vq()
793 max_p[i] = q ? q->parms.max_P : 0; in gred_dump()
840 qavg = red_calc_qavg(&q->parms, &q->vars, in gred_dump()
[all …]
sch_htb.c
595 WARN_ON(cl->level || !cl->leaf.q || !cl->leaf.q->q.qlen); in htb_activate()
629 if (q->direct_queue.qlen < q->direct_qlen) { in htb_enqueue()
895 if (unlikely(cl->leaf.q->q.qlen == 0)) { in htb_dequeue_tree()
911 skb = cl->leaf.q->dequeue(cl->leaf.q); in htb_dequeue_tree()
933 if (!cl->leaf.q->q.qlen) in htb_dequeue_tree()
1011 if (cl->leaf.q && !q->offload) in htb_reset()
1020 memset(q->hlevel, 0, sizeof(q->hlevel)); in htb_reset()
1021 memset(q->row_mask, 0, sizeof(q->row_mask)); in htb_reset()
1098 q->direct_qdiscs = kcalloc(q->num_direct_qdiscs, in htb_init()
1547 struct Qdisc *q = cl->leaf.q; in htb_destroy_class_offload() local
[all …]
sch_etf.c
82 if (q->skip_sock_check) in is_packet_valid()
101 now = q->get_time(); in is_packet_valid()
192 sch->q.qlen++; in etf_enqueue_timesortedlist()
225 sch->q.qlen--; in timesortedlist_drop()
250 sch->q.qlen--; in timesortedlist_remove()
263 now = q->get_time(); in etf_dequeue_timesortedlist()
303 if (!q->offload) in etf_disable_offload()
310 etf.queue = q->queue; in etf_disable_offload()
412 qdisc_watchdog_init_clockid(&q->watchdog, sch, q->clockid); in etf_init()
429 sch->q.qlen--; in timesortedlist_clear()
[all …]
sch_ets.c
134 q->prio2band, sizeof(q->prio2band)); in ets_offload_change()
300 if (!ets_class_is_strict(q, cl) && sch->q.qlen) in ets_class_qlen_notify()
407 return &q->classes[q->prio2band[band & TC_PRIO_MAX]]; in ets_classify()
413 return &q->classes[q->prio2band[0]]; in ets_classify()
448 sch->q.qlen++; in ets_qdisc_enqueue()
457 sch->q.qlen--; in ets_qdisc_dequeue_skb()
655 if (i >= q->nstrict && q->classes[i].qdisc->q.qlen) in ets_qdisc_change()
662 if (q->classes[i].qdisc->q.qlen) { in ets_qdisc_change()
663 list_add_tail(&q->classes[i].alist, &q->active); in ets_qdisc_change()
718 for (band = q->nstrict; band < q->nbands; band++) { in ets_qdisc_reset()
[all …]
sch_pie.c
93 q->stats.overlimit++; in pie_qdisc_enqueue()
99 if (!pie_drop_early(sch, &q->params, &q->vars, sch->qstats.backlog, in pie_qdisc_enqueue()
102 } else if (q->params.ecn && (q->vars.prob <= MAX_PROB / 10) && in pie_qdisc_enqueue()
107 q->stats.ecn_mark++; in pie_qdisc_enqueue()
125 q->stats.dropped++; in pie_qdisc_enqueue()
196 qlen = sch->q.qlen; in pie_change()
427 struct pie_sched_data *q = timer_container_of(q, t, adapt_timer); in pie_timer() local
434 pie_calculate_probability(&q->params, &q->vars, sch->qstats.backlog); in pie_timer()
438 mod_timer(&q->adapt_timer, jiffies + q->params.tupdate); in pie_timer()
452 q->sch = sch; in pie_init()
[all …]
/net/xdp/
xsk_queue.h
130 if (q->cached_cons != q->cached_prod) { in xskq_cons_read_addr_unchecked()
194 return q->cached_cons != q->cached_prod; in xskq_has_descs()
212 if (q->cached_cons != q->cached_prod) { in xskq_cons_read_desc()
311 if (q->cached_prod == q->cached_cons) in xskq_cons_peek_addr_unchecked()
320 if (q->cached_prod == q->cached_cons) in xskq_cons_peek_desc()
349 u32 free_entries = q->nentries - (q->cached_prod - q->cached_cons); in xskq_prod_nb_free()
356 free_entries = q->nentries - (q->cached_prod - q->cached_cons); in xskq_prod_nb_free()
416 idx = q->cached_prod++ & q->ring_mask; in xskq_prod_reserve_desc()
431 __xskq_prod_submit(q, q->cached_prod); in xskq_prod_submit()
449 return q ? q->invalid_descs : 0; in xskq_nb_invalid_descs()
[all …]
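The xsk_queue.h hits use free-running 32-bit producer and consumer indices: the ring holds cached_prod - cached_cons entries, free space is nentries minus that difference, and unsigned wraparound keeps both expressions correct with no modulo. A standalone sketch with assumed names:

#include <stdbool.h>
#include <stdint.h>

struct xq {
	uint32_t nentries;	/* ring size, a power of two */
	uint32_t ring_mask;	/* nentries - 1, for slot indexing */
	uint32_t cached_prod;	/* free-running producer index */
	uint32_t cached_cons;	/* free-running consumer index */
};

bool xq_has_descs(const struct xq *q)
{
	return q->cached_cons != q->cached_prod;
}

/* Free slots; correct across u32 wraparound because the subtraction
 * is itself modulo 2^32, as in the xskq_prod_nb_free() hits. */
uint32_t xq_free_entries(const struct xq *q)
{
	return q->nentries - (q->cached_prod - q->cached_cons);
}

/* Claim the next slot, in the style of the xskq_prod_reserve_desc()
 * hit: only the low bits index the ring; the counter keeps running. */
uint32_t xq_next_slot(struct xq *q)
{
	return q->cached_prod++ & q->ring_mask;
}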
/net/ipv4/
ip_fragment.c
85 struct ipq *qp = container_of(q, struct ipq, q); in ip4_frag_init()
90 q->key.v4 = *key; in ip4_frag_init()
106 qp = container_of(q, struct ipq, q); in ip4_frag_free()
211 if (!q) in ip_find()
214 return container_of(q, struct ipq, q); in ip_find()
245 if (!mod_timer(&qp->q.timer, jiffies + qp->q.fqdir->timeout)) { in ip_frag_reinit()
254 qp->q.flags = 0; in ip_frag_reinit()
255 qp->q.len = 0; in ip_frag_reinit()
256 qp->q.meat = 0; in ip_frag_reinit()
308 ((qp->q.flags & INET_FRAG_LAST_IN) && end != qp->q.len)) in ip_frag_queue()
[all …]
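Several ip_fragment.c hits recover the outer struct ipq from a pointer to its embedded struct inet_frag_queue member named q. That is container_of(), which is just pointer arithmetic on offsetof; a minimal re-derivation (the kernel's version adds type checking):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct inner { int len; };
struct outer { int id; struct inner q; };

int main(void)
{
	struct outer o = { .id = 42 };
	struct inner *ip = &o.q;
	struct outer *op = container_of(ip, struct outer, q);

	printf("id = %d\n", op->id);	/* prints 42 */
	return 0;
}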
