| /net/sched/ |
| sch_pie.c |
     34  struct pie_vars *vars, u32 backlog, u32 packet_size)  in pie_drop_early() argument
     54  if (backlog < 2 * mtu)  in pie_drop_early()
     99  if (!pie_drop_early(sch, &q->params, &q->vars, sch->qstats.backlog,  in pie_qdisc_enqueue()
    211  struct pie_vars *vars, u32 backlog)  in pie_process_dequeue() argument
    227  if (backlog == 0)  in pie_process_dequeue()
    279  if (backlog < QUEUE_THRESHOLD) {  in pie_process_dequeue()
    303  u32 backlog)  in pie_calculate_probability() argument
    318  qdelay = (backlog << PIE_SCALE) / vars->avg_dq_rate;  in pie_calculate_probability()
    329  if (qdelay == 0 && backlog != 0)  in pie_calculate_probability()
    405  vars->backlog_old = backlog;  in pie_calculate_probability()
    [all …]
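The match at 318 is the heart of PIE's delay estimate: queueing delay is derived from the byte backlog divided by a fixed-point average dequeue rate, so no floating point is needed in softirq context. A minimal userspace sketch of that arithmetic, assuming PIE_SCALE is the kernel's fixed-point shift of 8 and a rate expressed in bytes per microsecond (the kernel uses its own time units):

#include <stdint.h>
#include <stdio.h>

#define PIE_SCALE 8 /* fixed-point shift, as in include/net/pie.h */

/* Model of line 318: delay = (backlog << PIE_SCALE) / avg_dq_rate.
 * avg_dq_rate is assumed here to be bytes/us, pre-scaled by PIE_SCALE. */
static uint32_t pie_qdelay_us(uint32_t backlog, uint32_t avg_dq_rate)
{
    if (avg_dq_rate == 0)
        return 0; /* no dequeue-rate estimate yet */
    return ((uint64_t)backlog << PIE_SCALE) / avg_dq_rate;
}

int main(void)
{
    uint32_t rate = 125u << PIE_SCALE; /* 125 bytes/us ~= 1 Gbit/s */
    printf("estimated delay: %u us\n", pie_qdelay_us(64 * 1024, rate));
    return 0;
}

For 64 KB draining at ~1 Gbit/s this prints roughly 524 us, matching the back-of-envelope figure (65536 * 8 bits / 1e9 bit/s).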
|
| sch_gred.c |
    118  return sch->qstats.backlog;  in gred_backlog()
    120  return q->backlog;  in gred_backlog()
    248  q->backlog += qdisc_pkt_len(skb);  in gred_enqueue()
    276  q->backlog -= qdisc_pkt_len(skb);  in gred_dequeue()
    279  if (!sch->qstats.backlog)  in gred_dequeue()
    282  if (!q->backlog)  in gred_dequeue()
    307  q->backlog = 0;  in gred_reset()
    346  opt->set.tab[i].backlog = &q->backlog;  in gred_offload()
    386  table->tab[i]->backlog += hw_stats->stats.qstats[i].backlog;  in gred_offload_dump_stats()
    391  sch->qstats.backlog += hw_stats->stats.qstats[i].backlog;  in gred_offload_dump_stats()
    [all …]
|
| sch_fq_pie.c |
     48  u32 backlog;  member
    168  sel_flow->backlog, skb->len)) {  in fq_pie_qdisc_enqueue()
    187  sch->qstats.backlog += pkt_len;  in fq_pie_qdisc_enqueue()
    195  sel_flow->backlog = 0;  in fq_pie_qdisc_enqueue()
    198  sel_flow->backlog += pkt_len;  in fq_pie_qdisc_enqueue()
    265  sch->qstats.backlog -= pkt_len;  in fq_pie_qdisc_dequeue()
    281  flow->backlog -= pkt_len;  in fq_pie_qdisc_dequeue()
    283  pie_process_dequeue(skb, &q->p_params, &flow->vars, flow->backlog);  in fq_pie_qdisc_dequeue()
    402  q->flows[q->flows_cursor].backlog);  in fq_pie_timer()
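The pairs at 187/198 and 265/281 show fq_pie charging each packet's length to both the qdisc-wide qstats.backlog and the selected flow's private backlog, so PIE can run per flow while the parent still sees aggregate bytes. A toy model of that dual accounting (struct and function names are illustrative, not the kernel's):

#include <stdint.h>
#include <stdio.h>

struct flow  { uint32_t backlog; };
struct qdisc { uint32_t backlog; struct flow flows[4]; };

static void enqueue(struct qdisc *q, unsigned int idx, uint32_t pkt_len)
{
    q->flows[idx].backlog += pkt_len; /* sel_flow->backlog += pkt_len */
    q->backlog += pkt_len;            /* sch->qstats.backlog += pkt_len */
}

static void dequeue(struct qdisc *q, unsigned int idx, uint32_t pkt_len)
{
    q->flows[idx].backlog -= pkt_len; /* flow->backlog -= pkt_len */
    q->backlog -= pkt_len;            /* sch->qstats.backlog -= pkt_len */
}

int main(void)
{
    struct qdisc q = { 0 };
    enqueue(&q, 0, 1500);
    enqueue(&q, 1, 100);
    dequeue(&q, 0, 1500);
    printf("flow 1: %u bytes, qdisc total: %u bytes\n",
           q.flows[1].backlog, q.backlog);
    return 0;
}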
|
| sch_sfq.c |
    103  unsigned int backlog;  member
    301  slot->backlog -= len;  in sfq_drop()
    377  slot->backlog);  in sfq_enqueue()
    428  sch->qstats.backlog -= delta;  in sfq_enqueue()
    429  slot->backlog -= delta;  in sfq_enqueue()
    439  slot->backlog += qdisc_pkt_len(skb);  in sfq_enqueue()
    500  slot->backlog -= qdisc_pkt_len(skb);  in sfq_dequeue()
    552  slot->backlog = 0;  in sfq_rehash()
    583  slot->backlog);  in sfq_rehash()
    584  slot->backlog += qdisc_pkt_len(skb);  in sfq_rehash()
    [all …]
|
| sch_skbprio.c |
     87  q->qstats[prio].backlog += qdisc_pkt_len(skb);  in skbprio_enqueue()
    110  q->qstats[prio].backlog += qdisc_pkt_len(skb);  in skbprio_enqueue()
    119  q->qstats[lp].backlog -= qdisc_pkt_len(to_drop);  in skbprio_enqueue()
    152  q->qstats[q->highest_prio].backlog -= qdisc_pkt_len(skb);  in skbprio_dequeue()
|
| sch_fifo.c |
     22  if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <=  in bfifo_enqueue()
     49  prev_backlog = sch->qstats.backlog;  in pfifo_tail_enqueue()
     55  qdisc_tree_reduce_backlog(sch, 0, prev_backlog - sch->qstats.backlog);  in pfifo_tail_enqueue()
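Line 22 is bfifo's entire admission policy: the qdisc is byte-limited, so a packet is accepted only if the current backlog plus its own length stays within the limit. A standalone rendering of that check:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Byte-mode FIFO admission: accept iff the new packet still fits. */
static bool bfifo_would_accept(uint32_t backlog, uint32_t pkt_len,
                               uint32_t limit)
{
    return backlog + pkt_len <= limit;
}

int main(void)
{
    /* 9000 bytes queued, 10000-byte limit: a 1500-byte packet is refused */
    printf("%s\n", bfifo_would_accept(9000, 1500, 10000) ? "accept" : "drop");
    return 0;
}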
|
| sch_fq_codel.c |
    180  sch->qstats.backlog -= len;  in fq_codel_drop()
    221  prev_backlog = sch->qstats.backlog;  in fq_codel_enqueue()
    234  prev_backlog -= sch->qstats.backlog;  in fq_codel_enqueue()
    269  sch->qstats.backlog -= qdisc_pkt_len(skb);  in dequeue_func()
    304  skb = codel_dequeue(sch, &sch->qstats.backlog, &q->cparams,  in fq_codel_dequeue()
    678  qs.backlog = q->backlogs[idx];  in fq_codel_dump_class_stats()
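Lines 221/234 show the snapshot-and-diff pattern (also visible in sch_hhf.c below and in pfifo_tail_enqueue() above): record the byte backlog before an operation that may drop packets, then pass prev_backlog minus the new backlog to qdisc_tree_reduce_backlog() so ancestor qdiscs reduce their counters by exactly the freed bytes. A sketch of the idea:

#include <stdint.h>
#include <stdio.h>

static uint32_t backlog = 9000; /* stands in for sch->qstats.backlog */

/* Stand-in for a routine that may free queued bytes (fq_codel_drop()). */
static void drop_some(void)
{
    backlog -= 3000;
}

int main(void)
{
    uint32_t prev_backlog = backlog; /* snapshot before dropping */
    drop_some();
    /* the kernel now reports the delta upward:
     * qdisc_tree_reduce_backlog(sch, dropped, prev_backlog - backlog) */
    printf("freed %u bytes\n", prev_backlog - backlog);
    return 0;
}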
|
| sch_hhf.c |
    405  prev_backlog = sch->qstats.backlog;  in hhf_enqueue()
    414  qdisc_tree_reduce_backlog(sch, 1, prev_backlog - sch->qstats.backlog);  in hhf_enqueue()
    565  prev_backlog = sch->qstats.backlog;  in hhf_change()
    572  prev_backlog - sch->qstats.backlog);  in hhf_change()
|
| sch_codel.c |
     45  sch->qstats.backlog -= qdisc_pkt_len(skb);  in dequeue_func()
     64  skb = codel_dequeue(sch, &sch->qstats.backlog, &q->params, &q->vars,  in codel_qdisc_dequeue()
|
| sch_sfb.c |
    409  sch->qstats.backlog += len;  in sfb_enqueue()
    585  sch->qstats.backlog = q->qdisc->qstats.backlog;  in sfb_dump()
|
| sch_plug.c |
     92  if (likely(sch->qstats.backlog + skb->len <= q->limit)) {  in plug_enqueue()
|
| sch_tbf.c |
    234  sch->qstats.backlog += len;  in tbf_segment()
    265  sch->qstats.backlog += len;  in tbf_enqueue()
|
| /net/core/ |
| gen_stats.c |
    349  qstats->backlog += qcpu->backlog;  in gnet_stats_add_queue_cpu()
    364  qstats->backlog += q->backlog;  in gnet_stats_add_queue()
    399  d->tc_stats.backlog = qstats.backlog;  in gnet_stats_copy_queue()
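gnet_stats_add_queue_cpu() folds per-CPU queue counters into a single struct before they are copied to userspace at 399. A simplified model, assuming a plain array stands in for the kernel's per-CPU storage:

#include <stdint.h>
#include <stdio.h>

struct qstats { uint32_t qlen; uint32_t backlog; };

/* Model of gnet_stats_add_queue_cpu(): sum each CPU's counters. */
static void add_queue_cpu(struct qstats *sum, const struct qstats *percpu,
                          int ncpus)
{
    for (int i = 0; i < ncpus; i++) {
        sum->qlen    += percpu[i].qlen;
        sum->backlog += percpu[i].backlog;
    }
}

int main(void)
{
    struct qstats cpu[2] = { { 3, 4500 }, { 1, 1500 } };
    struct qstats total = { 0, 0 };
    add_queue_cpu(&total, cpu, 2);
    printf("qlen=%u backlog=%u\n", total.qlen, total.backlog);
    return 0;
}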
|
| /net/rxrpc/ |
| call_accept.c |
    164  struct rxrpc_backlog *b = rx->backlog;  in rxrpc_service_prealloc()
    170  rx->backlog = b;  in rxrpc_service_prealloc()
    181  struct rxrpc_backlog *b = rx->backlog;  in rxrpc_discard_prealloc()
    187  rx->backlog = NULL;  in rxrpc_discard_prealloc()
    253  struct rxrpc_backlog *b = rx->backlog;  in rxrpc_alloc_incoming_call()
    446  struct rxrpc_backlog *b = rx->backlog;  in rxrpc_user_charge_accept()
    475  struct rxrpc_backlog *b = rx->backlog;  in rxrpc_kernel_charge_accept()
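rxrpc keeps a preallocated rxrpc_backlog of call slots that userspace "charges" in advance (rxrpc_user_charge_accept()), so rxrpc_alloc_incoming_call() never has to allocate in softirq context. The ring-buffer sketch below is only an analogy for that charge/consume life cycle; the size and all names are invented, not rxrpc's:

#include <stdbool.h>
#include <stdio.h>

#define SLOTS 16 /* invented size; rxrpc sizes its backlog differently */

struct call { int id; };

/* One side charges slots ahead of time; the other consumes them
 * without allocating -- the property rxrpc needs in softirq context. */
struct backlog_ring {
    struct call *slot[SLOTS];
    unsigned int head, tail;
};

static bool ring_charge(struct backlog_ring *b, struct call *c)
{
    unsigned int next = (b->tail + 1) % SLOTS;
    if (next == b->head)
        return false; /* fully charged already */
    b->slot[b->tail] = c;
    b->tail = next;
    return true;
}

static struct call *ring_take(struct backlog_ring *b)
{
    if (b->head == b->tail)
        return NULL; /* nothing preallocated: refuse the incoming call */
    struct call *c = b->slot[b->head];
    b->head = (b->head + 1) % SLOTS;
    return c;
}

int main(void)
{
    struct backlog_ring b = { .head = 0, .tail = 0 };
    struct call c = { .id = 1 };
    ring_charge(&b, &c);
    struct call *got = ring_take(&b);
    printf("got call %d\n", got ? got->id : -1);
    return 0;
}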
|
| af_rxrpc.c |
    210  static int rxrpc_listen(struct socket *sock, int backlog)  in rxrpc_listen() argument
    217  _enter("%p,%d", rx, backlog);  in rxrpc_listen()
    230  if (backlog == INT_MAX)  in rxrpc_listen()
    231  backlog = max;  in rxrpc_listen()
    232  else if (backlog < 0 || backlog > max)  in rxrpc_listen()
    235  sk->sk_max_ack_backlog = backlog;  in rxrpc_listen()
    243  if (backlog == 0) {  in rxrpc_listen()
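rxrpc_listen() treats INT_MAX as "use the maximum", rejects anything outside [0, max], and stores the result in sk_max_ack_backlog (lines 230-235). A standalone rendering of that validation, where max stands in for rxrpc's internal ceiling on preallocated calls:

#include <limits.h>
#include <stdio.h>

static int validate_backlog(int backlog, int max, int *out)
{
    if (backlog == INT_MAX)
        backlog = max;              /* INT_MAX means "as many as allowed" */
    else if (backlog < 0 || backlog > max)
        return -1;                  /* the kernel returns -EINVAL here */
    *out = backlog;                 /* becomes sk_max_ack_backlog */
    return 0;
}

int main(void)
{
    int v;
    if (validate_backlog(INT_MAX, 32, &v) == 0)
        printf("accepted backlog %d\n", v);
    return 0;
}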
|
| /net/sunrpc/ |
| stats.c |
    158  ktime_t backlog, execute, now;  in rpc_count_iostats_metrics() local
    174  backlog = 0;  in rpc_count_iostats_metrics()
    176  backlog = ktime_sub(req->rq_xtime, task->tk_start);  in rpc_count_iostats_metrics()
    177  op_metrics->om_queue = ktime_add(op_metrics->om_queue, backlog);  in rpc_count_iostats_metrics()
    189  trace_rpc_stats_latency(req->rq_task, backlog, req->rq_rtt, execute);  in rpc_count_iostats_metrics()
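Here "backlog" is a latency, not a byte count: line 176 computes how long a request waited between task start (tk_start) and hitting the wire (rq_xtime). A userspace analogue of that measurement, using CLOCK_MONOTONIC timestamps instead of ktime_t:

#include <stdio.h>
#include <time.h>

/* Microseconds between two CLOCK_MONOTONIC samples. */
static long elapsed_us(struct timespec a, struct timespec b)
{
    return (b.tv_sec - a.tv_sec) * 1000000L +
           (b.tv_nsec - a.tv_nsec) / 1000L;
}

int main(void)
{
    struct timespec start, xmit;
    clock_gettime(CLOCK_MONOTONIC, &start); /* task->tk_start analogue */
    /* ... the request would sit on a transmit queue here ... */
    clock_gettime(CLOCK_MONOTONIC, &xmit);  /* req->rq_xtime analogue */
    printf("backlog wait: %ld us\n", elapsed_us(start, xmit));
    return 0;
}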
|
| xprt.c |
    1597  xprt->stat.bklog_u += xprt->backlog.qlen;  in xprt_request_transmit()
    1662  rpc_sleep_on(&xprt->backlog, task, xprt_complete_request_init);  in xprt_add_backlog()
    1680  if (rpc_wake_up_first(&xprt->backlog, __xprt_set_rq, req) == NULL) {  in xprt_wake_up_backlog()
    2047  rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");  in xprt_init()
    2111  rpc_destroy_wait_queue(&xprt->backlog);  in xprt_destroy_cb()
    2211  !xprt->backlog.qlen && !atomic_long_read(&xprt->queuelen))  in xprt_delete_locked()
|
| /net/tipc/ |
| link.c |
     178  } backlog[5];  member
     921  avail[imp] = l->backlog[imp].limit - l->backlog[imp].len;  in link_prepare_wakeup()
     979  l->backlog[imp].len = 0;  in tipc_link_reset()
     980  l->backlog[imp].target_bskb = NULL;  in tipc_link_reset()
    1046  if (unlikely(l->backlog[imp].len >= l->backlog[imp].limit)) {  in tipc_link_xmit()
    1086  l->backlog[imp].target_bskb = skb;  in tipc_link_xmit()
    1087  l->backlog[imp].len++;  in tipc_link_xmit()
    1098  l->backlog[imp].target_bskb = NULL;  in tipc_link_xmit()
    1171  l->backlog[imp].len--;  in tipc_link_advance_backlog()
    1173  l->backlog[imp].target_bskb = NULL;  in tipc_link_advance_backlog()
    [all …]
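TIPC keeps a {len, limit} pair per message importance level (the five-entry array at 178): line 1046 refuses to queue once len reaches limit, and line 921 computes the wakeup credit as the remaining headroom. A sketch of those two checks, with invented initial values:

#include <stdbool.h>
#include <stdio.h>

#define TIPC_IMP_LEVELS 5 /* matches the backlog[5] member above */

struct link {
    struct { unsigned int len, limit; } backlog[TIPC_IMP_LEVELS];
};

/* Line 1046: refuse to queue once this importance level is full. */
static bool link_may_queue(const struct link *l, int imp)
{
    return l->backlog[imp].len < l->backlog[imp].limit;
}

/* Line 921: headroom used to decide how many waiting senders to wake. */
static unsigned int link_wakeup_credit(const struct link *l, int imp)
{
    return l->backlog[imp].limit - l->backlog[imp].len;
}

int main(void)
{
    struct link l = { .backlog = { [0] = { .len = 3, .limit = 50 } } };
    printf("may_queue=%d credit=%u\n",
           link_may_queue(&l, 0), link_wakeup_credit(&l, 0));
    return 0;
}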
|
| /net/ |
| socket.c |
    1902  int __sys_listen_socket(struct socket *sock, int backlog)  in __sys_listen_socket() argument
    1907  if ((unsigned int)backlog > somaxconn)  in __sys_listen_socket()
    1908  backlog = somaxconn;  in __sys_listen_socket()
    1910  err = security_socket_listen(sock, backlog);  in __sys_listen_socket()
    1912  err = READ_ONCE(sock->ops)->listen(sock, backlog);  in __sys_listen_socket()
    1916  int __sys_listen(int fd, int backlog)  in __sys_listen() argument
    1927  return __sys_listen_socket(sock, backlog);  in __sys_listen()
    1930  SYSCALL_DEFINE2(listen, int, fd, int, backlog)  in SYSCALL_DEFINE2() argument
    1932  return __sys_listen(fd, backlog);  in SYSCALL_DEFINE2()
    3589  int kernel_listen(struct socket *sock, int backlog)  in kernel_listen() argument
    [all …]
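__sys_listen_socket() silently clamps the requested backlog to net.core.somaxconn (lines 1907-1908); the unsigned cast means a negative value also lands on the cap rather than failing. From userspace that makes over-asking harmless, as this small listener shows:

#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
    int fd = socket(AF_INET, SOCK_STREAM, 0);
    struct sockaddr_in addr;

    memset(&addr, 0, sizeof(addr));
    addr.sin_family = AF_INET;
    addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
    addr.sin_port = 0; /* any free port */
    if (fd < 0 || bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
        perror("socket/bind");
        return 1;
    }
    /* 4096 may exceed net.core.somaxconn; the kernel caps it silently */
    if (listen(fd, 4096) < 0) {
        perror("listen");
        return 1;
    }
    puts("listening (backlog capped at somaxconn)");
    close(fd);
    return 0;
}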
|
| /net/atm/ |
| svc.c |
    287  static int svc_listen(struct socket *sock, int backlog)  in svc_listen() argument
    320  sk->sk_max_ack_backlog = backlog > 0 ? backlog : ATM_BACKLOG_DEFAULT;  in svc_listen()
|
| /net/llc/ |
| af_llc.c |
    553  static int llc_ui_listen(struct socket *sock, int backlog)  in llc_ui_listen() argument
    568  if (!(unsigned int)backlog) /* BSDism */  in llc_ui_listen()
    569  backlog = 1;  in llc_ui_listen()
    570  sk->sk_max_ack_backlog = backlog;  in llc_ui_listen()
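svc_listen() above and llc_ui_listen() here illustrate two protocol-specific readings of listen(fd, 0): ATM substitutes a default, while LLC bumps zero to one (the commented "BSDism"). Side by side, with the ATM default value treated as illustrative:

#include <stdio.h>

#define ATM_BACKLOG_DEFAULT 32 /* illustrative value; see net/atm/svc.c */

static int atm_effective_backlog(int backlog)
{
    return backlog > 0 ? backlog : ATM_BACKLOG_DEFAULT;
}

static int llc_effective_backlog(int backlog)
{
    return backlog ? backlog : 1; /* BSDism: listen(fd, 0) still queues one */
}

int main(void)
{
    printf("listen(fd, 0): atm=%d llc=%d\n",
           atm_effective_backlog(0), llc_effective_backlog(0));
    return 0;
}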
|
| /net/ipv6/ |
| ioam6.c |
    785  __u32 qlen, backlog;  in __ioam6_fill_trace_data() local
    792  qdisc_qstats_qlen_backlog(qdisc, &qlen, &backlog);  in __ioam6_fill_trace_data()
    794  *(__be32 *)data = cpu_to_be32(backlog);  in __ioam6_fill_trace_data()
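__ioam6_fill_trace_data() samples the egress qdisc's queue depth and writes the byte backlog into the IOAM trace as a big-endian 32-bit field (line 794). A userspace sketch of that serialization, with a made-up backlog value:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    uint32_t backlog = 123456; /* made-up; the kernel reads it via
                                  qdisc_qstats_qlen_backlog() */
    unsigned char data[4];
    uint32_t be = htonl(backlog); /* kernel: cpu_to_be32(backlog) */

    memcpy(data, &be, sizeof(be));
    printf("trace bytes: %02x %02x %02x %02x\n",
           data[0], data[1], data[2], data[3]);
    return 0;
}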
|
| /net/bluetooth/rfcomm/ |
| sock.c |
    422  static int rfcomm_sock_listen(struct socket *sock, int backlog)  in rfcomm_sock_listen() argument
    427  BT_DBG("sk %p backlog %d", sk, backlog);  in rfcomm_sock_listen()
    462  sk->sk_max_ack_backlog = backlog;  in rfcomm_sock_listen()
|
| /net/ipv4/ |
| af_inet.c |
    191  int __inet_listen_sk(struct sock *sk, int backlog)  in __inet_listen_sk() argument
    199  WRITE_ONCE(sk->sk_max_ack_backlog, backlog);  in __inet_listen_sk()
    214  fastopen_queue_tune(sk, backlog);  in __inet_listen_sk()
    230  int inet_listen(struct socket *sock, int backlog)  in inet_listen() argument
    240  err = __inet_listen_sk(sk, backlog);  in inet_listen()
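__inet_listen_sk() stores the backlog in sk_max_ack_backlog and, when TCP Fast Open is enabled, also sizes the Fast Open request queue via fastopen_queue_tune() (line 214). The userspace side of that knob is the TCP_FASTOPEN socket option, set before listen():

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
    int fd = socket(AF_INET, SOCK_STREAM, 0);
    int qlen = 128; /* max pending Fast Open requests */

    if (setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN, &qlen, sizeof(qlen)) < 0)
        perror("setsockopt(TCP_FASTOPEN)");
    if (listen(fd, 128) < 0) /* also sets sk_max_ack_backlog */
        perror("listen");
    close(fd);
    return 0;
}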
|
| /net/nfc/ |
| llcp_sock.c |
    204  static int llcp_sock_listen(struct socket *sock, int backlog)  in llcp_sock_listen() argument
    209  pr_debug("sk %p backlog %d\n", sk, backlog);  in llcp_sock_listen()
    219  sk->sk_max_ack_backlog = backlog;  in llcp_sock_listen()
|