/net/sunrpc/
sched.c
    140  rpc_set_waitqueue_priority(queue, queue->maxpriority);  in rpc_reset_waitqueue_priority()
    215  queue->qlen++;  in __rpc_add_wait_queue()
    240  queue->qlen--;  in __rpc_remove_wait_queue()
    252  queue->qlen = 0;  in __rpc_init_priority_wait_queue()
    624  q = &queue->tasks[queue->priority];  in __rpc_find_next_queued_priority()
    626  queue->nr--;  in __rpc_find_next_queued_priority()
    636  q = &queue->tasks[queue->maxpriority];  in __rpc_find_next_queued_priority()
    643  } while (q != &queue->tasks[queue->priority]);  in __rpc_find_next_queued_priority()
    649  rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0]));  in __rpc_find_next_queued_priority()
    870  if (queue)  in rpc_signal_task()
    [all …]
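
The sched.c hits outline SUNRPC's priority wait queue: qlen tracks membership, and __rpc_find_next_queued_priority() walks the tasks[] levels starting at the current priority, wrapping around until it finds a queued task, then records the winning level via rpc_set_waitqueue_priority(). A minimal standalone sketch of that wrap-around pick, assuming a fixed number of levels; struct prio_queue and prio_pick are illustrative names, not the kernel's:

```c
#include <stdio.h>

#define NR_LEVELS 4

struct prio_queue {
	unsigned int priority;        /* level served most recently */
	unsigned int len[NR_LEVELS];  /* tasks queued per level */
};

/* Scan from the current level and wrap through every level once;
 * return the first non-empty one and make it current, or -1 if all
 * levels are empty. Loosely mirrors the do/while over
 * &queue->tasks[] seen above. */
static int prio_pick(struct prio_queue *q)
{
	unsigned int lvl = q->priority;

	do {
		if (q->len[lvl]) {
			q->priority = lvl;  /* like rpc_set_waitqueue_priority() */
			return lvl;
		}
		lvl = (lvl + 1) % NR_LEVELS;
	} while (lvl != q->priority);

	return -1;
}

int main(void)
{
	struct prio_queue q = { .priority = 2, .len = { 1, 0, 0, 0 } };

	printf("next level: %d\n", prio_pick(&q));  /* wraps around to 0 */
	return 0;
}
```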
|
/net/netfilter/
nfnetlink_queue.c
    205   queue->queue_total--;  in __dequeue_entry()
    891   if (queue->queue_total >= queue->queue_maxlen) {  in __nfqnl_enqueue_packet()
    1010  if (!queue)  in nfqnl_enqueue_packet()
    1266  if (!queue)  in verdict_instance_lookup()
    1272  return queue;  in verdict_instance_lookup()
    1310  if (IS_ERR(queue))  in nfqnl_recv_verdict_batch()
    1424  if (IS_ERR(queue))  in nfqnl_recv_verdict()
    1549  if (queue && queue->peer_portid != NETLINK_CB(skb).portid) {  in nfqnl_recv_config()
    1557  if (queue) {  in nfqnl_recv_config()
    1569  if (!queue) {  in nfqnl_recv_config()
    [all …]
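
The hit at line 891 is the admission check: __nfqnl_enqueue_packet() refuses packets once queue_total reaches queue_maxlen, and __dequeue_entry() (line 205) keeps the counter honest. A toy version of that bound, with illustrative names; the real function drops the packet or fails open rather than returning an errno to its caller:

```c
#include <errno.h>

struct toy_nfqueue {
	unsigned int total;   /* entries currently queued */
	unsigned int maxlen;  /* refuse new entries at this size */
};

/* Admit an entry only while there is room, as the check at line 891
 * does; the caller is expected to drop the packet on -ENOSPC. */
static int toy_enqueue(struct toy_nfqueue *q)
{
	if (q->total >= q->maxlen)
		return -ENOSPC;
	q->total++;
	return 0;
}

static void toy_dequeue(struct toy_nfqueue *q)
{
	q->total--;  /* mirrors __dequeue_entry() */
}
```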
|
xt_NFQUEUE.c
    40   u32 queue = info->queuenum;  in nfqueue_tg_v1() (local)
    43   queue = nfqueue_hash(skb, queue, info->queues_total,  in nfqueue_tg_v1()
    46   return NF_QUEUE_NR(queue);  in nfqueue_tg_v1()
    89   u32 queue = info->queuenum;  in nfqueue_tg_v3() (local)
    96   queue = info->queuenum + cpu % info->queues_total;  in nfqueue_tg_v3()
    98   queue = nfqueue_hash(skb, queue, info->queues_total,  in nfqueue_tg_v3()
    103  ret = NF_QUEUE_NR(queue);  in nfqueue_tg_v3()
|
nft_queue.c
    33  u32 queue = priv->queuenum;  in nft_queue_eval() (local)
    40  queue = priv->queuenum + cpu % priv->queues_total;  in nft_queue_eval()
    42  queue = nfqueue_hash(pkt->skb, queue,  in nft_queue_eval()
    48  ret = NF_QUEUE_NR(queue);  in nft_queue_eval()
    60  u32 queue, ret;  in nft_queue_sreg_eval() (local)
    62  queue = regs->data[priv->sreg_qnum];  in nft_queue_sreg_eval()
    64  ret = NF_QUEUE_NR(queue);  in nft_queue_sreg_eval()
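
xt_NFQUEUE.c and nft_queue.c above share one fanout idea: spread packets across the block of queues [queuenum, queuenum + queues_total), either round-robin by CPU (queuenum + cpu % queues_total) or by flow hash through nfqueue_hash(), then pack the choice into the verdict with NF_QUEUE_NR(). A sketch of the two balancing strategies; the function names are illustrative:

```c
#include <stdint.h>

/* CPU fanout: cheap, but one flow can migrate between queues as its
 * packets are processed on different CPUs. */
static uint32_t cpu_fanout(uint32_t base, uint32_t total, uint32_t cpu)
{
	return base + cpu % total;
}

/* Hash fanout: a per-flow hash keeps all packets of one flow on the
 * same queue, which is what nfqueue_hash() provides in-kernel. */
static uint32_t hash_fanout(uint32_t base, uint32_t total, uint32_t flow_hash)
{
	return base + flow_hash % total;
}
```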
|
/net/core/
net-sysfs.c
    1179  netdev_put(queue->dev, &queue->dev_tracker);  in rx_queue_release()
    1255  netdev_hold(queue->dev, &queue->dev_tracker, GFP_KERNEL);  in rx_queue_add_kobject()
    1430  i = queue - dev->_tx;  in get_netdev_queue_index()
    1512  queue->tx_maxrate = rate;  in tx_maxrate_store()
    1564  struct dql *dql = &queue->dql;  in bql_show_hold_time()
    1573  struct dql *dql = &queue->dql;  in bql_set_hold_time()
    1593  struct dql *dql = &queue->dql;  in bql_show_stall_thrs()
    1929  netdev_put(queue->dev, &queue->dev_tracker);  in netdev_queue_release()
    1992  netdev_hold(queue->dev, &queue->dev_tracker, GFP_KERNEL);  in netdev_queue_add_kobject()
    2070  sysfs_remove_groups(&queue->kobj, queue->groups);  in netdev_queue_update_kobjects()
    [all …]
|
request_sock.c
    34  void reqsk_queue_alloc(struct request_sock_queue *queue)  in reqsk_queue_alloc() (argument)
    36  queue->fastopenq.rskq_rst_head = NULL;  in reqsk_queue_alloc()
    37  queue->fastopenq.rskq_rst_tail = NULL;  in reqsk_queue_alloc()
    38  queue->fastopenq.qlen = 0;  in reqsk_queue_alloc()
    40  queue->rskq_accept_head = NULL;  in reqsk_queue_alloc()
|
datagram.c
    89   int __skb_wait_for_more_packets(struct sock *sk, struct sk_buff_head *queue,  in __skb_wait_for_more_packets() (argument)
    103  if (READ_ONCE(queue->prev) != skb)  in __skb_wait_for_more_packets()
    168  struct sk_buff *__skb_try_recv_from_queue(struct sk_buff_head *queue,  in __skb_try_recv_from_queue() (argument)
    182  *last = queue->prev;  in __skb_try_recv_from_queue()
    183  skb_queue_walk(queue, skb) {  in __skb_try_recv_from_queue()
    199  __skb_unlink(skb, queue);  in __skb_try_recv_from_queue()
    243  struct sk_buff_head *queue,  in __skb_try_recv_datagram() (argument)
    264  spin_lock_irqsave(&queue->lock, cpu_flags);  in __skb_try_recv_datagram()
    265  skb = __skb_try_recv_from_queue(queue, flags, off, &error,  in __skb_try_recv_datagram()
    267  spin_unlock_irqrestore(&queue->lock, cpu_flags);  in __skb_try_recv_datagram()
    [all …]
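
datagram.c shows the canonical locked receive: take queue->lock, walk with skb_queue_walk(), detach the match with __skb_unlink() (the lock-free helper is safe because the lock is already held), then unlock. A condensed kernel-style sketch; the match() predicate is a hypothetical stand-in for the peek/offset logic of __skb_try_recv_from_queue():

```c
#include <linux/skbuff.h>

/* Remove and return the first skb accepted by match(), or NULL.
 * The lock-free list helper (__skb_unlink) is legal here only
 * because queue->lock is held across the walk. */
static struct sk_buff *try_recv(struct sk_buff_head *queue,
				bool (*match)(const struct sk_buff *skb))
{
	struct sk_buff *skb, *found = NULL;
	unsigned long flags;

	spin_lock_irqsave(&queue->lock, flags);
	skb_queue_walk(queue, skb) {
		if (match(skb)) {
			__skb_unlink(skb, queue);
			found = skb;
			break;
		}
	}
	spin_unlock_irqrestore(&queue->lock, flags);

	return found;
}
```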
|
/net/sctp/
inqueue.c
    32   void sctp_inq_init(struct sctp_inq *queue)  in sctp_inq_init() (argument)
    34   INIT_LIST_HEAD(&queue->in_chunk_list);  in sctp_inq_init()
    35   queue->in_progress = NULL;  in sctp_inq_init()
    38   INIT_WORK(&queue->immediate, NULL);  in sctp_inq_init()
    50   void sctp_inq_free(struct sctp_inq *queue)  in sctp_inq_free() (argument)
    63   if (queue->in_progress) {  in sctp_inq_free()
    65   queue->in_progress = NULL;  in sctp_inq_free()
    97   chunk = queue->in_progress;  in sctp_inq_peek()
    124  chunk = queue->in_progress;  in sctp_inq_pop()
    142  chunk = queue->in_progress = NULL;  in sctp_inq_pop()
    [all …]
|
ulpqueue.c
    184  struct sk_buff_head *queue;  in sctp_ulpq_tail_event() (local)
    213  queue = &sk->sk_receive_queue;  in sctp_ulpq_tail_event()
    224  queue = &sp->pd_lobby;  in sctp_ulpq_tail_event()
    227  queue = &sk->sk_receive_queue;  in sctp_ulpq_tail_event()
    236  queue = &sk->sk_receive_queue;  in sctp_ulpq_tail_event()
    238  queue = &sp->pd_lobby;  in sctp_ulpq_tail_event()
    242  skb_queue_splice_tail_init(skb_list, queue);  in sctp_ulpq_tail_event()
    251  if (queue == &sk->sk_receive_queue && !sp->data_ready_signalled) {  in sctp_ulpq_tail_event()
    313  struct sk_buff_head *queue,  in sctp_make_reassembled_event() (argument)
    357  __skb_unlink(f_frag, queue);  in sctp_make_reassembled_event()
    [all …]
|
/net/qrtr/
tun.c
    15   struct sk_buff_head queue;  (member)
    23   skb_queue_tail(&tun->queue, skb);  in qrtr_tun_send()
    40   skb_queue_head_init(&tun->queue);  in qrtr_tun_open()
    66   while (!(skb = skb_dequeue(&tun->queue))) {  in qrtr_tun_read_iter()
    72   !skb_queue_empty(&tun->queue)))  in qrtr_tun_read_iter()
    121  if (!skb_queue_empty(&tun->queue))  in qrtr_tun_poll()
    134  skb_queue_purge(&tun->queue);  in qrtr_tun_release()
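
tun.c is a compact tour of the sk_buff_head lifecycle: skb_queue_head_init() on open, skb_queue_tail() from the producer, skb_dequeue() in the reader (qrtr sleeps until !skb_queue_empty()), and skb_queue_purge() on release. A kernel-style sketch of the same shape; struct toy_tun and its functions are illustrative:

```c
#include <linux/skbuff.h>

struct toy_tun {
	struct sk_buff_head queue;  /* like the "member" hit at line 15 */
};

static void toy_open(struct toy_tun *tun)
{
	skb_queue_head_init(&tun->queue);  /* list head + internal spinlock */
}

static void toy_send(struct toy_tun *tun, struct sk_buff *skb)
{
	skb_queue_tail(&tun->queue, skb);  /* locked enqueue */
}

static struct sk_buff *toy_read(struct toy_tun *tun)
{
	/* NULL when empty; tun.c sleeps and retries at this point */
	return skb_dequeue(&tun->queue);
}

static void toy_release(struct toy_tun *tun)
{
	skb_queue_purge(&tun->queue);  /* free anything left over */
}
```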
|
/net/ipv4/
inet_connection_sock.c
    677   if (reqsk_queue_empty(queue)) {  in inet_csk_accept()
    991   atomic_inc(&queue->young);  in reqsk_queue_migrated()
    992   atomic_inc(&queue->qlen);  in reqsk_queue_migrated()
    1060  struct request_sock_queue *queue;  in reqsk_timer_handler() (local)
    1109  queue = &icsk->icsk_accept_queue;  in reqsk_timer_handler()
    1110  qlen = reqsk_queue_len(queue);  in reqsk_timer_handler()
    1129  atomic_dec(&queue->young);  in reqsk_timer_handler()
    1158  reqsk_queue_removed(queue, nreq);  in reqsk_timer_handler()
    1396  spin_lock(&queue->rskq_lock);  in inet_csk_reqsk_queue_add()
    1407  queue->rskq_accept_tail = req;  in inet_csk_reqsk_queue_add()
    [all …]
|
tcp_yeah.c
    114  u32 rtt, queue;  in tcp_yeah_cong_avoid() (local)
    136  queue = bw;  in tcp_yeah_cong_avoid()
    138  if (queue > TCP_YEAH_ALPHA ||  in tcp_yeah_cong_avoid()
    140  if (queue > TCP_YEAH_ALPHA &&  in tcp_yeah_cong_avoid()
    142  u32 reduction = min(queue / TCP_YEAH_GAMMA ,  in tcp_yeah_cong_avoid()
    171  yeah->lastQ = queue;  in tcp_yeah_cong_avoid()
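
In tcp_yeah.c the queue local is a Vegas-style backlog estimate: roughly cwnd * (rtt - base_rtt) / rtt, the share of the window attributable to queueing delay, which is then compared against TCP_YEAH_ALPHA to decide whether to back off. A one-function sketch of that estimate, assuming rtt >= base_rtt > 0:

```c
#include <stdint.h>

/* Packets believed to be sitting in network buffers: the fraction of
 * the congestion window explained by queueing delay (rtt - base_rtt)
 * rather than by the base path. */
static uint32_t yeah_backlog(uint32_t cwnd, uint32_t rtt, uint32_t base_rtt)
{
	return (uint64_t)cwnd * (rtt - base_rtt) / rtt;
}
```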
|
ip_output.c
    950   struct sk_buff_head *queue,  in __ip_append_data() (argument)
    976   skb = skb_peek_tail(queue);  in __ip_append_data()
    1204  __skb_queue_tail(queue, skb);  in __ip_append_data()
    1396  struct sk_buff_head *queue,  in __ip_make_skb() (argument)
    1409  skb = __skb_dequeue(queue);  in __ip_make_skb()
    1533  struct sk_buff_head *queue,  in __ip_flush_pending_frames() (argument)
    1557  struct sk_buff_head queue;  in ip_make_skb() (local)
    1563  __skb_queue_head_init(&queue);  in ip_make_skb()
    1572  err = __ip_append_data(sk, fl4, &queue, cork,  in ip_make_skb()
    1576  __ip_flush_pending_frames(sk, &queue, cork);  in ip_make_skb()
    [all …]
|
tcp_fastopen.c
    245  struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;  in tcp_fastopen_create_child() (local)
    254  spin_lock(&queue->fastopenq.lock);  in tcp_fastopen_create_child()
    255  queue->fastopenq.qlen++;  in tcp_fastopen_create_child()
    256  spin_unlock(&queue->fastopenq.lock);  in tcp_fastopen_create_child()
|
/net/mac80211/
util.c
    286  struct ieee80211_txq *queue)  in wake_tx_push_queue() (argument)
    289  .sta = queue->sta,  in wake_tx_push_queue()
    308  struct ieee80211_txq *queue;  in ieee80211_handle_wake_tx_queue() (local)
    440  if (WARN_ON(queue >= hw->queues))  in __ieee80211_wake_queue()
    457  trace_wake_queue(local, queue, reason,  in __ieee80211_wake_queue()
    506  if (WARN_ON(queue >= hw->queues))  in __ieee80211_stop_queue()
    546  int queue = info->hw_queue;  in ieee80211_add_pending_skb() (local)
    568  int queue, i;  in ieee80211_add_pending_skbs() (local)
    579  queue = info->hw_queue;  in ieee80211_add_pending_skbs()
    581  __ieee80211_stop_queue(hw, queue,  in ieee80211_add_pending_skbs()
    [all …]
|
agg-tx.c
    155  int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];  in __acquires() (local)
    159  if (atomic_inc_return(&sdata->local->agg_queue_stop[queue]) == 1)  in __acquires()
    161  &sdata->local->hw, queue,  in __acquires()
    170  int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];  in __releases() (local)
    172  if (atomic_dec_return(&sdata->local->agg_queue_stop[queue]) == 0)  in __releases()
    174  &sdata->local->hw, queue,  in __releases()
    236  int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];  in __acquires() (local)
    250  &local->pending[queue]);  in __acquires()
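
agg-tx.c layers a counter over the queue stop/wake API: several aggregation sessions can share one hardware queue, so only the 0 -> 1 transition really stops it and only the 1 -> 0 transition wakes it. A sketch of that counting discipline; stop_hw()/wake_hw() are stand-ins for the ieee80211 stop/wake-by-reason calls:

```c
#include <linux/atomic.h>

static void stop_hw(int queue) { /* driver stop, by reason */ }
static void wake_hw(int queue) { /* driver wake */ }

/* The first stopper actually stops the hardware queue ... */
static void agg_queue_stop(atomic_t *stop_cnt, int queue)
{
	if (atomic_inc_return(stop_cnt) == 1)
		stop_hw(queue);
}

/* ... and the last waker restarts it, so nested stop/wake pairs from
 * different TIDs sharing one queue compose correctly. */
static void agg_queue_wake(atomic_t *stop_cnt, int queue)
{
	if (atomic_dec_return(stop_cnt) == 0)
		wake_hw(queue);
}
```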
|
/net/sched/
sch_etf.c
    32   int queue;  (member)
    310  etf.queue = q->queue;  in etf_disable_offload()
    316  etf.queue);  in etf_disable_offload()
    331  etf.queue = q->queue;  in etf_enable_offload()
    379  q->queue = sch->dev_queue - netdev_get_tx_queue(dev, 0);  in etf_init()
|
sch_cbs.c
    72   int queue;  (member)
    267  cbs.queue = q->queue;  in cbs_disable_offload()
    273  cbs.queue);  in cbs_disable_offload()
    289  cbs.queue = q->queue;  in cbs_enable_offload()
    423  q->queue = sch->dev_queue - netdev_get_tx_queue(dev, 0);  in cbs_init()
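
sch_etf.c and sch_cbs.c recover their queue index identically: a qdisc knows which netdev_queue it sits on (sch->dev_queue), and because netdev_get_tx_queue(dev, 0) is the base of the device's _tx array, plain pointer subtraction yields the index later passed to the driver's offload hook. The idiom in isolation:

```c
#include <linux/netdevice.h>

/* Index of a tx queue within its device's _tx array: pointer
 * subtraction against the first queue, as etf_init()/cbs_init() do. */
static int txq_index(struct net_device *dev, struct netdev_queue *txq)
{
	return txq - netdev_get_tx_queue(dev, 0);
}
```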
|
/net/x25/
x25_link.c
    125  while ((skbn = skb_dequeue(&nb->queue)) != NULL)  in x25_link_control()
    216  skb_queue_tail(&nb->queue, skb);  in x25_transmit_link()
    222  skb_queue_tail(&nb->queue, skb);  in x25_transmit_link()
    253  skb_queue_purge(&nb->queue);  in x25_link_terminated()
    270  skb_queue_head_init(&nb->queue);  in x25_link_device_up()
|
/net/unix/
garbage.c
    354  struct sk_buff_head *queue;  in unix_collect_skb() (local)
    360  queue = &u->sk.sk_receive_queue;  in unix_collect_skb()
    362  spin_lock(&queue->lock);  in unix_collect_skb()
    367  skb_queue_walk(queue, skb) {  in unix_collect_skb()
    375  skb_queue_splice_init(queue, hitlist);  in unix_collect_skb()
    378  spin_unlock(&queue->lock);  in unix_collect_skb()
|
/net/xfrm/
xfrm_input.c
    31   struct sk_buff_head queue;  (member)
    778  struct sk_buff_head queue;  in xfrm_trans_reinject() (local)
    781  __skb_queue_head_init(&queue);  in xfrm_trans_reinject()
    783  skb_queue_splice_init(&trans->queue, &queue);  in xfrm_trans_reinject()
    787  while ((skb = __skb_dequeue(&queue)))  in xfrm_trans_reinject()
    801  if (skb_queue_len(&trans->queue) >= READ_ONCE(net_hotdata.max_backlog))  in xfrm_trans_queue_net()
    809  __skb_queue_tail(&trans->queue, skb);  in xfrm_trans_queue_net()
    842  __skb_queue_head_init(&trans->queue);  in xfrm_input_init()
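
xfrm_trans_reinject() drains its backlog by splicing everything into an on-stack queue first, then resubmitting skb by skb; the enqueue side (line 801) caps the list with skb_queue_len() against max_backlog. A sketch of the splice-then-drain shape, assuming the caller has exclusive access to the shared list (in xfrm it is per-CPU); resume_one() is a hypothetical stand-in for the per-skb finish callback:

```c
#include <linux/skbuff.h>

static void resume_one(struct sk_buff *skb)
{
	kfree_skb(skb);  /* stand-in for the real resume callback */
}

static void drain(struct sk_buff_head *shared)
{
	struct sk_buff_head local;
	struct sk_buff *skb;

	__skb_queue_head_init(&local);          /* on stack: no lock needed */
	skb_queue_splice_init(shared, &local);  /* move everything in O(1);
						   exclusive access assumed */

	while ((skb = __skb_dequeue(&local)))
		resume_one(skb);
}
```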
|
/net/nfc/
digital_core.c
    28   struct list_head queue;  (member)
    120  queue);  in digital_wq_cmd_complete()
    126  list_del(&cmd->queue);  in digital_wq_cmd_complete()
    164  queue);  in digital_wq_cmd()
    217  list_del(&cmd->queue);  in digital_wq_cmd()
    244  INIT_LIST_HEAD(&cmd->queue);  in digital_send_cmd()
    247  list_add_tail(&cmd->queue, &ddev->cmd_queue);  in digital_send_cmd()
    845  list_for_each_entry_safe(cmd, n, &ddev->cmd_queue, queue) {  in nfc_digital_unregister_device()
    846  list_del(&cmd->queue);  in nfc_digital_unregister_device()
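
digital_core.c chains commands through a list_head member named queue: list_add_tail() appends, the worker list_del()s the entry it just completed, and teardown uses list_for_each_entry_safe() so nodes can be freed mid-walk. A self-contained kernel-style sketch; struct toy_cmd is illustrative:

```c
#include <linux/list.h>
#include <linux/slab.h>

struct toy_cmd {
	struct list_head queue;  /* chaining member, as at line 28 */
	int op;
};

static LIST_HEAD(cmd_queue);

static void toy_enqueue(struct toy_cmd *cmd)
{
	INIT_LIST_HEAD(&cmd->queue);            /* as digital_send_cmd() does */
	list_add_tail(&cmd->queue, &cmd_queue);
}

/* The _safe iterator caches the next node, so list_del() + kfree()
 * of the current one cannot derail the walk. */
static void toy_flush(void)
{
	struct toy_cmd *cmd, *n;

	list_for_each_entry_safe(cmd, n, &cmd_queue, queue) {
		list_del(&cmd->queue);
		kfree(cmd);
	}
}
```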
|
/net/802/
garp.c
    264  skb_queue_tail(&app->queue, app->pdu);  in garp_pdu_queue()
    272  while ((skb = skb_dequeue(&app->queue)))  in garp_queue_xmit()
    305  goto queue;  in garp_pdu_append_attr()
    307  goto queue;  in garp_pdu_append_attr()
    312  goto queue;  in garp_pdu_append_attr()
    319  queue:  in garp_pdu_append_attr()
    596  skb_queue_head_init(&app->queue);  in garp_init_applicant()
|
mrp.c
    356  skb_queue_tail(&app->queue, app->pdu);  in mrp_pdu_queue()
    364  while ((skb = skb_dequeue(&app->queue)))  in mrp_queue_xmit()
    426  goto queue;  in mrp_pdu_append_vecattr_event()
    436  goto queue;  in mrp_pdu_append_vecattr_event()
    447  goto queue;  in mrp_pdu_append_vecattr_event()
    476  queue:  in mrp_pdu_append_vecattr_event()
    882  skb_queue_head_init(&app->queue);  in mrp_init_applicant()
|
/net/dsa/
tag_brcm.c
    94   u16 queue = skb_get_queue_mapping(skb);  in brcm_tag_xmit_ll() (local)
    120  ((queue & BRCM_IG_TC_MASK) << BRCM_IG_TC_SHIFT);  in brcm_tag_xmit_ll()
    130  skb_set_queue_mapping(skb, BRCM_TAG_SET_PORT_QUEUE(dp->index, queue));  in brcm_tag_xmit_ll()
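
tag_brcm.c folds the skb's queue mapping into the Broadcom ingress tag: the number is masked and shifted into the tag's TC field on transmit, and port plus queue are re-encoded with BRCM_TAG_SET_PORT_QUEUE() for the return path. A mask-and-shift sketch; the field width and position below are illustrative, not Broadcom's actual tag layout:

```c
#include <stdint.h>

#define TC_MASK   0x7  /* illustrative 3-bit traffic-class field */
#define TC_SHIFT  10   /* illustrative position inside the tag word */

static uint16_t pack_tc(uint16_t tag, uint16_t queue)
{
	return tag | ((queue & TC_MASK) << TC_SHIFT);
}

static uint16_t unpack_tc(uint16_t tag)
{
	return (tag >> TC_SHIFT) & TC_MASK;
}
```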
|