| /net/rxrpc/ |
| call_event.c |
     26  ktime_t delay = ms_to_ktime(READ_ONCE(rxrpc_idle_ack_delay));  in rxrpc_propose_ping() local
     28  ktime_t ping_at = ktime_add(now, delay);  in rxrpc_propose_ping()
     43  ktime_t now = ktime_get_real(), delay;  in rxrpc_propose_delay_ACK() local
     48  delay = (call->srtt_us >> 3) * NSEC_PER_USEC;  in rxrpc_propose_delay_ACK()
     50  delay = ms_to_ktime(READ_ONCE(rxrpc_soft_ack_delay));  in rxrpc_propose_delay_ACK()
     51  ktime_add_ms(delay, call->tx_backoff);  in rxrpc_propose_delay_ACK()
     53  call->delay_ack_at = ktime_add(now, delay);  in rxrpc_propose_delay_ACK()
    454  delay = ktime_sub(next, now);  in rxrpc_input_call_event()
    455  if (delay <= 0) {  in rxrpc_input_call_event()
    460  delayj = umax(nsecs_to_jiffies(delay), 1);  in rxrpc_input_call_event()
    [all …]
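The delayed-ACK matches above (source lines 48–53) show the usual trick of keeping the smoothed RTT scaled by 8, so `srtt_us >> 3` recovers microseconds before the nanosecond conversion, with a configurable soft-ACK delay as the fallback. A minimal userspace sketch of that arithmetic; names and constants here are hypothetical, not the kernel code:

```c
/* Sketch of the delayed-ACK deadline arithmetic seen in
 * rxrpc_propose_delay_ACK(); standalone model, not kernel code. */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_USEC 1000ULL
#define NSEC_PER_MSEC 1000000ULL

/* srtt_us is assumed to be kept scaled by 8, so >> 3 recovers microseconds. */
static uint64_t delay_ack_ns(uint32_t srtt_us_x8, uint64_t soft_ack_delay_ms)
{
	if (srtt_us_x8)
		return (uint64_t)(srtt_us_x8 >> 3) * NSEC_PER_USEC;
	return soft_ack_delay_ms * NSEC_PER_MSEC;   /* no RTT sample yet */
}

int main(void)
{
	/* 30 ms smoothed RTT, stored as 8 * 30000 us */
	printf("delay = %llu ns\n",
	       (unsigned long long)delay_ack_ns(8 * 30000, 2));
	return 0;
}
```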
|
| peer_event.c |
    306  time64_t base, now, delay;  in rxrpc_peer_keepalive_worker() local
    354  delay = base - now;  in rxrpc_peer_keepalive_worker()
    355  if (delay < 1)  in rxrpc_peer_keepalive_worker()
    356  delay = 1;  in rxrpc_peer_keepalive_worker()
    357  delay *= HZ;  in rxrpc_peer_keepalive_worker()
    359  timer_reduce(&rxnet->peer_keepalive_timer, jiffies + delay);  in rxrpc_peer_keepalive_worker()
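rxrpc_peer_keepalive_worker() works in whole seconds and converts to jiffies only at the end, never rearming in the past. A standalone model of that clamp-and-convert step, with an assumed HZ value:

```c
/* Model of the keepalive rescheduling arithmetic in
 * rxrpc_peer_keepalive_worker(): second-resolution times, minimum 1 s,
 * converted to jiffies by multiplying by HZ. Hypothetical standalone code. */
#include <stdint.h>
#include <stdio.h>

#define HZ 250 /* assumed tick rate */

static int64_t keepalive_delay_jiffies(int64_t base, int64_t now)
{
	int64_t delay = base - now;   /* seconds until the next bucket is due */
	if (delay < 1)
		delay = 1;            /* never rearm in the past */
	return delay * HZ;            /* seconds -> jiffies */
}

int main(void)
{
	printf("%lld jiffies\n", (long long)keepalive_delay_jiffies(105, 103));
	return 0;
}
```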
|
| Kconfig |
     40  bool "Inject delay into packet reception"
     43  Say Y here to inject a delay into packet reception, allowing an
     44  extended RTT time to be modelled. The delay can be configured using
|
| output.c |
     68  ktime_t delay = ms_to_ktime(READ_ONCE(call->next_rx_timo) / 6);  in rxrpc_set_keepalive() local
     70  call->keepalive_at = ktime_add(ktime_get_real(), delay);  in rxrpc_set_keepalive()
     71  trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_keepalive);  in rxrpc_set_keepalive()
    625  ktime_t delay = ms_to_ktime(READ_ONCE(call->next_rx_timo));  in rxrpc_prepare_data_packet() local
    627  call->expect_rx_by = ktime_add(req->now, delay);  in rxrpc_prepare_data_packet()
    628  trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_expect_rx);  in rxrpc_prepare_data_packet()
|
| input.c |
    709  ktime_t delay = ms_to_ktime(timo);  in rxrpc_input_data() local
    711  call->expect_req_by = ktime_add(ktime_get_real(), delay);  in rxrpc_input_data()
    712  trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_idle);  in rxrpc_input_data()
   1246  ktime_t delay = ms_to_ktime(timo);  in rxrpc_input_call_packet() local
   1248  call->expect_rx_by = ktime_add(ktime_get_real(), delay);  in rxrpc_input_call_packet()
   1249  trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_expect_rx);  in rxrpc_input_call_packet()
|
| call_object.c |
    250  ktime_t delay = ms_to_ktime(call->hard_timo * 1000);  in rxrpc_start_call_timer() local
    252  call->expect_term_by = ktime_add(ktime_get_real(), delay);  in rxrpc_start_call_timer()
    253  trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_hard);  in rxrpc_start_call_timer()
|
| /net/ipv4/ |
| tcp_cubic.c |
    386  static void hystart_update(struct sock *sk, u32 delay)  in hystart_update() argument
    433  if (ca->curr_rtt > delay)  in hystart_update()
    434  ca->curr_rtt = delay;  in hystart_update()
    456  u32 delay;  in cubictcp_acked() local
    466  delay = sample->rtt_us;  in cubictcp_acked()
    467  if (delay == 0)  in cubictcp_acked()
    468  delay = 1;  in cubictcp_acked()
    471  if (ca->delay_min == 0 || ca->delay_min > delay)  in cubictcp_acked()
    472  ca->delay_min = delay;  in cubictcp_acked()
    475  hystart_update(sk, delay);  in cubictcp_acked()
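cubictcp_acked() rounds a zero RTT sample up to 1 us and keeps two running minima: delay_min for the connection lifetime and curr_rtt for the current round, which HyStart compares against to detect delay increase. A hedged sketch of that bookkeeping; the struct and function names are made up, though delay_min and curr_rtt mirror fields of the kernel's struct bictcp:

```c
/* Model of the RTT bookkeeping in cubictcp_acked(): a zero sample is
 * rounded up to 1 us, delay_min tracks the lowest RTT ever seen, and
 * curr_rtt tracks the lowest RTT in the current round. Standalone
 * sketch, not the kernel implementation. */
#include <stdint.h>
#include <stdio.h>

struct cubic_state {
	uint32_t delay_min;  /* lowest RTT observed, in us */
	uint32_t curr_rtt;   /* lowest RTT this round, in us */
};

static void on_ack_rtt_sample(struct cubic_state *ca, uint32_t rtt_us)
{
	uint32_t delay = rtt_us ? rtt_us : 1;   /* avoid a zero RTT */

	if (ca->delay_min == 0 || ca->delay_min > delay)
		ca->delay_min = delay;
	if (ca->curr_rtt == 0 || ca->curr_rtt > delay)
		ca->curr_rtt = delay;
}

int main(void)
{
	struct cubic_state ca = { 0, 0 };
	uint32_t samples[] = { 340, 280, 0, 310 };

	for (unsigned int i = 0; i < 4; i++)
		on_ack_rtt_sample(&ca, samples[i]);
	printf("delay_min=%u curr_rtt=%u\n", ca.delay_min, ca.curr_rtt);
	return 0;
}
```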
|
| /net/batman-adv/ |
| send.c |
    754  unsigned long delay,  in batadv_forw_bcast_packet_to_list() argument
    778  send_time += delay ? delay : msecs_to_jiffies(5);  in batadv_forw_bcast_packet_to_list()
    810  unsigned long delay,  in batadv_forw_bcast_packet_if() argument
    819  if (!delay) {  in batadv_forw_bcast_packet_if()
    832  ret = batadv_forw_bcast_packet_to_list(bat_priv, skb, delay,  in batadv_forw_bcast_packet_if()
    921  unsigned long delay,  in __batadv_forw_bcast_packet() argument
    944  ret = batadv_forw_bcast_packet_if(bat_priv, skb, delay,  in __batadv_forw_bcast_packet()
    973  unsigned long delay,  in batadv_forw_bcast_packet() argument
    976  return __batadv_forw_bcast_packet(bat_priv, skb, delay, own_packet);  in batadv_forw_bcast_packet()
    994  unsigned long delay,  in batadv_send_bcast_packet() argument
    [all …]
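Source line 778 shows batman-adv's fallback: a caller-supplied delay is used as-is, otherwise rebroadcasts are spaced by a 5 ms default. A small model of that send-time computation, with msecs_to_jiffies() reimplemented for an assumed HZ:

```c
/* Sketch of the broadcast send-time fallback seen in
 * batadv_forw_bcast_packet_to_list(): an explicit delay is used as-is,
 * otherwise a 5 ms default keeps rebroadcasts from going out
 * back-to-back. Hypothetical standalone code. */
#include <stdio.h>

#define HZ 100 /* assumed tick rate */

static unsigned long msecs_to_jiffies(unsigned int ms)
{
	return ((unsigned long)ms * HZ + 999) / 1000;  /* round up */
}

static unsigned long bcast_send_time(unsigned long now, unsigned long delay)
{
	return now + (delay ? delay : msecs_to_jiffies(5));
}

int main(void)
{
	/* no explicit delay -> now plus the 5 ms default, in jiffies */
	printf("send at %lu\n", bcast_send_time(1000, 0));
	return 0;
}
```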
|
| send.h |
     44  unsigned long delay,
     48  unsigned long delay,
|
| /net/nfc/nci/ |
| spi.c |
     47  t.delay.value = nspi->xfer_udelay;  in __nci_spi_send()
     48  t.delay.unit = SPI_DELAY_UNIT_USECS;  in __nci_spi_send()
    125  u8 acknowledge_mode, unsigned int delay,  in nci_spi_allocate_spi() argument
    135  nspi->xfer_udelay = delay;  in nci_spi_allocate_spi()
    222  rx.delay.value = nspi->xfer_udelay;  in __nci_spi_read()
    223  rx.delay.unit = SPI_DELAY_UNIT_USECS;  in __nci_spi_read()
|
| /net/core/ |
| link_watch.c |
    139  unsigned long delay = linkwatch_nextevent - jiffies;  in linkwatch_schedule_work() local
    148  delay = 0;  in linkwatch_schedule_work()
    152  if (delay > HZ)  in linkwatch_schedule_work()
    153  delay = 0;  in linkwatch_schedule_work()
    162  queue_delayed_work(system_unbound_wq, &linkwatch_work, delay);  in linkwatch_schedule_work()
|
| pktgen.c |
    290  u64 delay; /* nano-seconds */  member
    588  pkt_dev->nfrags, (unsigned long long) pkt_dev->delay,  in pktgen_if_show()
   1134  pkt_dev->delay = ULLONG_MAX;  in pktgen_if_write()
   1136  pkt_dev->delay = (u64)value;  in pktgen_if_write()
   1139  (unsigned long long) pkt_dev->delay);  in pktgen_if_write()
   1152  pr_info("Delay set at: %llu ns\n", pkt_dev->delay);  in pktgen_if_write()
   1165  pkt_dev->delay = NSEC_PER_SEC/value;  in pktgen_if_write()
   1167  pr_info("Delay set at: %llu ns\n", pkt_dev->delay);  in pktgen_if_write()
   3540  if (unlikely(pkt_dev->delay == ULLONG_MAX)) {  in pktgen_xmit()
   3562  if (pkt_dev->delay && pkt_dev->last_ok)  in pktgen_xmit()
    [all …]
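pktgen stores its inter-packet gap in nanoseconds, with ULLONG_MAX meaning "no delay configured"; source line 1165 derives the gap from a packets-per-second rate as NSEC_PER_SEC/value. A sketch of that conversion, with a hypothetical helper name:

```c
/* Model of pktgen's rate handling: delay is stored in nanoseconds,
 * ULLONG_MAX marks "unset", and a pps rate is converted to a gap of
 * NSEC_PER_SEC / rate. Standalone sketch, not the pktgen code itself. */
#include <limits.h>
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

static uint64_t pps_to_delay_ns(uint64_t pps)
{
	if (pps == 0)
		return ULLONG_MAX;          /* no rate configured */
	return NSEC_PER_SEC / pps;          /* inter-packet gap */
}

int main(void)
{
	printf("100000 pps -> %llu ns between packets\n",
	       (unsigned long long)pps_to_delay_ns(100000));
	return 0;
}
```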
|
| /net/netfilter/ |
| xt_LED.c |
     51  if ((ledinfo->delay > 0) && ledinfo->always_blink &&  in led_tg()
     59  if (ledinfo->delay > 0) {  in led_tg()
     61  jiffies + msecs_to_jiffies(ledinfo->delay));  in led_tg()
     64  } else if (ledinfo->delay == 0) {  in led_tg()
|
| nf_conntrack_ecache.c |
    109  int ret, delay = -1;  in ecache_work() local
    114  delay = ECACHE_RETRY_JIFFIES;  in ecache_work()
    117  delay = 0;  in ecache_work()
    123  if (delay >= 0)  in ecache_work()
    124  schedule_delayed_work(&cnet->ecache.dwork, delay);  in ecache_work()
|
| /net/netfilter/ipvs/ |
| ip_vs_est.c |
    397  int delay = est->ktrow;  in ip_vs_enqueue_estimator() local
    419  if (kd->est_count >= 2 * kd->tick_max || delay < IPVS_EST_NTICKS - 1)  in ip_vs_enqueue_estimator()
    423  crow += delay;  in ip_vs_enqueue_estimator()
    427  if (delay >= IPVS_EST_NTICKS - 1) {  in ip_vs_enqueue_estimator()
    744  int id, row, cid, delay;  in ip_vs_est_calc_phase() local
    778  delay = IPVS_EST_NTICKS;  in ip_vs_est_calc_phase()
    781  delay--;  in ip_vs_est_calc_phase()
    782  if (delay < 0)  in ip_vs_est_calc_phase()
    802  row = kd->est_row + delay;  in ip_vs_est_calc_phase()
|
| Kconfig |
    250  tristate "shortest expected delay scheduling"
    252  The shortest expected delay scheduling algorithm assigns network
    253  connections to the server with the shortest expected delay. The
    254  expected delay that the job will experience is (Ci + 1) / Ui if
    269  that minimize its expected delay (The Shortest Expected Delay
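The help text's formula can be made concrete: with Ci active connections and weight Ui, SED picks the server minimizing (Ci + 1) / Ui. A hedged sketch of the selection loop, comparing a/b < c/d as a*d < c*b to stay in integer arithmetic; the real scheduler lives in ip_vs_sed.c, and the names below are made up:

```c
/* Worked example of the SED metric from the help text: pick the server
 * minimizing (Ci + 1) / Ui, where Ci is the active connection count and
 * Ui the weight. Hypothetical standalone sketch. */
#include <stdio.h>

struct server { const char *name; unsigned long conns, weight; };

static int sed_pick(const struct server *s, int n)
{
	int best = 0;

	for (int i = 1; i < n; i++) {
		/* (Ci+1)/Ui < (Cb+1)/Ub  <=>  (Ci+1)*Ub < (Cb+1)*Ui */
		if ((s[i].conns + 1) * s[best].weight <
		    (s[best].conns + 1) * s[i].weight)
			best = i;
	}
	return best;
}

int main(void)
{
	struct server pool[] = {
		{ "a", 10, 4 }, { "b", 3, 2 }, { "c", 1, 1 },
	};

	/* a: 11/4 = 2.75, b: 4/2 = 2.0, c: 2/1 = 2.0 -> "b" (first minimum) */
	printf("SED picks: %s\n", pool[sed_pick(pool, 3)].name);
	return 0;
}
```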
|
| /net/rfkill/ |
| input.c |
    142  const unsigned long delay = msecs_to_jiffies(RFKILL_OPS_DELAY);  in rfkill_ratelimit() local
    143  return time_after(jiffies, last + delay) ? 0 : delay;  in rfkill_ratelimit()
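rfkill_ratelimit() returns 0 once RFKILL_OPS_DELAY has elapsed since `last`, and otherwise the full delay rather than the remainder. A userspace model including a wrap-safe time_after(); HZ and the delay value are assumptions:

```c
/* Model of rfkill_ratelimit(): if less than the ops delay has passed
 * since `last`, return the full delay to wait, else 0. Hypothetical
 * standalone code with assumed constants. */
#include <stdio.h>

#define HZ 100
#define RFKILL_OPS_DELAY_MS 200

/* time_after(a, b): true if a is after b, wrap-safe via signed subtraction */
static int time_after(unsigned long a, unsigned long b)
{
	return (long)(b - a) < 0;
}

static unsigned long rfkill_ratelimit(unsigned long now, unsigned long last)
{
	const unsigned long delay = RFKILL_OPS_DELAY_MS * HZ / 1000;

	return time_after(now, last + delay) ? 0 : delay;
}

int main(void)
{
	printf("just fired: wait %lu jiffies\n", rfkill_ratelimit(1005, 1000));
	printf("long ago:   wait %lu jiffies\n", rfkill_ratelimit(2000, 1000));
	return 0;
}
```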
|
| /net/ceph/ |
| messenger.c |
    611  con->delay = 0; /* reset backoff memory */  in ceph_con_open()
   1441  if (delay >= HZ)  in queue_con_delay()
   1442  delay = round_jiffies_relative(delay);  in queue_con_delay()
   1444  dout("%s %p %lu\n", __func__, con, delay);  in queue_con_delay()
   1506  ret = queue_con_delay(con, con->delay);  in con_backoff()
   1509  con, con->delay);  in con_backoff()
   1647  if (!con->delay) {  in con_fault()
   1648  con->delay = BASE_DELAY_INTERVAL;  in con_fault()
   1650  con->delay *= 2;  in con_fault()
   1651  if (con->delay > MAX_DELAY_INTERVAL)  in con_fault()
    [all …]
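con_fault() implements classic exponential backoff: start at BASE_DELAY_INTERVAL, double on every fault, clamp at MAX_DELAY_INTERVAL (and queue_con_delay() additionally rounds delays of a second or more to a jiffies boundary). A sketch of the progression, with hypothetical interval values; in the kernel they are jiffies constants:

```c
/* Model of the reconnect backoff in con_fault(): start at a base
 * interval, double per fault, clamp at a maximum. Interval values
 * below are assumptions, not the kernel's. */
#include <stdio.h>

#define BASE_DELAY_INTERVAL 1UL    /* assumed units */
#define MAX_DELAY_INTERVAL  64UL

static unsigned long next_backoff(unsigned long delay)
{
	if (!delay)
		return BASE_DELAY_INTERVAL;   /* first fault */
	delay *= 2;                           /* exponential growth */
	return delay > MAX_DELAY_INTERVAL ? MAX_DELAY_INTERVAL : delay;
}

int main(void)
{
	unsigned long d = 0;

	for (int fault = 1; fault <= 8; fault++) {
		d = next_backoff(d);
		printf("fault %d -> delay %lu\n", fault, d);
	}
	return 0;
}
```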
|
| mon_client.c |
    309  unsigned long delay;  in __schedule_delayed() local
    312  delay = CEPH_MONC_HUNT_INTERVAL * monc->hunt_mult;  in __schedule_delayed()
    314  delay = CEPH_MONC_PING_INTERVAL;  in __schedule_delayed()
    316  dout("__schedule_delayed after %lu\n", delay);  in __schedule_delayed()
    318  round_jiffies_relative(delay));  in __schedule_delayed()
|
| /net/sunrpc/xprtrdma/ |
| transport.c |
    477  unsigned long delay;  in xprt_rdma_connect() local
    481  delay = 0;  in xprt_rdma_connect()
    483  delay = xprt_reconnect_delay(xprt);  in xprt_rdma_connect()
    486  trace_xprtrdma_op_connect(r_xprt, delay);  in xprt_rdma_connect()
    487  queue_delayed_work(system_long_wq, &r_xprt->rx_connect_worker, delay);  in xprt_rdma_connect()
|
| /net/sched/ |
| sch_netem.c |
    556  s64 delay;  in netem_enqueue() local
    558  delay = tabledist(q->latency, q->jitter,  in netem_enqueue()
    593  delay -= last->time_to_send - now;  in netem_enqueue()
    594  delay = max_t(s64, 0, delay);  in netem_enqueue()
    598  delay += packet_time_ns(qdisc_pkt_len(skb), q);  in netem_enqueue()
    601  cb->time_to_send = now + delay;  in netem_enqueue()
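netem draws each packet's delay as a base latency plus distributed jitter (the kernel's tabledist() can follow an arbitrary loaded distribution), then clamps at zero after crediting time the packet already owes to the queue. A sketch with a plain uniform distribution standing in for tabledist():

```c
/* Sketch of the delay selection in netem_enqueue(): base latency plus
 * random jitter. tabledist() supports arbitrary distributions; this
 * stand-in uses a uniform one and is not the netem code. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* delay in ns: latency +/- jitter, uniformly distributed */
static int64_t pick_delay_ns(int64_t latency, int64_t jitter)
{
	if (jitter == 0)
		return latency;
	return latency + (rand() % (2 * jitter + 1)) - jitter;
}

int main(void)
{
	srand(42);
	/* 100 ms latency with 10 ms jitter */
	for (int i = 0; i < 3; i++)
		printf("%lld ns\n",
		       (long long)pick_delay_ns(100000000, 10000000));
	return 0;
}
```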
|
| sch_cbs.c |
    197  s64 delay;  in cbs_dequeue_soft() local
    199  delay = delay_from_credits(q->credits, q->idleslope);  in cbs_dequeue_soft()
    200  qdisc_watchdog_schedule_ns(&q->watchdog, now + delay);  in cbs_dequeue_soft()
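delay_from_credits() converts a credit deficit into a wait time: the deficit divided by idleslope, the rate at which credit accrues while the shaper is idle. A sketch under assumed units (credits in bytes, slope in bytes per second), not the in-tree helper:

```c
/* Sketch of the credit-based shaper wait used by cbs_dequeue_soft():
 * with a negative credit balance, wait deficit / idleslope before the
 * next dequeue. Units here are assumptions. */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000LL

static int64_t delay_from_credits(int64_t credits, int64_t idleslope_Bps)
{
	if (credits >= 0)
		return 0;                    /* may send immediately */
	/* deficit / rate, expressed in nanoseconds */
	return (-credits * NSEC_PER_SEC) / idleslope_Bps;
}

int main(void)
{
	/* 1500-byte deficit at 125000 B/s (1 Mbit/s) -> 12 ms */
	printf("%lld ns\n", (long long)delay_from_credits(-1500, 125000));
	return 0;
}
```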
|
| /net/tipc/ |
| crypto.c |
   1397  unsigned long delay;  in tipc_crypto_key_synch() local
   1417  get_random_bytes(&delay, 2);  in tipc_crypto_key_synch()
   1418  delay %= 5;  in tipc_crypto_key_synch()
   1419  delay = msecs_to_jiffies(500 * ++delay);  in tipc_crypto_key_synch()
   1420  if (queue_delayed_work(tx->wq, &rx->work, delay))  in tipc_crypto_key_synch()
   2360  unsigned long delay = msecs_to_jiffies(5000);  in tipc_crypto_work_rx() local
   2403  if (resched && queue_delayed_work(tx->wq, &rx->work, delay))  in tipc_crypto_work_rx()
   2418  unsigned long delay;  in tipc_crypto_rekeying_sched() local
   2430  delay = (now) ? 0 : tx->rekeying_intv * 60 * 1000;  in tipc_crypto_rekeying_sched()
   2431  queue_delayed_work(tx->wq, &tx->work, msecs_to_jiffies(delay));  in tipc_crypto_rekeying_sched()
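Source lines 1417–1419 pick a randomized key-synch backoff: two random bytes reduced mod 5 give 0–4, and (delay + 1) * 500 ms yields 0.5–2.5 s in half-second steps. A userspace model with rand() standing in for get_random_bytes():

```c
/* Model of the key-synch backoff in tipc_crypto_key_synch(): a random
 * value mod 5, plus one, times 500 ms. Hypothetical standalone code;
 * rand() is a crude stand-in for the kernel RNG. */
#include <stdio.h>
#include <stdlib.h>

static unsigned int key_synch_delay_ms(void)
{
	unsigned long delay = (unsigned long)(rand() & 0xffff); /* 2 random bytes */

	delay %= 5;                 /* 0..4 */
	return 500 * (delay + 1);   /* 500..2500 ms, in 500 ms steps */
}

int main(void)
{
	srand(7);
	for (int i = 0; i < 4; i++)
		printf("%u ms\n", key_synch_delay_ms());
	return 0;
}
```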
|
| /net/ipv6/ |
| mcast.c |
   1103  unsigned long tv = get_random_u32_below(delay);  in mld_ifc_start_work()
   1122  unsigned long tv = get_random_u32_below(delay);  in mld_dad_start_work()
   1153  unsigned long delay = resptime;  in igmp6_group_queried() local
   1164  delay = ma->mca_work.timer.expires - jiffies;  in igmp6_group_queried()
   1167  if (delay >= resptime)  in igmp6_group_queried()
   1168  delay = get_random_u32_below(resptime);  in igmp6_group_queried()
   1170  if (!mod_delayed_work(mld_wq, &ma->mca_work, delay))  in igmp6_group_queried()
   2612  unsigned long delay;  in igmp6_join_group() local
   2621  delay = get_random_u32_below(unsolicited_report_interval(ma->idev));  in igmp6_join_group()
   2625  delay = ma->mca_work.timer.expires - jiffies;  in igmp6_join_group()
    [all …]
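igmp6_group_queried() keeps an already-pending report if it is due sooner than the query's maximum response time; otherwise it picks a uniformly random delay below resptime, so hosts on a link don't answer a query in lockstep. A hedged standalone model of that scheduling decision:

```c
/* Model of the MLD response scheduling in igmp6_group_queried():
 * keep a sooner pending timer, else randomize below resptime.
 * Hypothetical standalone code; rand() stands in for the kernel RNG. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static uint32_t random_below(uint32_t ceiling)
{
	return ceiling ? (uint32_t)rand() % ceiling : 0;  /* crude stand-in */
}

static uint32_t schedule_report(uint32_t pending, int have_pending,
				uint32_t resptime)
{
	uint32_t delay = resptime;

	if (have_pending)
		delay = pending;             /* time left on the running timer */
	if (delay >= resptime)
		delay = random_below(resptime);
	return delay;
}

int main(void)
{
	srand(1);
	printf("no timer:     %u\n", schedule_report(0, 0, 1000));
	printf("sooner timer: %u\n", schedule_report(200, 1, 1000));
	return 0;
}
```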
|
| /net/802/ |
| garp.c |
    409  unsigned long delay;  in garp_join_timer_arm() local
    411  delay = get_random_u32_below(msecs_to_jiffies(garp_join_time));  in garp_join_timer_arm()
    412  mod_timer(&app->join_timer, jiffies + delay);  in garp_join_timer_arm()
|