| /tools/perf/util/ |
| A D | cputopo.c |
|   146  if (!tp)  in cpu_topology__delete()
|   158  free(tp);  in cpu_topology__delete()
|   287  tp = addr;  in cpu_topology__new()
|   312  return tp;  in cpu_topology__new()
|   406  tp = zalloc(sizeof(*tp) + sizeof(tp->nodes[0])*nr);  in numa_topology__new()
|   407  if (!tp)  in numa_topology__new()
|   424  return tp;  in numa_topology__new()
|   434  free(tp);  in numa_topology__delete()
|   480  tp = zalloc(sizeof(*tp) + sizeof(tp->nodes[0]) * nr);  in hybrid_topology__new()
|   481  if (!tp)  in hybrid_topology__new()
|   [all …]
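The numa_topology__new() and hybrid_topology__new() hits above show the allocation pattern behind most of these references: a header struct and its trailing flexible array are obtained with one zalloc() sized by the element count, and the matching __delete() later releases everything. A minimal standalone sketch of that pattern, assuming a made-up struct numa_topology_sketch and using calloc() where perf uses its zalloc() helper:

    #include <stdlib.h>

    struct numa_topology_sketch {
        unsigned int nr;
        struct { unsigned int node; char *cpus; } nodes[]; /* flexible array member */
    };

    /* One zeroed allocation covers the header and all nr array slots. */
    static struct numa_topology_sketch *topology_new(unsigned int nr)
    {
        struct numa_topology_sketch *tp;

        tp = calloc(1, sizeof(*tp) + sizeof(tp->nodes[0]) * nr);
        if (!tp)
            return NULL;
        tp->nr = nr;
        return tp;
    }

    /* Matching teardown: per-node strings first, then the single block. */
    static void topology_delete(struct numa_topology_sketch *tp)
    {
        unsigned int i;

        if (!tp)
            return;
        for (i = 0; i < tp->nr; i++)
            free(tp->nodes[i].cpus);
        free(tp);
    }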
|
| A D | cputopo.h |
|   65  void cpu_topology__delete(struct cpu_topology *tp);
|   73  void numa_topology__delete(struct numa_topology *tp);
|   76  void hybrid_topology__delete(struct hybrid_topology *tp);
|
| A D | probe-event.c |
|   728  tp->realname = tp->symbol;  in post_process_probe_trace_point()
|   732  if (!tp->symbol)  in post_process_probe_trace_point()
|   1991  tp->offset = 0;  in parse_probe_trace_command()
|   2219  if (!tp->module)  in synthesize_uprobe_trace_def()
|   2227  if (!tp->address && (!tp->symbol || strcmp(tp->symbol, "0x0")))  in synthesize_uprobe_trace_def()
|   2231  err = strbuf_addf(buf, "%s:0x%" PRIx64, tp->module, tp->address);  in synthesize_uprobe_trace_def()
|   2247  tp->module ? ":" : "", tp->address);  in synthesize_kprobe_trace_def()
|   2250  tp->module ? ":" : "", tp->symbol, tp->offset);  in synthesize_kprobe_trace_def()
|   3229  tp->offset = tp->address - reloc_sym->addr;  in find_probe_trace_events_from_map()
|   3332  if (asprintf(&tp->symbol, "0x%" PRIx64, tp->address) < 0)  in try_to_find_absolute_address()
|   [all …]
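The synthesize_uprobe_trace_def()/synthesize_kprobe_trace_def() hits format the probe location either as a raw "module:0xADDR" or as "[module:]symbol+offset". A hedged sketch of just that formatting decision; struct trace_point_sketch and format_probe_location() are illustrative stand-ins, not perf's actual types:

    #include <inttypes.h>
    #include <stdio.h>

    /* Hypothetical stand-in for perf's probe_trace_point fields used above. */
    struct trace_point_sketch {
        const char *module;  /* NULL for the core kernel */
        const char *symbol;  /* NULL when only an address is known */
        uint64_t address;
        unsigned long offset;
    };

    /* Render either "module:0xADDR" or "[module:]symbol+offset" into buf. */
    static int format_probe_location(char *buf, size_t len,
                                     const struct trace_point_sketch *tp)
    {
        if (!tp->symbol)
            return snprintf(buf, len, "%s%s0x%" PRIx64,
                            tp->module ? tp->module : "",
                            tp->module ? ":" : "", tp->address);
        return snprintf(buf, len, "%s%s%s+%lu",
                        tp->module ? tp->module : "",
                        tp->module ? ":" : "", tp->symbol, tp->offset);
    }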
|
| A D | event.c |
|   545  struct perf_record_text_poke_event *tp = &event->text_poke;  in perf_event__fprintf_text_poke() local
|   549  ret = fprintf(fp, " %" PRI_lx64 " ", tp->addr);  in perf_event__fprintf_text_poke()
|   554  al.map = maps__find(machine__kernel_maps(machine), tp->addr);  in perf_event__fprintf_text_poke()
|   556  al.addr = map__map_ip(al.map, tp->addr);  in perf_event__fprintf_text_poke()
|   563  ret += fprintf(fp, " old len %u new len %u\n", tp->old_len, tp->new_len);  in perf_event__fprintf_text_poke()
|   565  ret += binary__fprintf(tp->bytes, tp->old_len, 16, text_poke_printer,  in perf_event__fprintf_text_poke()
|   568  ret += binary__fprintf(tp->bytes + tp->old_len, tp->new_len, 16,  in perf_event__fprintf_text_poke()
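In a text-poke record the old and new instruction bytes sit back to back in tp->bytes, old_len bytes first, so the two binary__fprintf() calls above dump the two halves of one buffer. A small sketch of that split, assuming a plain hex printer in place of perf's binary__fprintf():

    #include <stdio.h>

    /* Hex-dump len bytes, 16 per row. */
    static void hex_dump(const unsigned char *buf, unsigned int len)
    {
        for (unsigned int i = 0; i < len; i++)
            printf("%02x%c", buf[i], (i % 16 == 15 || i == len - 1) ? '\n' : ' ');
    }

    /* bytes holds old_len overwritten bytes immediately followed by new_len new bytes. */
    static void dump_text_poke(const unsigned char *bytes,
                               unsigned int old_len, unsigned int new_len)
    {
        printf("old len %u new len %u\n", old_len, new_len);
        hex_dump(bytes, old_len);           /* bytes that were overwritten */
        hex_dump(bytes + old_len, new_len); /* bytes that replaced them */
    }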
|
| A D | header.c |
|   591  struct cpu_topology *tp;  in write_cpu_topology() local
|   595  tp = cpu_topology__new();  in write_cpu_topology()
|   596  if (!tp)  in write_cpu_topology()
|   599  ret = do_write(ff, &tp->package_cpus_lists, sizeof(tp->package_cpus_lists));  in write_cpu_topology()
|   608  ret = do_write(ff, &tp->core_cpus_lists, sizeof(tp->core_cpus_lists));  in write_cpu_topology()
|   633  if (!tp->die_cpus_lists)  in write_cpu_topology()
|   636  ret = do_write(ff, &tp->die_cpus_lists, sizeof(tp->die_cpus_lists));  in write_cpu_topology()
|   654  cpu_topology__delete(tp);  in write_cpu_topology()
|   692  struct numa_topology *tp;  in write_numa_topology() local
|   697  if (!tp)  in write_numa_topology()
|   [all …]
|
| /tools/testing/selftests/bpf/progs/ |
| A D | bpf_cc_cubic.c |
|   63  if (tp->snd_cwnd < tp->snd_ssthresh / 2)  in tcp_update_pacing_rate()
|   68  rate *= max(tp->snd_cwnd, tp->packets_out);  in tcp_update_pacing_rate()
|   70  if (tp->srtt_us)  in tcp_update_pacing_rate()
|   79  struct tcp_sock *tp = tcp_sk(sk);  in tcp_cwnd_reduction() local
|   81  __u32 pkts_in_flight = tp->packets_out - (tp->sacked_out + tp->lost_out) + tp->retrans_out;  in tcp_cwnd_reduction()
|   91  (__u64)tp->snd_ssthresh * prr_delivered + tp->prior_cwnd - 1;  in tcp_cwnd_reduction()
|   92  sndcnt = (__u32)div64_u64(dividend, (__u64)tp->prior_cwnd) - tp->prr_out;  in tcp_cwnd_reduction()
|   129  struct tcp_sock *tp = tcp_sk(sk);  in BPF_PROG() local
|   136  if (!before(tp->snd_una, tp->high_seq)) {  in BPF_PROG()
|   140  tp->snd_cwnd = tp->snd_ssthresh;  in BPF_PROG()
|   [all …]
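The tcp_cwnd_reduction() hits carry the Proportional Rate Reduction arithmetic: segments in flight are packets_out minus the SACKed and lost ones plus retransmissions, and the send quota is the rounded-up share ssthresh * prr_delivered / prior_cwnd minus what has already gone out during recovery. A plain-C restatement of just that arithmetic (helper names here are illustrative, not the BPF program's):

    #include <stdint.h>

    /* Segments still considered to be in the network. */
    static uint32_t packets_in_flight(uint32_t packets_out, uint32_t sacked_out,
                                      uint32_t lost_out, uint32_t retrans_out)
    {
        return packets_out - (sacked_out + lost_out) + retrans_out;
    }

    /*
     * PRR send quota: ceil(ssthresh * prr_delivered / prior_cwnd) - prr_out,
     * pacing transmissions so cwnd converges on ssthresh over the recovery.
     */
    static uint32_t prr_sndcnt(uint32_t snd_ssthresh, uint32_t prr_delivered,
                               uint32_t prior_cwnd, uint32_t prr_out)
    {
        uint64_t dividend = (uint64_t)snd_ssthresh * prr_delivered + prior_cwnd - 1;

        return (uint32_t)(dividend / prior_cwnd) - prr_out;
    }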
|
| A D | tcp_ca_write_sk_pacing.c |
|   13  static unsigned int tcp_left_out(const struct tcp_sock *tp)  in tcp_left_out() argument
|   15  return tp->sacked_out + tp->lost_out;  in tcp_left_out()
|   18  static unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)  in tcp_packets_in_flight() argument
|   20  return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;  in tcp_packets_in_flight()
|   38  struct tcp_sock *tp = tcp_sk(sk);  in BPF_PROG() local
|   40  ((tp->snd_cwnd * tp->mss_cache * USEC_PER_SEC) << 3) /  in BPF_PROG()
|   41  (tp->srtt_us ?: 1U << 3);  in BPF_PROG()
|   43  tp->app_limited = (tp->delivered + tcp_packets_in_flight(tp)) ?: 1;  in BPF_PROG()
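Lines 40-41 compute a pacing rate in bytes per second: one cwnd of mss-sized segments per smoothed RTT, where srtt_us is stored left-shifted by 3 (eighths of a microsecond), hence the matching << 3 on the numerator and the 1U << 3 fallback. A hedged plain-C restatement of that formula; the function and macro names are illustrative:

    #include <stdint.h>

    #define USEC_PER_SEC_SKETCH 1000000ULL

    /*
     * Bytes per second needed to push snd_cwnd segments of mss_cache bytes
     * in one smoothed RTT. srtt_us is kept in 1/8 microsecond units, so the
     * numerator is shifted left by 3 to match; an unmeasured RTT falls back
     * to 1 us (1 << 3 in eighths).
     */
    static uint64_t pacing_rate_bps(uint32_t snd_cwnd, uint32_t mss_cache,
                                    uint32_t srtt_us)
    {
        uint64_t numer = ((uint64_t)snd_cwnd * mss_cache * USEC_PER_SEC_SKETCH) << 3;

        return numer / (srtt_us ? srtt_us : (1U << 3));
    }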
|
| A D | bpf_dctcp.c |
|   61  ca->next_seq = tp->snd_nxt;  in dctcp_reset()
|   63  ca->old_delivered = tp->delivered;  in dctcp_reset()
|   101  ca->prior_rcv_nxt = tp->rcv_nxt;  in BPF_PROG()
|   111  dctcp_reset(tp, ca);  in BPF_PROG()
|   118  struct tcp_sock *tp = tcp_sk(sk);  in BPF_PROG() local
|   120  ca->loss_cwnd = tp->snd_cwnd;  in BPF_PROG()
|   121  return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->dctcp_alpha) >> 11U), 2U);  in BPF_PROG()
|   150  dctcp_reset(tp, ca);  in BPF_PROG()
|   157  struct tcp_sock *tp = tcp_sk(sk);  in dctcp_react_to_loss() local
|   159  ca->loss_cwnd = tp->snd_cwnd;  in dctcp_react_to_loss()
|   [all …]
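Line 121 is the DCTCP-style window cut: reduce snd_cwnd by cwnd * alpha / 2048, where alpha is the fixed-point congestion estimate (capped at 1024, at which point the cut is the classic halving), and never drop below 2 segments. A plain-C restatement of that rule; dctcp_ssthresh_sketch() is an illustrative name:

    #include <stdint.h>

    /*
     * Cut cwnd in proportion to the ECN-derived estimate alpha and floor the
     * result at 2 segments, as in the snippet above.
     */
    static uint32_t dctcp_ssthresh_sketch(uint32_t snd_cwnd, uint32_t dctcp_alpha)
    {
        uint32_t cut = (snd_cwnd * dctcp_alpha) >> 11U;
        uint32_t ssthresh = snd_cwnd - cut;

        return ssthresh > 2U ? ssthresh : 2U;
    }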
|
| A D | bpf_iter_setsockopt.c |
|   11  tp = NULL; \
|   13  tp = bpf_skc_to_tcp_sock(_skc); \
|   14  sk = (struct sock *)tp; \
|   16  tp; \
|   43  struct tcp_sock *tp;  in change_tcp_cc() local
|   57  if (bpf_getsockopt(tp, SOL_TCP, TCP_CONGESTION,  in change_tcp_cc()
|   67  bpf_setsockopt(tp, SOL_TCP, TCP_CONGESTION, dctcp_cc, sizeof(dctcp_cc));  in change_tcp_cc()
|
| A D | vrf_socket_lookup.c |
|   21  struct bpf_sock_tuple *tp;  in socket_lookup() local
|   36  tp = (struct bpf_sock_tuple *)&iph->saddr;  in socket_lookup()
|   37  tplen = sizeof(tp->ipv4);  in socket_lookup()
|   38  if ((void *)tp + tplen > data_end)  in socket_lookup()
|   44  sk = bpf_skc_lookup_tcp(ctx, tp, tplen, CUR_NS, 0);  in socket_lookup()
|   46  sk = bpf_sk_lookup_tcp(ctx, tp, tplen, CUR_NS, 0);  in socket_lookup()
|   49  sk = bpf_sk_lookup_udp(ctx, tp, tplen, CUR_NS, 0);  in socket_lookup()
|
| A D | bpf_iter_tcp4.c |
|   74  static int dump_tcp_sock(struct seq_file *seq, struct tcp_sock *tp,  in dump_tcp_sock() argument
|   88  icsk = &tp->inet_conn;  in dump_tcp_sock()
|   118  rx_queue = tp->rcv_nxt - tp->copied_seq;  in dump_tcp_sock()
|   127  tp->write_seq - tp->snd_una, rx_queue,  in dump_tcp_sock()
|   135  tp,  in dump_tcp_sock()
|   139  tp->snd_cwnd,  in dump_tcp_sock()
|   141  : (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)  in dump_tcp_sock()
|   202  struct tcp_sock *tp;  in dump_tcp4() local
|   220  tp = bpf_skc_to_tcp_sock(sk_common);  in dump_tcp4()
|   221  if (tp)  in dump_tcp4()
|   [all …]
|
| A D | test_btf_skc_cls_ingress.c |
|   24  struct tcphdr *th, struct tcp_sock *tp,  in test_syncookie_helper() argument
|   43  mss_cookie = bpf_tcp_gen_syncookie(tp, iphdr, iphdr_size,  in test_syncookie_helper()
|   54  int ret = bpf_tcp_check_syncookie(tp, iphdr, iphdr_size,  in test_syncookie_helper()
|   149  struct tcp_sock *tp;  in handle_ip_tcp() local
|   151  tp = bpf_skc_to_tcp_sock(bpf_skc);  in handle_ip_tcp()
|   152  if (!tp) {  in handle_ip_tcp()
|   157  if (bpf_sk_assign(skb, tp, 0)) {  in handle_ip_tcp()
|   162  listen_tp_sport = tp->inet_conn.icsk_inet.sk.__sk_common.skc_num;  in handle_ip_tcp()
|   164  test_syncookie_helper(iphdr, iphdr_size, th, tp, skb);  in handle_ip_tcp()
|   165  bpf_sk_release(tp);  in handle_ip_tcp()
|
| A D | bpf_cubic.c |
|   171  struct tcp_sock *tp = tcp_sk(sk);  in bictcp_hystart_reset() local
|   175  ca->end_seq = tp->snd_nxt;  in bictcp_hystart_reset()
|   388  struct tcp_sock *tp = tcp_sk(sk);  in BPF_PROG() local
|   394  if (tcp_in_slow_start(tp)) {  in BPF_PROG()
|   397  acked = tcp_slow_start(tp, acked);  in BPF_PROG()
|   401  bictcp_update(ca, tp->snd_cwnd, acked);  in BPF_PROG()
|   402  tcp_cong_avoid_ai(tp, ca->cnt, acked);  in BPF_PROG()
|   418  ca->last_max_cwnd = tp->snd_cwnd;  in BPF_PROG()
|   456  struct tcp_sock *tp = tcp_sk(sk);  in hystart_update() local
|   479  tp->snd_ssthresh = tp->snd_cwnd;  in hystart_update()
|   [all …]
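The cong_avoid hits at lines 394-402 follow the usual split: while cwnd is below ssthresh, grow it by the newly acked segments (slow start) and pass any acks left over after reaching ssthresh on to additive increase. A hedged plain-C sketch of the slow-start step only; the snd_cwnd_clamp cap and the tcp_cong_avoid_ai() bookkeeping are omitted, and the names are illustrative:

    #include <stdint.h>

    struct cwnd_state_sketch {
        uint32_t snd_cwnd;
        uint32_t snd_ssthresh;
    };

    /*
     * Exponential growth phase: add one segment per newly acked segment,
     * stopping at ssthresh. Returns the acks not consumed here so the
     * caller can feed them into its additive-increase phase.
     */
    static uint32_t slow_start_sketch(struct cwnd_state_sketch *tp, uint32_t acked)
    {
        uint32_t cwnd = tp->snd_cwnd + acked;

        if (cwnd > tp->snd_ssthresh)
            cwnd = tp->snd_ssthresh;
        acked -= cwnd - tp->snd_cwnd;
        tp->snd_cwnd = cwnd;
        return acked;
    }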
|
| A D | bpf_iter_tcp6.c |
|   88  icsk = &tp->tcp.inet_conn;  in dump_tcp6_sock()
|   118  rx_queue = tp->tcp.rcv_nxt - tp->tcp.copied_seq;  in dump_tcp6_sock()
|   131  tp->tcp.write_seq - tp->tcp.snd_una, rx_queue,  in dump_tcp6_sock()
|   139  tp,  in dump_tcp6_sock()
|   143  tp->tcp.snd_cwnd,  in dump_tcp6_sock()
|   145  : (tcp_in_initial_slowstart(&tp->tcp) ? -1  in dump_tcp6_sock()
|   146  : tp->tcp.snd_ssthresh)  in dump_tcp6_sock()
|   218  struct tcp6_sock *tp;  in dump_tcp6() local
|   236  tp = bpf_skc_to_tcp6_sock(sk_common);  in dump_tcp6()
|   237  if (tp)  in dump_tcp6()
|   [all …]
|
| A D | bpf_tracing_net.h |
|   164  static inline bool tcp_in_slow_start(const struct tcp_sock *tp)  in tcp_in_slow_start() argument
|   166  return tp->snd_cwnd < tp->snd_ssthresh;  in tcp_in_slow_start()
|   171  const struct tcp_sock *tp = tcp_sk(sk);  in tcp_is_cwnd_limited() local
|   174  if (tcp_in_slow_start(tp))  in tcp_is_cwnd_limited()
|   175  return tp->snd_cwnd < 2 * tp->max_packets_out;  in tcp_is_cwnd_limited()
|   177  return !!BPF_CORE_READ_BITFIELD(tp, is_cwnd_limited);  in tcp_is_cwnd_limited()
|
| A D | test_tcpbpf_kern.c |
|   20  struct tcp_sock *tp;  in get_tp_window_clamp() local
|   25  tp = bpf_skc_to_tcp_sock(sk);  in get_tp_window_clamp()
|   26  if (!tp)  in get_tp_window_clamp()
|   28  return tp->window_clamp;  in get_tp_window_clamp()
|
| A D | test_sock_fields.c |
|   129  struct bpf_tcp_sock *tp, *tp_ret;  in egress_read_sock_fields() local
|   169  tp = bpf_tcp_sock(sk);  in egress_read_sock_fields()
|   170  if (!tp)  in egress_read_sock_fields()
|   174  tpcpy(tp_ret, tp);  in egress_read_sock_fields()
|   223  struct bpf_tcp_sock *tp;  in ingress_read_sock_fields() local
|   247  tp = bpf_tcp_sock(sk);  in ingress_read_sock_fields()
|   248  if (!tp)  in ingress_read_sock_fields()
|   252  tpcpy(&listen_tp, tp);  in ingress_read_sock_fields()
|
| /tools/include/nolibc/sys/ |
| A D | time.h |
|   16  static int sys_clock_gettime(clockid_t clockid, struct timespec *tp);
|   30  struct timespec tp;  in sys_gettimeofday()
|   33  ret = sys_clock_gettime(CLOCK_REALTIME, &tp);  in sys_gettimeofday()
|   35  tv->tv_sec = tp.tv_sec;  in sys_gettimeofday()
|   36  tv->tv_usec = tp.tv_nsec / 1000;  in sys_gettimeofday()
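nolibc's sys_gettimeofday() is built on clock_gettime(CLOCK_REALTIME) and simply converts nanoseconds to microseconds. A standalone sketch of the same conversion using the standard libc clock_gettime(); the error handling is reduced to passing the return code through:

    #include <sys/time.h>
    #include <time.h>

    /* gettimeofday() emulated on top of clock_gettime(CLOCK_REALTIME). */
    static int gettimeofday_sketch(struct timeval *tv)
    {
        struct timespec tp;
        int ret = clock_gettime(CLOCK_REALTIME, &tp);

        if (ret == 0 && tv) {
            tv->tv_sec = tp.tv_sec;
            tv->tv_usec = tp.tv_nsec / 1000; /* ns -> us */
        }
        return ret;
    }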
|
| /tools/testing/selftests/timens/ |
| A D | gettime_perf.c |
|   49  struct timespec tp, start;  in test() local
|   54  tp = start;  in test()
|   55  for (tp = start; start.tv_sec + timeout > tp.tv_sec ||  in test()
|   56  (start.tv_sec + timeout == tp.tv_sec &&  in test()
|   57  start.tv_nsec > tp.tv_nsec); i++) {  in test()
|   58  vdso_clock_gettime(clockid, &tp);  in test()
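The loop at lines 55-58 is a throughput benchmark: it keeps calling the (vDSO) clock_gettime() until `timeout` seconds have elapsed since `start`, counting iterations along the way. A hedged standalone sketch of the same measurement using the plain libc clock_gettime():

    #include <stdio.h>
    #include <time.h>

    /* Count how many clock_gettime() calls fit into `timeout` seconds. */
    static unsigned long long calls_per_timeout(clockid_t clockid, time_t timeout)
    {
        struct timespec start, tp;
        unsigned long long i = 0;

        clock_gettime(clockid, &start);
        for (tp = start; start.tv_sec + timeout > tp.tv_sec ||
                         (start.tv_sec + timeout == tp.tv_sec &&
                          start.tv_nsec > tp.tv_nsec); i++)
            clock_gettime(clockid, &tp);

        return i;
    }

    int main(void)
    {
        printf("%llu calls in 2s\n", calls_per_timeout(CLOCK_MONOTONIC, 2));
        return 0;
    }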
|
| /tools/testing/selftests/bpf/prog_tests/ |
| A D | sock_fields.c |
|   83  static void print_tp(const struct bpf_tcp_sock *tp, const char *prefix)  in print_tp() argument
|   92  tp->snd_cwnd, tp->srtt_us, tp->rtt_min, tp->snd_ssthresh,  in print_tp()
|   93  tp->rcv_nxt, tp->snd_nxt, tp->snd_una, tp->mss_cache,  in print_tp()
|   94  tp->ecn_flags, tp->rate_delivered, tp->rate_interval_us,  in print_tp()
|   95  tp->packets_out, tp->retrans_out, tp->total_retrans,  in print_tp()
|   96  tp->segs_in, tp->data_segs_in, tp->segs_out,  in print_tp()
|   97  tp->data_segs_out, tp->lost_out, tp->sacked_out,  in print_tp()
|   98  tp->bytes_received, tp->bytes_acked);  in print_tp()
|
| /tools/include/nolibc/ |
| A D | time.h |
|   71  return my_syscall2(__NR_clock_gettime, clockid, tp);  in sys_clock_gettime()
|   77  if (tp)  in sys_clock_gettime()
|   78  __nolibc_timespec_kernel_to_user(&ktp, tp);  in sys_clock_gettime()
|   81  return __nolibc_enosys(__func__, clockid, tp);  in sys_clock_gettime()
|   86  int clock_gettime(clockid_t clockid, struct timespec *tp)  in clock_gettime() argument
|   88  return __sysret(sys_clock_gettime(clockid, tp));  in clock_gettime()
|   95  return my_syscall2(__NR_clock_settime, clockid, tp);  in sys_clock_settime()
|   99  __nolibc_timespec_user_to_kernel(tp, &ktp);  in sys_clock_settime()
|   102  return __nolibc_enosys(__func__, clockid, tp);  in sys_clock_settime()
|   107  int clock_settime(clockid_t clockid, struct timespec *tp)  in clock_settime() argument
|   [all …]
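These hits show nolibc's layering: sys_clock_gettime() issues the raw syscall (returning a negative errno like the kernel does), and the public clock_gettime() runs that result through __sysret() to set errno and return -1. A hedged sketch of the same layering with glibc's generic syscall() standing in for nolibc's my_syscall2()/__sysret() helpers:

    #include <errno.h>
    #include <sys/syscall.h>
    #include <time.h>
    #include <unistd.h>

    /* Raw syscall layer: return -errno on failure, as the kernel does. */
    static long sys_clock_gettime_sketch(clockid_t clockid, struct timespec *tp)
    {
        long ret = syscall(SYS_clock_gettime, clockid, tp);

        return ret < 0 ? -errno : ret;
    }

    /* libc-style wrapper: map a negative return into errno and -1. */
    static int clock_gettime_sketch(clockid_t clockid, struct timespec *tp)
    {
        long ret = sys_clock_gettime_sketch(clockid, tp);

        if (ret < 0) {
            errno = (int)-ret;
            return -1;
        }
        return (int)ret;
    }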
|
| /tools/testing/selftests/bpf/benchs/ |
| A D | run_bench_trigger.sh | 8 rawtp tp \
|
| /tools/power/pm-graph/ |
| A D | bootgraph.py |
|   303  tp = aslib.TestProps()
|   312  if re.match(tp.stampfmt, line):
|   313  tp.stamp = line
|   315  elif re.match(tp.sysinfofmt, line):
|   316  tp.sysinfo = line
|   319  tp.cmdline = line
|   322  tp.kparams = line
|   368  if tp.stamp:
|   370  tp.parseStamp(data, sysvals)
|   392  tp = aslib.TestProps()
|   [all …]
|
| /tools/net/ynl/pyynl/ |
| A D | ethtool.py |
|   53  field, name, tp = spec
|   56  tp = 'int'
|   59  if tp == 'yn':
|   61  elif tp == 'bool' or isinstance(value, bool):
|
| /tools/include/uapi/linux/ |
| A D | tcp.h | 70 #define tcp_flag_word(tp) ( ((union tcp_word_hdr *)(tp))->words [3]) argument
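tcp_flag_word() views the TCP header as an array of 32-bit words and picks word 3 (bytes 12-15: data offset, flags, window), so any flag test becomes one masked compare against the big-endian TCP_FLAG_* constants defined alongside it. A short usage sketch built on that uapi header; tcp_syn_set() is an illustrative helper name:

    #include <linux/tcp.h>   /* struct tcphdr, tcp_flag_word(), TCP_FLAG_SYN */
    #include <stdbool.h>

    /* One 32-bit masked compare on word 3 of the header tests a flag bit. */
    static bool tcp_syn_set(const struct tcphdr *th)
    {
        return !!(tcp_flag_word(th) & TCP_FLAG_SYN);
    }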
|