/tools/thermal/lib/

mainloop.c
    26   struct mainloop_data *md;    in mainloop() local
    45   md = events[i].data.ptr;    in mainloop()
    47   if (md->cb(md->fd, md->data) > 0)    in mainloop()
    59   struct mainloop_data *md;    in mainloop_add() local
    61   md = malloc(sizeof(*md));    in mainloop_add()
    62   if (!md)    in mainloop_add()
    65   md->data = data;    in mainloop_add()
    66   md->cb = cb;    in mainloop_add()
    67   md->fd = fd;    in mainloop_add()
    69   ev.data.ptr = md;    in mainloop_add()
    [all …]
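The matches above outline the library's whole pattern: an epoll-driven event loop in which each registered fd carries a heap-allocated descriptor bundling the fd, a callback, and user data. A minimal sketch, assuming simplified signatures inferred from the matched lines (the real tools/thermal/lib API may differ):

    #include <stdlib.h>
    #include <sys/epoll.h>

    #define MAX_EVENTS 16

    struct mainloop_data {
        int (*cb)(int fd, void *data);  /* returning > 0 ends the loop */
        void *data;
        int fd;
    };

    static int epfd;  /* assumed: set up elsewhere via epoll_create1(0) */

    int mainloop_add(int fd, int (*cb)(int, void *), void *data)
    {
        struct epoll_event ev = { .events = EPOLLIN };
        struct mainloop_data *md;

        md = malloc(sizeof(*md));
        if (!md)
            return -1;

        md->data = data;
        md->cb = cb;
        md->fd = fd;

        ev.data.ptr = md;  /* epoll hands the descriptor back on wakeup */

        return epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &ev);
    }

    int mainloop(int timeout_ms)
    {
        struct epoll_event events[MAX_EVENTS];

        for (;;) {
            int i, nfds = epoll_wait(epfd, events, MAX_EVENTS, timeout_ms);

            if (nfds < 0)
                return -1;

            for (i = 0; i < nfds; i++) {
                struct mainloop_data *md = events[i].data.ptr;

                if (md->cb(md->fd, md->data) > 0)
                    return 0;  /* a callback asked to stop */
            }
        }
    }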
|
/tools/perf/util/

mmap.c
    332  size = md->core.end - md->core.start;    in perf_mmap__push()
    334  if ((md->core.start & md->core.mask) + size != (md->core.end & md->core.mask)) {    in perf_mmap__push()
    335  buf = &data[md->core.start & md->core.mask];    in perf_mmap__push()
    336  size = md->core.mask + 1 - (md->core.start & md->core.mask);    in perf_mmap__push()
    337  md->core.start += size;    in perf_mmap__push()
    345  buf = &data[md->core.start & md->core.mask];    in perf_mmap__push()
    346  size = md->core.end - md->core.start;    in perf_mmap__push()
    347  md->core.start += size;    in perf_mmap__push()
    349  if (push(md, to, buf, size) < 0) {    in perf_mmap__push()
    354  md->core.prev = head;    in perf_mmap__push()
    [all …]
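What perf_mmap__push() is doing in the lines above is the classic power-of-two ring-buffer drain: offsets are mapped into the ring with `off & mask`, and when the [start, end) window wraps past the top of the ring the data has to be pushed in two chunks. A minimal sketch, assuming a simplified struct and push() callback modelled on the matched lines (the real code also handles overwrite mode and buffer reference counting):

    #include <stdint.h>

    struct ring {
        uint64_t start;        /* absolute read position */
        uint64_t end;          /* snapshot of the kernel's head */
        uint64_t mask;         /* ring size - 1; size is a power of two */
        unsigned char *data;
    };

    static int push_range(struct ring *r, void *to,
                          int (*push)(void *to, void *buf, uint64_t size))
    {
        uint64_t size = r->end - r->start;
        unsigned char *buf;

        /* Does [start, end) cross the top of the ring? */
        if ((r->start & r->mask) + size != (r->end & r->mask)) {
            buf = &r->data[r->start & r->mask];
            size = r->mask + 1 - (r->start & r->mask);
            r->start += size;

            if (push(to, buf, size) < 0)
                return -1;
        }

        /* The remaining (or only) contiguous chunk. */
        buf = &r->data[r->start & r->mask];
        size = r->end - r->start;
        r->start += size;

        return push(to, buf, size);
    }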
|
mmap.h
    57   int perf_mmap__push(struct mmap *md, void *to,
|
symbol.c
    1274  struct kcore_mapfn_data *md = data;    in kcore_mapfn() local
    1280  list_node->map = map__new2(start, md->dso);    in kcore_mapfn()
    1289  list_add(&list_node->node, &md->maps);    in kcore_mapfn()
    1309  struct kcore_mapfn_data md;    in dso__load_kcore() local
    1334  md.dso = dso;    in dso__load_kcore()
    1335  INIT_LIST_HEAD(&md.maps);    in dso__load_kcore()
    1351  if (list_empty(&md.maps)) {    in dso__load_kcore()
    1365  list_for_each_entry(new_node, &md.maps, node) {    in dso__load_kcore()
    1407  while (!list_empty(&md.maps)) {    in dso__load_kcore()
    1462  while (!list_empty(&md.maps)) {    in dso__load_kcore()
    [all …]
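The symbol.c matches show a collect-then-consume pattern: kcore_mapfn() is invoked once per kcore map and links a new node onto a list owned by the caller's on-stack kcore_mapfn_data, which dso__load_kcore() later walks and drains. A minimal sketch with a plain singly-linked list standing in for the kernel-style list_head (names and the callback signature are assumptions):

    #include <stdlib.h>

    struct map_node {
        struct map_node *next;
        unsigned long long start, len;
    };

    struct kcore_mapfn_data {
        struct map_node *maps;  /* head of the collected maps */
    };

    static int kcore_mapfn(unsigned long long start, unsigned long long len,
                           void *data)
    {
        struct kcore_mapfn_data *md = data;
        struct map_node *node = malloc(sizeof(*node));

        if (!node)
            return -1;

        node->start = start;
        node->len = len;
        node->next = md->maps;  /* prepend, like list_add() */
        md->maps = node;
        return 0;
    }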
|
python.c
    1188  struct mmap *md = &evlist->mmap[i];    in get_md() local
    1190  if (md->core.cpu.cpu == cpu)    in get_md()
    1191  return md;    in get_md()
    1204  struct mmap *md;    in pyrf_evlist__read_on_cpu() local
    1211  md = get_md(evlist, cpu);    in pyrf_evlist__read_on_cpu()
    1212  if (!md)    in pyrf_evlist__read_on_cpu()
    1215  if (perf_mmap__read_init(&md->core) < 0)    in pyrf_evlist__read_on_cpu()
    1218  event = perf_mmap__read_event(&md->core);    in pyrf_evlist__read_on_cpu()
    1236  perf_mmap__consume(&md->core);    in pyrf_evlist__read_on_cpu()
|
/tools/testing/selftests/bpf/progs/

test_tunnel_kern.c
    179  __builtin_memset(&md, 0, sizeof(md));    in erspan_set_tunnel()
    181  md.version = 1;    in erspan_set_tunnel()
    187  md.version = 2;    in erspan_set_tunnel()
    193  ret = bpf_skb_set_tunnel_opt(skb, &md, sizeof(md));    in erspan_set_tunnel()
    215  ret = bpf_skb_get_tunnel_opt(skb, &md, sizeof(md));    in erspan_get_tunnel()
    258  __builtin_memset(&md, 0, sizeof(md));    in ip4ip6erspan_set_tunnel()
    273  ret = bpf_skb_set_tunnel_opt(skb, &md, sizeof(md));    in ip4ip6erspan_set_tunnel()
    296  ret = bpf_skb_get_tunnel_opt(skb, &md, sizeof(md));    in ip4ip6erspan_get_tunnel()
    349  ret = bpf_skb_set_tunnel_opt(skb, &md, sizeof(md));    in vxlan_set_tunnel_dst()
    388  ret = bpf_skb_set_tunnel_opt(skb, &md, sizeof(md));    in vxlan_set_tunnel_src()
    [all …]
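The selftest lines follow one recipe for every tunnel type: zero a metadata struct, fill in the variant fields, and attach it to the packet with bpf_skb_set_tunnel_opt() (its mirror image, bpf_skb_get_tunnel_opt(), reads it back on the receive side). A minimal sketch of the ERSPAN case, assuming a simplified stand-in for the kernel's erspan metadata layout; in the real test the tunnel key is set first with bpf_skb_set_tunnel_key():

    #include <linux/bpf.h>
    #include <linux/pkt_cls.h>
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_endian.h>

    struct erspan_md {           /* simplified stand-in, not the uapi layout */
        __u32 index;
        __u8 version;
    };

    SEC("tc")
    int erspan_set_tunnel(struct __sk_buff *skb)
    {
        struct erspan_md md;

        __builtin_memset(&md, 0, sizeof(md));
        md.version = 1;                  /* ERSPAN type II */
        md.index = bpf_htonl(123);

        if (bpf_skb_set_tunnel_opt(skb, &md, sizeof(md)) < 0)
            return TC_ACT_SHOT;

        return TC_ACT_OK;
    }

    char _license[] SEC("license") = "GPL";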
|
/tools/lib/perf/

mmap.c
    85   ring_buffer_write_tail(md->base, tail);    in perf_mmap__write_tail()
    149  u64 head = perf_mmap__read_head(md);    in __perf_mmap__read_init()
    150  u64 old = md->prev;    in __perf_mmap__read_init()
    154  md->start = md->overwrite ? head : old;    in __perf_mmap__read_init()
    155  md->end = md->overwrite ? old : head;    in __perf_mmap__read_init()
    157  if ((md->end - md->start) < md->flush)    in __perf_mmap__read_init()
    160  size = md->end - md->start;    in __perf_mmap__read_init()
    162  if (!md->overwrite) {    in __perf_mmap__read_init()
    165  md->prev = head;    in __perf_mmap__read_init()
    166  perf_mmap__consume(md);    in __perf_mmap__read_init()
    [all …]
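The __perf_mmap__read_init() lines capture the core of libperf's windowing logic: in normal mode the reader walks from its previous position up to the kernel's head, while in overwrite mode the kernel writes backwards over old data and the window flips. A minimal sketch of just that computation, with a simplified struct assumed from the matched field names:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdint.h>

    struct pmmap {
        uint64_t prev;        /* where the last read pass stopped */
        uint64_t start, end;  /* window for the current read pass */
        uint64_t flush;       /* minimum bytes before reporting data */
        bool overwrite;
    };

    /* Returns 0 when a window is ready, -EAGAIN when below the flush level. */
    static int read_init(struct pmmap *md, uint64_t head)
    {
        uint64_t old = md->prev;

        md->start = md->overwrite ? head : old;
        md->end = md->overwrite ? old : head;

        if ((md->end - md->start) < md->flush)
            return -EAGAIN;

        return 0;
    }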
|
/tools/perf/tests/

sw-clock.c
    48   struct mmap *md;    in __test__sw_clock_freq() local
    100  md = &evlist->mmap[0];    in __test__sw_clock_freq()
    101  if (perf_mmap__read_init(&md->core) < 0)    in __test__sw_clock_freq()
    104  while ((event = perf_mmap__read_event(&md->core)) != NULL) {    in __test__sw_clock_freq()
    121  perf_mmap__consume(&md->core);    in __test__sw_clock_freq()
    124  perf_mmap__read_done(&md->core);    in __test__sw_clock_freq()
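Nearly every test entry below this point (and the perf builtins further down) repeats the same canonical libperf read loop: initialise the ring-buffer window, drain events one at a time, consume each, then close the window. Collected in one place, assuming a struct perf_mmap obtained from an mmap'ed evlist:

    #include <perf/mmap.h>
    #include <perf/event.h>

    static void drain(struct perf_mmap *map)
    {
        union perf_event *event;

        if (perf_mmap__read_init(map) < 0)
            return;  /* no data buffered yet */

        while ((event = perf_mmap__read_event(map)) != NULL) {
            /* ... inspect event->header.type and payload here ... */
            perf_mmap__consume(map);
        }

        perf_mmap__read_done(map);
    }

The tests wrap these calls around &md->core because perf's struct mmap embeds the libperf struct perf_mmap as its core member.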
|
openat-syscall-tp-fields.c
    95   struct mmap *md;    in test__syscall_openat_tp_fields() local
    97   md = &evlist->mmap[i];    in test__syscall_openat_tp_fields()
    98   if (perf_mmap__read_init(&md->core) < 0)    in test__syscall_openat_tp_fields()
    101  while ((event = perf_mmap__read_event(&md->core)) != NULL) {    in test__syscall_openat_tp_fields()
    109  perf_mmap__consume(&md->core);    in test__syscall_openat_tp_fields()
    131  perf_mmap__read_done(&md->core);    in test__syscall_openat_tp_fields()
|
task-exit.c
    55   struct mmap *md;    in test__task_exit() local
    117  md = &evlist->mmap[0];    in test__task_exit()
    118  if (perf_mmap__read_init(&md->core) < 0)    in test__task_exit()
    121  while ((event = perf_mmap__read_event(&md->core)) != NULL) {    in test__task_exit()
    125  perf_mmap__consume(&md->core);    in test__task_exit()
    127  perf_mmap__read_done(&md->core);    in test__task_exit()
|
keep-tracking.c
    36   struct mmap *md;    in find_comm() local
    41   md = &evlist->mmap[i];    in find_comm()
    42   if (perf_mmap__read_init(&md->core) < 0)    in find_comm()
    44   while ((event = perf_mmap__read_event(&md->core)) != NULL) {    in find_comm()
    50   perf_mmap__consume(&md->core);    in find_comm()
    52   perf_mmap__read_done(&md->core);    in find_comm()
|
perf-time-to-tsc.c
    90   struct mmap *md;    in test__perf_time_to_tsc() local
    149  md = &evlist->mmap[i];    in test__perf_time_to_tsc()
    150  if (perf_mmap__read_init(&md->core) < 0)    in test__perf_time_to_tsc()
    153  while ((event = perf_mmap__read_event(&md->core)) != NULL) {    in test__perf_time_to_tsc()
    173  perf_mmap__consume(&md->core);    in test__perf_time_to_tsc()
    176  perf_mmap__read_done(&md->core);    in test__perf_time_to_tsc()
|
perf-record.c
    171  struct mmap *md;    in test__PERF_RECORD() local
    173  md = &evlist->mmap[i];    in test__PERF_RECORD()
    174  if (perf_mmap__read_init(&md->core) < 0)    in test__PERF_RECORD()
    177  while ((event = perf_mmap__read_event(&md->core)) != NULL) {    in test__PERF_RECORD()
    280  perf_mmap__consume(&md->core);    in test__PERF_RECORD()
    282  perf_mmap__read_done(&md->core);    in test__PERF_RECORD()
|
mmap-basic.c
    50   struct mmap *md;    in test__basic_mmap() local
    123  md = &evlist->mmap[0];    in test__basic_mmap()
    124  if (perf_mmap__read_init(&md->core) < 0)    in test__basic_mmap()
    127  while ((event = perf_mmap__read_event(&md->core)) != NULL) {    in test__basic_mmap()
    153  perf_mmap__consume(&md->core);    in test__basic_mmap()
    155  perf_mmap__read_done(&md->core);    in test__basic_mmap()
|
switch-tracking.c
    277  struct mmap *md;    in process_events() local
    281  md = &evlist->mmap[i];    in process_events()
    282  if (perf_mmap__read_init(&md->core) < 0)    in process_events()
    285  while ((event = perf_mmap__read_event(&md->core)) != NULL) {    in process_events()
    288  perf_mmap__consume(&md->core);    in process_events()
    292  perf_mmap__read_done(&md->core);    in process_events()
|
code-reading.c
    531  struct mmap *md;    in process_events() local
    535  md = &evlist->mmap[i];    in process_events()
    536  if (perf_mmap__read_init(&md->core) < 0)    in process_events()
    539  while ((event = perf_mmap__read_event(&md->core)) != NULL) {    in process_events()
    541  perf_mmap__consume(&md->core);    in process_events()
    545  perf_mmap__read_done(&md->core);    in process_events()
|
/tools/lib/bpf/

netlink.c
    411  struct xdp_features_md *md = cookie;    in parse_xdp_features() local
    421  if (ifindex != md->ifindex)    in parse_xdp_features()
    424  md->flags = libbpf_nla_getattr_u64(tb[NETDEV_A_DEV_XDP_FEATURES]);    in parse_xdp_features()
    426  md->xdp_zc_max_segs =    in parse_xdp_features()
    440  struct xdp_features_md md = {    in bpf_xdp_query() local
    495  parse_xdp_features, NULL, &md);    in bpf_xdp_query()
    499  OPTS_SET(opts, feature_flags, md.flags);    in bpf_xdp_query()
    500  OPTS_SET(opts, xdp_zc_max_segs, md.xdp_zc_max_segs);    in bpf_xdp_query()
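From the caller's side, the netlink plumbing above surfaces through bpf_xdp_query(): parse_xdp_features() copies the device's XDP feature flags and zero-copy max-segments value out of the netlink reply, and bpf_xdp_query() publishes them via OPTS_SET(). A minimal usage sketch, assuming a recent libbpf that exposes both opts fields and a valid ifindex:

    #include <stdio.h>
    #include <bpf/libbpf.h>

    int print_xdp_features(int ifindex)
    {
        LIBBPF_OPTS(bpf_xdp_query_opts, opts);
        int err;

        err = bpf_xdp_query(ifindex, 0, &opts);
        if (err)
            return err;

        printf("xdp features: 0x%llx, zc max segs: %u\n",
               (unsigned long long)opts.feature_flags,
               opts.xdp_zc_max_segs);
        return 0;
    }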
|
/tools/perf/

builtin-top.c
    882  struct mmap *md;    in perf_top__mmap_read_idx() local
    885  md = opts->overwrite ? &evlist->overwrite_mmap[idx] : &evlist->mmap[idx];    in perf_top__mmap_read_idx()
    886  if (perf_mmap__read_init(&md->core) < 0)    in perf_top__mmap_read_idx()
    889  while ((event = perf_mmap__read_event(&md->core)) != NULL) {    in perf_top__mmap_read_idx()
    900  perf_mmap__consume(&md->core);    in perf_top__mmap_read_idx()
    910  perf_mmap__read_done(&md->core);    in perf_top__mmap_read_idx()
|
builtin-kvm.c
    1213  struct mmap *md;    in perf_kvm__mmap_read_idx() local
    1219  md = &evlist->mmap[idx];    in perf_kvm__mmap_read_idx()
    1220  err = perf_mmap__read_init(&md->core);    in perf_kvm__mmap_read_idx()
    1224  while ((event = perf_mmap__read_event(&md->core)) != NULL) {    in perf_kvm__mmap_read_idx()
    1227  perf_mmap__consume(&md->core);    in perf_kvm__mmap_read_idx()
    1237  perf_mmap__consume(&md->core);    in perf_kvm__mmap_read_idx()
    1254  perf_mmap__read_done(&md->core);    in perf_kvm__mmap_read_idx()
|
builtin-record.c
    317  static int record__aio_complete(struct mmap *md, struct aiocb *cblock)    in record__aio_complete() argument
    345  perf_mmap__put(&md->core);    in record__aio_complete()
    363  static int record__aio_sync(struct mmap *md, bool sync_all)    in record__aio_sync() argument
    365  struct aiocb **aiocb = md->aio.aiocb;    in record__aio_sync()
    366  struct aiocb *cblocks = md->aio.cblocks;    in record__aio_sync()
    372  for (i = 0; i < md->aio.nr_cblocks; ++i) {    in record__aio_sync()
    373  if (cblocks[i].aio_fildes == -1 || record__aio_complete(md, &cblocks[i])) {    in record__aio_sync()
    391  while (aio_suspend((const struct aiocb **)aiocb, md->aio.nr_cblocks, &timeout)) {    in record__aio_sync()
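record__aio_sync() in the lines above reclaims asynchronous write slots: it scans the control blocks for a free or completed one and, if none is available, parks in aio_suspend() until something finishes. A minimal sketch of that reclaim loop with POSIX AIO; the slot bookkeeping here is simplified, and the real function also handles partial writes and buffer reference counts:

    #include <aio.h>
    #include <errno.h>

    /* A slot is reusable if it was never submitted or has completed. */
    static int slot_ready(struct aiocb *cblock)
    {
        if (cblock->aio_fildes == -1)
            return 1;

        if (aio_error(cblock) == EINPROGRESS)
            return 0;

        aio_return(cblock);      /* reap the result */
        cblock->aio_fildes = -1; /* mark the slot free */
        return 1;
    }

    /* Returns the index of a usable slot, or -1 on hard error. list[] holds
     * pointers to the same control blocks, as aio_suspend() requires. */
    static int sync_one(struct aiocb *cblocks, const struct aiocb **list, int nr)
    {
        struct timespec timeout = { .tv_sec = 0, .tv_nsec = 1000 * 1000 };
        int i;

        for (;;) {
            for (i = 0; i < nr; i++)
                if (slot_ready(&cblocks[i]))
                    return i;

            /* Nothing free: wait for any in-flight write to complete. */
            if (aio_suspend(list, nr, &timeout) &&
                errno != EAGAIN && errno != EINTR)
                return -1;
        }
    }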
|
builtin-trace.c
    4546  struct mmap *md;    in trace__run() local
    4548  md = &evlist->mmap[i];    in trace__run()
    4549  if (perf_mmap__read_init(&md->core) < 0)    in trace__run()
    4552  while ((event = perf_mmap__read_event(&md->core)) != NULL) {    in trace__run()
    4559  perf_mmap__consume(&md->core);    in trace__run()
    4569  perf_mmap__read_done(&md->core);    in trace__run()
|
/tools/testing/selftests/seccomp/

seccomp_bpf.c
    3287  struct seccomp_metadata md;    in TEST() local
    3330  md.filter_off = 0;    in TEST()
    3332  ret = ptrace(PTRACE_SECCOMP_GET_METADATA, pid, sizeof(md), &md);    in TEST()
    3333  EXPECT_EQ(sizeof(md), ret) {    in TEST()
    3338  EXPECT_EQ(md.flags, SECCOMP_FILTER_FLAG_LOG);    in TEST()
    3339  EXPECT_EQ(md.filter_off, 0);    in TEST()
    3341  md.filter_off = 1;    in TEST()
    3342  ret = ptrace(PTRACE_SECCOMP_GET_METADATA, pid, sizeof(md), &md);    in TEST()
    3343  EXPECT_EQ(sizeof(md), ret);    in TEST()
    3344  EXPECT_EQ(md.flags, 0);    in TEST()
    [all …]
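The seccomp selftest above exercises PTRACE_SECCOMP_GET_METADATA: the tracer sets filter_off to pick a filter by index, and the kernel fills in that filter's flags, returning sizeof(md) on success. A minimal sketch from the tracer's side, assuming the tracee is already attached and stopped; the struct and request value are defined locally in case the system headers are old, mirroring what the selftest itself does:

    #include <stdio.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>

    #ifndef PTRACE_SECCOMP_GET_METADATA
    #define PTRACE_SECCOMP_GET_METADATA 0x420d
    #endif

    struct seccomp_metadata {
        unsigned long long filter_off; /* in: index into the filter stack */
        unsigned long long flags;      /* out: SECCOMP_FILTER_FLAG_* bits */
    };

    int dump_filter_flags(pid_t pid, unsigned long long index)
    {
        struct seccomp_metadata md;
        long ret;

        md.filter_off = index;  /* 0 is the most recently installed filter */
        ret = ptrace(PTRACE_SECCOMP_GET_METADATA, pid, sizeof(md), &md);
        if (ret != sizeof(md))
            return -1;

        printf("filter %llu flags: 0x%llx\n", index,
               (unsigned long long)md.flags);
        return 0;
    }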
|
/tools/memory-model/

README
    28   See "herdtools7/INSTALL.md" for installation instructions.
|
/tools/power/pm-graph/

sleepgraph.py
    2547  md = self.sv.max_graph_depth
    2550  if (md and self.depth >= md - 1) or (line.name in self.sv.cgblacklist):
    2558  (md and last and last.depth >= md) or \
|