Lines matching refs:env — cross-reference output; each entry gives the source line number, the matching line, the enclosing function, and the reference kind (argument/local).
27 bool perf_env__insert_bpf_prog_info(struct perf_env *env, in perf_env__insert_bpf_prog_info() argument
32 down_write(&env->bpf_progs.lock); in perf_env__insert_bpf_prog_info()
33 ret = __perf_env__insert_bpf_prog_info(env, info_node); in perf_env__insert_bpf_prog_info()
34 up_write(&env->bpf_progs.lock); in perf_env__insert_bpf_prog_info()
39 bool __perf_env__insert_bpf_prog_info(struct perf_env *env, struct bpf_prog_info_node *info_node) in __perf_env__insert_bpf_prog_info() argument
46 p = &env->bpf_progs.infos.rb_node; in __perf_env__insert_bpf_prog_info()
62 rb_insert_color(&info_node->rb_node, &env->bpf_progs.infos); in __perf_env__insert_bpf_prog_info()
63 env->bpf_progs.infos_cnt++; in __perf_env__insert_bpf_prog_info()
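The listing shows the locked/unlocked split used throughout this file: perf_env__insert_bpf_prog_info() takes the bpf_progs rwsem for writing and delegates to __perf_env__insert_bpf_prog_info(), which does the rb-tree insert with no locking of its own, so callers that already hold the lock can use the double-underscore variant directly. A minimal user-space sketch of that split, using pthread_rwlock_t as a stand-in for the rw_semaphore and a single int as a stand-in for the rb-tree (names here are illustrative, not the perf API):

        #include <pthread.h>
        #include <stdbool.h>

        struct store {
                pthread_rwlock_t lock;
                int value;              /* stands in for the bpf_progs.infos rb-tree */
        };

        /* Unlocked worker: caller must already hold s->lock for writing,
         * mirroring __perf_env__insert_bpf_prog_info(). */
        static bool __store_insert(struct store *s, int v)
        {
                if (s->value == v)
                        return false;   /* duplicate key: reject, as the rb-tree insert does */
                s->value = v;
                return true;
        }

        /* Locking wrapper, mirroring perf_env__insert_bpf_prog_info(). */
        static bool store_insert(struct store *s, int v)
        {
                bool ret;

                pthread_rwlock_wrlock(&s->lock);
                ret = __store_insert(s, v);
                pthread_rwlock_unlock(&s->lock);
                return ret;
        }

        int main(void)
        {
                struct store s = { .lock = PTHREAD_RWLOCK_INITIALIZER, .value = -1 };

                return store_insert(&s, 42) ? 0 : 1;
        }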
67 struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env, in perf_env__find_bpf_prog_info() argument
73 down_read(&env->bpf_progs.lock); in perf_env__find_bpf_prog_info()
74 n = env->bpf_progs.infos.rb_node; in perf_env__find_bpf_prog_info()
88 up_read(&env->bpf_progs.lock); in perf_env__find_bpf_prog_info()
92 void perf_env__iterate_bpf_prog_info(struct perf_env *env, in perf_env__iterate_bpf_prog_info() argument
99 down_read(&env->bpf_progs.lock); in perf_env__iterate_bpf_prog_info()
100 first = rb_first(&env->bpf_progs.infos); in perf_env__iterate_bpf_prog_info()
103 up_read(&env->bpf_progs.lock); in perf_env__iterate_bpf_prog_info()
106 bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node) in perf_env__insert_btf() argument
110 down_write(&env->bpf_progs.lock); in perf_env__insert_btf()
111 ret = __perf_env__insert_btf(env, btf_node); in perf_env__insert_btf()
112 up_write(&env->bpf_progs.lock); in perf_env__insert_btf()
116 bool __perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node) in __perf_env__insert_btf() argument
123 p = &env->bpf_progs.btfs.rb_node; in __perf_env__insert_btf()
139 rb_insert_color(&btf_node->rb_node, &env->bpf_progs.btfs); in __perf_env__insert_btf()
140 env->bpf_progs.btfs_cnt++; in __perf_env__insert_btf()
144 struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id) in perf_env__find_btf() argument
148 down_read(&env->bpf_progs.lock); in perf_env__find_btf()
149 res = __perf_env__find_btf(env, btf_id); in perf_env__find_btf()
150 up_read(&env->bpf_progs.lock); in perf_env__find_btf()
154 struct btf_node *__perf_env__find_btf(struct perf_env *env, __u32 btf_id) in __perf_env__find_btf() argument
159 n = env->bpf_progs.btfs.rb_node; in __perf_env__find_btf()
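The lookup side follows the same wrapper convention: perf_env__find_btf() holds the lock for reading while __perf_env__find_btf() walks env->bpf_progs.btfs from the root, comparing the target btf_id at each node. The same descent on a plain binary search tree, as a simplified stand-in for the kernel rbtree:

        #include <stddef.h>

        struct btf_stub {
                unsigned int id;
                struct btf_stub *left, *right;
        };

        /* The descent of __perf_env__find_btf(); taking the read lock around
         * this is the wrapper's job, as in perf_env__find_btf(). */
        static struct btf_stub *btf_stub_find(struct btf_stub *n, unsigned int id)
        {
                while (n) {
                        if (id < n->id)
                                n = n->left;
                        else if (id > n->id)
                                n = n->right;
                        else
                                return n;       /* exact id match */
                }
                return NULL;                    /* btf_id not recorded */
        }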
174 static void perf_env__purge_bpf(struct perf_env *env) in perf_env__purge_bpf() argument
179 down_write(&env->bpf_progs.lock); in perf_env__purge_bpf()
181 root = &env->bpf_progs.infos; in perf_env__purge_bpf()
195 env->bpf_progs.infos_cnt = 0; in perf_env__purge_bpf()
197 root = &env->bpf_progs.btfs; in perf_env__purge_bpf()
209 env->bpf_progs.btfs_cnt = 0; in perf_env__purge_bpf()
211 up_write(&env->bpf_progs.lock); in perf_env__purge_bpf()
214 static void perf_env__purge_bpf(struct perf_env *env __maybe_unused) in perf_env__purge_bpf()
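perf_env__purge_bpf() empties both trees under the write lock and resets infos_cnt/btfs_cnt to zero; the essential teardown discipline is capturing the next node before the current one is erased and freed. The same save-next-before-free idiom, shown on a singly linked list for brevity (illustrative, not the perf code):

        #include <stdlib.h>

        struct node {
                struct node *next;
        };

        static void purge(struct node **head)
        {
                struct node *n = *head, *next;

                while (n) {
                        next = n->next; /* capture before free(): n is dead after */
                        free(n);
                        n = next;
                }
                *head = NULL;           /* leave the container empty but reusable */
        }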
219 void perf_env__exit(struct perf_env *env) in perf_env__exit() argument
223 perf_env__purge_bpf(env); in perf_env__exit()
224 perf_env__purge_cgroups(env); in perf_env__exit()
225 zfree(&env->hostname); in perf_env__exit()
226 zfree(&env->os_release); in perf_env__exit()
227 zfree(&env->version); in perf_env__exit()
228 zfree(&env->arch); in perf_env__exit()
229 zfree(&env->cpu_desc); in perf_env__exit()
230 zfree(&env->cpuid); in perf_env__exit()
231 zfree(&env->cmdline); in perf_env__exit()
232 zfree(&env->cmdline_argv); in perf_env__exit()
233 zfree(&env->sibling_dies); in perf_env__exit()
234 zfree(&env->sibling_cores); in perf_env__exit()
235 zfree(&env->sibling_threads); in perf_env__exit()
236 zfree(&env->pmu_mappings); in perf_env__exit()
237 zfree(&env->cpu); in perf_env__exit()
238 for (i = 0; i < env->nr_cpu_pmu_caps; i++) in perf_env__exit()
239 zfree(&env->cpu_pmu_caps[i]); in perf_env__exit()
240 zfree(&env->cpu_pmu_caps); in perf_env__exit()
241 zfree(&env->numa_map); in perf_env__exit()
243 for (i = 0; i < env->nr_numa_nodes; i++) in perf_env__exit()
244 perf_cpu_map__put(env->numa_nodes[i].map); in perf_env__exit()
245 zfree(&env->numa_nodes); in perf_env__exit()
247 for (i = 0; i < env->caches_cnt; i++) in perf_env__exit()
248 cpu_cache_level__free(&env->caches[i]); in perf_env__exit()
249 zfree(&env->caches); in perf_env__exit()
251 for (i = 0; i < env->nr_memory_nodes; i++) in perf_env__exit()
252 zfree(&env->memory_nodes[i].set); in perf_env__exit()
253 zfree(&env->memory_nodes); in perf_env__exit()
255 for (i = 0; i < env->nr_hybrid_nodes; i++) { in perf_env__exit()
256 zfree(&env->hybrid_nodes[i].pmu_name); in perf_env__exit()
257 zfree(&env->hybrid_nodes[i].cpus); in perf_env__exit()
259 zfree(&env->hybrid_nodes); in perf_env__exit()
261 for (i = 0; i < env->nr_pmus_with_caps; i++) { in perf_env__exit()
262 for (j = 0; j < env->pmu_caps[i].nr_caps; j++) in perf_env__exit()
263 zfree(&env->pmu_caps[i].caps[j]); in perf_env__exit()
264 zfree(&env->pmu_caps[i].caps); in perf_env__exit()
265 zfree(&env->pmu_caps[i].pmu_name); in perf_env__exit()
267 zfree(&env->pmu_caps); in perf_env__exit()
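perf_env__exit() releases every dynamically allocated member with zfree(), which frees the allocation and NULLs the pointer in one step, so a repeated teardown or a later accessor sees NULL rather than a dangling pointer. A sketch of what a zfree()-style helper does (the real helper is a macro in the tools headers; this is just the shape):

        #include <stdlib.h>

        /* Free *pp and clear it; freeing the same slot again is then a no-op
         * and later reads see NULL instead of a dangling pointer. */
        static void zfree_ptr(void **pp)
        {
                free(*pp);
                *pp = NULL;
        }

        /* The real helper is roughly
         *   #define zfree(p) zfree_ptr((void **)(p))
         * so call sites pass the member's address: zfree(&env->hostname). */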
270 void perf_env__init(struct perf_env *env) in perf_env__init() argument
272 memset(env, 0, sizeof(*env)); in perf_env__init()
274 env->bpf_progs.infos = RB_ROOT; in perf_env__init()
275 env->bpf_progs.btfs = RB_ROOT; in perf_env__init()
276 init_rwsem(&env->bpf_progs.lock); in perf_env__init()
278 env->kernel_is_64_bit = -1; in perf_env__init()
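perf_env__init() zeroes the structure, seeds both rb-tree roots, initializes the rwsem, and sets kernel_is_64_bit to the -1 "not yet computed" sentinel; paired with perf_env__exit() this gives the usual init/use/exit lifecycle. A hedged usage sketch built only from functions visible in this listing (the include path is an assumption):

        #include <stdio.h>
        #include "util/env.h"   /* assumed include path inside tools/perf */

        static void dump_env_basics(void)
        {
                struct perf_env env;

                perf_env__init(&env);
                printf("arch: %s, cpus: %d\n",
                       perf_env__raw_arch(&env), perf_env__nr_cpus_avail(&env));
                perf_env__exit(&env);   /* safe even though most members stayed NULL/0 */
        }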
281 static void perf_env__init_kernel_mode(struct perf_env *env) in perf_env__init_kernel_mode() argument
283 const char *arch = perf_env__raw_arch(env); in perf_env__init_kernel_mode()
289 env->kernel_is_64_bit = 1; in perf_env__init_kernel_mode()
291 env->kernel_is_64_bit = 0; in perf_env__init_kernel_mode()
294 int perf_env__kernel_is_64_bit(struct perf_env *env) in perf_env__kernel_is_64_bit() argument
296 if (env->kernel_is_64_bit == -1) in perf_env__kernel_is_64_bit()
297 perf_env__init_kernel_mode(env); in perf_env__kernel_is_64_bit()
299 return env->kernel_is_64_bit; in perf_env__kernel_is_64_bit()
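perf_env__kernel_is_64_bit() is the lazy-initialization pattern in miniature: the field starts at the -1 sentinel set by perf_env__init(), and the first query computes and caches the real answer from the raw architecture string. The pattern in isolation:

        /* -1 = not computed yet, 0 = 32-bit, 1 = 64-bit; computed at most once. */
        static int cached_mode = -1;

        static int compute_mode(void)
        {
                return 1;       /* stand-in for matching the raw arch string */
        }

        static int kernel_mode(void)
        {
                if (cached_mode == -1)
                        cached_mode = compute_mode();
                return cached_mode;
        }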
302 int perf_env__set_cmdline(struct perf_env *env, int argc, const char *argv[]) in perf_env__set_cmdline() argument
307 env->cmdline_argv = calloc(argc, sizeof(char *)); in perf_env__set_cmdline()
308 if (env->cmdline_argv == NULL) in perf_env__set_cmdline()
316 env->cmdline_argv[i] = argv[i]; in perf_env__set_cmdline()
317 if (env->cmdline_argv[i] == NULL) in perf_env__set_cmdline()
321 env->nr_cmdline = argc; in perf_env__set_cmdline()
325 zfree(&env->cmdline_argv); in perf_env__set_cmdline()
330 int perf_env__read_cpu_topology_map(struct perf_env *env) in perf_env__read_cpu_topology_map() argument
334 if (env->cpu != NULL) in perf_env__read_cpu_topology_map()
337 if (env->nr_cpus_avail == 0) in perf_env__read_cpu_topology_map()
338 env->nr_cpus_avail = cpu__max_present_cpu().cpu; in perf_env__read_cpu_topology_map()
340 nr_cpus = env->nr_cpus_avail; in perf_env__read_cpu_topology_map()
344 env->cpu = calloc(nr_cpus, sizeof(env->cpu[0])); in perf_env__read_cpu_topology_map()
345 if (env->cpu == NULL) in perf_env__read_cpu_topology_map()
354 env->cpu[idx].core_id = core_id >= 0 ? core_id : -1; in perf_env__read_cpu_topology_map()
355 env->cpu[idx].socket_id = socket_id >= 0 ? socket_id : -1; in perf_env__read_cpu_topology_map()
356 env->cpu[idx].die_id = die_id >= 0 ? die_id : -1; in perf_env__read_cpu_topology_map()
359 env->nr_cpus_avail = nr_cpus; in perf_env__read_cpu_topology_map()
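perf_env__read_cpu_topology_map() is idempotent: it returns immediately when env->cpu is already populated, derives nr_cpus_avail from the highest present CPU when unset, and records -1 for any core/socket/die id the platform did not report. A sketch of that shape (the constant 8 stands in for cpu__max_present_cpu(), and the sysfs reads are elided):

        #include <stdlib.h>

        struct cpu_topo { int core_id, socket_id, die_id; };

        static int read_topology(struct cpu_topo **map, int *nr)
        {
                if (*map)
                        return 0;       /* already populated: calling again is a no-op */
                if (*nr == 0)
                        *nr = 8;        /* stand-in for cpu__max_present_cpu().cpu */

                *map = calloc(*nr, sizeof(**map));
                if (!*map)
                        return -1;

                for (int i = 0; i < *nr; i++) {
                        /* -1 marks ids the platform did not report */
                        (*map)[i] = (struct cpu_topo){ -1, -1, -1 };
                }
                return 0;
        }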
363 int perf_env__read_pmu_mappings(struct perf_env *env) in perf_env__read_pmu_mappings() argument
376 env->nr_pmu_mappings = pmu_num; in perf_env__read_pmu_mappings()
389 env->pmu_mappings = strbuf_detach(&sb, NULL); in perf_env__read_pmu_mappings()
398 int perf_env__read_cpuid(struct perf_env *env) in perf_env__read_cpuid() argument
407 free(env->cpuid); in perf_env__read_cpuid()
408 env->cpuid = strdup(cpuid); in perf_env__read_cpuid()
409 if (env->cpuid == NULL) in perf_env__read_cpuid()
414 static int perf_env__read_arch(struct perf_env *env) in perf_env__read_arch() argument
418 if (env->arch) in perf_env__read_arch()
422 env->arch = strdup(uts.machine); in perf_env__read_arch()
424 return env->arch ? 0 : -ENOMEM; in perf_env__read_arch()
427 static int perf_env__read_nr_cpus_avail(struct perf_env *env) in perf_env__read_nr_cpus_avail() argument
429 if (env->nr_cpus_avail == 0) in perf_env__read_nr_cpus_avail()
430 env->nr_cpus_avail = cpu__max_present_cpu().cpu; in perf_env__read_nr_cpus_avail()
432 return env->nr_cpus_avail ? 0 : -ENOENT; in perf_env__read_nr_cpus_avail()
481 int perf_env__read_core_pmu_caps(struct perf_env *env) in perf_env__read_core_pmu_caps() argument
500 return __perf_env__read_core_pmu_caps(pmu, &env->nr_cpu_pmu_caps, in perf_env__read_core_pmu_caps()
501 &env->cpu_pmu_caps, in perf_env__read_core_pmu_caps()
502 &env->max_branches, in perf_env__read_core_pmu_caps()
503 &env->br_cntr_nr, in perf_env__read_core_pmu_caps()
504 &env->br_cntr_width); in perf_env__read_core_pmu_caps()
530 env->nr_pmus_with_caps = nr_pmu; in perf_env__read_core_pmu_caps()
531 env->pmu_caps = pmu_caps; in perf_env__read_core_pmu_caps()
545 const char *perf_env__raw_arch(struct perf_env *env) in perf_env__raw_arch() argument
547 return env && !perf_env__read_arch(env) ? env->arch : "unknown"; in perf_env__raw_arch()
550 int perf_env__nr_cpus_avail(struct perf_env *env) in perf_env__nr_cpus_avail() argument
552 return env && !perf_env__read_nr_cpus_avail(env) ? env->nr_cpus_avail : 0; in perf_env__nr_cpus_avail()
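perf_env__raw_arch() and perf_env__nr_cpus_avail() share an accessor idiom: tolerate a NULL env, trigger the lazy read, and fall back to a harmless default ("unknown" or 0) on failure so callers never have to handle an error. The idiom in isolation, with a hypothetical ctx type:

        #include <stddef.h>

        struct ctx {
                const char *arch;
        };

        static int ctx_read_arch(struct ctx *c)
        {
                if (!c->arch)
                        c->arch = "x86_64";     /* stand-in for the uname(2) read */
                return 0;
        }

        /* NULL-safe, lazily populated, harmless fallback instead of an error code. */
        static const char *ctx_arch(struct ctx *c)
        {
                return c && !ctx_read_arch(c) ? c->arch : "unknown";
        }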
594 const char *perf_env__arch(struct perf_env *env) in perf_env__arch() argument
598 if (!env || !env->arch) { /* Assume local operation */ in perf_env__arch()
604 arch_name = env->arch; in perf_env__arch()
613 const char *perf_env__arch_strerrno(struct perf_env *env __maybe_unused, int err __maybe_unused) in perf_env__arch_strerrno()
616 if (env->arch_strerrno == NULL) in perf_env__arch_strerrno()
617 env->arch_strerrno = arch_syscalls__strerrno_function(perf_env__arch(env)); in perf_env__arch_strerrno()
619 return env->arch_strerrno ? env->arch_strerrno(err) : "no arch specific strerrno function"; in perf_env__arch_strerrno()
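perf_env__arch_strerrno() resolves an architecture-specific strerrno function pointer on first use, caches it in env->arch_strerrno, and degrades to a fixed message when no resolver is available. The same memoized-function-pointer shape, with stand-in resolver and table:

        #include <stddef.h>

        typedef const char *(*strerrno_fn)(int err);

        static const char *generic_strerrno(int err)
        {
                (void)err;
                return "EINVAL";        /* stand-in table lookup */
        }

        /* Stand-in resolver; the real one selects a per-architecture table. */
        static strerrno_fn resolve(const char *arch)
        {
                (void)arch;
                return generic_strerrno;
        }

        static const char *lookup(strerrno_fn *cached, const char *arch, int err)
        {
                if (*cached == NULL)
                        *cached = resolve(arch);        /* resolve once, reuse */
                return *cached ? (*cached)(err) : "no arch specific strerrno function";
        }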
625 const char *perf_env__cpuid(struct perf_env *env) in perf_env__cpuid() argument
629 if (!env->cpuid) { /* Assume local operation */ in perf_env__cpuid()
630 status = perf_env__read_cpuid(env); in perf_env__cpuid()
635 return env->cpuid; in perf_env__cpuid()
638 int perf_env__nr_pmu_mappings(struct perf_env *env) in perf_env__nr_pmu_mappings() argument
642 if (!env->nr_pmu_mappings) { /* Assume local operation */ in perf_env__nr_pmu_mappings()
643 status = perf_env__read_pmu_mappings(env); in perf_env__nr_pmu_mappings()
648 return env->nr_pmu_mappings; in perf_env__nr_pmu_mappings()
651 const char *perf_env__pmu_mappings(struct perf_env *env) in perf_env__pmu_mappings() argument
655 if (!env->pmu_mappings) { /* Assume local operation */ in perf_env__pmu_mappings()
656 status = perf_env__read_pmu_mappings(env); in perf_env__pmu_mappings()
661 return env->pmu_mappings; in perf_env__pmu_mappings()
664 int perf_env__numa_node(struct perf_env *env, struct perf_cpu cpu) in perf_env__numa_node() argument
666 if (!env->nr_numa_map) { in perf_env__numa_node()
670 for (i = 0; i < env->nr_numa_nodes; i++) { in perf_env__numa_node()
671 nn = &env->numa_nodes[i]; in perf_env__numa_node()
681 env->numa_map = malloc(nr * sizeof(int)); in perf_env__numa_node()
682 if (!env->numa_map) in perf_env__numa_node()
686 env->numa_map[i] = -1; in perf_env__numa_node()
688 env->nr_numa_map = nr; in perf_env__numa_node()
690 for (i = 0; i < env->nr_numa_nodes; i++) { in perf_env__numa_node()
694 nn = &env->numa_nodes[i]; in perf_env__numa_node()
696 env->numa_map[tmp.cpu] = i; in perf_env__numa_node()
700 return cpu.cpu >= 0 && cpu.cpu < env->nr_numa_map ? env->numa_map[cpu.cpu] : -1; in perf_env__numa_node()
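perf_env__numa_node() builds a reverse cpu-to-node map on first call: it sizes the array from the largest CPU found in any node's map, fills it with -1, then stamps each node index onto that node's CPUs; afterwards every lookup is O(1), with -1 for unmapped or out-of-range CPUs. A condensed sketch of that construction (the flat int arrays are a hypothetical stand-in for env->numa_nodes[i].map):

        #include <stdlib.h>

        /* nodes[i] lists the cpu ids of NUMA node i; nr_cpus[i] is its length. */
        static int *build_numa_map(const int *const *nodes, const int *nr_cpus,
                                   int nr_nodes, int *nr_map)
        {
                int max_cpu = -1, *map;

                for (int i = 0; i < nr_nodes; i++)
                        for (int j = 0; j < nr_cpus[i]; j++)
                                if (nodes[i][j] > max_cpu)
                                        max_cpu = nodes[i][j];

                *nr_map = max_cpu + 1;
                map = malloc(*nr_map * sizeof(int));
                if (!map)
                        return NULL;

                for (int i = 0; i < *nr_map; i++)
                        map[i] = -1;                    /* -1 = cpu in no node */
                for (int i = 0; i < nr_nodes; i++)
                        for (int j = 0; j < nr_cpus[i]; j++)
                                map[nodes[i][j]] = i;   /* reverse mapping */
                return map;
        }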
703 bool perf_env__has_pmu_mapping(struct perf_env *env, const char *pmu_name) in perf_env__has_pmu_mapping() argument
705 char *pmu_mapping = env->pmu_mappings, *colon; in perf_env__has_pmu_mapping()
707 for (int i = 0; i < env->nr_pmu_mappings; ++i) { in perf_env__has_pmu_mapping()
721 char *perf_env__find_pmu_cap(struct perf_env *env, const char *pmu_name, in perf_env__find_pmu_cap() argument
741 for (i = 0; i < env->nr_cpu_pmu_caps; i++) { in perf_env__find_pmu_cap()
742 if (!strncmp(env->cpu_pmu_caps[i], cap_eq, cap_size + 1)) { in perf_env__find_pmu_cap()
744 return &env->cpu_pmu_caps[i][cap_size + 1]; in perf_env__find_pmu_cap()
750 for (i = 0; i < env->nr_pmus_with_caps; i++) { in perf_env__find_pmu_cap()
751 if (strcmp(env->pmu_caps[i].pmu_name, pmu_name)) in perf_env__find_pmu_cap()
754 ptr = env->pmu_caps[i].caps; in perf_env__find_pmu_cap()
756 for (j = 0; j < env->pmu_caps[i].nr_caps; j++) { in perf_env__find_pmu_cap()
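perf_env__find_pmu_cap() searches capability strings stored as "name=value": the strncmp over cap_size + 1 bytes makes the '=' part of the match, so a capability name cannot false-match a longer name that merely shares its prefix, and the returned pointer is the text just past the '='. A sketch of that scan (the cap-list layout is taken from the listing; the helper name is illustrative):

        #include <stddef.h>
        #include <string.h>

        /* caps[] holds "name=value" strings, the layout of env->cpu_pmu_caps. */
        static const char *find_cap(const char *const *caps, int nr, const char *name)
        {
                size_t len = strlen(name);

                for (int i = 0; i < nr; i++) {
                        /* match through the '=' so "branches" cannot hit
                         * "branches_extra=..." by prefix alone */
                        if (!strncmp(caps[i], name, len) && caps[i][len] == '=')
                                return &caps[i][len + 1];       /* the value */
                }
                return NULL;
        }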
769 void perf_env__find_br_cntr_info(struct perf_env *env, in perf_env__find_br_cntr_info() argument
774 *nr = env->cpu_pmu_caps ? env->br_cntr_nr : in perf_env__find_br_cntr_info()
775 env->pmu_caps->br_cntr_nr; in perf_env__find_br_cntr_info()
779 *width = env->cpu_pmu_caps ? env->br_cntr_width : in perf_env__find_br_cntr_info()
780 env->pmu_caps->br_cntr_width; in perf_env__find_br_cntr_info()
784 bool perf_env__is_x86_amd_cpu(struct perf_env *env) in perf_env__is_x86_amd_cpu() argument
789 is_amd = env->cpuid && strstarts(env->cpuid, "AuthenticAMD") ? 1 : -1; in perf_env__is_x86_amd_cpu()
796 struct perf_env env = { .total_mem = 0, }; in x86__is_amd_cpu() local
799 perf_env__cpuid(&env); in x86__is_amd_cpu()
800 is_amd = perf_env__is_x86_amd_cpu(&env); in x86__is_amd_cpu()
801 perf_env__exit(&env); in x86__is_amd_cpu()
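x86__is_amd_cpu() shows the throwaway-env pattern: build a perf_env on the stack, let perf_env__cpuid() lazily read the local CPUID string, test the vendor prefix, and tear the env down again. A hedged reconstruction of that flow (include paths are assumptions; strstarts() is the prefix test from the tools headers):

        #include <stdbool.h>
        #include "util/env.h"           /* assumed tools/perf include path */
        #include "linux/string.h"       /* strstarts(), from tools/include */

        static bool local_cpu_is_amd(void)
        {
                struct perf_env env = { .total_mem = 0, };
                bool amd;

                perf_env__cpuid(&env);  /* lazily reads the local cpuid string */
                amd = env.cpuid && strstarts(env.cpuid, "AuthenticAMD");
                perf_env__exit(&env);
                return amd;
        }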