Lines Matching refs:env (references to the perf_env record in tools/perf/util/header.c)
560 struct perf_env *env = &ff->ph->env; in write_cmdline() local
568 n = env->nr_cmdline + 1; in write_cmdline()
578 for (i = 0 ; i < env->nr_cmdline; i++) { in write_cmdline()
579 ret = do_write_string(ff, env->cmdline_argv[i]); in write_cmdline()
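Read together, the write_cmdline() matches above form the following shape. This is a reconstruction from the matched lines, not a verbatim copy: the perf_exe() call for the perf binary path, and the fact that it is the reason for the extra slot added to nr_cmdline, are assumptions about the elided context.

static int write_cmdline(struct feat_fd *ff, struct evlist *evlist __maybe_unused)
{
        struct perf_env *env = &ff->ph->env;
        char pbuf[MAXPATHLEN], *buf;
        int i, ret, n;

        /* path of the running perf binary (assumed from elided context) */
        buf = perf_exe(pbuf, MAXPATHLEN);

        /* the extra slot accounts for that binary path */
        n = env->nr_cmdline + 1;

        ret = do_write(ff, &n, sizeof(n));
        if (ret < 0)
                return ret;

        ret = do_write_string(ff, buf);
        if (ret < 0)
                return ret;

        for (i = 0 ; i < env->nr_cmdline; i++) {
                ret = do_write_string(ff, env->cmdline_argv[i]);
                if (ret < 0)
                        return ret;
        }
        return 0;
}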
590 struct perf_env *env = &ff->ph->env; in write_cpu_topology() local
618 ret = perf_env__read_cpu_topology_map(env); in write_cpu_topology()
622 for (j = 0; j < env->nr_cpus_avail; j++) { in write_cpu_topology()
623 ret = do_write(ff, &env->cpu[j].core_id, in write_cpu_topology()
624 sizeof(env->cpu[j].core_id)); in write_cpu_topology()
627 ret = do_write(ff, &env->cpu[j].socket_id, in write_cpu_topology()
628 sizeof(env->cpu[j].socket_id)); in write_cpu_topology()
646 for (j = 0; j < env->nr_cpus_avail; j++) { in write_cpu_topology()
647 ret = do_write(ff, &env->cpu[j].die_id, in write_cpu_topology()
648 sizeof(env->cpu[j].die_id)); in write_cpu_topology()
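The per-CPU section of write_cpu_topology() resolves the topology map once with perf_env__read_cpu_topology_map() and then emits core_id/socket_id pairs for every available CPU, with die_id values written in a separate later pass so that readers which predate die support can still parse the leading fields. A condensed sketch of those loops, reconstructed from the matches; the sibling-list writes that sit between the two loops are omitted and the error paths are simplified:

        ret = perf_env__read_cpu_topology_map(env);
        if (ret < 0)
                return ret;

        for (j = 0; j < env->nr_cpus_avail; j++) {
                ret = do_write(ff, &env->cpu[j].core_id,
                               sizeof(env->cpu[j].core_id));
                if (ret < 0)
                        return ret;
                ret = do_write(ff, &env->cpu[j].socket_id,
                               sizeof(env->cpu[j].socket_id));
                if (ret < 0)
                        return ret;
        }

        /* die ids follow in their own pass (newer format extension) */
        for (j = 0; j < env->nr_cpus_avail; j++) {
                ret = do_write(ff, &env->cpu[j].die_id,
                               sizeof(env->cpu[j].die_id));
                if (ret < 0)
                        return ret;
        }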
927 return do_write(ff, &ff->ph->env.clock.clockid_res_ns, in write_clockid()
928 sizeof(ff->ph->env.clock.clockid_res_ns)); in write_clockid()
946 data32 = ff->ph->env.clock.clockid; in write_clock_data()
953 data64 = &ff->ph->env.clock.tod_ns; in write_clock_data()
960 data64 = &ff->ph->env.clock.clockid_ns; in write_clock_data()
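write_clockid() is a single do_write() of clockid_res_ns; write_clock_data() emits a small fixed-layout record: a version word, the clockid, and two reference timestamps (wall-clock time and clockid time taken together). A reconstruction of write_clock_data(), where the enabled guard and the version value of 1 are assumptions taken from the surrounding context:

static int write_clock_data(struct feat_fd *ff, struct evlist *evlist __maybe_unused)
{
        u64 *data64;
        u32 data32;
        int ret;

        if (!ff->ph->env.clock.enabled)
                return 0;

        data32 = 1;                             /* version (assumed) */
        ret = do_write(ff, &data32, sizeof(data32));
        if (ret < 0)
                return ret;

        data32 = ff->ph->env.clock.clockid;
        ret = do_write(ff, &data32, sizeof(data32));
        if (ret < 0)
                return ret;

        data64 = &ff->ph->env.clock.tod_ns;     /* wall-clock reference */
        ret = do_write(ff, data64, sizeof(*data64));
        if (ret < 0)
                return ret;

        data64 = &ff->ph->env.clock.clockid_ns; /* clockid reference */
        return do_write(ff, data64, sizeof(*data64));
}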
1018 struct perf_env *env = &ff->ph->env; in write_bpf_prog_info() local
1023 down_read(&env->bpf_progs.lock); in write_bpf_prog_info()
1025 if (env->bpf_progs.infos_cnt == 0) in write_bpf_prog_info()
1028 ret = do_write(ff, &env->bpf_progs.infos_cnt, in write_bpf_prog_info()
1029 sizeof(env->bpf_progs.infos_cnt)); in write_bpf_prog_info()
1033 root = &env->bpf_progs.infos; in write_bpf_prog_info()
1056 up_read(&env->bpf_progs.lock); in write_bpf_prog_info()
1063 struct perf_env *env = &ff->ph->env; in write_bpf_btf() local
1068 down_read(&env->bpf_progs.lock); in write_bpf_btf()
1070 if (env->bpf_progs.btfs_cnt == 0) in write_bpf_btf()
1073 ret = do_write(ff, &env->bpf_progs.btfs_cnt, in write_bpf_btf()
1074 sizeof(env->bpf_progs.btfs_cnt)); in write_bpf_btf()
1079 root = &env->bpf_progs.btfs; in write_bpf_btf()
1092 up_read(&env->bpf_progs.lock); in write_bpf_btf()
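write_bpf_prog_info() and write_bpf_btf() share one pattern: take env->bpf_progs.lock for reading, return early when the count is zero, write the count, then walk the rbtree kept in perf_env and emit each node. A condensed sketch of write_bpf_btf(); the btf_node layout and the size arithmetic of the per-node write are assumptions based on the surrounding code:

static int write_bpf_btf(struct feat_fd *ff, struct evlist *evlist __maybe_unused)
{
        struct perf_env *env = &ff->ph->env;
        struct rb_root *root;
        struct rb_node *next;
        int ret = 0;

        down_read(&env->bpf_progs.lock);

        if (env->bpf_progs.btfs_cnt == 0)
                goto out;

        ret = do_write(ff, &env->bpf_progs.btfs_cnt,
                       sizeof(env->bpf_progs.btfs_cnt));
        if (ret < 0)
                goto out;

        root = &env->bpf_progs.btfs;
        next = rb_first(root);
        while (next) {
                struct btf_node *node;

                node = rb_entry(next, struct btf_node, rb_node);
                next = rb_next(&node->rb_node);
                /* id, data_size and the raw BTF blob sit back to back
                 * in struct btf_node (assumed), so one write covers all */
                ret = do_write(ff, &node->id,
                               sizeof(u32) * 2 + node->data_size);
                if (ret < 0)
                        goto out;
        }
out:
        up_read(&env->bpf_progs.lock);
        return ret;
}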
1513 ret = do_write(ff, &(ff->ph->env.comp_ver), sizeof(ff->ph->env.comp_ver)); in write_compressed()
1517 ret = do_write(ff, &(ff->ph->env.comp_type), sizeof(ff->ph->env.comp_type)); in write_compressed()
1521 ret = do_write(ff, &(ff->ph->env.comp_level), sizeof(ff->ph->env.comp_level)); in write_compressed()
1525 ret = do_write(ff, &(ff->ph->env.comp_ratio), sizeof(ff->ph->env.comp_ratio)); in write_compressed()
1529 return do_write(ff, &(ff->ph->env.comp_mmap_len), sizeof(ff->ph->env.comp_mmap_len)); in write_compressed()
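write_compressed() is just five do_write() calls in sequence; reassembled as a whole function from the matches above (only the error checks are filled in):

static int write_compressed(struct feat_fd *ff, struct evlist *evlist __maybe_unused)
{
        int ret;

        ret = do_write(ff, &(ff->ph->env.comp_ver), sizeof(ff->ph->env.comp_ver));
        if (ret)
                return ret;

        ret = do_write(ff, &(ff->ph->env.comp_type), sizeof(ff->ph->env.comp_type));
        if (ret)
                return ret;

        ret = do_write(ff, &(ff->ph->env.comp_level), sizeof(ff->ph->env.comp_level));
        if (ret)
                return ret;

        ret = do_write(ff, &(ff->ph->env.comp_ratio), sizeof(ff->ph->env.comp_ratio));
        if (ret)
                return ret;

        return do_write(ff, &(ff->ph->env.comp_mmap_len), sizeof(ff->ph->env.comp_mmap_len));
}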
1626 fprintf(fp, "# hostname : %s\n", ff->ph->env.hostname); in print_hostname()
1631 fprintf(fp, "# os release : %s\n", ff->ph->env.os_release); in print_osrelease()
1636 fprintf(fp, "# arch : %s\n", ff->ph->env.arch); in print_arch()
1641 fprintf(fp, "# cpudesc : %s\n", ff->ph->env.cpu_desc); in print_cpudesc()
1646 fprintf(fp, "# nrcpus online : %u\n", ff->ph->env.nr_cpus_online); in print_nrcpus()
1647 fprintf(fp, "# nrcpus avail : %u\n", ff->ph->env.nr_cpus_avail); in print_nrcpus()
1652 fprintf(fp, "# perf version : %s\n", ff->ph->env.version); in print_version()
1659 nr = ff->ph->env.nr_cmdline; in print_cmdline()
1664 char *argv_i = strdup(ff->ph->env.cmdline_argv[i]); in print_cmdline()
1666 fprintf(fp, "%s ", ff->ph->env.cmdline_argv[i]); in print_cmdline()
1687 int cpu_nr = ph->env.nr_cpus_avail; in print_cpu_topology()
1691 nr = ph->env.nr_sibling_cores; in print_cpu_topology()
1692 str = ph->env.sibling_cores; in print_cpu_topology()
1699 if (ph->env.nr_sibling_dies) { in print_cpu_topology()
1700 nr = ph->env.nr_sibling_dies; in print_cpu_topology()
1701 str = ph->env.sibling_dies; in print_cpu_topology()
1709 nr = ph->env.nr_sibling_threads; in print_cpu_topology()
1710 str = ph->env.sibling_threads; in print_cpu_topology()
1717 if (ph->env.nr_sibling_dies) { in print_cpu_topology()
1718 if (ph->env.cpu != NULL) { in print_cpu_topology()
1722 i, ph->env.cpu[i].core_id, in print_cpu_topology()
1723 ph->env.cpu[i].die_id, in print_cpu_topology()
1724 ph->env.cpu[i].socket_id); in print_cpu_topology()
1729 if (ph->env.cpu != NULL) { in print_cpu_topology()
1733 i, ph->env.cpu[i].core_id, in print_cpu_topology()
1734 ph->env.cpu[i].socket_id); in print_cpu_topology()
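The per-CPU tail of print_cpu_topology() branches on whether die information was recorded: with nr_sibling_dies set it prints a core/die/socket triple per CPU, otherwise only the core/socket pair, and in both cases it falls back to a note when ph->env.cpu was never populated. A sketch of that branch; the exact fprintf format strings and fallback wording are assumptions:

        if (ph->env.nr_sibling_dies) {
                if (ph->env.cpu != NULL) {
                        for (i = 0; i < cpu_nr; i++)
                                fprintf(fp, "# CPU %d: Core ID %d, Die ID %d, Socket ID %d\n",
                                        i, ph->env.cpu[i].core_id,
                                        ph->env.cpu[i].die_id,
                                        ph->env.cpu[i].socket_id);
                } else
                        fprintf(fp, "# Core ID, Die ID and Socket ID information is not available\n");
        } else {
                if (ph->env.cpu != NULL) {
                        for (i = 0; i < cpu_nr; i++)
                                fprintf(fp, "# CPU %d: Core ID %d, Socket ID %d\n",
                                        i, ph->env.cpu[i].core_id,
                                        ph->env.cpu[i].socket_id);
                } else
                        fprintf(fp, "# Core ID and Socket ID information is not available\n");
        }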
1744 ff->ph->env.clock.clockid_res_ns * 1000); in print_clockid()
1756 if (!ff->ph->env.clock.enabled) { in print_clock_data()
1762 ref = ff->ph->env.clock.tod_ns; in print_clock_data()
1768 ref = ff->ph->env.clock.clockid_ns; in print_clock_data()
1773 clockid = ff->ph->env.clock.clockid; in print_clock_data()
1796 for (i = 0; i < ff->ph->env.nr_hybrid_nodes; i++) { in print_hybrid_topology()
1797 n = &ff->ph->env.hybrid_nodes[i]; in print_hybrid_topology()
1816 struct perf_env *env = &ff->ph->env; in print_bpf_prog_info() local
1820 down_read(&env->bpf_progs.lock); in print_bpf_prog_info()
1822 root = &env->bpf_progs.infos; in print_bpf_prog_info()
1835 env, fp); in print_bpf_prog_info()
1838 up_read(&env->bpf_progs.lock); in print_bpf_prog_info()
1843 struct perf_env *env = &ff->ph->env; in print_bpf_btf() local
1847 down_read(&env->bpf_progs.lock); in print_bpf_btf()
1849 root = &env->bpf_progs.btfs; in print_bpf_btf()
1863 up_read(&env->bpf_progs.lock); in print_bpf_btf()
2043 fprintf(fp, "# total memory : %llu kB\n", ff->ph->env.total_mem); in print_total_mem()
2051 for (i = 0; i < ff->ph->env.nr_numa_nodes; i++) { in print_numa_topology()
2052 n = &ff->ph->env.numa_nodes[i]; in print_numa_topology()
2065 fprintf(fp, "# cpuid : %s\n", ff->ph->env.cpuid); in print_cpuid()
2088 for (i = 0; i < ff->ph->env.caches_cnt; i++) { in print_cache()
2090 cpu_cache_level__fprintf(fp, &ff->ph->env.caches[i]); in print_cache()
2097 ff->ph->env.comp_type == PERF_COMP_ZSTD ? "Zstd" : "Unknown", in print_compressed()
2098 ff->ph->env.comp_level, ff->ph->env.comp_ratio); in print_compressed()
2122 __print_pmu_caps(fp, ff->ph->env.nr_cpu_pmu_caps, in print_cpu_pmu_caps()
2123 ff->ph->env.cpu_pmu_caps, (char *)"cpu"); in print_cpu_pmu_caps()
2128 struct perf_env *env = &ff->ph->env; in print_pmu_caps() local
2131 for (int i = 0; i < env->nr_pmus_with_caps; i++) { in print_pmu_caps()
2132 pmu_caps = &env->pmu_caps[i]; in print_pmu_caps()
2137 if (strcmp(perf_env__arch(env), "x86") == 0 && in print_pmu_caps()
2138 perf_env__has_pmu_mapping(env, "ibs_op")) { in print_pmu_caps()
2139 char *max_precise = perf_env__find_pmu_cap(env, "cpu", "max_precise"); in print_pmu_caps()
2148 struct perf_env *env = &ff->ph->env; in print_pmu_mappings() local
2154 pmu_num = env->nr_pmu_mappings; in print_pmu_mappings()
2160 str = env->pmu_mappings; in print_pmu_mappings()
2242 struct perf_env *env = &ff->ph->env; in print_mem_topology() local
2246 nodes = env->memory_nodes; in print_mem_topology()
2247 nr = env->nr_memory_nodes; in print_mem_topology()
2250 nr, env->memory_bsize); in print_mem_topology()
2253 memory_node__fprintf(&nodes[i], env->memory_bsize, fp); in print_mem_topology()
2421 free(ff->ph->env.__feat_env); \
2422 ff->ph->env.__feat_env = do_read_string(ff); \
2423 return ff->ph->env.__feat_env ? 0 : -ENOMEM; \
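The three lines at 2421-2423 are the body of a generator macro: it produces the process_*() callbacks for the plain string features (hostname, os release, version, arch, cpu description, cpuid) by freeing any previous value, reading the string, and returning -ENOMEM when the read fails. The enclosing definition is, roughly (the macro and parameter names are best-effort reconstructions):

#define FEAT_PROCESS_STR_FUN(__feat, __feat_env)                            \
static int process_##__feat(struct feat_fd *ff, void *data __maybe_unused) \
{                                                                           \
        free(ff->ph->env.__feat_env);                                       \
        ff->ph->env.__feat_env = do_read_string(ff);                        \
        return ff->ph->env.__feat_env ? 0 : -ENOMEM;                        \
}

/* e.g. FEAT_PROCESS_STR_FUN(hostname, hostname) expands to process_hostname() */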
2451 struct perf_env *env = &ff->ph->env; in process_nrcpus() local
2462 env->nr_cpus_avail = (int)nr_cpus_avail; in process_nrcpus()
2463 env->nr_cpus_online = (int)nr_cpus_online; in process_nrcpus()
2469 struct perf_env *env = &ff->ph->env; in process_total_mem() local
2476 env->total_mem = (unsigned long long)total_mem; in process_total_mem()
2537 struct perf_env *env = &ff->ph->env; in process_cmdline() local
2544 env->nr_cmdline = nr; in process_cmdline()
2564 env->cmdline = cmdline; in process_cmdline()
2565 env->cmdline_argv = (const char **) argv; in process_cmdline()
2579 struct perf_env *env = &ff->ph->env; in process_cpu_topology() local
2580 int cpu_nr = env->nr_cpus_avail; in process_cpu_topology()
2583 env->cpu = calloc(cpu_nr, sizeof(*env->cpu)); in process_cpu_topology()
2584 if (!env->cpu) in process_cpu_topology()
2590 env->nr_sibling_cores = nr; in process_cpu_topology()
2606 env->sibling_cores = strbuf_detach(&sb, NULL); in process_cpu_topology()
2611 env->nr_sibling_threads = nr; in process_cpu_topology()
2625 env->sibling_threads = strbuf_detach(&sb, NULL); in process_cpu_topology()
2632 zfree(&env->cpu); in process_cpu_topology()
2640 env->cpu[i].core_id = nr; in process_cpu_topology()
2646 env->cpu[i].socket_id = nr; in process_cpu_topology()
2660 env->nr_sibling_dies = nr; in process_cpu_topology()
2674 env->sibling_dies = strbuf_detach(&sb, NULL); in process_cpu_topology()
2680 env->cpu[i].die_id = nr; in process_cpu_topology()
2689 zfree(&env->cpu); in process_cpu_topology()
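On the read side, process_cpu_topology() allocates env->cpu up front, rebuilds the sibling strings through a strbuf, then reads the per-CPU ids back in the order they were written: core_id/socket_id pairs first, die_id values afterwards (only when the section is large enough to contain them); any failure drops the array with zfree(). A condensed sketch of the per-CPU loops, with the size bookkeeping and sanity checks of the real code left out:

        for (i = 0; i < (u32)cpu_nr; i++) {
                if (do_read_u32(ff, &nr))
                        goto free_cpu;
                env->cpu[i].core_id = nr;

                if (do_read_u32(ff, &nr))
                        goto free_cpu;
                env->cpu[i].socket_id = nr;
        }

        /* older perf.data files stop here; newer ones also carry die ids */

        for (i = 0; i < (u32)cpu_nr; i++) {
                if (do_read_u32(ff, &nr))
                        goto free_cpu;
                env->cpu[i].die_id = nr;
        }

        return 0;

free_cpu:
        zfree(&env->cpu);
        return -1;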
2695 struct perf_env *env = &ff->ph->env; in process_numa_topology() local
2730 env->nr_numa_nodes = nr; in process_numa_topology()
2731 env->numa_nodes = nodes; in process_numa_topology()
2741 struct perf_env *env = &ff->ph->env; in process_pmu_mappings() local
2755 env->nr_pmu_mappings = pmu_num; in process_pmu_mappings()
2774 env->msr_pmu_type = type; in process_pmu_mappings()
2780 free(env->pmu_mappings); in process_pmu_mappings()
2781 env->pmu_mappings = strbuf_detach(&sb, NULL); in process_pmu_mappings()
2791 struct perf_env *env = &ff->ph->env; in process_group_desc() local
2805 env->nr_groups = nr_groups; in process_group_desc()
2889 struct perf_env *env = &ff->ph->env; in process_cache() local
2930 env->caches = caches; in process_cache()
2931 env->caches_cnt = cnt; in process_cache()
2967 struct perf_env *env = &ff->ph->env; in process_mem_topology() local
3006 env->memory_bsize = bsize; in process_mem_topology()
3007 env->memory_nodes = nodes; in process_mem_topology()
3008 env->nr_memory_nodes = nr; in process_mem_topology()
3020 struct perf_env *env = &ff->ph->env; in process_clockid() local
3022 if (do_read_u64(ff, &env->clock.clockid_res_ns)) in process_clockid()
3031 struct perf_env *env = &ff->ph->env; in process_clock_data() local
3046 env->clock.clockid = data32; in process_clock_data()
3052 env->clock.tod_ns = data64; in process_clock_data()
3058 env->clock.clockid_ns = data64; in process_clock_data()
3059 env->clock.enabled = true; in process_clock_data()
3066 struct perf_env *env = &ff->ph->env; in process_hybrid_topology() local
3090 env->nr_hybrid_nodes = nr; in process_hybrid_topology()
3091 env->hybrid_nodes = nodes; in process_hybrid_topology()
3123 struct perf_env *env = &ff->ph->env; in process_bpf_prog_info() local
3136 down_write(&env->bpf_progs.lock); in process_bpf_prog_info()
3178 if (!__perf_env__insert_bpf_prog_info(env, info_node)) { in process_bpf_prog_info()
3184 up_write(&env->bpf_progs.lock); in process_bpf_prog_info()
3189 up_write(&env->bpf_progs.lock); in process_bpf_prog_info()
3195 struct perf_env *env = &ff->ph->env; in process_bpf_btf() local
3208 down_write(&env->bpf_progs.lock); in process_bpf_btf()
3228 if (!__perf_env__insert_btf(env, node)) in process_bpf_btf()
3235 up_write(&env->bpf_progs.lock); in process_bpf_btf()
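The BPF readers mirror the writers but take env->bpf_progs.lock for writing, because they insert nodes into the rbtrees owned by perf_env: process_bpf_prog_info() hands each bpf_prog_info_node to __perf_env__insert_bpf_prog_info(), process_bpf_btf() hands each btf_node to __perf_env__insert_btf(), and a false return (a duplicate id) means the caller must free the node itself. The shared shape, heavily condensed; read_one_btf_node() is a hypothetical stand-in for the record parsing the real code does inline:

        down_write(&env->bpf_progs.lock);

        for (i = 0; i < count; i++) {
                struct btf_node *node;

                node = read_one_btf_node(ff);   /* hypothetical helper */
                if (!node)
                        goto out;

                if (!__perf_env__insert_btf(env, node))
                        free(node);             /* duplicate id: tree keeps the existing node */
        }
        err = 0;
out:
        up_write(&env->bpf_progs.lock);
        return err;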
3244 struct perf_env *env = &ff->ph->env; in process_compressed() local
3246 if (do_read_u32(ff, &(env->comp_ver))) in process_compressed()
3249 if (do_read_u32(ff, &(env->comp_type))) in process_compressed()
3252 if (do_read_u32(ff, &(env->comp_level))) in process_compressed()
3255 if (do_read_u32(ff, &(env->comp_ratio))) in process_compressed()
3258 if (do_read_u32(ff, &(env->comp_mmap_len))) in process_compressed()
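process_compressed() reads back exactly what write_compressed() emitted, five u32 values in the same order:

static int process_compressed(struct feat_fd *ff, void *data __maybe_unused)
{
        struct perf_env *env = &ff->ph->env;

        if (do_read_u32(ff, &(env->comp_ver)))
                return -1;

        if (do_read_u32(ff, &(env->comp_type)))
                return -1;

        if (do_read_u32(ff, &(env->comp_level)))
                return -1;

        if (do_read_u32(ff, &(env->comp_ratio)))
                return -1;

        if (do_read_u32(ff, &(env->comp_mmap_len)))
                return -1;

        return 0;
}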
3330 struct perf_env *env = &ff->ph->env; in process_cpu_pmu_caps() local
3331 int ret = __process_pmu_caps(ff, &env->nr_cpu_pmu_caps, in process_cpu_pmu_caps()
3332 &env->cpu_pmu_caps, in process_cpu_pmu_caps()
3333 &env->max_branches, in process_cpu_pmu_caps()
3334 &env->br_cntr_nr, in process_cpu_pmu_caps()
3335 &env->br_cntr_width); in process_cpu_pmu_caps()
3337 if (!ret && !env->cpu_pmu_caps) in process_cpu_pmu_caps()
3344 struct perf_env *env = &ff->ph->env; in process_pmu_caps() local
3382 env->nr_pmus_with_caps = nr_pmu; in process_pmu_caps()
3383 env->pmu_caps = pmu_caps; in process_pmu_caps()
4251 session->machines.host.env = &header->env; in perf_session__read_header()