Lines matching refs:cpu (references to "cpu" in tools/perf/util/cpumap.c, Linux perf tools)

79 		if (data->cpus_data.cpu[i] == (u16) -1) {  in cpu_map__from_entries()
80 			RC_CHK_ACCESS(map)->map[i].cpu = -1; in cpu_map__from_entries()
81 		} else if (data->cpus_data.cpu[i] < INT16_MAX) { in cpu_map__from_entries()
82 			RC_CHK_ACCESS(map)->map[i].cpu = (int16_t) data->cpus_data.cpu[i]; in cpu_map__from_entries()
84 			pr_err("Invalid cpumap entry %u\n", data->cpus_data.cpu[i]); in cpu_map__from_entries()
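
The guard above exists because perf stores CPU numbers as int16_t while synthesized events carry them as u16. A minimal standalone sketch of the same conversion, not the perf API (convert_entry is a hypothetical name):

	#include <stdint.h>
	#include <stdio.h>

	/* Accept a u16 cpumap entry only if it fits in int16_t, passing
	 * (u16)-1 through as the usual -1 "any CPU" value. */
	static int convert_entry(uint16_t raw, int16_t *out)
	{
		if (raw == (uint16_t)-1) {
			*out = -1;		/* wildcard "any CPU" slot */
			return 0;
		}
		if (raw < INT16_MAX) {
			*out = (int16_t)raw;	/* fits in the int16_t map slot */
			return 0;
		}
		fprintf(stderr, "Invalid cpumap entry %u\n", (unsigned int)raw);
		return -1;
	}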
110 		int cpu; in cpu_map__from_mask() local
113 		for_each_set_bit(cpu, local_copy, 64) { in cpu_map__from_mask()
114 			if (cpu + cpus_per_i < INT16_MAX) { in cpu_map__from_mask()
115 				RC_CHK_ACCESS(map)->map[j++].cpu = cpu + cpus_per_i; in cpu_map__from_mask()
117 				pr_err("Invalid cpumap entry %d\n", cpu + cpus_per_i); in cpu_map__from_mask()
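
The mask form stores CPUs as bitmap words: bit b of word i names CPU i*64 + b, which is what the cpus_per_i offset expresses. A sketch of that walk, assuming 64-bit mask words, with a plain loop standing in for the kernel's for_each_set_bit():

	#include <stdint.h>
	#include <stdio.h>

	/* Visit every CPU named by one 64-bit mask word; base is the
	 * word's starting CPU number (i * 64 in the original). */
	static void walk_mask_word(uint64_t word, int base)
	{
		for (int bit = 0; bit < 64; bit++) {
			if (!(word & (1ULL << bit)))
				continue;	/* CPU not present in the mask */
			if (base + bit < INT16_MAX)
				printf("cpu %d\n", base + bit);
			else
				fprintf(stderr, "Invalid cpumap entry %d\n", base + bit);
		}
	}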
138 		RC_CHK_ACCESS(map)->map[i++].cpu = -1; in cpu_map__from_range()
140 	for (int cpu = data->range_cpu_data.start_cpu; cpu <= data->range_cpu_data.end_cpu; in cpu_map__from_range() local
141 	     i++, cpu++) { in cpu_map__from_range()
142 		if (cpu < INT16_MAX) { in cpu_map__from_range()
143 			RC_CHK_ACCESS(map)->map[i].cpu = cpu; in cpu_map__from_range()
145 			pr_err("Invalid cpumap entry %d\n", cpu); in cpu_map__from_range()
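
The range form is the most compact encoding: an optional leading "any CPU" slot, then one consecutive run. A standalone sketch of the decoding (expand_range is a hypothetical name; the map is a plain array rather than perf's reference-counted struct):

	#include <stdint.h>
	#include <stdio.h>

	/* Expand [start, end] into the map, with an optional -1 slot
	 * first; every CPU is re-checked against the int16_t limit. */
	static int expand_range(int16_t *map, int any_cpu, int start, int end)
	{
		int i = 0;

		if (any_cpu)
			map[i++] = -1;
		for (int cpu = start; cpu <= end; i++, cpu++) {
			if (cpu >= INT16_MAX) {
				fprintf(stderr, "Invalid cpumap entry %d\n", cpu);
				return -1;
			}
			map[i] = (int16_t)cpu;
		}
		return i;	/* number of entries written */
	}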
185 			RC_CHK_ACCESS(cpus)->map[i].cpu = -1; in perf_cpu_map__empty_new()
206 static int cpu__get_topology_int(int cpu, const char *name, int *value) in cpu__get_topology_int() argument
211 "devices/system/cpu/cpu%d/topology/%s", cpu, name); in cpu__get_topology_int()
216 int cpu__get_socket_id(struct perf_cpu cpu) in cpu__get_socket_id() argument
218 	int value, ret = cpu__get_topology_int(cpu.cpu, "physical_package_id", &value); in cpu__get_socket_id()
222 struct aggr_cpu_id aggr_cpu_id__socket(struct perf_cpu cpu, void *data __maybe_unused) in aggr_cpu_id__socket() argument
226 	id.socket = cpu__get_socket_id(cpu); in aggr_cpu_id__socket()
258 	struct perf_cpu cpu; in cpu_aggr_map__new() local
267 	perf_cpu_map__for_each_cpu(cpu, idx, cpus) { in cpu_aggr_map__new()
269 		struct aggr_cpu_id cpu_id = get_id(cpu, data); in cpu_aggr_map__new()
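
cpu_aggr_map__new() walks every CPU in the map, converts it with the get_id callback, and keeps only IDs it has not seen before, so the result holds one entry per socket/die/cluster/core. A reduced sketch of that dedup step (tiny_id is a hypothetical stand-in for aggr_cpu_id):

	struct tiny_id { int socket, die, core; };

	static int tiny_id__equal(const struct tiny_id *a, const struct tiny_id *b)
	{
		return a->socket == b->socket && a->die == b->die &&
		       a->core == b->core;
	}

	/* Append new_id unless an equal ID is already present; returns
	 * the (possibly unchanged) number of entries. */
	static int append_unique(struct tiny_id *ids, int nr, struct tiny_id new_id)
	{
		for (int i = 0; i < nr; i++)
			if (tiny_id__equal(&ids[i], &new_id))
				return nr;	/* CPU folds into an existing entry */
		ids[nr] = new_id;
		return nr + 1;
	}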
300 int cpu__get_die_id(struct perf_cpu cpu) in cpu__get_die_id() argument
302 	int value, ret = cpu__get_topology_int(cpu.cpu, "die_id", &value); in cpu__get_die_id()
307 struct aggr_cpu_id aggr_cpu_id__die(struct perf_cpu cpu, void *data) in aggr_cpu_id__die() argument
312 	die = cpu__get_die_id(cpu); in aggr_cpu_id__die()
322 	id = aggr_cpu_id__socket(cpu, data); in aggr_cpu_id__die()
330 int cpu__get_cluster_id(struct perf_cpu cpu) in cpu__get_cluster_id() argument
332 	int value, ret = cpu__get_topology_int(cpu.cpu, "cluster_id", &value); in cpu__get_cluster_id()
337 struct aggr_cpu_id aggr_cpu_id__cluster(struct perf_cpu cpu, void *data) in aggr_cpu_id__cluster() argument
339 	int cluster = cpu__get_cluster_id(cpu); in aggr_cpu_id__cluster()
346 	id = aggr_cpu_id__die(cpu, data); in aggr_cpu_id__cluster()
354 int cpu__get_core_id(struct perf_cpu cpu) in cpu__get_core_id() argument
356 	int value, ret = cpu__get_topology_int(cpu.cpu, "core_id", &value); in cpu__get_core_id()
360 struct aggr_cpu_id aggr_cpu_id__core(struct perf_cpu cpu, void *data) in aggr_cpu_id__core() argument
363 	int core = cpu__get_core_id(cpu); in aggr_cpu_id__core()
366 	id = aggr_cpu_id__cluster(cpu, data); in aggr_cpu_id__core()
379 struct aggr_cpu_id aggr_cpu_id__cpu(struct perf_cpu cpu, void *data) in aggr_cpu_id__cpu() argument
384 	id = aggr_cpu_id__core(cpu, data); in aggr_cpu_id__cpu()
388 	id.cpu = cpu; in aggr_cpu_id__cpu()
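
The constructors above nest: aggr_cpu_id__cpu() calls __core(), which calls __cluster(), then __die(), then __socket(), each filling its own field. A core ID therefore also carries its cluster, die and socket, keeping core numbers unique across packages. A sketch of the combined effect, reusing the hypothetical topology_int() from earlier:

	struct nested_id { int socket, die, cluster, core; };

	static struct nested_id make_core_id(int cpu)
	{
		/* -1 marks a level this system's sysfs tree does not expose;
		 * topology_int() leaves the field untouched on failure. */
		struct nested_id id = { .socket = -1, .die = -1,
					.cluster = -1, .core = -1 };

		topology_int(cpu, "physical_package_id", &id.socket);
		topology_int(cpu, "die_id", &id.die);
		topology_int(cpu, "cluster_id", &id.cluster);
		topology_int(cpu, "core_id", &id.core);
		return id;
	}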
393 struct aggr_cpu_id aggr_cpu_id__node(struct perf_cpu cpu, void *data __maybe_unused) in aggr_cpu_id__node() argument
397 	id.node = cpu__get_node(cpu); in aggr_cpu_id__node()
401 struct aggr_cpu_id aggr_cpu_id__global(struct perf_cpu cpu, void *data __maybe_unused) in aggr_cpu_id__global() argument
406 	cpu.cpu = 0; in aggr_cpu_id__global()
407 	id.cpu = cpu; in aggr_cpu_id__global()
451 	max_cpu_num.cpu = 4096; in set_max_cpu_num()
452 	max_present_cpu_num.cpu = 4096; in set_max_cpu_num()
469 	max_cpu_num.cpu = max; in set_max_cpu_num()
485 	max_present_cpu_num.cpu = (int16_t)max; in set_max_cpu_num()
488 		pr_err("Failed to read max cpus, using default of %d\n", max_cpu_num.cpu); in set_max_cpu_num()
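
set_max_cpu_num() defaults both limits to 4096 and then reads the possible/present CPU lists from sysfs. A sketch assuming the common single-range form of /sys/devices/system/cpu/possible (e.g. "0-63"); the real code copes with more list shapes:

	#include <stdio.h>

	/* Return the number of possible CPUs, falling back to 4096
	 * when the sysfs file cannot be read or parsed. */
	static int max_possible_cpus(void)
	{
		int start = 0, end = 4095;	/* fallback: 4096 CPUs */
		FILE *f = fopen("/sys/devices/system/cpu/possible", "r");

		if (f) {
			if (fscanf(f, "%d-%d", &start, &end) == 1)
				end = start;	/* single-CPU form, e.g. "0" */
			fclose(f);
		}
		return end + 1;
	}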
529 	if (unlikely(!max_cpu_num.cpu)) in cpu__max_cpu()
537 	if (unlikely(!max_present_cpu_num.cpu)) in cpu__max_present_cpu()
544 int cpu__get_node(struct perf_cpu cpu) in cpu__get_node() argument
551 	return cpunode_map[cpu.cpu]; in cpu__get_node()
561 	cpunode_map = calloc(max_cpu_num.cpu, sizeof(int)); in init_cpunode_map()
567 	for (i = 0; i < max_cpu_num.cpu; i++) in init_cpunode_map()
577 	unsigned int cpu, mem; in cpu__setup_cpunode_map() local
616 			if (dent2->d_type != DT_LNK || sscanf(dent2->d_name, "cpu%u", &cpu) < 1) in cpu__setup_cpunode_map()
618 			cpunode_map[cpu] = mem; in cpu__setup_cpunode_map()
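
cpu__setup_cpunode_map() builds the CPU-to-NUMA-node table by scanning /sys/devices/system/node/node<M> for cpu<N> symlinks. A sketch of one node's scan (map_node_cpus is a hypothetical name; the DT_LNK filter from the original is kept so regular files like cpumap and cpulist are skipped):

	#define _DEFAULT_SOURCE		/* for DT_LNK on glibc */
	#include <dirent.h>
	#include <stdio.h>

	/* Record node for every cpu<N> symlink found in node_dir. */
	static void map_node_cpus(const char *node_dir, int node, int *cpunode_map)
	{
		DIR *dir = opendir(node_dir);
		struct dirent *dent;
		unsigned int cpu;

		if (!dir)
			return;
		while ((dent = readdir(dir)) != NULL) {
			if (dent->d_type != DT_LNK ||
			    sscanf(dent->d_name, "cpu%u", &cpu) < 1)
				continue;	/* not a cpu<N> symlink */
			cpunode_map[cpu] = node;
		}
		closedir(dir);
	}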
635 		struct perf_cpu cpu = { .cpu = INT16_MAX }; in cpu_map__snprint() local
639 			cpu = perf_cpu_map__cpu(map, i); in cpu_map__snprint()
646 						perf_cpu_map__cpu(map, i).cpu); in cpu_map__snprint()
648 		} else if (((i - start) != (cpu.cpu - perf_cpu_map__cpu(map, start).cpu)) || last) { in cpu_map__snprint()
654 						perf_cpu_map__cpu(map, start).cpu); in cpu_map__snprint()
658 						perf_cpu_map__cpu(map, start).cpu, perf_cpu_map__cpu(map, end).cpu); in cpu_map__snprint()
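
cpu_map__snprint() compacts consecutive CPUs into start-end ranges. A standalone sketch over a sorted int array, assuming buf is large enough, so {0,1,2,3,8} formats as "0-3,8":

	#include <stdio.h>

	/* Format a sorted CPU list, collapsing consecutive runs. */
	static int snprint_cpus(char *buf, size_t size, const int *cpus, int nr)
	{
		int ret = 0, start = 0;

		for (int i = 0; i < nr; i++) {
			/* extend the run while CPU numbers stay consecutive */
			if (i + 1 < nr && cpus[i + 1] == cpus[i] + 1)
				continue;
			if (start == i)
				ret += snprintf(buf + ret, size - ret, "%s%d",
						start ? "," : "", cpus[start]);
			else
				ret += snprintf(buf + ret, size - ret, "%s%d-%d",
						start ? "," : "", cpus[start], cpus[i]);
			start = i + 1;
		}
		return ret;
	}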
690 	bitmap = zalloc(last_cpu.cpu / 8 + 1); in cpu_map__snprint_mask()
697 		bitmap[c.cpu / 8] |= 1 << (c.cpu % 8); in cpu_map__snprint_mask()
699 	for (int cpu = last_cpu.cpu / 4 * 4; cpu >= 0; cpu -= 4) { in cpu_map__snprint_mask() local
700 		unsigned char bits = bitmap[cpu / 8]; in cpu_map__snprint_mask()
702 		if (cpu % 8) in cpu_map__snprint_mask()
708 		if ((cpu % 32) == 0 && cpu > 0) in cpu_map__snprint_mask()
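
cpu_map__snprint_mask() prints the kernel cpumask format instead: one bit per CPU in a byte bitmap, emitted as hex nibbles from the most significant end, with a comma every 32 CPUs (e.g. "ff,ffffffff"). A sketch of the emit loop over the same bitmap layout:

	#include <stdio.h>

	/* Print the bitmap as kernel-style hex mask groups. */
	static void print_mask(const unsigned char *bitmap, int last_cpu)
	{
		for (int cpu = last_cpu / 4 * 4; cpu >= 0; cpu -= 4) {
			unsigned char bits = bitmap[cpu / 8];

			if (cpu % 8)
				bits >>= 4;	/* high nibble of this byte */
			else
				bits &= 0xf;	/* low nibble */
			printf("%x", bits);
			if ((cpu % 32) == 0 && cpu > 0)
				printf(",");
		}
		printf("\n");
	}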
738 		a->cpu.cpu == b->cpu.cpu; in aggr_cpu_id__equal()
751 		a->cpu.cpu == -1; in aggr_cpu_id__is_empty()
765 		.cpu = (struct perf_cpu){ .cpu = -1 }, in aggr_cpu_id__empty()
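
The file closes with the sentinel convention: an aggr_cpu_id is "empty" when every member holds -1, and both emptiness and equality are plain field-by-field compares. A reduced three-field sketch of the same convention:

	struct small_id { int socket, core, cpu; };

	/* The all -1 sentinel, mirroring aggr_cpu_id__empty(). */
	static const struct small_id empty_id = {
		.socket = -1, .core = -1, .cpu = -1,
	};

	static int small_id__is_empty(const struct small_id *a)
	{
		return a->socket == -1 && a->core == -1 && a->cpu == -1;
	}

	static int small_id__equal(const struct small_id *a, const struct small_id *b)
	{
		return a->socket == b->socket && a->core == b->core &&
		       a->cpu == b->cpu;
	}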