Lines matching refs:i — cross-reference search results for every use of the loop variable i in the hypervisor profiling code. The leading number on each line is the line number in the profiling source file; "local" marks the declaration sites.

57 uint32_t i, group_id; in profiling_initialize_pmi() local
71 for (i = 0U; i < MAX_MSR_LIST_NUM; i++) { in profiling_initialize_pmi()
72 msrop = &(ss->pmi_initial_msr_list[group_id][i]); in profiling_initialize_pmi()
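The match at lines 71-72, and the structurally identical loops in profiling_enable_pmu() and profiling_disable_pmu() just below, walk one group's fixed-size MSR list and apply each entry in turn. A minimal standalone sketch of that pattern, with assumed values for MAX_MSR_LIST_NUM, an assumed MSR_OP_NONE list terminator, and msr_write() stubbed out:

#include <stdint.h>

#define MAX_MSR_LIST_NUM  15U  /* assumed list capacity */
#define MSR_OP_NONE       0U   /* assumed terminator encoding */
#define MSR_OP_WRITE      1U   /* assumed write-op encoding */

struct profiling_msr_op {
	uint64_t value;        /* value to write (or read result) */
	uint32_t msr_id;       /* MSR address */
	uint16_t msr_op_type;  /* MSR_OP_NONE ends the list early */
};

/* Stub standing in for the hypervisor's privileged MSR write. */
static void msr_write(uint32_t msr_id, uint64_t value)
{
	(void)msr_id;
	(void)value;
}

/* Apply one group's MSR list, mirroring the loop at lines 71-72. */
static void apply_msr_list(struct profiling_msr_op list[MAX_MSR_LIST_NUM])
{
	uint32_t i;

	for (i = 0U; i < MAX_MSR_LIST_NUM; i++) {
		struct profiling_msr_op *msrop = &list[i];

		if (msrop->msr_op_type == MSR_OP_NONE) {
			break; /* terminator: remaining slots are unused */
		}
		if (msrop->msr_op_type == MSR_OP_WRITE) {
			msr_write(msrop->msr_id, msrop->value);
		}
	}
}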
101 uint32_t i; in profiling_enable_pmu() local
150 for (i = 0U; i < MAX_MSR_LIST_NUM; i++) { in profiling_enable_pmu()
151 msrop = &(ss->pmi_start_msr_list[group_id][i]); in profiling_enable_pmu()
177 uint32_t i; in profiling_disable_pmu() local
197 for (i = 0U; i < MAX_MSR_LIST_NUM; i++) { in profiling_disable_pmu()
198 msrop = &(ss->pmi_stop_msr_list[group_id][i]); in profiling_disable_pmu()
470 uint32_t i, j; in profiling_handle_msrops() local
494 for (i = 0U; i < my_msr_node->num_entries; i++) { in profiling_handle_msrops()
495 switch (my_msr_node->entries[i].msr_op_type) { in profiling_handle_msrops()
497 my_msr_node->entries[i].value in profiling_handle_msrops()
498 = msr_read(my_msr_node->entries[i].msr_id); in profiling_handle_msrops()
501 __func__, get_pcpu_id(), my_msr_node->entries[i].msr_id, in profiling_handle_msrops()
502 my_msr_node->entries[i].value); in profiling_handle_msrops()
505 my_msr_node->entries[i].value in profiling_handle_msrops()
506 = msr_read(my_msr_node->entries[i].msr_id); in profiling_handle_msrops()
509 __func__, get_pcpu_id(), my_msr_node->entries[i].msr_id, in profiling_handle_msrops()
510 my_msr_node->entries[i].value); in profiling_handle_msrops()
511 msr_write(my_msr_node->entries[i].msr_id, 0U); in profiling_handle_msrops()
514 msr_write(my_msr_node->entries[i].msr_id, in profiling_handle_msrops()
515 my_msr_node->entries[i].value); in profiling_handle_msrops()
518 __func__, get_pcpu_id(), my_msr_node->entries[i].msr_id, in profiling_handle_msrops()
519 my_msr_node->entries[i].value); in profiling_handle_msrops()
523 __func__, my_msr_node->entries[i].msr_op_type, in profiling_handle_msrops()
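The switch at lines 494-523 dispatches each entry on its msr_op_type: a read captures the current MSR value, a read-clear captures it and then zeroes the counter, and a write programs the requested value. A compact sketch of one iteration, with assumed enum values and stubbed msr_read()/msr_write():

#include <stdint.h>
#include <stdio.h>

/* Assumed op encodings; the real values live in the profiling headers. */
enum msr_op_type { MSR_OP_READ, MSR_OP_READ_CLEAR, MSR_OP_WRITE };

struct profiling_msr_entry {
	uint32_t msr_id;
	uint64_t value;
	enum msr_op_type msr_op_type;
};

/* Stubs for the privileged accessors used in the matches above. */
static uint64_t msr_read(uint32_t msr_id) { (void)msr_id; return 0U; }
static void msr_write(uint32_t msr_id, uint64_t v) { (void)msr_id; (void)v; }

/* One iteration of the per-entry dispatch at lines 494-523. */
static void handle_msr_entry(struct profiling_msr_entry *e)
{
	switch (e->msr_op_type) {
	case MSR_OP_READ:
		e->value = msr_read(e->msr_id);   /* capture current value */
		break;
	case MSR_OP_READ_CLEAR:
		e->value = msr_read(e->msr_id);   /* capture, then... */
		msr_write(e->msr_id, 0U);         /* ...reset the counter */
		break;
	case MSR_OP_WRITE:
		msr_write(e->msr_id, e->value);   /* program requested value */
		break;
	default:
		printf("unknown op %d\n", e->msr_op_type); /* error path */
		break;
	}
}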
573 uint32_t i; in profiling_pmi_handler() local
588 for (i = 0U; i < MAX_MSR_LIST_NUM; i++) { in profiling_pmi_handler()
589 msrop = &(ss->pmi_entry_msr_list[group_id][i]); in profiling_pmi_handler()
650 for (i = 0U; i < LBR_NUM_REGISTERS; i++) { in profiling_pmi_handler()
651 psample->lsample.lbr_from_ip[i] in profiling_pmi_handler()
652 = msr_read(MSR_CORE_LASTBRANCH_0_FROM_IP + i); in profiling_pmi_handler()
653 psample->lsample.lbr_to_ip[i] in profiling_pmi_handler()
654 = msr_read(MSR_CORE_LASTBRANCH_0_TO_IP + i); in profiling_pmi_handler()
670 for (i = 0U; i < MAX_MSR_LIST_NUM; i++) { in profiling_pmi_handler()
671 msrop = &(ss->pmi_exit_msr_list[group_id][i]); in profiling_pmi_handler()
697 for (i = 0U; i < MAX_MSR_LIST_NUM; i++) { in profiling_pmi_handler()
698 msrop = &(ss->pmi_start_msr_list[group_id][i]); in profiling_pmi_handler()
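Lines 650-654 capture the last-branch-record stack inside the PMI handler, bracketed by the entry-list (588-589) and exit-list (670-671) MSR programming and a restart of the start list (697-698). The FROM and TO MSRs form two consecutive register banks, so entry i is addressed by adding i to each bank's base MSR. A standalone sketch of the LBR capture, with the base addresses and LBR_NUM_REGISTERS filled in as assumptions:

#include <stdint.h>

#define LBR_NUM_REGISTERS              32U     /* assumed LBR stack depth */
#define MSR_CORE_LASTBRANCH_0_FROM_IP  0x680U  /* assumed base address */
#define MSR_CORE_LASTBRANCH_0_TO_IP    0x6C0U  /* assumed base address */

struct lbr_sample {
	uint64_t lbr_from_ip[LBR_NUM_REGISTERS];
	uint64_t lbr_to_ip[LBR_NUM_REGISTERS];
};

/* Stub standing in for the privileged MSR read. */
static uint64_t msr_read(uint32_t msr_id) { (void)msr_id; return 0U; }

/* Snapshot the last-branch-record stack, as at lines 650-654: entry i
 * is reached by adding i to each bank's base MSR address. */
static void capture_lbr_stack(struct lbr_sample *s)
{
	uint32_t i;

	for (i = 0U; i < LBR_NUM_REGISTERS; i++) {
		s->lbr_from_ip[i] = msr_read(MSR_CORE_LASTBRANCH_0_FROM_IP + i);
		s->lbr_to_ip[i]   = msr_read(MSR_CORE_LASTBRANCH_0_TO_IP + i);
	}
}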
720 uint16_t i; in profiling_start_pmu() local
729 for (i = 0U; i < pcpu_nums; i++) { in profiling_start_pmu()
730 if (per_cpu(profiling_info.s_state, i).pmu_state != PMU_SETUP) { in profiling_start_pmu()
732 __func__, get_cpu_var(profiling_info.s_state).pmu_state, i); in profiling_start_pmu()
737 for (i = 0U; i < pcpu_nums; i++) { in profiling_start_pmu()
738 per_cpu(profiling_info.ipi_cmd, i) = IPI_PMU_START; in profiling_start_pmu()
739 per_cpu(profiling_info.s_state, i).samples_logged = 0U; in profiling_start_pmu()
740 per_cpu(profiling_info.s_state, i).samples_dropped = 0U; in profiling_start_pmu()
741 per_cpu(profiling_info.s_state, i).valid_pmi_count = 0U; in profiling_start_pmu()
742 per_cpu(profiling_info.s_state, i).total_pmi_count = 0U; in profiling_start_pmu()
743 per_cpu(profiling_info.s_state, i).total_vmexit_count = 0U; in profiling_start_pmu()
744 per_cpu(profiling_info.s_state, i).frozen_well = 0U; in profiling_start_pmu()
745 per_cpu(profiling_info.s_state, i).frozen_delayed = 0U; in profiling_start_pmu()
746 per_cpu(profiling_info.s_state, i).nofrozen_pmi = 0U; in profiling_start_pmu()
747 per_cpu(profiling_info.s_state, i).pmu_state = PMU_RUNNING; in profiling_start_pmu()
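Lines 729-747 show profiling_start_pmu()'s two passes over the pcpus: first verify every cpu is in PMU_SETUP, then reset each cpu's sample and PMI counters and move it to PMU_RUNNING, with IPI_PMU_START telling the cpu to program its PMU. profiling_stop_pmu() (next group) reverses the transition and logs the final counters. A sketch under assumed type definitions:

#include <stdint.h>

enum pmu_state { PMU_INITIALIZED, PMU_SETUP, PMU_RUNNING }; /* assumed */
enum ipi_cmd { IPI_NONE, IPI_PMU_START, IPI_PMU_STOP };      /* assumed */

struct sep_state {
	enum pmu_state pmu_state;
	uint32_t samples_logged;
	uint32_t samples_dropped;
	uint32_t valid_pmi_count;
	uint32_t total_pmi_count;
	uint32_t total_vmexit_count;
	uint32_t frozen_well;
	uint32_t frozen_delayed;
	uint32_t nofrozen_pmi;
};

#define MAX_PCPU_NUM 8U /* assumed; the real cap is board-specific */
static struct sep_state s_state[MAX_PCPU_NUM];  /* stands in for per_cpu() */
static enum ipi_cmd ipi_cmd[MAX_PCPU_NUM];

/* Mirrors lines 729-747: refuse to start unless every pcpu has been
 * set up, then zero the per-cpu statistics and mark each pcpu RUNNING
 * before the IPI tells it to program its PMU. */
static int start_pmu(uint16_t pcpu_nums)
{
	uint16_t i;

	for (i = 0U; i < pcpu_nums; i++) {
		if (s_state[i].pmu_state != PMU_SETUP) {
			return -1; /* some cpu not ready; bail out */
		}
	}
	for (i = 0U; i < pcpu_nums; i++) {
		ipi_cmd[i] = IPI_PMU_START;
		s_state[i].samples_logged = 0U;
		s_state[i].samples_dropped = 0U;
		s_state[i].valid_pmi_count = 0U;
		s_state[i].total_pmi_count = 0U;
		s_state[i].total_vmexit_count = 0U;
		s_state[i].frozen_well = 0U;
		s_state[i].frozen_delayed = 0U;
		s_state[i].nofrozen_pmi = 0U;
		s_state[i].pmu_state = PMU_RUNNING;
	}
	return 0;
}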
762 uint16_t i; in profiling_stop_pmu() local
768 for (i = 0U; i < pcpu_nums; i++) { in profiling_stop_pmu()
769 per_cpu(profiling_info.ipi_cmd, i) = IPI_PMU_STOP; in profiling_stop_pmu()
770 if (per_cpu(profiling_info.s_state, i).pmu_state == PMU_RUNNING) { in profiling_stop_pmu()
771 per_cpu(profiling_info.s_state, i).pmu_state = PMU_SETUP; in profiling_stop_pmu()
776 __func__, i, per_cpu(profiling_info.s_state, i).total_pmi_count, in profiling_stop_pmu()
777 per_cpu(profiling_info.s_state, i).valid_pmi_count, in profiling_stop_pmu()
778 per_cpu(profiling_info.s_state, i).total_vmexit_count); in profiling_stop_pmu()
782 __func__, i, per_cpu(profiling_info.s_state, i).frozen_well, in profiling_stop_pmu()
783 per_cpu(profiling_info.s_state, i).frozen_delayed, in profiling_stop_pmu()
784 per_cpu(profiling_info.s_state, i).nofrozen_pmi); in profiling_stop_pmu()
788 __func__, i, per_cpu(profiling_info.s_state, i).samples_logged, in profiling_stop_pmu()
789 per_cpu(profiling_info.s_state, i).samples_dropped); in profiling_stop_pmu()
808 uint16_t i; in profiling_msr_ops_all_cpus() local
818 for (i = 0U; i < pcpu_nums; i++) { in profiling_msr_ops_all_cpus()
819 per_cpu(profiling_info.ipi_cmd, i) = IPI_MSR_OP; in profiling_msr_ops_all_cpus()
820 per_cpu(profiling_info.msr_node, i) = &(msr_list[i]); in profiling_msr_ops_all_cpus()
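Lines 818-820 fan one MSR-op request out to all cpus: each pcpu is handed a pointer to its own node of the caller's msr_list, and the IPI_MSR_OP command makes it run profiling_handle_msrops() locally. A sketch, assuming msr_list holds one node per pcpu:

#include <stdint.h>

#define MAX_PCPU_NUM 8U /* assumed */

struct profiling_msr_ops_list {
	uint32_t num_entries; /* per-entry ops follow in the real layout */
};

enum ipi_cmd { IPI_NONE, IPI_MSR_OP }; /* assumed */
static enum ipi_cmd ipi_cmd[MAX_PCPU_NUM];
static struct profiling_msr_ops_list *msr_node[MAX_PCPU_NUM];

/* Fan one MSR-op request out to every pcpu, as at lines 818-820. */
static void msr_ops_all_cpus(struct profiling_msr_ops_list *msr_list,
			     uint16_t pcpu_nums)
{
	uint16_t i;

	for (i = 0U; i < pcpu_nums; i++) {
		ipi_cmd[i] = IPI_MSR_OP;    /* ask the cpu to run its ops */
		msr_node[i] = &msr_list[i]; /* per-cpu private work item */
	}
}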
841 uint16_t i, j; in profiling_vm_list_info() local
854 for (i = 0U; i < pcpu_nums; i++) { in profiling_vm_list_info()
855 vm_info_list.vm_list[vm_idx].cpu_map[i].vcpu_id = i; in profiling_vm_list_info()
856 vm_info_list.vm_list[vm_idx].cpu_map[i].pcpu_id = i; in profiling_vm_list_info()
857 vm_info_list.vm_list[vm_idx].cpu_map[i].apic_id in profiling_vm_list_info()
858 = per_cpu(lapic_id, i); in profiling_vm_list_info()
860 vm_info_list.vm_list[vm_idx].num_vcpus = i; in profiling_vm_list_info()
875 i = 0U; in profiling_vm_list_info()
876 foreach_vcpu(i, tmp_vm, vcpu) { in profiling_vm_list_info()
877 vm_info_list.vm_list[vm_idx].cpu_map[i].vcpu_id in profiling_vm_list_info()
879 vm_info_list.vm_list[vm_idx].cpu_map[i].pcpu_id in profiling_vm_list_info()
881 vm_info_list.vm_list[vm_idx].cpu_map[i].apic_id = 0; in profiling_vm_list_info()
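For the hypervisor's own entry in the VM list (lines 854-860), the vcpu-to-pcpu mapping is the identity: vcpu i runs on pcpu i, the APIC id comes from per-cpu data, and num_vcpus ends up as the loop's final count. For guest VMs (lines 875-881), foreach_vcpu fills the same cpu_map fields per vcpu instead. A sketch of the hypervisor case, with assumed struct layouts and a stubbed lapic_id lookup:

#include <stdint.h>

#define MAX_NR_VCPUS 8U /* assumed per-VM vcpu limit */

struct profiling_vcpu_pcpu_map {
	int16_t vcpu_id;
	int16_t pcpu_id;
	uint32_t apic_id;
};

struct profiling_vm_info {
	uint32_t num_vcpus;
	struct profiling_vcpu_pcpu_map cpu_map[MAX_NR_VCPUS];
};

/* Stub standing in for per_cpu(lapic_id, i). */
static uint32_t lapic_id_of(uint16_t pcpu) { return (uint32_t)pcpu; }

/* The identity mapping of lines 854-860: vcpu i is pcpu i, and
 * num_vcpus is simply the loop's final count. */
static void fill_hv_cpu_map(struct profiling_vm_info *vm, uint16_t pcpu_nums)
{
	uint16_t i;

	for (i = 0U; i < pcpu_nums; i++) {
		vm->cpu_map[i].vcpu_id = (int16_t)i;
		vm->cpu_map[i].pcpu_id = (int16_t)i;
		vm->cpu_map[i].apic_id = lapic_id_of(i);
	}
	vm->num_vcpus = i;
}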
965 uint16_t i; in profiling_set_control() local
985 for (i = 0U; i < (uint16_t)MAX_SEP_FEATURE_ID; i++) { in profiling_set_control()
986 if (((new_switch ^ old_switch) & (0x1UL << i)) != 0UL) { in profiling_set_control()
987 switch (i) { in profiling_set_control()
990 if ((new_switch & (0x1UL << i)) != 0UL) { in profiling_set_control()
1003 __func__, i); in profiling_set_control()
1022 for (i = 0U; i < (uint16_t)MAX_SOCWATCH_FEATURE_ID; i++) { in profiling_set_control()
1023 if ((socwatch_collection_switch & (0x1UL << i)) != 0UL) { in profiling_set_control()
1024 switch (i) { in profiling_set_control()
1035 __func__, i); in profiling_set_control()
1040 for (i = 0U; i < pcpu_nums ; i++) { in profiling_set_control()
1041 per_cpu(profiling_info.soc_state, i) in profiling_set_control()
1048 for (i = 0U; i < pcpu_nums ; i++) { in profiling_set_control()
1049 per_cpu(profiling_info.soc_state, i) in profiling_set_control()
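Lines 985-990 use the classic toggle-detection idiom: XOR the old and new feature bitmasks to select the features whose state changed, then test the new mask to learn the direction of each change (the socwatch loop at lines 1022-1024 tests the new mask directly instead). A self-contained sketch, where MAX_SEP_FEATURE_ID is an assumed count and printf stands in for the per-feature enable/disable actions:

#include <stdint.h>
#include <stdio.h>

#define MAX_SEP_FEATURE_ID 16U /* assumed feature count */

/* Walk the changed bits of a feature switch, as at lines 985-990:
 * (new ^ old) selects the features that toggled, and testing new
 * tells us whether each one turned on or off. */
static void apply_switch_delta(uint64_t new_switch, uint64_t old_switch)
{
	uint16_t i;

	for (i = 0U; i < (uint16_t)MAX_SEP_FEATURE_ID; i++) {
		if (((new_switch ^ old_switch) & (0x1UL << i)) != 0UL) {
			if ((new_switch & (0x1UL << i)) != 0UL) {
				printf("feature %u enabled\n", (unsigned int)i);
			} else {
				printf("feature %u disabled\n", (unsigned int)i);
			}
		}
	}
}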
1074 uint16_t i; in profiling_configure_pmi() local
1084 for (i = 0U; i < pcpu_nums; i++) { in profiling_configure_pmi()
1085 if (!((per_cpu(profiling_info.s_state, i).pmu_state == in profiling_configure_pmi()
1087 (per_cpu(profiling_info.s_state, i).pmu_state == in profiling_configure_pmi()
1090 __func__, per_cpu(profiling_info.s_state, i).pmu_state, i); in profiling_configure_pmi()
1102 for (i = 0U; i < pcpu_nums; i++) { in profiling_configure_pmi()
1103 per_cpu(profiling_info.ipi_cmd, i) = IPI_PMU_CONFIG; in profiling_configure_pmi()
1104 per_cpu(profiling_info.s_state, i).num_pmi_groups in profiling_configure_pmi()
1107 (void)memcpy_s((void *)per_cpu(profiling_info.s_state, i).pmi_initial_msr_list, in profiling_configure_pmi()
1112 (void)memcpy_s((void *)per_cpu(profiling_info.s_state, i).pmi_start_msr_list, in profiling_configure_pmi()
1117 (void)memcpy_s((void *)per_cpu(profiling_info.s_state, i).pmi_stop_msr_list, in profiling_configure_pmi()
1122 (void)memcpy_s((void *)per_cpu(profiling_info.s_state, i).pmi_entry_msr_list, in profiling_configure_pmi()
1127 (void)memcpy_s((void *)per_cpu(profiling_info.s_state, i).pmi_exit_msr_list, in profiling_configure_pmi()
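Lines 1102-1127 repeat the same memcpy_s() call for the initial/start/stop/entry/exit MSR lists so that every pcpu holds its own private copy and the PMI path never reads shared state; profiling_configure_vmsw() (next group) does the same for the VM-switch lists. A sketch of one such copy, with assumed array bounds and plain memcpy() standing in for the hypervisor's bounds-checked memcpy_s():

#include <stdint.h>
#include <string.h>

#define MAX_GROUP_NUM     1U   /* assumed */
#define MAX_MSR_LIST_NUM  15U  /* assumed */

struct profiling_msr_op {
	uint64_t value;
	uint32_t msr_id;
	uint16_t msr_op_type;
};

struct sep_state {
	uint32_t num_pmi_groups;
	struct profiling_msr_op pmi_start_msr_list[MAX_GROUP_NUM][MAX_MSR_LIST_NUM];
	/* ...the initial/stop/entry/exit lists are copied the same way... */
};

/* Give one pcpu a private copy of the shared PMI MSR lists, in the
 * spirit of lines 1102-1127. */
static void configure_pmi_on_cpu(struct sep_state *state,
		const struct profiling_msr_op src[MAX_GROUP_NUM][MAX_MSR_LIST_NUM],
		uint32_t num_groups)
{
	state->num_pmi_groups = num_groups;
	(void)memcpy(state->pmi_start_msr_list, src,
		     sizeof(state->pmi_start_msr_list));
}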
1148 uint16_t i; in profiling_configure_vmsw() local
1161 for (i = 0U; i < pcpu_nums; i++) { in profiling_configure_vmsw()
1162 per_cpu(profiling_info.ipi_cmd, i) = IPI_VMSW_CONFIG; in profiling_configure_vmsw()
1165 (void *)per_cpu(profiling_info.s_state, i).vmsw_initial_msr_list, in profiling_configure_vmsw()
1171 (void *)per_cpu(profiling_info.s_state, i).vmsw_entry_msr_list, in profiling_configure_vmsw()
1177 (void *)per_cpu(profiling_info.s_state, i).vmsw_exit_msr_list, in profiling_configure_vmsw()
1234 uint16_t i; in profiling_get_status_info() local
1245 for (i = 0U; i < pcpu_nums; i++) { in profiling_get_status_info()
1246 pstats[i].samples_logged = in profiling_get_status_info()
1247 per_cpu(profiling_info.s_state, i).samples_logged; in profiling_get_status_info()
1248 pstats[i].samples_dropped = in profiling_get_status_info()
1249 per_cpu(profiling_info.s_state, i).samples_dropped; in profiling_get_status_info()
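Lines 1245-1249 simply export each pcpu's sample counters into the caller's flat status array. A minimal sketch, assuming pstats holds pcpu_nums entries:

#include <stdint.h>

struct profiling_status {
	uint32_t samples_logged;
	uint32_t samples_dropped;
};

/* Copy each pcpu's sample counters out to the caller, as at
 * lines 1245-1249; per_cpu_stats stands in for per_cpu() access. */
static void get_status_info(struct profiling_status *pstats,
			    const struct profiling_status *per_cpu_stats,
			    uint16_t pcpu_nums)
{
	uint16_t i;

	for (i = 0U; i < pcpu_nums; i++) {
		pstats[i].samples_logged = per_cpu_stats[i].samples_logged;
		pstats[i].samples_dropped = per_cpu_stats[i].samples_dropped;
	}
}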