| /include/linux/ |
| topology.h |
    197 #define topology_die_id(cpu) ((void)(cpu), -1)  argument
    200 #define topology_cluster_id(cpu) ((void)(cpu), -1)  argument
    203 #define topology_core_id(cpu) ((void)(cpu), 0)  argument
    206 #define topology_book_id(cpu) ((void)(cpu), -1)  argument
    209 #define topology_drawer_id(cpu) ((void)(cpu), -1)  argument
    212 #define topology_ppin(cpu) ((void)(cpu), 0ull)  argument
    215 #define topology_sibling_cpumask(cpu) cpumask_of(cpu)  argument
    218 #define topology_core_cpumask(cpu) cpumask_of(cpu)  argument
    224 #define topology_die_cpumask(cpu) cpumask_of(cpu)  argument
    227 #define topology_book_cpumask(cpu) cpumask_of(cpu)  argument
    [all …]
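The topology_*() macros map a CPU number to its package/core identifiers and sibling masks; the lines shown above are the single-CPU fallbacks. A minimal sketch of how a caller might use them; dump_cpu_topology() is a hypothetical helper, not part of this header:

    #include <linux/topology.h>
    #include <linux/cpumask.h>
    #include <linux/printk.h>

    /* Hypothetical helper: log package/core IDs and SMT siblings of each online CPU. */
    static void dump_cpu_topology(void)
    {
            unsigned int cpu;

            for_each_online_cpu(cpu) {
                    pr_info("cpu%u: package %d core %d, smt siblings %*pbl\n",
                            cpu,
                            topology_physical_package_id(cpu),
                            topology_core_id(cpu),
                            cpumask_pr_args(topology_sibling_cpumask(cpu)));
            }
    }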
|
| arch_topology.h |
    22 return per_cpu(capacity_freq_ref, cpu);  in topology_get_freq_ref()
    29 return per_cpu(arch_freq_scale, cpu);  in topology_get_freq_scale()
    56 return per_cpu(hw_pressure, cpu);  in topology_get_hw_pressure()
    76 #define topology_physical_package_id(cpu) (cpu_topology[cpu].package_id)  argument
    77 #define topology_cluster_id(cpu) (cpu_topology[cpu].cluster_id)  argument
    78 #define topology_core_id(cpu) (cpu_topology[cpu].core_id)  argument
    79 #define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_sibling)  argument
    80 #define topology_sibling_cpumask(cpu) (&cpu_topology[cpu].thread_sibling)  argument
    81 #define topology_cluster_cpumask(cpu) (&cpu_topology[cpu].cluster_sibling)  argument
    82 #define topology_llc_cpumask(cpu) (&cpu_topology[cpu].llc_sibling)  argument
    [all …]
|
| cpumask.h |
    143 return cpu;  in cpumask_check()
    490 int cpu)  in cpumask_any_and_but()
    499 if (i != cpu)  in cpumask_any_and_but()
    526 if (i != cpu)  in cpumask_any_andnot_but()
    939 #define cpumask_of(cpu) (get_cpu_mask(cpu))  argument
    1109 #define for_each_possible_cpu(cpu) for ((cpu) = 0; (cpu) < 1; (cpu)++)  argument
    1110 #define for_each_online_cpu(cpu) for ((cpu) = 0; (cpu) < 1; (cpu)++)  argument
    1111 #define for_each_present_cpu(cpu) for ((cpu) = 0; (cpu) < 1; (cpu)++)  argument
    1114 for ((void)(start), (cpu) = 0; (cpu) < 1; (cpu)++)
    1116 for ((void)(start), (cpu) = 0; (cpu) < 1; (cpu)++)
    [all …]
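The iterators and cpumask_of() shown here (in their UP forms) are the building blocks for most CPU-mask manipulation. A small sketch; pick_online_cpu() is a hypothetical caller-side helper:

    #include <linux/cpumask.h>
    #include <linux/gfp.h>

    /* Hypothetical helper: pick any online CPU from @allowed,
     * or nr_cpu_ids if the intersection is empty. */
    static unsigned int pick_online_cpu(const struct cpumask *allowed)
    {
            cpumask_var_t tmp;
            unsigned int cpu;

            if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
                    return nr_cpu_ids;

            cpumask_and(tmp, allowed, cpu_online_mask);
            cpu = cpumask_any(tmp);         /* nr_cpu_ids if tmp is empty */

            free_cpumask_var(tmp);
            return cpu;
    }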
|
| kernel_stat.h |
    51 #define kstat_cpu(cpu) per_cpu(kstat, cpu)  argument
    52 #define kcpustat_cpu(cpu) per_cpu(kernel_cpustat, cpu)  argument
    54 extern unsigned long long nr_context_switches_cpu(int cpu);
    57 extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu);
    67 return kstat_cpu(cpu).softirqs[irq];  in kstat_softirqs_cpu()
    70 static inline unsigned int kstat_cpu_softirqs_sum(int cpu)  in kstat_cpu_softirqs_sum()  argument
    76 sum += kstat_softirqs_cpu(i, cpu);  in kstat_cpu_softirqs_sum()
    99 return kstat_cpu(cpu).irqs_sum;  in kstat_cpu_irqs_sum()
    104 enum cpu_usage_stat usage, int cpu);
    108 enum cpu_usage_stat usage, int cpu)  in kcpustat_field()  argument
    [all …]
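kstat_softirqs_cpu() and friends expose the per-CPU interrupt and softirq counters behind /proc/stat. A minimal sketch; timer_softirq_total() is a hypothetical helper that sums one softirq across online CPUs, the complement of kstat_cpu_softirqs_sum() shown above:

    #include <linux/kernel_stat.h>
    #include <linux/cpumask.h>
    #include <linux/interrupt.h>

    /* Hypothetical helper: total TIMER_SOFTIRQ invocations on the
     * CPUs that are currently online. */
    static unsigned int timer_softirq_total(void)
    {
            unsigned int cpu, sum = 0;

            for_each_online_cpu(cpu)
                    sum += kstat_softirqs_cpu(TIMER_SOFTIRQ, cpu);

            return sum;
    }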
|
| cacheinfo.h |
    85 int early_cache_level(unsigned int cpu);
    86 int init_cache_level(unsigned int cpu);
    87 int init_of_cache_level(unsigned int cpu);
    88 int populate_cache_leaves(unsigned int cpu);
    89 int cache_setup_acpi(unsigned int cpu);
    90 bool last_level_cache_is_valid(unsigned int cpu);
    92 int fetch_cache_info(unsigned int cpu);
    93 int detect_cache_attributes(unsigned int cpu);
    104 int acpi_get_cache_info(unsigned int cpu,  in acpi_get_cache_info()  argument
    110 int acpi_get_cache_info(unsigned int cpu,
    [all …]
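These hooks let the architecture populate the per-CPU cache hierarchy that sysfs exports. A sketch of walking the result, assuming the get_cpu_cacheinfo() accessor and struct cpu_cacheinfo layout declared elsewhere in this header, and that the CPU's leaves have already been detected:

    #include <linux/cacheinfo.h>
    #include <linux/printk.h>

    /* Sketch: print each cache leaf recorded for @cpu (assumes the
     * cacheinfo for that CPU has been set up, i.e. the CPU is online). */
    static void print_cache_leaves(unsigned int cpu)
    {
            struct cpu_cacheinfo *cci = get_cpu_cacheinfo(cpu);
            unsigned int i;

            for (i = 0; i < cci->num_leaves; i++) {
                    struct cacheinfo *leaf = &cci->info_list[i];

                    pr_info("cpu%u: L%u cache, %u bytes\n",
                            cpu, leaf->level, leaf->size);
            }
    }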
|
| ring_buffer.h |
    151 ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts,
    154 ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts,
    171 void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu);
    177 struct trace_buffer *buffer_b, int cpu);
    181 struct trace_buffer *buffer_b, int cpu)  in ring_buffer_swap_cpu()  argument
    188 bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu);
    212 int cpu, u64 *ts);
    222 ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu);
    227 size_t len, int cpu, int full);
    249 int ring_buffer_map(struct trace_buffer *buffer, int cpu,
    [all …]
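ring_buffer_consume() removes the next pending event from one CPU's per-CPU buffer and is the simplest way to drain it. A sketch under the assumption that the producer wrote NUL-terminated strings as the event payload; drain_cpu_buffer() is hypothetical:

    #include <linux/ring_buffer.h>
    #include <linux/printk.h>

    /* Hypothetical helper: drain whatever is pending on one CPU's buffer. */
    static void drain_cpu_buffer(struct trace_buffer *buffer, int cpu)
    {
            struct ring_buffer_event *event;
            unsigned long lost;
            u64 ts;

            while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost))) {
                    char *msg = ring_buffer_event_data(event);

                    pr_info("cpu%d @%llu (lost %lu): %s\n", cpu, ts, lost, msg);
            }
    }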
|
| smpboot.h |
    34 int (*thread_should_run)(unsigned int cpu);
    35 void (*thread_fn)(unsigned int cpu);
    36 void (*create)(unsigned int cpu);
    37 void (*setup)(unsigned int cpu);
    38 void (*cleanup)(unsigned int cpu, bool online);
    39 void (*park)(unsigned int cpu);
    40 void (*unpark)(unsigned int cpu);
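These callbacks describe a per-CPU kthread managed by the smpboot infrastructure (the mechanism behind ksoftirqd and the watchdog threads). A minimal registration sketch; all demo_* names are hypothetical, and demo_should_run() returning 0 simply keeps the thread asleep:

    #include <linux/smpboot.h>
    #include <linux/percpu.h>
    #include <linux/sched.h>
    #include <linux/init.h>

    static DEFINE_PER_CPU(struct task_struct *, demo_thread);

    static int demo_should_run(unsigned int cpu)
    {
            return 0;               /* nothing to do in this sketch */
    }

    static void demo_fn(unsigned int cpu)
    {
            /* per-CPU work would go here */
    }

    /* Hypothetical per-CPU thread: one instance is created and
     * parked/unparked for every CPU as it comes and goes. */
    static struct smp_hotplug_thread demo_threads = {
            .store                  = &demo_thread,
            .thread_should_run      = demo_should_run,
            .thread_fn              = demo_fn,
            .thread_comm            = "demo/%u",
    };

    static int __init demo_init(void)
    {
            return smpboot_register_percpu_thread(&demo_threads);
    }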
|
| tick.h |
    30 extern int tick_cpu_dying(unsigned int cpu);
    130 extern bool tick_nohz_tick_stopped_cpu(int cpu);
    140 extern unsigned long tick_nohz_get_idle_calls_cpu(int cpu);
    141 extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
    210 extern bool tick_nohz_cpu_hotpluggable(unsigned int cpu);
    230 if (tick_nohz_full_cpu(cpu))  in tick_dep_set_cpu()
    231 tick_nohz_dep_set_cpu(cpu, bit);  in tick_dep_set_cpu()
    236 if (tick_nohz_full_cpu(cpu))  in tick_dep_clear_cpu()
    237 tick_nohz_dep_clear_cpu(cpu, bit);  in tick_dep_clear_cpu()
    272 extern void tick_nohz_full_kick_cpu(int cpu);
    [all …]
|
| cpu.h |
    27 struct cpu {  struct
    38 extern int register_cpu(struct cpu *cpu, int num);  argument
    43 int cpu, unsigned int *thread);
    92 extern int arch_register_cpu(int cpu);
    93 extern void arch_unregister_cpu(int cpu);
    95 extern void unregister_cpu(struct cpu *cpu);
    101 DECLARE_PER_CPU(struct cpu, cpu_devices);
    117 int add_cpu(unsigned int cpu);
    150 int cpu = 0;  in suspend_disable_secondary_cpus()  local
    153 cpu = -1;  in suspend_disable_secondary_cpus()
    [all …]
|
| context_tracking_state.h |
    64 static __always_inline int ct_rcu_watching_cpu(int cpu)  in ct_rcu_watching_cpu()  argument
    66 struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);  in ct_rcu_watching_cpu()
    71 static __always_inline int ct_rcu_watching_cpu_acquire(int cpu)  in ct_rcu_watching_cpu_acquire()  argument
    73 struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);  in ct_rcu_watching_cpu_acquire()
    83 static __always_inline long ct_nesting_cpu(int cpu)  in ct_nesting_cpu()  argument
    85 struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);  in ct_nesting_cpu()
    95 static __always_inline long ct_nmi_nesting_cpu(int cpu)  in ct_nmi_nesting_cpu()  argument
    97 struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);  in ct_nmi_nesting_cpu()
    111 static __always_inline bool context_tracking_enabled_cpu(int cpu)  in context_tracking_enabled_cpu()  argument
    113 return context_tracking_enabled() && per_cpu(context_tracking.active, cpu);  in context_tracking_enabled_cpu()
    [all …]
|
| intel_tcc.h |
    13 int intel_tcc_get_tjmax(int cpu);
    14 int intel_tcc_get_offset(int cpu);
    15 int intel_tcc_set_offset(int cpu, int offset);
    16 int intel_tcc_get_temp(int cpu, int *temp, bool pkg);
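A sketch of using the TCC helpers above from a thermal driver; report_package_temp() is a hypothetical function, and the assumption that the returned temperature is in degrees Celsius (with negative errno on failure) follows the usual thermal-driver convention:

    #include <linux/intel_tcc.h>
    #include <linux/printk.h>

    /* Hypothetical helper: read and log the package temperature for @cpu. */
    static int report_package_temp(int cpu)
    {
            int temp, ret;

            ret = intel_tcc_get_temp(cpu, &temp, true);     /* true = package scope */
            if (ret < 0)
                    return ret;

            pr_info("cpu%d: TjMax %d C, package temp %d C\n",
                    cpu, intel_tcc_get_tjmax(cpu), temp);
            return 0;
    }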
|
| rcutree.h |
    108 int rcutree_prepare_cpu(unsigned int cpu);
    109 int rcutree_online_cpu(unsigned int cpu);
    110 void rcutree_report_cpu_starting(unsigned int cpu);
    113 int rcutree_dead_cpu(unsigned int cpu);
    114 int rcutree_dying_cpu(unsigned int cpu);
    115 int rcutree_offline_cpu(unsigned int cpu);
    122 void rcutree_migrate_callbacks(int cpu);
|
| nmi.h |
    48 extern int lockup_detector_online_cpu(unsigned int cpu);
    49 extern int lockup_detector_offline_cpu(unsigned int cpu);
    98 void watchdog_hardlockup_touch_cpu(unsigned int cpu);
    99 void watchdog_hardlockup_check(unsigned int cpu, struct pt_regs *regs);
    117 void watchdog_hardlockup_enable(unsigned int cpu);
    118 void watchdog_hardlockup_disable(unsigned int cpu);
    176 static inline bool trigger_single_cpu_backtrace(int cpu)  in trigger_single_cpu_backtrace()  argument
    178 arch_trigger_cpumask_backtrace(cpumask_of(cpu), -1);  in trigger_single_cpu_backtrace()
    201 static inline bool trigger_single_cpu_backtrace(int cpu)  in trigger_single_cpu_backtrace()  argument
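trigger_single_cpu_backtrace() asks the architecture to dump a remote CPU's stack, falling back to the cpumask-based helper as the listing shows; it returns false when the architecture has no such hook. A small sketch with a hypothetical caller:

    #include <linux/nmi.h>
    #include <linux/printk.h>

    /* Hypothetical debug path: complain about an unresponsive CPU. */
    static void complain_about_stuck_cpu(int cpu)
    {
            pr_warn("cpu%d appears stuck, requesting backtrace\n", cpu);
            if (!trigger_single_cpu_backtrace(cpu))
                    pr_warn("remote backtrace not supported on this architecture\n");
    }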
|
| cpuhotplug.h |
    252 int (*startup)(unsigned int cpu),
    257 int (*startup)(unsigned int cpu),
    258 int (*teardown)(unsigned int cpu),
    273 int (*startup)(unsigned int cpu),  in cpuhp_setup_state()  argument
    274 int (*teardown)(unsigned int cpu))  in cpuhp_setup_state()  argument
    293 int (*startup)(unsigned int cpu),  in cpuhp_setup_state_cpuslocked()  argument
    313 int (*startup)(unsigned int cpu),  in cpuhp_setup_state_nocalls()  argument
    314 int (*teardown)(unsigned int cpu))  in cpuhp_setup_state_nocalls()  argument
    335 int (*startup)(unsigned int cpu),  in cpuhp_setup_state_nocalls_cpuslocked()  argument
    356 int (*startup)(unsigned int cpu,  in cpuhp_setup_state_multi()  argument
    [all …]
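The startup/teardown pairs above are the CPU hotplug state callbacks. A minimal sketch of registering a dynamically allocated online state; the demo_* names and the "demo:online" string are hypothetical:

    #include <linux/cpuhotplug.h>
    #include <linux/printk.h>
    #include <linux/init.h>

    static int demo_online(unsigned int cpu)
    {
            pr_info("cpu%u came online\n", cpu);
            return 0;
    }

    static int demo_offline(unsigned int cpu)
    {
            pr_info("cpu%u is going down\n", cpu);
            return 0;
    }

    static int hp_state;

    static int __init demo_hp_init(void)
    {
            /* Invokes demo_online() on every CPU that is already up and
             * returns the allocated state number, needed later for
             * cpuhp_remove_state(), or a negative errno. */
            int ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "demo:online",
                                        demo_online, demo_offline);
            if (ret < 0)
                    return ret;

            hp_state = ret;
            return 0;
    }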
|
| smp.h |
    18 typedef bool (*smp_cond_func_t)(int cpu, void *info);
    45 extern void __smp_call_single_queue(int cpu, struct llist_node *node);
    56 int smp_call_function_single_async(int cpu, call_single_data_t *csd);
    134 extern void arch_smp_send_reschedule(int cpu);
    139 #define smp_send_reschedule(cpu) ({ \  argument
    140 trace_ipi_send_cpu(cpu, _RET_IP_, NULL); \
    141 arch_smp_send_reschedule(cpu); \
    205 static inline void smp_send_reschedule(int cpu) { }  in smp_send_reschedule()
    293 int smpcfd_prepare_cpu(unsigned int cpu);
    294 int smpcfd_dead_cpu(unsigned int cpu);
    [all …]
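Besides the async variant listed above, smp.h provides the synchronous cross-call smp_call_function_single(). A sketch of running a function on a specific CPU and waiting for it; query_cpu() and read_remote_id() are hypothetical:

    #include <linux/smp.h>
    #include <linux/printk.h>

    /* Runs in IPI context on the target CPU. */
    static void read_remote_id(void *info)
    {
            *(unsigned int *)info = smp_processor_id();
    }

    /* Hypothetical helper: returns 0 on success, or a negative errno
     * (typically -ENXIO when the target CPU is not online). */
    static int query_cpu(int cpu)
    {
            unsigned int remote_id = 0;
            int ret;

            ret = smp_call_function_single(cpu, read_remote_id, &remote_id, 1);
            if (ret)
                    return ret;

            pr_info("cpu%d reported id %u\n", cpu, remote_id);
            return 0;
    }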
|
| /include/trace/events/ |
| cpuhp.h |
    12 TP_PROTO(unsigned int cpu,
    17 TP_ARGS(cpu, target, idx, fun),
    20 __field( unsigned int, cpu )
    27 __entry->cpu = cpu;
    39 TP_PROTO(unsigned int cpu,
    48 __field( unsigned int, cpu )
    55 __entry->cpu = cpu;
    67 TP_PROTO(unsigned int cpu,
    72 TP_ARGS(cpu, state, idx, ret),
    75 __field( unsigned int, cpu )
    [all …]
|
| hw_pressure.h |
    11 TP_PROTO(int cpu, unsigned long hw_pressure),
    12 TP_ARGS(cpu, hw_pressure),
    16 __field(int, cpu)
    21 __entry->cpu = cpu;
    24 TP_printk("cpu=%d hw_pressure=%lu", __entry->cpu, __entry->hw_pressure)
|
| irq_matrix.h |
    70 TP_ARGS(bit, cpu, matrix, cmap),
    74 __field( unsigned int, cpu )
    87 __entry->cpu = cpu;
    143 TP_PROTO(int bit, unsigned int cpu,
    146 TP_ARGS(bit, cpu, matrix, cmap)
    151 TP_PROTO(int bit, unsigned int cpu,
    154 TP_ARGS(bit, cpu, matrix, cmap)
    162 TP_ARGS(bit, cpu, matrix, cmap)
    170 TP_ARGS(bit, cpu, matrix, cmap)
    178 TP_ARGS(bit, cpu, matrix, cmap)
    [all …]
|
| /include/asm-generic/ |
| numa.h |
    18 void numa_clear_node(unsigned int cpu);
    35 void __init early_map_cpu_to_node(unsigned int cpu, int nid);
    36 int early_cpu_to_node(int cpu);
    37 void numa_store_cpu_info(unsigned int cpu);
    38 void numa_add_cpu(unsigned int cpu);
    39 void numa_remove_cpu(unsigned int cpu);
    43 static inline void numa_store_cpu_info(unsigned int cpu) { }  in numa_store_cpu_info()  argument
    44 static inline void numa_add_cpu(unsigned int cpu) { }  in numa_add_cpu()  argument
    45 static inline void numa_remove_cpu(unsigned int cpu) { }  in numa_remove_cpu()  argument
    48 static inline int early_cpu_to_node(int cpu) { return 0; }  in early_cpu_to_node()  argument
    [all …]
|
| topology.h |
    35 #define cpu_to_node(cpu) ((void)(cpu),0)  argument
    41 #define set_cpu_numa_node(cpu, node)  argument
    44 #define cpu_to_mem(cpu) ((void)(cpu),0)  argument
    72 #define set_cpu_numa_mem(cpu, node)  argument
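These are the !NUMA fallbacks, so cpu_to_node() simply evaluates to node 0. A sketch of the common NUMA-aware allocation pattern that they keep working on non-NUMA builds; alloc_cpu_context() is a hypothetical helper:

    #include <linux/topology.h>
    #include <linux/slab.h>

    /* Hypothetical helper: allocate a per-CPU context on the node that
     * owns @cpu; degenerates to a plain kzalloc() with the fallbacks above. */
    static void *alloc_cpu_context(unsigned int cpu, size_t size)
    {
            return kzalloc_node(size, GFP_KERNEL, cpu_to_node(cpu));
    }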
|
| /include/acpi/ |
| cppc_acpi.h |
    155 extern int cppc_set_enable(int cpu, bool enable);
    163 extern unsigned int cppc_get_transition_latency(int cpu);
    170 extern int cppc_set_epp(int cpu, u64 epp_val);
    172 extern int cppc_set_auto_act_window(int cpu, u64 auto_act_window);
    173 extern int cppc_get_auto_sel(int cpu, bool *enable);
    174 extern int cppc_set_auto_sel(int cpu, bool enable);
    199 static inline int cppc_set_enable(int cpu, bool enable)  in cppc_set_enable()  argument
    219 static inline unsigned int cppc_get_transition_latency(int cpu)  in cppc_get_transition_latency()  argument
    243 static inline int cppc_set_epp(int cpu, u64 epp_val)  in cppc_set_epp()  argument
    255 static inline int cppc_get_auto_sel(int cpu, bool *enable)  in cppc_get_auto_sel()  argument
    [all …]
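A sketch of the kind of per-CPU init a CPPC-based cpufreq driver performs with the helpers above; demo_cppc_init() is hypothetical, cppc_set_enable() returns a negative errno when the CPU has no usable _CPC object, and the nanosecond unit for the latency follows the kernel's cpufreq usage:

    #include <acpi/cppc_acpi.h>
    #include <linux/printk.h>

    /* Hypothetical helper: enable CPPC on @cpu and log its latency. */
    static int demo_cppc_init(int cpu)
    {
            int ret = cppc_set_enable(cpu, true);

            if (ret)
                    return ret;

            pr_info("cpu%d: CPPC transition latency %u ns\n",
                    cpu, cppc_get_transition_latency(cpu));
            return 0;
    }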
|
| /include/linux/clk/ |
| tegra.h |
    33 void (*wait_for_reset)(u32 cpu);
    34 void (*put_in_reset)(u32 cpu);
    35 void (*out_of_reset)(u32 cpu);
    36 void (*enable_clock)(u32 cpu);
    37 void (*disable_clock)(u32 cpu);
    53 tegra_cpu_car_ops->wait_for_reset(cpu);  in tegra_wait_cpu_in_reset()
    56 static inline void tegra_put_cpu_in_reset(u32 cpu)  in tegra_put_cpu_in_reset()  argument
    61 tegra_cpu_car_ops->put_in_reset(cpu);  in tegra_put_cpu_in_reset()
    69 tegra_cpu_car_ops->out_of_reset(cpu);  in tegra_cpu_out_of_reset()
    77 tegra_cpu_car_ops->enable_clock(cpu);  in tegra_enable_cpu_clock()
    [all …]
|
| /include/linux/sched/ |
| hotplug.h |
    9 extern int sched_cpu_starting(unsigned int cpu);
    10 extern int sched_cpu_activate(unsigned int cpu);
    11 extern int sched_cpu_deactivate(unsigned int cpu);
    14 extern int sched_cpu_wait_empty(unsigned int cpu);
    15 extern int sched_cpu_dying(unsigned int cpu);
|
| isolation.h |
    33 extern bool housekeeping_test_cpu(int cpu, enum hk_type type);
    56 static inline bool housekeeping_test_cpu(int cpu, enum hk_type type)  in housekeeping_test_cpu()  argument
    64 static inline bool housekeeping_cpu(int cpu, enum hk_type type)  in housekeeping_cpu()  argument
    68 return housekeeping_test_cpu(cpu, type);  in housekeeping_cpu()
    73 static inline bool cpu_is_isolated(int cpu)  in cpu_is_isolated()  argument
    75 return !housekeeping_test_cpu(cpu, HK_TYPE_DOMAIN) ||  in cpu_is_isolated()
    76 !housekeeping_test_cpu(cpu, HK_TYPE_TICK) ||  in cpu_is_isolated()
    77 cpuset_cpu_is_isolated(cpu);  in cpu_is_isolated()
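cpu_is_isolated() shown above is the usual test for skipping CPUs the user reserved via isolcpus/nohz_full or cpuset isolation. A sketch of restricting background work to housekeeping CPUs; queue_on_housekeeping_cpus() and its callback are hypothetical:

    #include <linux/sched/isolation.h>
    #include <linux/cpumask.h>

    /* Hypothetical helper: invoke @fn only on CPUs that still take the
     * tick and participate in the scheduler domains. */
    static void queue_on_housekeeping_cpus(void (*fn)(unsigned int cpu))
    {
            unsigned int cpu;

            for_each_online_cpu(cpu) {
                    if (cpu_is_isolated(cpu))
                            continue;
                    fn(cpu);
            }
    }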
|
| nohz.h |
    10 extern void nohz_balance_enter_idle(int cpu);
    13 static inline void nohz_balance_enter_idle(int cpu) { }  in nohz_balance_enter_idle()  argument
    27 extern void wake_up_nohz_cpu(int cpu);
    29 static inline void wake_up_nohz_cpu(int cpu) { }  in wake_up_nohz_cpu()  argument
|