1 /*
2  * Copyright (C) 2018-2022 Intel Corporation.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 
7 #ifndef PER_CPU_H
8 #define PER_CPU_H
9 
10 #include <types.h>
11 #include <sbuf.h>
12 #include <irq.h>
13 #include <timer.h>
14 #include <profiling.h>
15 #include <logmsg.h>
16 #include <schedule.h>
17 #include <asm/notify.h>
18 #include <asm/page.h>
19 #include <asm/gdt.h>
20 #include <asm/security.h>
21 #include <asm/vm_config.h>
22 
struct per_cpu_region {
	/*
	 * vmxon_region MUST be 4KB-aligned.
	 * This holds because it is the FIRST member of a struct that is
	 * itself __aligned(PAGE_SIZE) (see the closing brace below).
	 * Do not move this field or insert anything before it.
	 */
	uint8_t vmxon_region[PAGE_SIZE];
	/* VMCS currently loaded on this pCPU — presumably; confirm against vmcs.c */
	void *vmcs_run;
#ifdef HV_DEBUG
	/* debug-only: shared ring buffers exported per pCPU */
	struct shared_buf *sbuf[ACRN_SBUF_PER_PCPU_ID_MAX];
	/* debug-only: scratch buffer for formatting one log message */
	char logbuf[LOG_MESSAGE_MAX_SIZE];
	/* debug-only: NPK log reference counter — TODO confirm semantics */
	uint32_t npk_log_ref;
#endif
	/* per-IRQ occurrence counters, indexed by IRQ number */
	uint64_t irq_count[NR_IRQS];
	/* pending softirqs — presumably one bit per softirq type; verify in softirq code */
	uint64_t softirq_pending;
	/* count of spurious interrupts observed on this pCPU — assumed; confirm */
	uint64_t spurious;
	/* vCPU that most recently ran here — name-inferred; verify against scheduler */
	struct acrn_vcpu *ever_run_vcpu;
#ifdef STACK_PROTECTOR
	/* per-pCPU canary value for -fstack-protector support */
	struct stack_canary stk_canary;
#endif
	/* per-pCPU timer bookkeeping */
	struct per_cpu_timers cpu_timers;
	/* scheduler control block plus per-policy state (only one policy active) */
	struct sched_control sched_ctl;
	struct sched_noop_control sched_noop_ctl;
	struct sched_iorr_control sched_iorr_ctl;
	struct sched_bvt_control sched_bvt_ctl;
	struct sched_prio_control sched_prio_ctl;
	/* thread object representing this pCPU's idle loop */
	struct thread_object idle;
	/* per-pCPU GDT and 64-bit TSS */
	struct host_gdt gdt;
	struct tss_64 tss;
	/* bring-up state of this physical CPU */
	enum pcpu_boot_state boot_state;
	/* per-pCPU flag bits — TODO confirm which bits are defined and where */
	uint64_t pcpu_flag;
	/*
	 * Dedicated exception stacks (mc/df/sf — presumably machine-check,
	 * double-fault, and stack-fault handlers; confirm against the IDT/IST
	 * setup code) plus the main hypervisor stack. 16-byte alignment per
	 * the x86-64 ABI stack requirement.
	 */
	uint8_t mc_stack[CONFIG_STACK_SIZE] __aligned(16);
	uint8_t df_stack[CONFIG_STACK_SIZE] __aligned(16);
	uint8_t sf_stack[CONFIG_STACK_SIZE] __aligned(16);
	uint8_t stack[CONFIG_STACK_SIZE] __aligned(16);
	/* cached local APIC ID and logical destination register value */
	uint32_t lapic_id;
	uint32_t lapic_ldr;
	/* nonzero while softirqs are being serviced — assumed; verify in softirq.c */
	uint32_t softirq_servicing;
	/* delivery mode used when kicking / idling this pCPU (INIT vs IPI?) — confirm */
	uint32_t mode_to_kick_pcpu;
	uint32_t mode_to_idle;
	/* payload for smp_call_function-style cross-CPU calls */
	struct smp_call_info_data smp_call_info;
	/* devices with pending softirq work queued on this pCPU */
	struct list_head softirq_dev_entry_list;
#ifdef PROFILING_ON
	/* profiling subsystem per-pCPU state */
	struct profiling_info_wrapper profiling_info;
#endif
	/* bitmap of VM IDs pending shutdown on this pCPU — name-inferred; confirm */
	uint64_t shutdown_vm_bitmap;
	/* TSC value saved across suspend/resume — assumed; verify in pm code */
	uint64_t tsc_suspend;
	/* vCPU owning the loaded IWKey (KeyLocker) — presumably; confirm usage */
	struct acrn_vcpu *whose_iwkey;
	/*
	 * We maintain a per-pCPU array of vCPUs. vCPUs of a VM won't
	 * share same pCPU. So the maximum possible # of vCPUs that can
	 * run on a pCPU is CONFIG_MAX_VM_NUM.
	 * vcpu_array address must be aligned to 64-bit for atomic access
	 * to avoid contention between offline_vcpu and posted interrupt handler
	 */
	struct acrn_vcpu *vcpu_array[CONFIG_MAX_VM_NUM] __aligned(8);
} __aligned(PAGE_SIZE); /* per_cpu_region size aligned with PAGE_SIZE */
76 
/*
 * One per_cpu_region per physical CPU, indexed by pCPU ID.
 * Defined in a single .c file; PAGE_SIZE alignment of the element type
 * keeps each entry's vmxon_region 4KB-aligned.
 */
extern struct per_cpu_region per_cpu_data[MAX_PCPU_NUM];
/*
 * get percpu data for pcpu_id.
 * Expands to an lvalue, so it can be both read and assigned:
 *   per_cpu(lapic_id, 0) = val;
 * NOTE: pcpu_id is not range-checked; callers must pass a valid ID
 * (< MAX_PCPU_NUM).
 */
#define per_cpu(name, pcpu_id)	\
	(per_cpu_data[(pcpu_id)].name)

/*
 * get percpu data for current pcpu.
 * Evaluates get_pcpu_id() at each use; the result is only stable while
 * the caller cannot migrate between pCPUs.
 */
#define get_cpu_var(name)	per_cpu(name, get_pcpu_id())
86 
87 #endif
88