/* SPDX-License-Identifier: GPL-2.0-only */
#include <xen/cpu.h>
#include <xen/init.h>
#include <xen/mm.h>
#include <xen/numa.h>
#include <xen/percpu.h>
#include <xen/rcupdate.h>
#include <xen/sections.h>

#ifndef INVALID_PERCPU_AREA
#define INVALID_PERCPU_AREA (-(long)__per_cpu_start)
#endif

#define PERCPU_ORDER get_order_from_bytes(__per_cpu_data_end - __per_cpu_start)

extern char __per_cpu_start[];
extern const char __per_cpu_data_end[];

unsigned long __read_mostly __per_cpu_offset[NR_CPUS];

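/*
 * Invalidate the per-CPU area pointers of all CPUs other than the boot CPU.
 * CPU0's offset stays 0, i.e. it keeps using the statically linked per-CPU
 * data section; areas for the other CPUs are allocated on demand when they
 * are brought up.
 */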
void __init percpu_init_areas(void)
{
    unsigned int cpu;

    for ( cpu = 1; cpu < NR_CPUS; cpu++ )
        __per_cpu_offset[cpu] = INVALID_PERCPU_AREA;
}

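/*
 * Allocate and zero a per-CPU data area for @cpu, preferring memory on the
 * CPU's NUMA node.  An already-present area (parked CPU, or resume from
 * suspend) is reused rather than reallocated.
 */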
static int init_percpu_area(unsigned int cpu)
{
    nodeid_t node = cpu_to_node(cpu);
    unsigned int memflags = node != NUMA_NO_NODE ? MEMF_node(node) : 0;
    char *p;

    if ( __per_cpu_offset[cpu] != INVALID_PERCPU_AREA )
        return park_offline_cpus || system_state == SYS_STATE_resume
               ? 0
               : -EBUSY;

    if ( (p = alloc_xenheap_pages(PERCPU_ORDER, memflags)) == NULL )
        return -ENOMEM;

    memset(p, 0, __per_cpu_data_end - __per_cpu_start);
    __per_cpu_offset[cpu] = p - __per_cpu_start;

    return 0;
}

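/*
 * Per-CPU areas are freed via RCU: free_percpu_area() only queues the work,
 * and _free_percpu_area() runs after a grace period, so that concurrent
 * remote accesses to the dying CPU's area can drain first.
 */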
struct free_info {
    unsigned int cpu;
    struct rcu_head rcu;
};
static DEFINE_PER_CPU(struct free_info, free_info);

static void cf_check _free_percpu_area(struct rcu_head *head)
{
    struct free_info *info = container_of(head, struct free_info, rcu);
    unsigned int cpu = info->cpu;
    char *p = __per_cpu_start + __per_cpu_offset[cpu];

    free_xenheap_pages(p, PERCPU_ORDER);
    __per_cpu_offset[cpu] = INVALID_PERCPU_AREA;
}

static void free_percpu_area(unsigned int cpu)
{
    struct free_info *info = &per_cpu(free_info, cpu);

    info->cpu = cpu;
    call_rcu(&info->rcu, _free_percpu_area);
}

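/*
 * CPU hotplug notifier: allocate the per-CPU area before a CPU is brought
 * up, and free it again when the CPU goes away, unless offline CPUs are
 * parked or the system is suspending, in which case the area is kept for
 * later reuse.
 */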
static int cf_check cpu_percpu_callback(
    struct notifier_block *nfb, unsigned long action, void *hcpu)
{
    unsigned int cpu = (unsigned long)hcpu;
    int rc = 0;

    switch ( action )
    {
    case CPU_UP_PREPARE:
        rc = init_percpu_area(cpu);
        break;

    case CPU_UP_CANCELED:
    case CPU_DEAD:
    case CPU_RESUME_FAILED:
        if ( !park_offline_cpus && system_state != SYS_STATE_suspend )
            free_percpu_area(cpu);
        break;

    case CPU_REMOVE:
        if ( park_offline_cpus )
            free_percpu_area(cpu);
        break;

    default:
        break;
    }

    return notifier_from_errno(rc);
}

static struct notifier_block cpu_percpu_nfb = {
    .notifier_call = cpu_percpu_callback,
    .priority = 100 /* highest priority */
};

static int __init cf_check percpu_presmp_init(void)
{
    register_cpu_notifier(&cpu_percpu_nfb);

    return 0;
}
presmp_initcall(percpu_presmp_init);

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */