1 #include <xen/percpu.h>
2 #include <xen/cpu.h>
3 #include <xen/init.h>
4 #include <xen/mm.h>
5 #include <xen/rcupdate.h>
6 
/*
 * Per-CPU offset table: per_cpu() accesses for CPU n are made relative to
 * __per_cpu_offset[n].  Entry 0 stays at its zero static initialisation, so
 * CPU0 uses the statically allocated per-CPU data section directly; all
 * other entries are poisoned until an area is allocated for that CPU.
 */
unsigned long __per_cpu_offset[NR_CPUS];

/*
 * Force uses of per_cpu() with an invalid area to attempt to access the
 * middle of the non-canonical address space resulting in a #GP, rather than a
 * possible #PF at (NULL + a little) which has security implications in the
 * context of PV guests.
 */
#define INVALID_PERCPU_AREA (0x8000000000000000L - (long)__per_cpu_start)
/* Allocation order covering the whole linker-provided per-CPU data section. */
#define PERCPU_ORDER get_order_from_bytes(__per_cpu_data_end - __per_cpu_start)
17 
percpu_init_areas(void)18 void __init percpu_init_areas(void)
19 {
20     unsigned int cpu;
21 
22     for ( cpu = 1; cpu < NR_CPUS; cpu++ )
23         __per_cpu_offset[cpu] = INVALID_PERCPU_AREA;
24 }
25 
init_percpu_area(unsigned int cpu)26 static int init_percpu_area(unsigned int cpu)
27 {
28     char *p;
29 
30     if ( __per_cpu_offset[cpu] != INVALID_PERCPU_AREA )
31         return -EBUSY;
32 
33     if ( (p = alloc_xenheap_pages(PERCPU_ORDER, 0)) == NULL )
34         return -ENOMEM;
35 
36     memset(p, 0, __per_cpu_data_end - __per_cpu_start);
37     __per_cpu_offset[cpu] = p - __per_cpu_start;
38 
39     return 0;
40 }
41 
/*
 * Book-keeping used to defer releasing a CPU's per-CPU area until an RCU
 * grace period has elapsed (see free_percpu_area()).
 */
struct free_info {
    unsigned int cpu;    /* CPU whose area is to be freed. */
    struct rcu_head rcu; /* RCU callback linkage. */
};
static DEFINE_PER_CPU(struct free_info, free_info);
47 
_free_percpu_area(struct rcu_head * head)48 static void _free_percpu_area(struct rcu_head *head)
49 {
50     struct free_info *info = container_of(head, struct free_info, rcu);
51     unsigned int cpu = info->cpu;
52     char *p = __per_cpu_start + __per_cpu_offset[cpu];
53 
54     free_xenheap_pages(p, PERCPU_ORDER);
55     __per_cpu_offset[cpu] = INVALID_PERCPU_AREA;
56 }
57 
free_percpu_area(unsigned int cpu)58 static void free_percpu_area(unsigned int cpu)
59 {
60     struct free_info *info = &per_cpu(free_info, cpu);
61 
62     info->cpu = cpu;
63     call_rcu(&info->rcu, _free_percpu_area);
64 }
65 
cpu_percpu_callback(struct notifier_block * nfb,unsigned long action,void * hcpu)66 static int cpu_percpu_callback(
67     struct notifier_block *nfb, unsigned long action, void *hcpu)
68 {
69     unsigned int cpu = (unsigned long)hcpu;
70     int rc = 0;
71 
72     switch ( action )
73     {
74     case CPU_UP_PREPARE:
75         rc = init_percpu_area(cpu);
76         break;
77     case CPU_UP_CANCELED:
78     case CPU_DEAD:
79         free_percpu_area(cpu);
80         break;
81     default:
82         break;
83     }
84 
85     return !rc ? NOTIFY_DONE : notifier_from_errno(rc);
86 }
87 
/*
 * Run at the highest notifier priority so per-CPU areas exist before (and
 * outlive) the work done by lower-priority CPU hotplug notifiers.
 */
static struct notifier_block cpu_percpu_nfb = {
    .notifier_call = cpu_percpu_callback,
    .priority = 100 /* highest priority */
};
92 
percpu_presmp_init(void)93 static int __init percpu_presmp_init(void)
94 {
95     register_cpu_notifier(&cpu_percpu_nfb);
96 
97     return 0;
98 }
99 presmp_initcall(percpu_presmp_init);
100 
101 /*
102  * Local variables:
103  * mode: C
104  * c-file-style: "BSD"
105  * c-basic-offset: 4
106  * indent-tabs-mode: nil
107  * End:
108  */
109