/*
 * asid.c: ASID management
 * Copyright (c) 2007, Advanced Micro Devices, Inc.
 * Copyright (c) 2009, Citrix Systems, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; If not, see <http://www.gnu.org/licenses/>.
 */
18 
19 #include <xen/init.h>
20 #include <xen/lib.h>
21 #include <xen/sched.h>
22 #include <xen/smp.h>
23 #include <xen/percpu.h>
24 #include <asm/hvm/asid.h>
25 
26 /* Xen command-line option to enable ASIDs */
27 static int opt_asid_enabled = 1;
28 boolean_param("asid", opt_asid_enabled);
29 
/*
 * ASIDs partition the physical TLB.  In the current implementation ASIDs are
 * introduced to reduce the number of TLB flushes.  Each time the guest's
 * virtual address space changes (e.g. due to an INVLPG, MOV-TO-{CR3, CR4}
 * operation), instead of flushing the TLB, a new ASID is assigned.  This
 * reduces the number of TLB flushes to at most 1/#ASIDs.  The biggest
 * advantage is that hot parts of the hypervisor's code and data remain in
 * the TLB.
 *
 * Sketch of the Implementation:
 *
 * ASIDs are a CPU-local resource.  As preemption of ASIDs is not possible,
 * ASIDs are assigned in a round-robin scheme.  To minimize the overhead of
 * ASID invalidation, at the time of a TLB flush, ASIDs are tagged with a
 * 64-bit generation.  Only on a generation overflow the code needs to
 * invalidate all ASID information stored at the VCPUs which are run on the
 * specific physical processor.  This overflow appears after about 2^80
 * host processor cycles, so we do not optimize this case, but simply disable
 * ASID usage to retain correctness.
 */
50 
51 /* Per-CPU ASID management. */
52 struct hvm_asid_data {
53    uint64_t core_asid_generation;
54    uint32_t next_asid;
55    uint32_t max_asid;
56    bool_t disabled;
57 };
58 
59 static DEFINE_PER_CPU(struct hvm_asid_data, hvm_asid_data);
60 
hvm_asid_init(int nasids)61 void hvm_asid_init(int nasids)
62 {
63     static int8_t g_disabled = -1;
64     struct hvm_asid_data *data = &this_cpu(hvm_asid_data);
65 
66     data->max_asid = nasids - 1;
67     data->disabled = !opt_asid_enabled || (nasids <= 1);
68 
69     if ( g_disabled != data->disabled )
70     {
71         printk("HVM: ASIDs %sabled.\n", data->disabled ? "dis" : "en");
72         if ( g_disabled < 0 )
73             g_disabled = data->disabled;
74     }
75 
76     /* Zero indicates 'invalid generation', so we start the count at one. */
77     data->core_asid_generation = 1;
78 
79     /* Zero indicates 'ASIDs disabled', so we start the count at one. */
80     data->next_asid = 1;
81 }
82 
hvm_asid_flush_vcpu_asid(struct hvm_vcpu_asid * asid)83 void hvm_asid_flush_vcpu_asid(struct hvm_vcpu_asid *asid)
84 {
85     asid->generation = 0;
86 }
87 
hvm_asid_flush_vcpu(struct vcpu * v)88 void hvm_asid_flush_vcpu(struct vcpu *v)
89 {
90     hvm_asid_flush_vcpu_asid(&v->arch.hvm_vcpu.n1asid);
91     hvm_asid_flush_vcpu_asid(&vcpu_nestedhvm(v).nv_n2asid);
92 }
93 
hvm_asid_flush_core(void)94 void hvm_asid_flush_core(void)
95 {
96     struct hvm_asid_data *data = &this_cpu(hvm_asid_data);
97 
98     if ( data->disabled )
99         return;
100 
101     if ( likely(++data->core_asid_generation != 0) )
102         return;
103 
104     /*
105      * ASID generations are 64 bit.  Overflow of generations never happens.
106      * For safety, we simply disable ASIDs, so correctness is established; it
107      * only runs a bit slower.
108      */
109     printk("HVM: ASID generation overrun. Disabling ASIDs.\n");
110     data->disabled = 1;
111 }
112 
hvm_asid_handle_vmenter(struct hvm_vcpu_asid * asid)113 bool_t hvm_asid_handle_vmenter(struct hvm_vcpu_asid *asid)
114 {
115     struct hvm_asid_data *data = &this_cpu(hvm_asid_data);
116 
117     /* On erratum #170 systems we must flush the TLB.
118      * Generation overruns are taken here, too. */
119     if ( data->disabled )
120         goto disabled;
121 
122     /* Test if VCPU has valid ASID. */
123     if ( asid->generation == data->core_asid_generation )
124         return 0;
125 
126     /* If there are no free ASIDs, need to go to a new generation */
127     if ( unlikely(data->next_asid > data->max_asid) )
128     {
129         hvm_asid_flush_core();
130         data->next_asid = 1;
131         if ( data->disabled )
132             goto disabled;
133     }
134 
135     /* Now guaranteed to be a free ASID. */
136     asid->asid = data->next_asid++;
137     asid->generation = data->core_asid_generation;
138 
139     /*
140      * When we assign ASID 1, flush all TLB entries as we are starting a new
141      * generation, and all old ASID allocations are now stale.
142      */
143     return (asid->asid == 1);
144 
145  disabled:
146     asid->asid = 0;
147     return 0;
148 }
149 
/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */
159