/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Nested HVM
 * Copyright (c) 2011, Advanced Micro Devices, Inc.
 * Author: Christoph Egger <Christoph.Egger@amd.com>
 */

#include <asm/msr.h>
#include <asm/hvm/support.h>
#include <asm/hvm/hvm.h>
#include <asm/p2m.h>    /* for struct p2m_domain */
#include <asm/hvm/nestedhvm.h>
#include <asm/event.h>  /* for local_event_delivery_(en|dis)able */
#include <asm/paging.h> /* for paging_mode_hap() */

static unsigned long *shadow_io_bitmap[3];

/* Nested VCPU */
bool
nestedhvm_vcpu_in_guestmode(struct vcpu *v)
{
    return vcpu_nestedhvm(v).nv_guestmode;
}

void
nestedhvm_vcpu_reset(struct vcpu *v)
{
    struct nestedvcpu *nv = &vcpu_nestedhvm(v);

    nv->nv_vmentry_pending = 0;
    nv->nv_vmexit_pending = 0;
    nv->nv_vmswitch_in_progress = 0;
    nv->nv_ioport80 = 0;
    nv->nv_ioportED = 0;

    hvm_unmap_guest_frame(nv->nv_vvmcx, 1);
    nv->nv_vvmcx = NULL;
    nv->nv_vvmcxaddr = INVALID_PADDR;
    nv->nv_flushp2m = 0;
    nv->nv_p2m = NULL;
    nv->stale_np2m = false;
    nv->np2m_generation = 0;

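    /* Drop the nested (n2) ASID so a fresh one gets allocated on next use. */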
    hvm_asid_flush_vcpu_asid(&nv->nv_n2asid);

    alternative_vcall(hvm_funcs.nhvm_vcpu_reset, v);

    /* vcpu is in host mode */
    nestedhvm_vcpu_exit_guestmode(v);
}

int
nestedhvm_vcpu_initialise(struct vcpu *v)
{
    int rc;

    if ( !shadow_io_bitmap[0] )
        return -ENOMEM;

    rc = alternative_call(hvm_funcs.nhvm_vcpu_initialise, v);
    if ( rc )
        return rc;

    nestedhvm_vcpu_reset(v);
    return 0;
}

void
nestedhvm_vcpu_destroy(struct vcpu *v)
{
    alternative_vcall(hvm_funcs.nhvm_vcpu_destroy, v);
}

static void cf_check nestedhvm_flushtlb_ipi(void *info)
{
    struct vcpu *v = current;
    struct domain *d = info;

    ASSERT(d != NULL);
    /* Nothing to do if this CPU isn't running a vCPU of the target domain. */
    if ( v->domain != d )
        return;

    /*
     * Just flush the ASID (or request a new one). This is cheaper than
     * flush_tlb_local() and has the same desired effect.
     */
    hvm_asid_flush_core();
    vcpu_nestedhvm(v).nv_p2m = NULL;
    vcpu_nestedhvm(v).stale_np2m = true;
}

void
nestedhvm_vmcx_flushtlb(struct p2m_domain *p2m)
{
    on_selected_cpus(p2m->dirty_cpumask, nestedhvm_flushtlb_ipi,
                     p2m->domain, 1);
    cpumask_clear(p2m->dirty_cpumask);
}

/* Common shadow IO Permission bitmap */

/*
 * There are four global patterns of I/O bitmap each guest can choose
 * from, depending on interception of I/O port 0x80 and/or 0xED
 * (shown in the table below).
 * The users of the bitmap patterns are in SVM/VMX specific code.
 *
 * bitmap        port 0x80  port 0xED
 * hvm_io_bitmap cleared    cleared
 * iomap[0]      cleared    set
 * iomap[1]      set        cleared
 * iomap[2]      set        set
 */
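
/*
 * For illustration, the table above means nestedhvm_vcpu_iomap_get()
 * (defined at the bottom of this file) resolves as follows, assuming
 * hvm_port80_allowed is true:
 *
 *   nestedhvm_vcpu_iomap_get(false, false) => hvm_io_bitmap
 *   nestedhvm_vcpu_iomap_get(false, true)  => shadow_io_bitmap[0]
 *   nestedhvm_vcpu_iomap_get(true,  false) => shadow_io_bitmap[1]
 *   nestedhvm_vcpu_iomap_get(true,  true)  => shadow_io_bitmap[2]
 */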

static int __init cf_check nestedhvm_setup(void)
{
    /* Same format and size as hvm_io_bitmap (Intel needs only 2 pages). */
    unsigned int nr = cpu_has_vmx ? 2 : 3;
    unsigned int i, order = get_order_from_pages(nr);

    if ( !hvm_funcs.name )
        return 0;

    /*
     * The shadow_io_bitmap pages can't be declared static because
     * they must fulfill hardware requirements (page aligned section)
     * and doing so triggers the ASSERT(va >= XEN_VIRT_START)
     * in virt_to_maddr().
     *
     * So as a compromise pre-allocate them when Xen boots.
     * This function must be called from within start_xen() when
     * it is valid to use _xmalloc().
     */

    for ( i = 0; i < ARRAY_SIZE(shadow_io_bitmap); i++ )
    {
        shadow_io_bitmap[i] = alloc_xenheap_pages(order, 0);
        if ( !shadow_io_bitmap[i] )
        {
            while ( i-- )
            {
                free_xenheap_pages(shadow_io_bitmap[i], order);
                shadow_io_bitmap[i] = NULL;
            }
            return -ENOMEM;
        }
        memset(shadow_io_bitmap[i], ~0U, nr << PAGE_SHIFT);
    }

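    /*
     * A set bit intercepts the port; everything was set above, so clear
     * the bits for the ports meant to pass through, per the table above.
     */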
    __clear_bit(0x80, shadow_io_bitmap[0]);
    __clear_bit(0xed, shadow_io_bitmap[1]);

    /*
     * NB this must be called after all command-line processing has been
     * done, so that if (for example) HAP is disabled, nested virt is
     * disabled as well.
     */
    if ( using_vmx() )
        start_nested_vmx(&hvm_funcs);
    else if ( using_svm() )
        start_nested_svm(&hvm_funcs);

    return 0;
}
__initcall(nestedhvm_setup);

unsigned long *
nestedhvm_vcpu_iomap_get(bool ioport_80, bool ioport_ed)
{
    int i;

    /* Port 0x80 must stay intercepted when direct access to it isn't allowed. */
    if ( !hvm_port80_allowed )
        ioport_80 = true;

    if ( !ioport_80 )
    {
        /* Neither port intercepted: the common hvm_io_bitmap already matches. */
        if ( !ioport_ed )
            return hvm_io_bitmap;
        i = 0;
    }
    else
    {
        if ( !ioport_ed )
            i = 1;
        else
            i = 2;
    }

    return shadow_io_bitmap[i];
}
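
/*
 * Illustrative only (a sketch, not the exact SVM/VMX code): a nested
 * caller with the L1 guest's virtual I/O bitmap mapped at ns_viomap
 * (a stand-in name) could pick the shadow bitmap like this:
 *
 *     bool port_80 = test_bit(0x80, ns_viomap);
 *     bool port_ed = test_bit(0xed, ns_viomap);
 *     unsigned long *iomap = nestedhvm_vcpu_iomap_get(port_80, port_ed);
 *
 * The real callers live in the SVM/VMX specific nested code.
 */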