/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <xen/errno.h>
#include <xen/sched.h>
#include <xen/types.h>

#include <asm/current.h>
#include <asm/gic.h>
#include <asm/vgic.h>
#include <asm/psci.h>
#include <asm/event.h>

#include <public/sched.h>

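/*
 * Common implementation of CPU_ON for the PSCI 0.1 and 0.2 calling
 * conventions: validate the target vCPU, build a fresh guest context
 * starting at entry_point and wake the vCPU. 'ver' selects which
 * convention the caller used.
 */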
static int do_common_cpu_on(register_t target_cpu, register_t entry_point,
                            register_t context_id, int ver)
{
    struct vcpu *v;
    struct domain *d = current->domain;
    struct vcpu_guest_context *ctxt;
    int rc;
    int is_thumb = entry_point & 1;
    register_t vcpuid;

    vcpuid = vaffinity_to_vcpuid(target_cpu);

    if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
        return PSCI_INVALID_PARAMETERS;

    /* THUMB is not allowed with a 64-bit domain */
    if ( is_64bit_domain(d) && is_thumb )
        return PSCI_INVALID_PARAMETERS;

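    /*
     * ALREADY_ON is only defined from PSCI 0.2 onwards, so a request for a
     * vCPU that is not down is rejected only for 0.2 callers.
     */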
    if ( (ver == PSCI_VERSION(0, 2)) &&
         !test_bit(_VPF_down, &v->pause_flags) )
        return PSCI_ALREADY_ON;

    if ( (ctxt = alloc_vcpu_guest_context()) == NULL )
        return PSCI_DENIED;

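    /* Discard any interrupts still pending on the vGIC from a previous run. */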
    vgic_clear_pending_irqs(v);

    memset(ctxt, 0, sizeof(*ctxt));
    ctxt->user_regs.pc64 = (u64) entry_point;
    ctxt->sctlr = SCTLR_GUEST_INIT;
    ctxt->ttbr0 = 0;
    ctxt->ttbr1 = 0;
    ctxt->ttbcr = 0; /* Defined Reset Value */
    if ( is_32bit_domain(d) )
    {
        ctxt->user_regs.cpsr = PSR_GUEST32_INIT;
        if ( ver == PSCI_VERSION(0, 2) )
            ctxt->user_regs.r0_usr = context_id;
    }
#ifdef CONFIG_ARM_64
    else
    {
        ctxt->user_regs.cpsr = PSR_GUEST64_INIT;
        if ( ver == PSCI_VERSION(0, 2) )
            ctxt->user_regs.x0 = context_id;
    }
#endif

    /* Start the VCPU with THUMB set if it's requested by the kernel */
    if ( is_thumb )
        ctxt->user_regs.cpsr |= PSR_THUMB;
    ctxt->flags = VGCF_online;

    domain_lock(d);
    rc = arch_set_info_guest(v, ctxt);
    free_vcpu_guest_context(ctxt);

    if ( rc < 0 )
    {
        domain_unlock(d);
        return PSCI_DENIED;
    }
    domain_unlock(d);

    vcpu_wake(v);

    return PSCI_SUCCESS;
}

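/*
 * PSCI 0.1 CPU_ON: the 0.1 calling convention carries no context ID, so
 * pass 0 to the common handler.
 */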
int32_t do_psci_cpu_on(uint32_t vcpuid, register_t entry_point)
{
    return do_common_cpu_on(vcpuid, entry_point, 0, PSCI_VERSION(0, 1));
}

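/*
 * CPU_OFF: mark the calling vCPU as down and put it to sleep. The
 * power_state argument is ignored.
 */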
int32_t do_psci_cpu_off(uint32_t power_state)
{
    struct vcpu *v = current;

    if ( !test_and_set_bit(_VPF_down, &v->pause_flags) )
        vcpu_sleep_nosync(v);
    return PSCI_SUCCESS;
}

uint32_t do_psci_0_2_version(void)
{
    return PSCI_VERSION(0, 2);
}

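/* CPU_SUSPEND: block the calling vCPU until an event is pending for it. */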
register_t do_psci_0_2_cpu_suspend(uint32_t power_state, register_t entry_point,
                                   register_t context_id)
{
    struct vcpu *v = current;

    /*
     * Power off requests are treated as performing standby
     * as this simplifies the Xen implementation.
     */

    vcpu_block_unless_event_pending(v);
    return PSCI_SUCCESS;
}

int32_t do_psci_0_2_cpu_off(void)
{
    return do_psci_cpu_off(0);
}

int32_t do_psci_0_2_cpu_on(register_t target_cpu, register_t entry_point,
                           register_t context_id)
{
    return do_common_cpu_on(target_cpu, entry_point, context_id,
                            PSCI_VERSION(0, 2));
}

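/*
 * Per-level masks used by AFFINITY_INFO: entry N discards the MPIDR
 * affinity fields below level N, so only the requested levels are compared.
 */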
static const unsigned long target_affinity_mask[] = {
    ( MPIDR_HWID_MASK & AFFINITY_MASK( 0 ) ),
    ( MPIDR_HWID_MASK & AFFINITY_MASK( 1 ) ),
    ( MPIDR_HWID_MASK & AFFINITY_MASK( 2 ) )
#ifdef CONFIG_ARM_64
    ,( MPIDR_HWID_MASK & AFFINITY_MASK( 3 ) )
#endif
};

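/*
 * AFFINITY_INFO: report ON if any vCPU of the calling domain matches the
 * requested affinity at the given level and is not down, OFF otherwise.
 */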
int32_t do_psci_0_2_affinity_info(register_t target_affinity,
                                  uint32_t lowest_affinity_level)
{
    struct domain *d = current->domain;
    struct vcpu *v;
    uint32_t vcpuid;
    unsigned long tmask;

    if ( lowest_affinity_level < ARRAY_SIZE(target_affinity_mask) )
    {
        tmask = target_affinity_mask[lowest_affinity_level];
        target_affinity &= tmask;
    }
    else
        return PSCI_INVALID_PARAMETERS;

    for ( vcpuid = 0; vcpuid < d->max_vcpus; vcpuid++ )
    {
        v = d->vcpu[vcpuid];

        if ( ( ( v->arch.vmpidr & tmask ) == target_affinity )
             && ( !test_bit(_VPF_down, &v->pause_flags) ) )
            return PSCI_0_2_AFFINITY_LEVEL_ON;
    }

    return PSCI_0_2_AFFINITY_LEVEL_OFF;
}

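/* MIGRATE: there is no Trusted OS managed by Xen to migrate. */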
int32_t do_psci_0_2_migrate(uint32_t target_cpu)
{
    return PSCI_NOT_SUPPORTED;
}

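/*
 * Report the Trusted OS as MP-capable or not present, so guests never
 * need to issue MIGRATE.
 */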
uint32_t do_psci_0_2_migrate_info_type(void)
{
    return PSCI_0_2_TOS_MP_OR_NOT_PRESENT;
}

register_t do_psci_0_2_migrate_info_up_cpu(void)
{
    return PSCI_NOT_SUPPORTED;
}

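/* SYSTEM_OFF and SYSTEM_RESET shut down or reboot the whole domain. */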
void do_psci_0_2_system_off(void)
{
    struct domain *d = current->domain;

    domain_shutdown(d, SHUTDOWN_poweroff);
}

void do_psci_0_2_system_reset(void)
{
    struct domain *d = current->domain;

    domain_shutdown(d, SHUTDOWN_reboot);
}

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */