/*
 * Copyright (C) 2012 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/preempt.h>
#include <linux/kvm_host.h>
#include <linux/wait.h>

#include <asm/cputype.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_psci.h>
#include <asm/kvm_host.h>

/*
 * This is an implementation of the Power State Coordination Interface
 * as described in ARM document number ARM DEN 0022A.
 */
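
/*
 * For orientation, a guest reaches this code by issuing an HVC with the
 * PSCI function ID in r0/x0 and arguments in r1-r3/x1-x3. A rough sketch
 * of a 64-bit guest powering on a secondary CPU (the entry symbol and
 * MPIDR value below are illustrative, not taken from this file):
 *
 *	ldr	x0, =0xc4000003		// PSCI_0_2_FN64_CPU_ON
 *	mov	x1, #1			// target MPIDR (Aff0 = 1)
 *	ldr	x2, =secondary_entry	// entry point for the new CPU
 *	mov	x3, #0			// context_id, handed back in x0
 *	hvc	#0
 *
 * The trap is routed to kvm_psci_call() below, and the PSCI return value
 * is written back into the caller's r0/x0.
 */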

#define AFFINITY_MASK(level)	~((0x1UL << ((level) * MPIDR_LEVEL_BITS)) - 1)

static unsigned long psci_affinity_mask(unsigned long affinity_level)
{
	if (affinity_level <= 3)
		return MPIDR_HWID_BITMASK & AFFINITY_MASK(affinity_level);

	return 0;
}
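
/*
 * A worked example, assuming MPIDR_LEVEL_BITS is 8 (as on arm/arm64):
 * AFFINITY_MASK(1) is ~((1UL << 8) - 1) = ~0xff, so psci_affinity_mask(1)
 * keeps Aff1 and above while discarding Aff0, and psci_affinity_mask(0)
 * keeps every affinity field. Levels above 3 yield 0, which callers treat
 * as an invalid-parameter indication.
 */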

static unsigned long kvm_psci_vcpu_suspend(struct kvm_vcpu *vcpu)
{
	/*
	 * NOTE: For simplicity, we treat VCPU suspend emulation the same
	 * as WFI (Wait-for-interrupt) emulation.
	 *
	 * This means that for KVM the wakeup events are interrupts, which
	 * is consistent with the intended use of StateID as described in
	 * section 5.4.1 of the PSCI v0.2 specification (ARM DEN 0022A).
	 *
	 * Further, we also treat a power-down request the same as a
	 * stand-by request, as per section 5.4.2 clause 3 of the PSCI
	 * v0.2 specification (ARM DEN 0022A). This means all suspend
	 * states for KVM will preserve the register state.
	 */
	kvm_vcpu_block(vcpu);

	return PSCI_RET_SUCCESS;
}

static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pause = true;
}

static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
{
	struct kvm *kvm = source_vcpu->kvm;
	struct kvm_vcpu *vcpu = NULL;
	wait_queue_head_t *wq;
	unsigned long cpu_id;
	unsigned long context_id;
	phys_addr_t target_pc;

	cpu_id = *vcpu_reg(source_vcpu, 1) & MPIDR_HWID_BITMASK;
	if (vcpu_mode_is_32bit(source_vcpu))
		cpu_id &= ~((u32) 0);

	vcpu = kvm_mpidr_to_vcpu(kvm, cpu_id);

	/*
	 * Make sure the caller requested a valid CPU and that the CPU is
	 * turned off.
	 */
	if (!vcpu)
		return PSCI_RET_INVALID_PARAMS;
	if (!vcpu->arch.pause) {
		if (kvm_psci_version(source_vcpu) != KVM_ARM_PSCI_0_1)
			return PSCI_RET_ALREADY_ON;
		else
			return PSCI_RET_INVALID_PARAMS;
	}

	target_pc = *vcpu_reg(source_vcpu, 2);
	context_id = *vcpu_reg(source_vcpu, 3);

	kvm_reset_vcpu(vcpu);

	/* Gracefully handle Thumb2 entry point */
	if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
		target_pc &= ~((phys_addr_t) 1);
		vcpu_set_thumb(vcpu);
	}

	/* Propagate caller endianness */
	if (kvm_vcpu_is_be(source_vcpu))
		kvm_vcpu_set_be(vcpu);

	*vcpu_pc(vcpu) = target_pc;
	/*
	 * NOTE: We always update r0 (or x0) because for PSCI v0.1
	 * the general purpose registers are undefined upon CPU_ON.
	 */
	*vcpu_reg(vcpu, 0) = context_id;
	vcpu->arch.pause = false;
	smp_mb();		/* Make sure the above is visible */

	wq = kvm_arch_vcpu_wq(vcpu);
	wake_up_interruptible(wq);

	return PSCI_RET_SUCCESS;
}

static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
{
	int i;
	unsigned long mpidr;
	unsigned long target_affinity;
	unsigned long target_affinity_mask;
	unsigned long lowest_affinity_level;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_vcpu *tmp;

	target_affinity = *vcpu_reg(vcpu, 1);
	lowest_affinity_level = *vcpu_reg(vcpu, 2);

	/* Determine target affinity mask */
	target_affinity_mask = psci_affinity_mask(lowest_affinity_level);
	if (!target_affinity_mask)
		return PSCI_RET_INVALID_PARAMS;

	/* Ignore other bits of target affinity */
	target_affinity &= target_affinity_mask;

	/*
	 * If one or more VCPUs matching the target affinity are running,
	 * report ON; otherwise report OFF.
	 */
	kvm_for_each_vcpu(i, tmp, kvm) {
		mpidr = kvm_vcpu_get_mpidr_aff(tmp);
		if (((mpidr & target_affinity_mask) == target_affinity) &&
		    !tmp->arch.pause) {
			return PSCI_0_2_AFFINITY_LEVEL_ON;
		}
	}

	return PSCI_0_2_AFFINITY_LEVEL_OFF;
}
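
/*
 * An illustrative AFFINITY_INFO query: with target_affinity = 0x0100
 * (Aff1 = 1) and lowest_affinity_level = 1, the Aff0 field is masked off,
 * so the call reports AFFINITY_LEVEL_ON if any non-paused VCPU sits in
 * cluster 1, and AFFINITY_LEVEL_OFF otherwise.
 */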

static void kvm_prepare_system_event(struct kvm_vcpu *vcpu, u32 type)
{
	int i;
	struct kvm_vcpu *tmp;

	/*
	 * The KVM ABI specifies that a system event exit may call KVM_RUN
	 * again and may perform shutdown/reboot at a later time than when
	 * the actual request is made.  Since we are implementing PSCI and a
	 * caller of PSCI reboot and shutdown expects that the system shuts
	 * down or reboots immediately, let's make sure that VCPUs are not run
	 * after this call is handled and before the VCPUs have been
	 * re-initialized.
	 */
	kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
		tmp->arch.pause = true;
		kvm_vcpu_kick(tmp);
	}

	memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event));
	vcpu->run->system_event.type = type;
	vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
}
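
/*
 * The resulting exit is consumed by the VMM after KVM_RUN returns. A
 * minimal userspace sketch (the run mapping and surrounding VMM code are
 * assumptions for illustration, not part of this file):
 *
 *	struct kvm_run *run;		// mmap()ed per-VCPU run structure
 *
 *	ioctl(vcpu_fd, KVM_RUN, 0);
 *	if (run->exit_reason == KVM_EXIT_SYSTEM_EVENT) {
 *		switch (run->system_event.type) {
 *		case KVM_SYSTEM_EVENT_SHUTDOWN:
 *			// power off the VM
 *			break;
 *		case KVM_SYSTEM_EVENT_RESET:
 *			// reset and restart the VM
 *			break;
 *		}
 *	}
 */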

static void kvm_psci_system_off(struct kvm_vcpu *vcpu)
{
	kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_SHUTDOWN);
}

static void kvm_psci_system_reset(struct kvm_vcpu *vcpu)
{
	kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_RESET);
}

int kvm_psci_version(struct kvm_vcpu *vcpu)
{
	if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features))
		return KVM_ARM_PSCI_0_2;

	return KVM_ARM_PSCI_0_1;
}
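
/*
 * The KVM_ARM_VCPU_PSCI_0_2 feature bit is set by userspace when the VCPU
 * is initialized. A rough sketch of the VMM side (the surrounding code is
 * assumed, not part of this file):
 *
 *	struct kvm_vcpu_init init;
 *
 *	ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &init);
 *	init.features[0] |= 1 << KVM_ARM_VCPU_PSCI_0_2;
 *	ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);
 *
 * Without the bit, guests fall back to the legacy KVM_ARM_PSCI_0_1
 * interface handled further below.
 */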

static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
{
	int ret = 1;
	unsigned long psci_fn = *vcpu_reg(vcpu, 0) & ~((u32) 0);
	unsigned long val;

	switch (psci_fn) {
	case PSCI_0_2_FN_PSCI_VERSION:
		/*
		 * Bits[31:16] = Major Version = 0
		 * Bits[15:0] = Minor Version = 2
		 */
		val = 2;
		break;
	case PSCI_0_2_FN_CPU_SUSPEND:
	case PSCI_0_2_FN64_CPU_SUSPEND:
		val = kvm_psci_vcpu_suspend(vcpu);
		break;
	case PSCI_0_2_FN_CPU_OFF:
		kvm_psci_vcpu_off(vcpu);
		val = PSCI_RET_SUCCESS;
		break;
	case PSCI_0_2_FN_CPU_ON:
	case PSCI_0_2_FN64_CPU_ON:
		val = kvm_psci_vcpu_on(vcpu);
		break;
	case PSCI_0_2_FN_AFFINITY_INFO:
	case PSCI_0_2_FN64_AFFINITY_INFO:
		val = kvm_psci_vcpu_affinity_info(vcpu);
		break;
	case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
		/*
		 * Trusted OS is MP and hence does not require migration,
		 * or the Trusted OS is not present.
		 */
		val = PSCI_0_2_TOS_MP;
		break;
	case PSCI_0_2_FN_SYSTEM_OFF:
		kvm_psci_system_off(vcpu);
		/*
		 * We shouldn't be going back to the guest VCPU after
		 * receiving a SYSTEM_OFF request.
		 *
		 * If user space accidentally/deliberately resumes the
		 * guest VCPU after a SYSTEM_OFF request then the guest
		 * VCPU should see an internal failure from the PSCI
		 * return value. To achieve this, we preload r0 (or x0)
		 * with the PSCI return value INTERNAL_FAILURE.
		 */
		val = PSCI_RET_INTERNAL_FAILURE;
		ret = 0;
		break;
	case PSCI_0_2_FN_SYSTEM_RESET:
		kvm_psci_system_reset(vcpu);
		/*
		 * Same reason as SYSTEM_OFF for preloading r0 (or x0)
		 * with the PSCI return value INTERNAL_FAILURE.
		 */
		val = PSCI_RET_INTERNAL_FAILURE;
		ret = 0;
		break;
	default:
		val = PSCI_RET_NOT_SUPPORTED;
		break;
	}

	*vcpu_reg(vcpu, 0) = val;
	return ret;
}

static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
{
	unsigned long psci_fn = *vcpu_reg(vcpu, 0) & ~((u32) 0);
	unsigned long val;

	switch (psci_fn) {
	case KVM_PSCI_FN_CPU_OFF:
		kvm_psci_vcpu_off(vcpu);
		val = PSCI_RET_SUCCESS;
		break;
	case KVM_PSCI_FN_CPU_ON:
		val = kvm_psci_vcpu_on(vcpu);
		break;
	default:
		val = PSCI_RET_NOT_SUPPORTED;
		break;
	}

	*vcpu_reg(vcpu, 0) = val;
	return 1;
}

/**
 * kvm_psci_call - handle PSCI call if r0 value is in range
 * @vcpu: Pointer to the VCPU struct
 *
 * Handle PSCI calls from guests through traps from HVC instructions.
 * The calling convention is similar to SMC calls to the secure world
 * where the function number is placed in r0.
 *
 * This function returns: > 0 (success), 0 (success but exit to user
 * space), and < 0 (errors)
 *
 * Errors:
 * -EINVAL: Unrecognized PSCI function
 */
int kvm_psci_call(struct kvm_vcpu *vcpu)
{
	switch (kvm_psci_version(vcpu)) {
	case KVM_ARM_PSCI_0_2:
		return kvm_psci_0_2_call(vcpu);
	case KVM_ARM_PSCI_0_1:
		return kvm_psci_0_1_call(vcpu);
	default:
		return -EINVAL;
	}
}