/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2010
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#ifndef __ASM_KVM_BOOK3S_64_H__
#define __ASM_KVM_BOOK3S_64_H__

#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	return &get_paca()->shadow_vcpu;
}

static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
{
	preempt_enable();
}
#endif
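
/*
 * Illustrative usage sketch (a convention implied by the code above,
 * not a documented API): every svcpu_get() must be paired with an
 * svcpu_put(), since the returned pointer into the PACA is only
 * stable while preemption is disabled:
 *
 *	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
 *	...access svcpu fields with preemption off...
 *	svcpu_put(svcpu);
 */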

#define SPAPR_TCE_SHIFT		12

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
#define KVM_DEFAULT_HPT_ORDER	24	/* 16MB HPT by default */
#endif

#define VRMA_VSID	0x1ffffffUL	/* 1TB VSID reserved for VRMA */

/*
 * We use a lock bit in HPTE dword 0 to synchronize updates and
 * accesses to each HPTE, and another bit to indicate non-present
 * HPTEs.
 */
#define HPTE_V_HVLOCK	0x40UL
#define HPTE_V_ABSENT	0x20UL

/*
 * We use this bit in the guest_rpte field of the revmap entry
 * to indicate a modified HPTE.
 */
#define HPTE_GR_MODIFIED	(1ul << 62)

/* These bits are reserved in the guest view of the HPTE */
#define HPTE_GR_RESERVED	HPTE_GR_MODIFIED

static inline long try_lock_hpte(__be64 *hpte, unsigned long bits)
{
	unsigned long tmp, old;
	__be64 be_lockbit, be_bits;

	/*
	 * We load/store in native endian, but the HTAB is in big endian.
	 * If we byte-swap all data we apply to the PTE, we are implicitly
	 * correct again.
	 */
	be_lockbit = cpu_to_be64(HPTE_V_HVLOCK);
	be_bits = cpu_to_be64(bits);

	asm volatile("	ldarx	%0,0,%2\n"
		     "	and.	%1,%0,%3\n"
		     "	bne	2f\n"
		     "	or	%0,%0,%4\n"
		     "	stdcx.	%0,0,%2\n"
		     "	beq+	2f\n"
		     "	mr	%1,%3\n"
		     "2:	isync"
		     : "=&r" (tmp), "=&r" (old)
		     : "r" (hpte), "r" (be_bits), "r" (be_lockbit)
		     : "cc", "memory");
	return old == 0;
}
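
/*
 * Illustrative locking sketch (hptep and v are hypothetical locals),
 * mirroring how callers typically use try_lock_hpte() together with
 * the unlock helpers below: spin until HPTE_V_HVLOCK is acquired,
 * operate on the entry, then release.
 *
 *	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
 *		cpu_relax();
 *	v = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK;
 *	...inspect or modify the HPTE...
 *	unlock_hpte(hptep, v);
 */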

static inline void unlock_hpte(__be64 *hpte, unsigned long hpte_v)
{
	hpte_v &= ~HPTE_V_HVLOCK;
	asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
	hpte[0] = cpu_to_be64(hpte_v);
}

/* Like unlock_hpte(), but without the release barrier */
static inline void __unlock_hpte(__be64 *hpte, unsigned long hpte_v)
{
	hpte_v &= ~HPTE_V_HVLOCK;
	hpte[0] = cpu_to_be64(hpte_v);
}

static inline int __hpte_actual_psize(unsigned int lp, int psize)
{
	int i, shift;
	unsigned int mask;

	/* start from 1, ignoring MMU_PAGE_4K */
	for (i = 1; i < MMU_PAGE_COUNT; i++) {

		/* invalid penc */
		if (mmu_psize_defs[psize].penc[i] == -1)
			continue;
		/*
		 * encoding bits per actual page size
		 *        PTE LP     actual page size
		 *    rrrr rrrz		>= 8KB
		 *    rrrr rrzz		>= 16KB
		 *    rrrr rzzz		>= 32KB
		 *    rrrr zzzz		>= 64KB
		 * ...
		 */
		shift = mmu_psize_defs[i].shift - LP_SHIFT;
		if (shift > LP_BITS)
			shift = LP_BITS;
		mask = (1 << shift) - 1;
		if ((lp & mask) == mmu_psize_defs[psize].penc[i])
			return i;
	}
	return -1;
}
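
/*
 * Worked example (assuming LP_SHIFT == 12 and LP_BITS == 8): when the
 * candidate actual page size is 64K (shift 16), shift == 4 and
 * mask == 0xf, so only the low 4 bits of lp are compared against
 * mmu_psize_defs[psize].penc[MMU_PAGE_64K]; the upper lp bits are the
 * "r" bits in the table above.
 */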

static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
					     unsigned long pte_index)
{
	int b_psize = MMU_PAGE_4K, a_psize = MMU_PAGE_4K;
	unsigned int penc;
	unsigned long rb = 0, va_low, sllp;
	unsigned int lp = (r >> LP_SHIFT) & ((1 << LP_BITS) - 1);

	if (v & HPTE_V_LARGE) {
		for (b_psize = 0; b_psize < MMU_PAGE_COUNT; b_psize++) {

			/* valid entries have a shift value */
			if (!mmu_psize_defs[b_psize].shift)
				continue;

			a_psize = __hpte_actual_psize(lp, b_psize);
			if (a_psize != -1)
				break;
		}
	}
	/*
	 * Ignore the top 14 bits of va.
	 * v has its top two bits covering segment size, hence shift
	 * by 16 bits; also clear the lower HPTE_V_AVPN_SHIFT (7) bits.
	 * The AVA field in v also has its lower 23 bits ignored.
	 * For a 4K base page size we need bits 14..65 (so we must
	 * collect an extra 11 bits); for others we need bits 14..14+i.
	 */
	/* This covers bits 14..54 of va */
	rb = (v & ~0x7fUL) << 16;		/* AVA field */

	rb |= (v >> HPTE_V_SSIZE_SHIFT) << 8;	/* B field */
	/*
	 * The AVA field in v has its lower 23 bits cleared; we need to
	 * derive them from the PTEG index.
	 */
	va_low = pte_index >> 3;
	if (v & HPTE_V_SECONDARY)
		va_low = ~va_low;
	/*
	 * Get the VPN bits from va_low by reversing the hash. In v we
	 * have va with its lower 23 bits dropped and then shifted left
	 * by HPTE_V_AVPN_SHIFT (7) bits, so to recover the VSID we
	 * shift v right by (SID_SHIFT - (23 - 7)).
	 */
	if (!(v & HPTE_V_1TB_SEG))
		va_low ^= v >> (SID_SHIFT - 16);
	else
		va_low ^= v >> (SID_SHIFT_1T - 16);
	va_low &= 0x7ff;

	switch (b_psize) {
	case MMU_PAGE_4K:
		sllp = ((mmu_psize_defs[a_psize].sllp & SLB_VSID_L) >> 6) |
			((mmu_psize_defs[a_psize].sllp & SLB_VSID_LP) >> 4);
		rb |= sllp << 5;	/* AP field */
		rb |= (va_low & 0x7ff) << 12;	/* remaining 11 bits of AVA */
		break;
	default:
	{
		int aval_shift;
		/*
		 * Remaining bits of the AVA/LP fields; these also
		 * contain the "r" bits of LP.
		 */
		rb |= (va_low << mmu_psize_defs[b_psize].shift) & 0x7ff000;
		/*
		 * Now clear the LP bits that are not needed, based on
		 * the actual psize.
		 */
		rb &= ~((1ul << mmu_psize_defs[a_psize].shift) - 1);
		/*
		 * The AVAL field holds bits 58..(77 - base_page_shift)
		 * of va; we only have room for bits 58..64, and the
		 * missing bits must be zero-filled. The +1 accounts
		 * for the L bit.
		 */
		aval_shift = 64 - (77 - mmu_psize_defs[b_psize].shift) + 1;
		rb |= ((va_low << aval_shift) & 0xfe);

		rb |= 1;		/* L field */
		penc = mmu_psize_defs[b_psize].penc[a_psize];
		rb |= penc << 12;	/* LP field */
		break;
	}
	}
	return rb;
}
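
/*
 * Hedged sketch of how the result is typically consumed (the real
 * sequence lives in the HV real-mode MMU code; hptep, pte_index and
 * lpid are hypothetical locals): the returned value is the RB operand
 * of the tlbie instruction, with the LPID supplied as RS.
 *
 *	rb = compute_tlbie_rb(be64_to_cpu(hptep[0]),
 *			      be64_to_cpu(hptep[1]), pte_index);
 *	asm volatile("ptesync; tlbie %0,%1; eieio; tlbsync; ptesync"
 *		     : : "r" (rb), "r" (lpid) : "memory");
 */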

static inline unsigned long __hpte_page_size(unsigned long h, unsigned long l,
					     bool is_base_size)
{
	int size, a_psize;
	/* Look at the 8-bit LP value */
	unsigned int lp = (l >> LP_SHIFT) & ((1 << LP_BITS) - 1);

	/* only handle 4k, 64k and 16M pages for now */
	if (!(h & HPTE_V_LARGE))
		return 1ul << 12;

	for (size = 0; size < MMU_PAGE_COUNT; size++) {
		/* valid entries have a shift value */
		if (!mmu_psize_defs[size].shift)
			continue;

		a_psize = __hpte_actual_psize(lp, size);
		if (a_psize != -1) {
			if (is_base_size)
				return 1ul << mmu_psize_defs[size].shift;
			return 1ul << mmu_psize_defs[a_psize].shift;
		}
	}
	return 0;
}

static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
{
	return __hpte_page_size(h, l, false);
}

static inline unsigned long hpte_base_page_size(unsigned long h, unsigned long l)
{
	return __hpte_page_size(h, l, true);
}

static inline unsigned long hpte_rpn(unsigned long ptel, unsigned long psize)
{
	return ((ptel & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;
}
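
/*
 * Worked example (assuming a 4K PAGE_SIZE): for a 16MB page (psize ==
 * 0x1000000) whose HPTE_R_RPN bits encode real address 0x12345000000,
 * hpte_rpn() masks off the low 24 bits and returns the frame number
 * in PAGE_SIZE units: 0x12345000000 >> 12 == 0x12345000.
 */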

static inline int hpte_is_writable(unsigned long ptel)
{
	unsigned long pp = ptel & (HPTE_R_PP0 | HPTE_R_PP);

	return pp != PP_RXRX && pp != PP_RXXX;
}

static inline unsigned long hpte_make_readonly(unsigned long ptel)
{
	if ((ptel & HPTE_R_PP0) || (ptel & HPTE_R_PP) == PP_RWXX)
		ptel = (ptel & ~HPTE_R_PP) | PP_RXXX;
	else
		ptel |= PP_RXRX;
	return ptel;
}

static inline int hpte_cache_flags_ok(unsigned long ptel, unsigned long io_type)
{
	unsigned int wimg = ptel & HPTE_R_WIMG;

	/* Handle SAO */
	if (wimg == (HPTE_R_W | HPTE_R_I | HPTE_R_M) &&
	    cpu_has_feature(CPU_FTR_ARCH_206))
		wimg = HPTE_R_M;

	if (!io_type)
		return wimg == HPTE_R_M;

	return (wimg & (HPTE_R_W | HPTE_R_I)) == io_type;
}
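
/*
 * Illustrative summary of the check above: ordinary memory must be
 * mapped with only the M bit set (wimg == HPTE_R_M); for emulated I/O
 * the caller passes the required W and I bits in io_type (e.g.
 * HPTE_R_I, or HPTE_R_I | HPTE_R_W) and only those two bits are
 * compared.  W=I=M=1 (SAO) is accepted as ordinary memory on CPUs
 * with CPU_FTR_ARCH_206 (POWER7 and later).
 */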

/*
 * If it's present and writable, atomically set the dirty and referenced
 * bits and return the PTE, otherwise return a zero PTE.
 */
static inline pte_t kvmppc_read_update_linux_pte(pte_t *ptep, int writing)
{
	pte_t old_pte, new_pte = __pte(0);

	while (1) {
		/*
		 * Make sure we don't reload from ptep
		 */
		old_pte = READ_ONCE(*ptep);
		/*
		 * Wait until _PAGE_BUSY is clear before attempting
		 * the update.
		 */
		if (unlikely(pte_val(old_pte) & _PAGE_BUSY)) {
			cpu_relax();
			continue;
		}
		/* If the pte is not present, return a zero PTE */
		if (unlikely(!(pte_val(old_pte) & _PAGE_PRESENT)))
			return __pte(0);

		new_pte = pte_mkyoung(old_pte);
		if (writing && pte_write(old_pte))
			new_pte = pte_mkdirty(new_pte);

		if (pte_val(old_pte) == __cmpxchg_u64((unsigned long *)ptep,
						      pte_val(old_pte),
						      pte_val(new_pte))) {
			break;
		}
	}
	return new_pte;
}

/* Return HPTE cache control bits corresponding to Linux pte bits */
static inline unsigned long hpte_cache_bits(unsigned long pte_val)
{
#if _PAGE_NO_CACHE == HPTE_R_I && _PAGE_WRITETHRU == HPTE_R_W
	return pte_val & (HPTE_R_W | HPTE_R_I);
#else
	return ((pte_val & _PAGE_NO_CACHE) ? HPTE_R_I : 0) +
		((pte_val & _PAGE_WRITETHRU) ? HPTE_R_W : 0);
#endif
}

static inline bool hpte_read_permission(unsigned long pp, unsigned long key)
{
	if (key)
		return PP_RWRX <= pp && pp <= PP_RXRX;
	return true;
}

static inline bool hpte_write_permission(unsigned long pp, unsigned long key)
{
	if (key)
		return pp == PP_RWRW;
	return pp <= PP_RWRW;
}

static inline int hpte_get_skey_perm(unsigned long hpte_r, unsigned long amr)
{
	unsigned long skey;

	skey = ((hpte_r & HPTE_R_KEY_HI) >> 57) |
		((hpte_r & HPTE_R_KEY_LO) >> 9);
	return (amr >> (62 - 2 * skey)) & 3;
}
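
/*
 * Illustrative note: the 5-bit storage key is reassembled from
 * HPTE_R_KEY_HI and HPTE_R_KEY_LO, and the AMR holds one 2-bit
 * access-deny pair per key, with key 0 in the two most significant
 * bits; e.g. for skey == 1 the returned pair is (amr >> 60) & 3.
 */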

static inline void lock_rmap(unsigned long *rmap)
{
	do {
		while (test_bit(KVMPPC_RMAP_LOCK_BIT, rmap))
			cpu_relax();
	} while (test_and_set_bit_lock(KVMPPC_RMAP_LOCK_BIT, rmap));
}

static inline void unlock_rmap(unsigned long *rmap)
{
	__clear_bit_unlock(KVMPPC_RMAP_LOCK_BIT, rmap);
}
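
/*
 * Usage sketch (illustrative): the rmap lock protects a guest page's
 * reverse-mapping chain while HPTEs are added or removed:
 *
 *	lock_rmap(rmap);
 *	...walk or modify the rmap chain...
 *	unlock_rmap(rmap);
 *
 * The plain test_bit() spin in lock_rmap() keeps waiters from
 * hammering the cache line with atomic test_and_set operations.
 */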

static inline bool slot_is_aligned(struct kvm_memory_slot *memslot,
				   unsigned long pagesize)
{
	unsigned long mask = (pagesize >> PAGE_SHIFT) - 1;

	if (pagesize <= PAGE_SIZE)
		return true;
	return !(memslot->base_gfn & mask) && !(memslot->npages & mask);
}
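
/*
 * Worked example (assuming a 4K PAGE_SIZE): for 16MB backing pages,
 * mask == (0x1000000 >> 12) - 1 == 0xfff, so the memslot's base_gfn
 * and npages must both be multiples of 4096 small pages for the slot
 * to be mappable with 16MB HPTEs.
 */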

/*
 * This works for 4k, 64k and 16M pages on POWER7,
 * and 4k and 16M pages on PPC970.
 */
static inline unsigned long slb_pgsize_encoding(unsigned long psize)
{
	unsigned long senc = 0;

	if (psize > 0x1000) {
		senc = SLB_VSID_L;
		if (psize == 0x10000)
			senc |= SLB_VSID_LP_01;
	}
	return senc;
}
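
/*
 * Worked example: psize == 0x1000000 (16MB) yields SLB_VSID_L with an
 * LP field of 00; psize == 0x10000 (64K) yields
 * SLB_VSID_L | SLB_VSID_LP_01; 4K pages return 0.
 */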

static inline int is_vrma_hpte(unsigned long hpte_v)
{
	return (hpte_v & ~0xffffffUL) ==
		(HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)));
}

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
/*
 * Note modification of an HPTE; set the HPTE modified bit
 * if anyone is interested.
 */
static inline void note_hpte_modification(struct kvm *kvm,
					  struct revmap_entry *rev)
{
	if (atomic_read(&kvm->arch.hpte_mod_interest))
		rev->guest_rpte |= HPTE_GR_MODIFIED;
}

/*
 * Like kvm_memslots(), but for use in real mode when we can't do
 * any RCU stuff (since the secondary threads are offline from the
 * kernel's point of view), and we can't print anything.
 * Thus we use rcu_dereference_raw() rather than rcu_dereference_check().
 */
static inline struct kvm_memslots *kvm_memslots_raw(struct kvm *kvm)
{
	return rcu_dereference_raw_notrace(kvm->memslots);
}

extern void kvmppc_mmu_debugfs_init(struct kvm *kvm);

extern void kvmhv_rm_send_ipi(int cpu);

#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */

#endif /* __ASM_KVM_BOOK3S_64_H__ */