#ifndef __KVM_X86_MMU_H
#define __KVM_X86_MMU_H

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"

#define PT64_PT_BITS 9
#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
#define PT32_PT_BITS 10
#define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)

#define PT_WRITABLE_SHIFT 1

#define PT_PRESENT_MASK (1ULL << 0)
#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
#define PT_USER_MASK (1ULL << 2)
#define PT_PWT_MASK (1ULL << 3)
#define PT_PCD_MASK (1ULL << 4)
#define PT_ACCESSED_SHIFT 5
#define PT_ACCESSED_MASK (1ULL << PT_ACCESSED_SHIFT)
#define PT_DIRTY_SHIFT 6
#define PT_DIRTY_MASK (1ULL << PT_DIRTY_SHIFT)
#define PT_PAGE_SIZE_SHIFT 7
#define PT_PAGE_SIZE_MASK (1ULL << PT_PAGE_SIZE_SHIFT)
#define PT_PAT_MASK (1ULL << 7)
#define PT_GLOBAL_MASK (1ULL << 8)
#define PT64_NX_SHIFT 63
#define PT64_NX_MASK (1ULL << PT64_NX_SHIFT)

#define PT_PAT_SHIFT 7
#define PT_DIR_PAT_SHIFT 12
#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)

#define PT32_DIR_PSE36_SIZE 4
#define PT32_DIR_PSE36_SHIFT 13
#define PT32_DIR_PSE36_MASK \
	(((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)
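
/*
 * Informative note (not in the original header): the mask above evaluates
 * to 0x1e000, i.e. PDE bits 13..16, which on PSE-36 hardware hold physical
 * address bits 32..35 of a large-page mapping.
 */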

#define PT64_ROOT_LEVEL 4
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3

#define PT_PDPE_LEVEL 3
#define PT_DIRECTORY_LEVEL 2
#define PT_PAGE_TABLE_LEVEL 1

static inline u64 rsvd_bits(int s, int e)
{
	return ((1ULL << (e - s + 1)) - 1) << s;
}
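
/*
 * Example (illustrative values only): rsvd_bits(51, 62) builds the mask of
 * guest-physical address bits 51..62, i.e. 0x7ff8000000000000ULL.
 */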

int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]);
void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask);

/*
 * Return values of handle_mmio_page_fault_common:
 * RET_MMIO_PF_EMULATE: it is a real mmio page fault, emulate the instruction
 *			directly.
 * RET_MMIO_PF_INVALID: an invalid spte is detected; let the real page
 *			fault path update the mmio spte.
 * RET_MMIO_PF_RETRY: let the CPU fault again on the address.
 * RET_MMIO_PF_BUG: a bug has been detected.
 */
enum {
	RET_MMIO_PF_EMULATE = 1,
	RET_MMIO_PF_INVALID = 2,
	RET_MMIO_PF_RETRY = 0,
	RET_MMIO_PF_BUG = -1
};
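
/*
 * Illustrative caller sketch (the real callers live in mmu.c and the vendor
 * modules; the emulation call below is only an example, not the exact
 * in-tree code):
 *
 *	r = handle_mmio_page_fault_common(vcpu, addr, direct);
 *	if (r == RET_MMIO_PF_EMULATE)
 *		return x86_emulate_instruction(vcpu, addr, 0, NULL, 0);
 *	if (r == RET_MMIO_PF_RETRY)
 *		return 1;	-- re-enter the guest and fault again
 */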

int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct);
void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly);

static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
{
	if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages)
		return kvm->arch.n_max_mmu_pages -
			kvm->arch.n_used_mmu_pages;

	return 0;
}

static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
{
	if (likely(vcpu->arch.mmu.root_hpa != INVALID_PAGE))
		return 0;

	return kvm_mmu_load(vcpu);
}

static inline int is_present_gpte(unsigned long pte)
{
	return pte & PT_PRESENT_MASK;
}

/*
 * Currently we have two sorts of write-protection: a) the first one
 * write-protects guest pages to synchronize guest modifications, and b) the
 * second one is used to synchronize the dirty bitmap for KVM_GET_DIRTY_LOG.
 * The differences between the two are:
 * 1) the first case clears the SPTE_MMU_WRITEABLE bit.
 * 2) the first case requires flushing the TLB immediately to avoid corrupting
 *    the shadow page table across vcpus, so it must run under the protection
 *    of mmu-lock.  The second case does not need to flush the TLB until the
 *    dirty bitmap is returned to userspace, since it only write-protects
 *    pages logged in the bitmap; no page in the dirty bitmap is missed, so
 *    the flush can happen outside of mmu-lock.
 *
 * This leaves a problem: the first case can observe a stale TLB entry left
 * behind by the second case, which write-protects pages without flushing the
 * TLB immediately.  To make the first case aware of this, we make it flush
 * the TLB whenever it write-protects an spte whose SPTE_MMU_WRITEABLE bit is
 * set; this works because the second case never touches SPTE_MMU_WRITEABLE.
 *
 * In any case, whenever an spte is updated (only permission and status bits
 * change) we need to check whether an spte with SPTE_MMU_WRITEABLE has become
 * read-only; if so, the TLB must be flushed.  Fortunately,
 * mmu_spte_update() already handles this.
 *
 * The rules for using SPTE_MMU_WRITEABLE and PT_WRITABLE_MASK:
 * - to check whether a writable TLB entry may exist, or whether the spte can
 *   be made writable on the mmu mapping, check SPTE_MMU_WRITEABLE; this is
 *   the common case.  Otherwise,
 * - when fixing a page fault on the spte or write-protecting for dirty
 *   logging, check PT_WRITABLE_MASK.
 *
 * TODO: introduce APIs to split these two cases.
 */
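
/*
 * A minimal illustrative sketch of the two rules above (not in-tree code;
 * SPTE_MMU_WRITEABLE itself is defined in mmu.c):
 *
 *	// may a writable TLB entry still exist for this spte?
 *	if (spte & SPTE_MMU_WRITEABLE)
 *		kvm_flush_remote_tlbs(kvm);
 *
 *	// write-protecting for dirty logging looks at the real W bit
 *	if (spte & PT_WRITABLE_MASK)
 *		spte &= ~PT_WRITABLE_MASK;
 */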
static inline int is_writable_pte(unsigned long pte)
{
	return pte & PT_WRITABLE_MASK;
}

static inline bool is_write_protection(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, X86_CR0_WP);
}

/*
 * Will a fault with a given page-fault error code (pfec) cause a permission
 * fault with the given access (in ACC_* format)?
 */
static inline bool permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
				    unsigned pte_access, unsigned pfec)
{
	int cpl = kvm_x86_ops->get_cpl(vcpu);
	unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);

	/*
	 * If CPL < 3, SMAP protections are disabled if EFLAGS.AC = 1.
	 *
	 * If CPL = 3, SMAP applies to all supervisor-mode data accesses
	 * (these are implicit supervisor accesses) regardless of the value
	 * of EFLAGS.AC.
	 *
	 * This computes (cpl < 3) && (rflags & X86_EFLAGS_AC), leaving
	 * the result in the X86_EFLAGS_AC bit position.  We then insert it
	 * in place of the PFERR_RSVD_MASK bit; this bit will always be zero
	 * in pfec, but it will be one in index if SMAP checks are being
	 * overridden.  It is important to keep this branchless.
	 */
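	/*
	 * Worked example (illustrative; assumes PFERR_RSVD_BIT == 3 and
	 * X86_EFLAGS_AC_BIT == 18, as defined outside this header): with
	 * CPL == 0 and EFLAGS.AC == 1, smap == X86_EFLAGS_AC == (1UL << 18).
	 * Shifting right by 18 - 3 + 1 == 16 yields 1 << 2, which is exactly
	 * the slot PFERR_RSVD_MASK occupies in index once "pfec >> 1" has
	 * dropped the present bit.
	 */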
	unsigned long smap = (cpl - 3) & (rflags & X86_EFLAGS_AC);
	int index = (pfec >> 1) +
		    (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));

	WARN_ON(pfec & PFERR_RSVD_MASK);

	return (mmu->permissions[index] >> pte_access) & 1;
}

void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm);
#endif