root/tools/testing/selftests/kvm/lib/aarch64/processor.c


DEFINITIONS

This source file includes the following definitions.
  1. page_align
  2. pgd_index
  3. pud_index
  4. pmd_index
  5. pte_index
  6. pte_addr
  7. ptrs_per_pgd
  8. ptrs_per_pte
  9. virt_pgd_alloc
  10. _virt_pg_map
  11. virt_pg_map
  12. addr_gva2gpa
  13. pte_dump
  14. virt_dump
  15. vm_create_default
  16. aarch64_vcpu_setup
  17. vcpu_dump
  18. aarch64_vcpu_add_default
  19. vm_vcpu_add_default

// SPDX-License-Identifier: GPL-2.0
/*
 * AArch64 code
 *
 * Copyright (C) 2018, Red Hat, Inc.
 */

#define _GNU_SOURCE /* for program_invocation_name */

#include <linux/compiler.h>

#include "kvm_util.h"
#include "../kvm_util_internal.h"
#include "processor.h"

#define KVM_GUEST_PAGE_TABLE_MIN_PADDR          0x180000
#define DEFAULT_ARM64_GUEST_STACK_VADDR_MIN     0xac0000

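/*
 * Round v up to the next vm->page_size boundary.  Note that a value which is
 * already page aligned is still bumped to the following page, so callers may
 * reserve up to one extra page.
 */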
static uint64_t page_align(struct kvm_vm *vm, uint64_t v)
{
        return (v + vm->page_size) & ~(vm->page_size - 1);
}

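/*
 * Translation table index helpers.  Each level consumes (page_shift - 3)
 * bits of the guest virtual address; the top level is additionally limited
 * by va_bits.
 *
 * Worked example (4K pages, 48-bit VAs, 4 levels): page_shift = 12, so each
 * level indexes 9 bits and pgd/pud/pmd/pte cover VA bits [47:39], [38:30],
 * [29:21] and [20:12] respectively.
 */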
static uint64_t pgd_index(struct kvm_vm *vm, vm_vaddr_t gva)
{
        unsigned int shift = (vm->pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift;
        uint64_t mask = (1UL << (vm->va_bits - shift)) - 1;

        return (gva >> shift) & mask;
}

static uint64_t pud_index(struct kvm_vm *vm, vm_vaddr_t gva)
{
        unsigned int shift = 2 * (vm->page_shift - 3) + vm->page_shift;
        uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;

        TEST_ASSERT(vm->pgtable_levels == 4,
                "Mode %d does not have 4 page table levels", vm->mode);

        return (gva >> shift) & mask;
}

static uint64_t pmd_index(struct kvm_vm *vm, vm_vaddr_t gva)
{
        unsigned int shift = (vm->page_shift - 3) + vm->page_shift;
        uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;

        TEST_ASSERT(vm->pgtable_levels >= 3,
                "Mode %d does not have >= 3 page table levels", vm->mode);

        return (gva >> shift) & mask;
}

static uint64_t pte_index(struct kvm_vm *vm, vm_vaddr_t gva)
{
        uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;
        return (gva >> vm->page_shift) & mask;
}

static uint64_t pte_addr(struct kvm_vm *vm, uint64_t entry)
{
        uint64_t mask = ((1UL << (vm->va_bits - vm->page_shift)) - 1) << vm->page_shift;
        return entry & mask;
}

static uint64_t ptrs_per_pgd(struct kvm_vm *vm)
{
        unsigned int shift = (vm->pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift;
        return 1 << (vm->va_bits - shift);
}

static uint64_t __maybe_unused ptrs_per_pte(struct kvm_vm *vm)
{
        return 1 << (vm->page_shift - 3);
}

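/*
 * Allocate the stage-1 PGD on first use: ptrs_per_pgd(vm) entries of 8 bytes
 * each, rounded up to whole pages and placed at or above
 * KVM_GUEST_PAGE_TABLE_MIN_PADDR in @pgd_memslot.
 */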
void virt_pgd_alloc(struct kvm_vm *vm, uint32_t pgd_memslot)
{
        if (!vm->pgd_created) {
                vm_paddr_t paddr = vm_phy_pages_alloc(vm,
                        page_align(vm, ptrs_per_pgd(vm) * 8) / vm->page_size,
                        KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot);
                vm->pgd = paddr;
                vm->pgd_created = true;
        }
}

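/*
 * Map the page at @vaddr to the page at @paddr, allocating intermediate
 * table pages on demand.  Bits [2:0] of @flags select the MAIR_EL1 attribute
 * index for the leaf descriptor; intermediate and leaf descriptors are
 * written with bits [1:0] set (valid table/page), and the leaf also gets the
 * Access Flag (bit 10) so the guest does not take an Access fault on first
 * use.
 */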
void _virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
                  uint32_t pgd_memslot, uint64_t flags)
{
        uint8_t attr_idx = flags & 7;
        uint64_t *ptep;

        TEST_ASSERT((vaddr % vm->page_size) == 0,
                "Virtual address not on page boundary,\n"
                "  vaddr: 0x%lx vm->page_size: 0x%x", vaddr, vm->page_size);
        TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
                (vaddr >> vm->page_shift)),
                "Invalid virtual address, vaddr: 0x%lx", vaddr);
        TEST_ASSERT((paddr % vm->page_size) == 0,
                "Physical address not on page boundary,\n"
                "  paddr: 0x%lx vm->page_size: 0x%x", paddr, vm->page_size);
        TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
                "Physical address beyond maximum supported,\n"
                "  paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
                paddr, vm->max_gfn, vm->page_size);

        ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, vaddr) * 8;
        if (!*ptep) {
                *ptep = vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot);
                *ptep |= 3;
        }

        switch (vm->pgtable_levels) {
        case 4:
                ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, vaddr) * 8;
                if (!*ptep) {
                        *ptep = vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot);
                        *ptep |= 3;
                }
                /* fall through */
        case 3:
                ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, vaddr) * 8;
                if (!*ptep) {
                        *ptep = vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot);
                        *ptep |= 3;
                }
                /* fall through */
        case 2:
                ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, vaddr) * 8;
                break;
        default:
                TEST_ASSERT(false, "Page table levels must be 2, 3, or 4");
        }

        *ptep = paddr | 3;
        *ptep |= (attr_idx << 2) | (1 << 10) /* Access Flag */;
}

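/*
 * Convenience wrapper around _virt_pg_map() that uses the NORMAL memory
 * attribute (index 4 in DEFAULT_MAIR_EL1).
 *
 * Usage sketch (assumes @gva and @gpa are page aligned and @gpa has already
 * been allocated, e.g. with vm_phy_page_alloc()):
 *
 *	virt_pg_map(vm, gva, gpa, 0);
 */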
void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
                 uint32_t pgd_memslot)
{
        uint64_t attr_idx = 4; /* NORMAL (See DEFAULT_MAIR_EL1) */

        _virt_pg_map(vm, vaddr, paddr, pgd_memslot, attr_idx);
}

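/*
 * Translate @gva to a guest physical address by walking the guest's stage-1
 * page tables from vm->pgd in software; trips the unmapped_gva assertion if
 * the PGD has not been created or the walk fails.
 */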
vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
{
        uint64_t *ptep;

        if (!vm->pgd_created)
                goto unmapped_gva;

        ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, gva) * 8;
        if (!ptep)
                goto unmapped_gva;

        switch (vm->pgtable_levels) {
        case 4:
                ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, gva) * 8;
                if (!ptep)
                        goto unmapped_gva;
                /* fall through */
        case 3:
                ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, gva) * 8;
                if (!ptep)
                        goto unmapped_gva;
                /* fall through */
        case 2:
                ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, gva) * 8;
                if (!ptep)
                        goto unmapped_gva;
                break;
        default:
                TEST_ASSERT(false, "Page table levels must be 2, 3, or 4");
        }

        return pte_addr(vm, *ptep) + (gva & (vm->page_size - 1));

unmapped_gva:
        TEST_ASSERT(false, "No mapping for vm virtual address, "
                    "gva: 0x%lx", gva);
        exit(1);
}

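/*
 * Recursively print the page table entries below @page; @level selects the
 * name used for the table type.  Compiled out unless DEBUG_VM is defined.
 */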
static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent, uint64_t page, int level)
{
#ifdef DEBUG_VM
        static const char * const type[] = { "", "pud", "pmd", "pte" };
        uint64_t pte, *ptep;

        if (level == 4)
                return;

        for (pte = page; pte < page + ptrs_per_pte(vm) * 8; pte += 8) {
                ptep = addr_gpa2hva(vm, pte);
                if (!*ptep)
                        continue;
                printf("%*s%s: %lx: %lx at %p\n", indent, "", type[level], pte, *ptep, ptep);
                pte_dump(stream, vm, indent + 1, pte_addr(vm, *ptep), level + 1);
        }
#endif
}

void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
        int level = 4 - (vm->pgtable_levels - 1);
        uint64_t pgd, *ptep;

        if (!vm->pgd_created)
                return;

        for (pgd = vm->pgd; pgd < vm->pgd + ptrs_per_pgd(vm) * 8; pgd += 8) {
                ptep = addr_gpa2hva(vm, pgd);
                if (!*ptep)
                        continue;
                printf("%*spgd: %lx: %lx at %p\n", indent, "", pgd, *ptep, ptep);
                pte_dump(stream, vm, indent + 1, pte_addr(vm, *ptep), level);
        }
}

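/*
 * Create a VM in the default mode with extra physical pages to cover an
 * estimate of the page tables needed for @extra_mem_pages, load the test
 * binary into it, and add one vCPU that starts executing at @guest_code.
 *
 * Usage sketch (guest_main is provided by the individual test):
 *
 *	struct kvm_vm *vm = vm_create_default(0, 0, guest_main);
 */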
struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages,
                                 void *guest_code)
{
        uint64_t ptrs_per_4k_pte = 512;
        uint64_t extra_pg_pages = (extra_mem_pages / ptrs_per_4k_pte) * 2;
        struct kvm_vm *vm;

        vm = vm_create(VM_MODE_DEFAULT, DEFAULT_GUEST_PHY_PAGES + extra_pg_pages, O_RDWR);

        kvm_vm_elf_load(vm, program_invocation_name, 0, 0);
        vm_vcpu_add_default(vm, vcpuid, guest_code);

        return vm;
}

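/*
 * Initialize @vcpuid with KVM_ARM_VCPU_INIT (falling back to the host's
 * preferred target when @init is NULL or its target is -1), enable FP/ASIMD,
 * and program SCTLR_EL1, TCR_EL1, MAIR_EL1 and TTBR0_EL1 so the guest runs
 * with the MMU on and translations rooted at vm->pgd.
 */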
void aarch64_vcpu_setup(struct kvm_vm *vm, int vcpuid, struct kvm_vcpu_init *init)
{
        struct kvm_vcpu_init default_init = { .target = -1, };
        uint64_t sctlr_el1, tcr_el1;

        if (!init)
                init = &default_init;

        if (init->target == -1) {
                struct kvm_vcpu_init preferred;
                vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, &preferred);
                init->target = preferred.target;
        }

        vcpu_ioctl(vm, vcpuid, KVM_ARM_VCPU_INIT, init);

        /*
         * Enable FP/ASIMD to avoid trapping when accessing Q0-Q15
         * registers, which the variable argument list macros do.
         */
        set_reg(vm, vcpuid, ARM64_SYS_REG(CPACR_EL1), 3 << 20);

        get_reg(vm, vcpuid, ARM64_SYS_REG(SCTLR_EL1), &sctlr_el1);
        get_reg(vm, vcpuid, ARM64_SYS_REG(TCR_EL1), &tcr_el1);

        switch (vm->mode) {
        case VM_MODE_P52V48_4K:
                TEST_ASSERT(false, "AArch64 does not support 4K sized pages "
                                   "with 52-bit physical address ranges");
        case VM_MODE_PXXV48_4K:
                TEST_ASSERT(false, "AArch64 does not support 4K sized pages "
                                   "with ANY-bit physical address ranges");
        case VM_MODE_P52V48_64K:
                tcr_el1 |= 1ul << 14; /* TG0 = 64KB */
                tcr_el1 |= 6ul << 32; /* IPS = 52 bits */
                break;
        case VM_MODE_P48V48_4K:
                tcr_el1 |= 0ul << 14; /* TG0 = 4KB */
                tcr_el1 |= 5ul << 32; /* IPS = 48 bits */
                break;
        case VM_MODE_P48V48_64K:
                tcr_el1 |= 1ul << 14; /* TG0 = 64KB */
                tcr_el1 |= 5ul << 32; /* IPS = 48 bits */
                break;
        case VM_MODE_P40V48_4K:
                tcr_el1 |= 0ul << 14; /* TG0 = 4KB */
                tcr_el1 |= 2ul << 32; /* IPS = 40 bits */
                break;
        case VM_MODE_P40V48_64K:
                tcr_el1 |= 1ul << 14; /* TG0 = 64KB */
                tcr_el1 |= 2ul << 32; /* IPS = 40 bits */
                break;
        default:
                TEST_ASSERT(false, "Unknown guest mode, mode: 0x%x", vm->mode);
        }

        sctlr_el1 |= (1 << 0) | (1 << 2) | (1 << 12) /* M | C | I */;
        /* TCR_EL1 |= IRGN0:WBWA | ORGN0:WBWA | SH0:Inner-Shareable */
        tcr_el1 |= (1 << 8) | (1 << 10) | (3 << 12);
        tcr_el1 |= (64 - vm->va_bits) /* T0SZ */;

        set_reg(vm, vcpuid, ARM64_SYS_REG(SCTLR_EL1), sctlr_el1);
        set_reg(vm, vcpuid, ARM64_SYS_REG(TCR_EL1), tcr_el1);
        set_reg(vm, vcpuid, ARM64_SYS_REG(MAIR_EL1), DEFAULT_MAIR_EL1);
        set_reg(vm, vcpuid, ARM64_SYS_REG(TTBR0_EL1), vm->pgd);
}

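/* Print the vCPU's PSTATE and PC to @stream, indented by @indent spaces. */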
void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent)
{
        uint64_t pstate, pc;

        get_reg(vm, vcpuid, ARM64_CORE_REG(regs.pstate), &pstate);
        get_reg(vm, vcpuid, ARM64_CORE_REG(regs.pc), &pc);

        fprintf(stream, "%*spstate: 0x%.16lx pc: 0x%.16lx\n",
                indent, "", pstate, pc);
}

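/*
 * Add @vcpuid to @vm, allocate a guest stack for it (DEFAULT_STACK_PGS pages
 * for 4K-page VMs, otherwise a single page), then point SP_EL1 at the top of
 * that stack and PC at @guest_code.
 */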
void aarch64_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid,
                              struct kvm_vcpu_init *init, void *guest_code)
{
        size_t stack_size = vm->page_size == 4096 ?
                                        DEFAULT_STACK_PGS * vm->page_size :
                                        vm->page_size;
        uint64_t stack_vaddr = vm_vaddr_alloc(vm, stack_size,
                                        DEFAULT_ARM64_GUEST_STACK_VADDR_MIN, 0, 0);

        vm_vcpu_add(vm, vcpuid);
        aarch64_vcpu_setup(vm, vcpuid, init);

        set_reg(vm, vcpuid, ARM64_CORE_REG(sp_el1), stack_vaddr + stack_size);
        set_reg(vm, vcpuid, ARM64_CORE_REG(regs.pc), (uint64_t)guest_code);
}

void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code)
{
        aarch64_vcpu_add_default(vm, vcpuid, NULL, guest_code);
}
