Minimal code necessary to get a 64 bit VM running. Supports x86-64 and arm64 on Linux and macOS.
// Copyright (C) 2023 Alexander Meißner
//
// Minimal code necessary to get a 64 bit VM running. Supports x86-64 and arm64 on Linux and macOS.
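//
// Build and run (a sketch, not part of the original gist):
//   Linux:  cc vm.c -o vm && ./vm   (requires read/write access to /dev/kvm)
//   macOS:  cc vm.c -o vm -framework Hypervisor, then sign the binary with the
//           com.apple.security.hypervisor entitlement before running it.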
#if !defined(__x86_64__) && !defined(__aarch64__)
#error Unsupported ISA
#endif
#if !defined(__linux__) && !defined(__APPLE__)
#error Unsupported OS
#endif
#include <assert.h>
#include <string.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#ifdef __linux__
#include <unistd.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/kvm.h>
#ifdef __aarch64__
#include <malloc.h>
#include <stddef.h>
#endif
#elif __APPLE__
#ifdef __x86_64__
#include <Hypervisor/hv.h>
#include <Hypervisor/hv_vmx.h>
#include <Hypervisor/hv_arch_vmx.h>
#elif __aarch64__
#include <Hypervisor/Hypervisor.h>
#endif
#endif
#ifdef __x86_64__
// CR0 bits
#define CR0_PE (1U << 0)
#define CR0_NE (1U << 5)
#define CR0_WP (1U << 16)
#define CR0_PG (1U << 31)
// CR4 bits
#define CR4_PAE (1U << 5)
#define CR4_VMXE (1U << 13)
// EFER bits
#define EFER_LME (1U << 8)
#define EFER_LMA (1U << 10)
#define EFER_NXE (1U << 11)
// Page table entry
#define PT_PRE (1UL << 0) // present / valid
#define PT_RW (1UL << 1) // read-write
#define PT_USER (1UL << 2) // unprivileged
#define PT_ACC (1UL << 5) // accessed flag
#define PT_DIRTY (1UL << 6) // write accessed flag
#define PT_PS (1UL << 7) // 1GiB / 2MiB granule
#define PT_G (1UL << 8) // keep in TLB on context switch
#define PT_NX (1UL << 63) // no execute
#elif __aarch64__
// Page table entry
#define PT_BLOCK (1UL << 0) // 2MiB granule
#define PT_PAGE (3UL << 0) // 4KiB granule
#define PT_MEM (0UL << 2) // attribute index: normal memory
#define PT_USER (1UL << 6) // unprivileged
#define PT_RO (1UL << 7) // read-only
#define PT_OSH (2UL << 8) // outer shareable
#define PT_ISH (3UL << 8) // inner shareable
#define PT_ACC (1UL << 10) // accessed flag
#define PT_NG (1UL << 11) // remove from TLB on context switch
#define PT_CONT (1UL << 52) // contiguous
#define PT_PNX (1UL << 53) // no execute (privileged)
#define PT_NX (1UL << 54) // no execute
// MSRs
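// Each ID packs the register's (Op0, Op1, CRn, CRm, Op2) encoding as
// (Op0 << 14) | (Op1 << 11) | (CRn << 7) | (CRm << 3) | Op2, which is the
// index layout KVM expects for KVM_REG_ARM64_SYSREG.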
#define ID_AA64MMFR0_EL1 0xC038
#define SCTLR_EL1 0xC080
#define TTBR0_EL1 0xC100
#define TTBR1_EL1 0xC101
#define TCR_EL1 0xC102
#define MAIR_EL1 0xC510
#endif
struct vm {
#ifdef __linux__
    int kvm_fd, fd;
#elif __APPLE__
    uint8_t initialized;
#endif
};
#ifdef __linux__
void vm_ctl(struct vm* vm, uint32_t request, uint64_t param) {
    assert(ioctl(vm->fd, request, param) >= 0);
}
#endif
void create_vm(struct vm* vm) {
#ifdef __linux__
    vm->kvm_fd = open("/dev/kvm", O_RDWR);
    assert(vm->kvm_fd >= 0);
    int api_ver = ioctl(vm->kvm_fd, KVM_GET_API_VERSION, 0);
    assert(api_ver == KVM_API_VERSION);
    vm->fd = ioctl(vm->kvm_fd, KVM_CREATE_VM, 0);
    assert(vm->fd >= 0);
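    // Note: KVM_CHECK_EXTENSION returns 0 (not an error) for unsupported
    // extensions, so these asserts only verify that the ioctl itself succeeded.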
    vm_ctl(vm, KVM_CHECK_EXTENSION, KVM_CAP_IMMEDIATE_EXIT);
    vm_ctl(vm, KVM_CHECK_EXTENSION, KVM_CAP_USER_MEMORY);
#ifdef __aarch64__
    vm_ctl(vm, KVM_CHECK_EXTENSION, KVM_CAP_ONE_REG);
    vm_ctl(vm, KVM_CHECK_EXTENSION, KVM_CAP_ARM_PSCI_0_2);
#endif
#elif __APPLE__
    vm->initialized = 1;
#ifdef __x86_64__
    assert(hv_vm_create(HV_VM_DEFAULT) == 0);
#elif __aarch64__
    assert(hv_vm_create(NULL) == 0);
#endif
#endif
}
void map_memory_of_vm(struct vm* vm, uint64_t guest_phys_addr, uint64_t vm_mem_size, void* host_addr) {
#ifdef __linux__
    struct kvm_userspace_memory_region memreg;
    memreg.slot = 0;
    memreg.flags = 0;
    memreg.guest_phys_addr = guest_phys_addr;
    memreg.memory_size = vm_mem_size;
    memreg.userspace_addr = (uint64_t)host_addr;
    vm_ctl(vm, KVM_SET_USER_MEMORY_REGION, (uint64_t)&memreg);
#elif __APPLE__
    assert(vm->initialized);
    assert(hv_vm_map(host_addr, guest_phys_addr, vm_mem_size, HV_MEMORY_READ | HV_MEMORY_WRITE | HV_MEMORY_EXEC) == 0);
#endif
}
void unmap_memory_of_vm(struct vm* vm, uint64_t guest_phys_addr, uint64_t vm_mem_size) {
#ifdef __linux__
    (void)vm_mem_size;
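    // KVM has no separate unmap ioctl; re-registering the slot with a
    // memory_size of 0 deletes the mapping.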
    struct kvm_userspace_memory_region memreg;
    memreg.slot = 0;
    memreg.flags = 0;
    memreg.guest_phys_addr = guest_phys_addr;
    memreg.memory_size = 0;
    memreg.userspace_addr = 0;
    vm_ctl(vm, KVM_SET_USER_MEMORY_REGION, (uint64_t)&memreg);
#elif __APPLE__
    assert(vm->initialized);
    assert(hv_vm_unmap(guest_phys_addr, vm_mem_size) == 0);
#endif
}
void destroy_vm(struct vm* vm) {
#ifdef __linux__
    assert(close(vm->fd) >= 0);
    assert(close(vm->kvm_fd) >= 0);
#elif __APPLE__
    assert(vm->initialized);
    assert(hv_vm_destroy() == 0);
#endif
}
struct vcpu {
#ifdef __linux__
    int fd;
    struct kvm_run* kvm_run;
#elif __APPLE__
#ifdef __x86_64__
    hv_vcpuid_t id;
#elif __aarch64__
    hv_vcpu_t id;
    hv_vcpu_exit_t* exit;
#endif
#endif
};
#ifdef __linux__
void vcpu_ctl(struct vcpu* vcpu, uint32_t request, uint64_t param) {
    assert(ioctl(vcpu->fd, request, param) >= 0);
}
#ifdef __aarch64__
#define REG_ID(field) (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | (offsetof(struct kvm_regs, field) / sizeof(uint32_t)))
#define MSR_ID(field) (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM64_SYSREG | (field))
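// KVM indexes the core registers in 32-bit units, hence the division of the
// byte offset by sizeof(uint32_t) above.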
uint64_t rreg(struct vcpu* vcpu, uint64_t id) {
    uint64_t v;
    struct kvm_one_reg reg;
    reg.id = id;
    reg.addr = (uint64_t)&v;
    vcpu_ctl(vcpu, KVM_GET_ONE_REG, (uint64_t)&reg);
    return v;
}
void wreg(struct vcpu* vcpu, uint64_t id, uint64_t v) {
    struct kvm_one_reg reg;
    reg.id = id;
    reg.addr = (uint64_t)&v;
    vcpu_ctl(vcpu, KVM_SET_ONE_REG, (uint64_t)&reg);
}
#endif
#elif __APPLE__
#ifdef __x86_64__
uint64_t rvmcs(struct vcpu* vcpu, uint32_t id) {
    uint64_t v;
    assert(hv_vmx_vcpu_read_vmcs(vcpu->id, id, &v) == 0);
    return v;
}
void wvmcs(struct vcpu* vcpu, uint32_t id, uint64_t v) {
    assert(hv_vmx_vcpu_write_vmcs(vcpu->id, id, v) == 0);
}
#endif
#endif
void create_vcpu(struct vm* vm, struct vcpu* vcpu, uint64_t page_table) {
#ifdef __linux__
    vcpu->fd = ioctl(vm->fd, KVM_CREATE_VCPU, 0);
    assert(vcpu->fd >= 0);
    size_t vcpu_mmap_size = (size_t)ioctl(vm->kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
    assert(vcpu_mmap_size > 0);
    vcpu->kvm_run = mmap(NULL, vcpu_mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED, vcpu->fd, 0);
    assert(vcpu->kvm_run != MAP_FAILED);
#ifdef __aarch64__
    struct kvm_vcpu_init vcpu_init;
    vm_ctl(vm, KVM_ARM_PREFERRED_TARGET, (uint64_t)&vcpu_init);
    vcpu_init.features[0] |= 1 << KVM_ARM_VCPU_PSCI_0_2;
    vcpu_ctl(vcpu, KVM_ARM_VCPU_INIT, (uint64_t)&vcpu_init);
    uint64_t mmfr = rreg(vcpu, MSR_ID(ID_AA64MMFR0_EL1));
#endif
#elif __APPLE__
    assert(vm->initialized);
#ifdef __x86_64__
    assert(hv_vcpu_create(&vcpu->id, HV_VCPU_DEFAULT) == 0);
    // Configure control registers
    wvmcs(vcpu, VMCS_CTRL_CPU_BASED, CPU_BASED_HLT | CPU_BASED_CR8_LOAD | CPU_BASED_CR8_STORE | CPU_BASED_SECONDARY_CTLS);
    wvmcs(vcpu, VMCS_CTRL_CPU_BASED2, 0);
    wvmcs(vcpu, VMCS_CTRL_VMEXIT_CONTROLS, 0);
    wvmcs(vcpu, VMCS_CTRL_VMENTRY_CONTROLS, VMENTRY_GUEST_IA32E);
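    // Note: a more defensive setup would derive the allowed control bits via
    // hv_vmx_read_capability(HV_VMX_CAP_PROCBASED, ...) and friends instead of
    // writing fixed values; the constants above are the minimum this demo needs.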
    // Enable MSR access
    assert(hv_vcpu_enable_native_msr(vcpu->id, 0xc0000102, 1) == 0); // MSR_KERNELGSBASE
#elif __aarch64__
    assert(hv_vcpu_create(&vcpu->id, &vcpu->exit, NULL) == 0);
    uint64_t mmfr;
    assert(hv_vcpu_get_sys_reg(vcpu->id, HV_SYS_REG_ID_AA64MMFR0_EL1, &mmfr) == 0);
#endif
#endif
#ifdef __x86_64__
    // Configure segmentation
    uint64_t descriptors[3] = {
        0x0000000000000000UL,
        0x00209B0000000000UL, // code segment
        0x00008B0000000FFFUL, // task state segment
    };
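    // Decoded: descriptor 1 is a long mode code segment (access byte 0x9B:
    // present, ring 0, execute/read, accessed; flags nibble 0x2 sets the L bit),
    // descriptor 2 a busy 64 bit TSS (access byte 0x8B) with limit 0xFFF.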
#ifdef __linux__
    struct kvm_sregs sregs;
    vcpu_ctl(vcpu, KVM_GET_SREGS, (uint64_t)&sregs);
    uint16_t selectors[8] = { 1, 0, 0, 0, 0, 0, 2, 0 }; // CS, DS, ES, FS, GS, SS, TR, LDT
#elif __APPLE__
    uint16_t selectors[8] = { 0, 1, 0, 0, 0, 0, 0, 2 }; // ES, CS, SS, DS, FS, GS, LDT, TR
#endif
    for(uint32_t segment_index = 0UL; segment_index < sizeof selectors / sizeof *selectors; ++segment_index) {
        uint16_t selector = selectors[segment_index];
        uint64_t segment_descriptor = descriptors[selector];
        uint64_t base = (segment_descriptor >> 16 & 0xFFFFFFL) | ((segment_descriptor >> 56 & 0xFFL) << 24);
        uint64_t limit = (segment_descriptor & 0xFFFFL) | ((segment_descriptor >> 48 & 0xFL) << 16);
        uint64_t access_rights = (segment_descriptor >> 40 & 0xFFL) | ((segment_descriptor >> 52 & 0xFL) << 12);
#ifdef __linux__
        struct kvm_segment* segment = &((struct kvm_segment*)&sregs)[segment_index];
        segment->base = base;
        segment->limit = (uint32_t)limit;
        segment->selector = (uint16_t)(selector << 3);
        segment->present = (uint8_t)(access_rights >> 7);
        segment->type = (uint8_t)access_rights;
        segment->dpl = (uint8_t)(access_rights >> 5);
        segment->db = (uint8_t)(access_rights >> 14);
        segment->s = (uint8_t)(access_rights >> 4);
        segment->l = (uint8_t)(access_rights >> 13);
        segment->g = (uint8_t)(access_rights >> 15);
#elif __APPLE__
        if(selector == 0UL)
            access_rights = 0x10000UL; // mark the segment unusable
        wvmcs(vcpu, VMCS_GUEST_ES + segment_index * 2UL, selector * 8UL);
        wvmcs(vcpu, VMCS_GUEST_ES_BASE + segment_index * 2UL, base);
        wvmcs(vcpu, VMCS_GUEST_ES_LIMIT + segment_index * 2UL, limit);
        wvmcs(vcpu, VMCS_GUEST_ES_AR + segment_index * 2UL, access_rights);
#endif
    }
| } | |
| // Configure system registers | |
| uint64_t cr0 = CR0_PG | CR0_WP | CR0_NE | CR0_PE; | |
| uint64_t cr4 = CR4_PAE; | |
| uint64_t efer = EFER_NXE | EFER_LMA | EFER_LME; | |
| #ifdef __linux__ | |
| sregs.cr0 = cr0; | |
| sregs.cr3 = page_table; | |
| sregs.cr4 = cr4; | |
| sregs.efer = efer; | |
| vcpu_ctl(vcpu, KVM_SET_SREGS, (uint64_t)&sregs); | |
| #elif __APPLE__ | |
| wvmcs(vcpu, VMCS_GUEST_CR0, cr0); | |
| wvmcs(vcpu, VMCS_GUEST_CR3, page_table); | |
| wvmcs(vcpu, VMCS_GUEST_CR4, CR4_VMXE | cr4); | |
| wvmcs(vcpu, VMCS_GUEST_IA32_EFER, efer); | |
| #endif | |
| #elif __aarch64__ | |
| assert((mmfr & 0xF) >= 1); // At least 36 bits physical address range | |
| assert(((mmfr >> 28) & 0xF) != 0xF); // 4KB granule supported | |
    uint64_t mair_el1 =
        (0xFFUL << 0); // PT_MEM: Normal Memory, Inner Write-back non-transient (RW), Outer Write-back non-transient (RW).
    uint64_t tcr_el1 =
        (5UL << 32) | // IPS=48 bits (256TB)
        (1UL << 23) | // EPD1 disable higher half
        (0UL << 14) | // TG0=4k
        (3UL << 12) | // SH0=3 inner
        (1UL << 10) | // ORGN0=1 write back
        (1UL << 8) |  // IRGN0=1 write back
        (0UL << 7) |  // EPD0 enable lower half
        (16UL << 0);  // T0SZ=16, 4 levels
    uint64_t sctlr_el1 =
        0xC00800UL |  // set mandatory reserved bits
        (1UL << 0);   // enable MMU
    uint64_t pstate =
        (5UL << 0);   // PSR_MODE_EL1H
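    // PSR_MODE_EL1H selects EL1 with its dedicated stack pointer SP_EL1.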
#ifdef __linux__
    wreg(vcpu, MSR_ID(MAIR_EL1), mair_el1);
    wreg(vcpu, MSR_ID(TCR_EL1), tcr_el1);
    wreg(vcpu, MSR_ID(TTBR0_EL1), page_table);
    wreg(vcpu, MSR_ID(TTBR1_EL1), page_table);
    wreg(vcpu, MSR_ID(SCTLR_EL1), sctlr_el1);
    wreg(vcpu, REG_ID(regs.pstate), pstate);
#elif __APPLE__
    assert(hv_vcpu_set_sys_reg(vcpu->id, HV_SYS_REG_MAIR_EL1, mair_el1) == 0);
    assert(hv_vcpu_set_sys_reg(vcpu->id, HV_SYS_REG_TCR_EL1, tcr_el1) == 0);
    assert(hv_vcpu_set_sys_reg(vcpu->id, HV_SYS_REG_TTBR0_EL1, page_table) == 0);
    assert(hv_vcpu_set_sys_reg(vcpu->id, HV_SYS_REG_TTBR1_EL1, page_table) == 0);
    assert(hv_vcpu_set_sys_reg(vcpu->id, HV_SYS_REG_SCTLR_EL1, sctlr_el1) == 0);
    assert(hv_vcpu_set_reg(vcpu->id, HV_REG_CPSR, pstate) == 0);
#endif
#endif
}
void set_program_pointers_of_vcpu(struct vcpu* vcpu, uint64_t instruction_pointer, uint64_t stack_pointer) {
#ifdef __x86_64__
#ifdef __linux__
    struct kvm_regs regs;
    memset(&regs, 0, sizeof(regs));
    regs.rflags = 1L << 1;
    regs.rip = instruction_pointer;
    regs.rsp = stack_pointer;
    vcpu_ctl(vcpu, KVM_SET_REGS, (uint64_t)&regs);
#elif __APPLE__
    wvmcs(vcpu, VMCS_GUEST_RFLAGS, 1L << 1);
    wvmcs(vcpu, VMCS_GUEST_RIP, instruction_pointer);
    wvmcs(vcpu, VMCS_GUEST_RSP, stack_pointer);
#endif
#elif __aarch64__
#ifdef __linux__
    wreg(vcpu, REG_ID(regs.pc), instruction_pointer);
    wreg(vcpu, REG_ID(sp_el1), stack_pointer);
#elif __APPLE__
    assert(hv_vcpu_set_reg(vcpu->id, HV_REG_PC, instruction_pointer) == 0);
    assert(hv_vcpu_set_sys_reg(vcpu->id, HV_SYS_REG_SP_EL1, stack_pointer) == 0);
#endif
#endif
}
uint32_t run_vcpu(struct vcpu* vcpu) {
#ifdef __linux__
    vcpu_ctl(vcpu, KVM_RUN, 0);
    return vcpu->kvm_run->exit_reason;
#elif __APPLE__
    assert(hv_vcpu_run(vcpu->id) == 0);
#ifdef __x86_64__
    return (uint32_t)rvmcs(vcpu, VMCS_RO_EXIT_REASON);
#elif __aarch64__
    return vcpu->exit->reason;
#endif
#endif
}
void destroy_vcpu(struct vm* vm, struct vcpu* vcpu) {
#ifdef __linux__
    size_t vcpu_mmap_size = (size_t)ioctl(vm->kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
    assert(vcpu_mmap_size > 0);
    assert(munmap(vcpu->kvm_run, vcpu_mmap_size) >= 0);
    assert(close(vcpu->fd) >= 0);
#elif __APPLE__
    assert(vm->initialized);
    assert(hv_vcpu_destroy(vcpu->id) == 0);
#endif
}
int main(int argc, char** argv) {
    (void)argc;
    (void)argv;
    // Define memory layout
    uint64_t page_table = 0x0000UL;
    uint64_t code_page = 0x4000UL;
    uint64_t stack_page = 0x5000UL;
    uint64_t vm_mem_size = 0x100000UL;
    uint64_t guest_instruction_pointer = 0x0040UL;
    uint64_t guest_stack_pointer = 0x2000UL;
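    // Guest-physical layout: 0x0000-0x3FFF holds the four page table levels,
    // 0x4000 the code page and 0x5000 the stack page. The guest-virtual entry
    // point 0x40 and stack top 0x2000 are translated by the tables set up below.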
    // Allocate page aligned memory for the virtual machine
    uint8_t* vm_mem = valloc(vm_mem_size);
    assert(vm_mem);
    // Assemble guest machine code
#ifdef __x86_64__
    uint8_t* instructions = vm_mem + code_page + guest_instruction_pointer;
    instructions[0] = 0x90; // nop
    instructions[1] = 0x50; // push rax
    instructions[2] = 0xF4; // hlt
#elif __aarch64__
    uint32_t* instructions = (uint32_t*)(vm_mem + code_page + guest_instruction_pointer);
    instructions[0] = 0xD503201F; // nop
    instructions[1] = 0xF81F8FE0; // str x0, [sp, #-8]! // push x0
    instructions[2] = 0x58000040; // ldr x0, #8
    instructions[3] = 0xD4000002; // hvc #0
    instructions[4] = 0x84000008; // constant 0x84000008 // SYSTEM_OFF function ID
#endif
| #endif | |
| // Configure page table | |
| #ifdef __x86_64__ | |
| *(uint64_t*)(vm_mem + page_table + 0x0000UL) = (page_table + 0x1000UL) | PT_RW | PT_PRE; // Level 4, Entry 0 | |
| *(uint64_t*)(vm_mem + page_table + 0x1000UL) = (page_table + 0x2000UL) | PT_RW | PT_PRE; // Level 3, Entry 0 | |
| *(uint64_t*)(vm_mem + page_table + 0x2000UL) = (page_table + 0x3000UL) | PT_RW | PT_PRE; // Level 2, Entry 0 | |
| *(uint64_t*)(vm_mem + page_table + 0x3000UL) = code_page | PT_PRE; // Level 1, Entry 0 | |
| *(uint64_t*)(vm_mem + page_table + 0x3008UL) = stack_page | PT_NX | PT_RW | PT_PRE; // Level 1, Entry 1 | |
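    // Virtual page 0 (present, executable, read-only) maps to code_page;
    // virtual page 1 (writable, no-execute) maps to stack_page, so the push at
    // RSP 0x2000 writes into the last eight bytes of stack_page.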
#elif __aarch64__
    *(uint64_t*)(vm_mem + page_table + 0x0000UL) = (page_table + 0x1000UL) | PT_ISH | PT_ACC | PT_PAGE; // Level 4, Entry 0
    *(uint64_t*)(vm_mem + page_table + 0x1000UL) = (page_table + 0x2000UL) | PT_ISH | PT_ACC | PT_PAGE; // Level 3, Entry 0
    *(uint64_t*)(vm_mem + page_table + 0x2000UL) = (page_table + 0x3000UL) | PT_ISH | PT_ACC | PT_PAGE; // Level 2, Entry 0
    *(uint64_t*)(vm_mem + page_table + 0x3000UL) = code_page | PT_RO | PT_ISH | PT_ACC | PT_PAGE; // Level 1, Entry 0
    *(uint64_t*)(vm_mem + page_table + 0x3008UL) = stack_page | PT_NX | PT_ISH | PT_ACC | PT_PAGE; // Level 1, Entry 1
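    // Same virtual layout as on x86-64: page 0 is the read-only, executable
    // code page and page 1 the writable, no-execute stack page.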
#endif
    // Configure vm and vcpu
    struct vm vm;
    create_vm(&vm);
    map_memory_of_vm(&vm, 0, vm_mem_size, vm_mem);
    struct vcpu vcpu;
    create_vcpu(&vm, &vcpu, page_table);
    set_program_pointers_of_vcpu(&vcpu, guest_instruction_pointer, guest_stack_pointer);
    // Run
    int stop = 0;
    while(!stop) {
        printf("Run\n");
        uint32_t exit_reason = run_vcpu(&vcpu);
        switch(exit_reason) {
#ifdef __linux__
#ifdef __x86_64__
            case KVM_EXIT_HLT:
                printf("HLT\n");
                stop = 1;
                break;
#elif __aarch64__
            case KVM_EXIT_SYSTEM_EVENT:
                switch(vcpu.kvm_run->system_event.type) {
                    case KVM_SYSTEM_EVENT_SHUTDOWN:
                        printf("EVENT_SHUTDOWN\n");
                        break;
                    case KVM_SYSTEM_EVENT_RESET:
                        printf("EVENT_RESET\n");
                        break;
                    case KVM_SYSTEM_EVENT_CRASH:
                        printf("EVENT_CRASH\n");
                        break;
                    default:
                        printf("Unhandled KVM_EXIT_SYSTEM_EVENT\n");
                        break;
                }
                stop = 1;
                break;
#endif
#elif __APPLE__
#ifdef __x86_64__
            case VMX_REASON_HLT:
                printf("HLT\n");
                stop = 1;
                break;
            case VMX_REASON_IRQ:
                printf("IRQ\n");
                break;
            case VMX_REASON_EPT_VIOLATION:
                printf("EPT_VIOLATION\n");
                break;
#elif __aarch64__
            case HV_EXIT_REASON_CANCELED:
                printf("CANCELED\n");
                stop = 1;
                break;
            case HV_EXIT_REASON_EXCEPTION:
                printf("EXCEPTION\n");
                stop = 1;
                break;
            case HV_EXIT_REASON_VTIMER_ACTIVATED:
                printf("VTIMER_ACTIVATED\n");
                break;
            case HV_EXIT_REASON_UNKNOWN:
                printf("UNKNOWN\n");
                stop = 1;
                break;
#endif
#endif
            default:
                fprintf(stderr, "Unexpected exit %u %d\n", exit_reason & 0xFFFFFF, (uint8_t)(exit_reason >> 31));
                stop = 1;
                break;
        }
    }
    // Check result
    guest_stack_pointer -= 8; // Should have decreased by 8 bytes. This proves that it ran in 64 bit mode.
#ifdef __x86_64__
#ifdef __linux__
    struct kvm_regs regs;
    vcpu_ctl(&vcpu, KVM_GET_REGS, (uint64_t)&regs);
    assert(regs.rsp == guest_stack_pointer);
#elif __APPLE__
    assert(rvmcs(&vcpu, VMCS_GUEST_RSP) == guest_stack_pointer);
#endif
#elif __aarch64__
#ifdef __linux__
    assert(rreg(&vcpu, REG_ID(sp_el1)) == guest_stack_pointer);
#elif __APPLE__
    uint64_t stack_pointer;
    assert(hv_vcpu_get_sys_reg(vcpu.id, HV_SYS_REG_SP_EL1, &stack_pointer) == 0);
    assert(stack_pointer == guest_stack_pointer);
#endif
#endif
    // Cleanup
    destroy_vcpu(&vm, &vcpu);
    unmap_memory_of_vm(&vm, 0, vm_mem_size);
    destroy_vm(&vm);
    free(vm_mem);
    printf("Done\n");
    return 0;
}
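If everything works, the program should print Run, a line naming the final VM exit (HLT on x86-64, EVENT_SHUTDOWN on Linux/arm64, EXCEPTION on macOS/arm64), and Done; the stack pointer assertion then confirms the guest pushed eight bytes, i.e. that it really executed in 64 bit mode.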