aarch64/kernel: Add synchronous exception entry logic

To handle faults such as data aborts, alignment faults, or supervisor
calls, the CPU must transition from the guest's context into a
privileged exception handler. This patch emulates the hardware sequence
for this entry process.

1. The vcpu_state_t struct includes the essential EL1 system registers
   required for exception handling (ELR_EL1, SPSR_EL1, ESR_EL1, FAR_EL1,
   and VBAR_EL1).

2. A new function, take_synchronous_exception(), is introduced. It
   models the requirements for entering an exception targeting EL1:
      - Saves the return address (PC) into ELR_EL1.
   - Saves the current process state (PSTATE) into SPSR_EL1.
      - Constructs the Exception Syndrome Register (ESR_EL1) from the
        provided Exception Class and ISS.
      - Saves the faulting address to FAR_EL1 for data aborts.
      - Updates the live PSTATE to a safe state for the handler.

This implementation is intentionally partial. The final step of updating the
PC to jump to a handler in the guest's vector table (using VBAR_EL1) is
stubbed out. Because the vector table contains assembly instructions, a
functional instruction decoder is required before the exception entry path
can be completed.

Signed-off-by: Ronald Caesar <github43132@proton.me>
This commit is contained in:
Ronald Caesar 2025-08-16 13:11:40 -04:00
parent 556ace64e8
commit 65f589e853
7 changed files with 326 additions and 364 deletions

View file

@ -1,71 +0,0 @@
// Copyright 2025 Pound Emulator Project. All rights reserved.
#include "jit.h"
#include <rem.h>
#ifdef WIN32
#include <Windows.h>
#else
#include <sys/mman.h>
#endif
#include <vector>
// Signature of the generated code: no arguments; the computed value is left
// in the host integer return register (rax / x0) and read back below.
using JitFunc = void (*)();

/*
 * translate_and_run() - Translate a couple of mock guest instructions from
 * cpu.memory into host machine code, execute the buffer, and store the
 * result in cpu.regs[0].
 *
 * NOTE(review): prototype-quality code (deleted by this commit):
 *  - the VirtualAlloc/mmap return value is never checked for failure, and
 *    the 64-byte executable buffer is never freed/unmapped (leaked each call);
 *  - the emitted opcodes (0x48 0xB8 = mov rax, imm64; 0x48 0x05 = add rax,
 *    imm32; 0xC3 = ret) are x86-64, yet the __aarch64__ path below still
 *    branches to them - presumably only ever exercised on x86-64 hosts;
 *  - a read/write/execute mapping may be rejected on W^X-enforcing platforms.
 */
void JIT::translate_and_run(CPU& cpu)
{
// TODO: Create REM Context
create_rem_context(nullptr, nullptr, nullptr, nullptr, nullptr);
// Allocate a small RWX buffer to hold the generated code.
#ifdef WIN32
u8* code = (u8*)VirtualAlloc(NULL, 64, MEM_COMMIT | MEM_RESERVE, PAGE_EXECUTE_READWRITE);
#else
u8* code = (u8*)mmap(nullptr, 64, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_ANON, -1, 0);
#endif
size_t offset = 0;
// Decode mock instructions from cpu.memory
if (cpu.memory[0] == 0x05)
{ // MOVZ placeholder
code[offset++] = 0x48; // mov rax, imm64
code[offset++] = 0xB8;
u64 imm = 5;
std::memcpy(&code[offset], &imm, sizeof(imm));
offset += 8;
}
if (cpu.memory[4] == 0x03)
{ // ADD placeholder
code[offset++] = 0x48; // add rax, imm32
code[offset++] = 0x05;
u32 addval = 3;
std::memcpy(&code[offset], &addval, sizeof(addval));
offset += 4;
}
code[offset++] = 0xC3; // ret
// Execute the generated buffer and capture the host return register.
JitFunc fn = reinterpret_cast<JitFunc>(code);
u64 result;
#if defined(__x86_64__)
asm volatile(
"call *%1\n"
"mov %%rax, %0\n"
: "=r"(result)
: "r"(fn)
: "%rax");
#elif defined(__aarch64__)
asm volatile("blr %1\n"
"mov %0, x0\n"
: "=r"(result)
: "r"(fn)
: "x0");
#endif
cpu.regs[0] = result;
}

View file

@ -1,11 +0,0 @@
// Copyright 2025 Pound Emulator Project. All rights reserved.
#pragma once
#include "aarch64/isa.h"
/*
 * JIT - Minimal just-in-time translator stub for the legacy CPU mock.
 * See translate_and_run() for details; part of the prototype removed
 * by this commit.
 */
class JIT
{
public:
// Translate mock instructions from cpu.memory, run them, and store the
// result in cpu.regs[0].
void translate_and_run(CPU& cpu);
};

View file

@ -1,113 +1,57 @@
#include "isa.h"
#include "Base/Assert.h"
#include "memory.h"
#include "memory/arena.h"
// TODO(GloriousTacoo:aarch64) Implement big to little endian conversion for guest_mem read and write functions.
namespace pound::aarch64
{
static inline uint8_t* gpa_to_hva(guest_memory_t* memory, uint64_t gpa)
/*
 * take_synchronous_exception() - Emulate the hardware sequence for taking a
 * synchronous exception targeting EL1.
 * @vcpu: Pointer to the vCPU state to modify.
 * @exception_class: 6-bit Exception Class (EC) written to ESR_EL1[31:26].
 * @iss: 25-bit Instruction Specific Syndrome written to ESR_EL1[24:0].
 * @faulting_address: Address recorded in FAR_EL1; only meaningful for Data
 *                    Aborts, pass 0 for other exception types.
 *
 * Saves the return state (ELR_EL1/SPSR_EL1), builds the syndrome register,
 * records the faulting address for data aborts, and moves PSTATE into a
 * known-safe EL1h state with asynchronous exceptions masked. The final
 * branch through the guest's vector table (VBAR_EL1) is intentionally left
 * as a TODO until the instruction decoder exists.
 */
void take_synchronous_exception(vcpu_state_t* vcpu, uint8_t exception_class, uint32_t iss, uint64_t faulting_address)
{
    ASSERT(nullptr != vcpu);
    /* An EC holds 6 bits. (Fix: the mask was the decimal literal 11000000,
     * not the intended binary mask 0b11000000.) */
    ASSERT(0 == (exception_class & 0b11000000));
    /* An ISS holds 25 bits. */
    ASSERT(0 == (iss & 0xFE000000));

    /* Save the return address and the interrupted PSTATE so the guest can
     * resume after handling. For a synchronous fault, ELR_EL1 holds the
     * address of the faulting instruction itself. */
    vcpu->elr_el1 = vcpu->pc;
    vcpu->spsr_el1 = vcpu->pstate;

    /* ESR_EL1 layout:
     *   Bits [31:26] are the Exception Class (EC).
     *   Bit  [25]    is the Instruction Length (IL), 1 for a 32-bit instruction.
     *   Bits [24:0]  are the Instruction Specific Syndrome (ISS). */
    const uint64_t esr_il_bit = (1ULL << 25);
    vcpu->esr_el1 = ((uint64_t)exception_class << 26) | esr_il_bit | iss;

    /* FAR_EL1 is only meaningful for aborts that fault on an address. */
    if ((exception_class == EC_DATA_ABORT) || (exception_class == EC_DATA_ABORT_LOWER_EL))
    {
        vcpu->far_el1 = faulting_address;
    }

    /* The CPU state must be changed to a known safe state for handling. */
    vcpu->pstate &= ~0xF0000000;

    /* Mask asynchronous exceptions (IRQ, FIQ, SError). We don't want the
     * exception handler to be interrupted by a less important event. */
    const uint32_t PSTATE_IRQ_BIT = (1 << 7);
    const uint32_t PSTATE_FIQ_BIT = (1 << 6);
    const uint32_t PSTATE_SERROR_BIT = (1 << 8);
    vcpu->pstate |= (PSTATE_IRQ_BIT | PSTATE_FIQ_BIT | PSTATE_SERROR_BIT);

    /* Set the target exception level to EL1. The mode field M[3:0] is set
     * to 0b0101 for EL1h (using SP_EL1). (page 913 in manual) */
    const uint32_t PSTATE_EL_MASK = 0b1111;
    vcpu->pstate &= ~PSTATE_EL_MASK;
    const uint32_t PSTATE_EL1H = 0b0101;
    vcpu->pstate |= PSTATE_EL1H;

    /* TODO(GloriousTacoo:arm): DO NOT IMPLEMENT UNTIL THE INSTRUCTION
     * DECODER IS FINISHED.
     *
     * Create an Exception Vector Table, determine
     * the address of the exception handler, then update the PC.
     *
     * vcpu->pc = vcpu->vbar_el1 + offset; */
}
/** THIS FUNCTION WAS MADE WITH AI AND IS CALLED WHEN RUNNING THE CPU TEST FROM THE GUI!
@ -124,7 +68,7 @@ static inline void guest_mem_writeq(guest_memory_t* memory, uint64_t gpa, uint64
* @param memory A pointer to an initialized guest_memory_t struct.
* @return true if all tests pass, false otherwise.
*/
bool test_guest_ram_access(guest_memory_t* memory)
bool test_guest_ram_access(pound::aarch64::memory::guest_memory_t* memory)
{
LOG_INFO(Memory, "--- [ Starting Guest RAM Access Test ] ---");
if (memory == nullptr || memory->base == nullptr || memory->size < 4096)
@ -211,10 +155,10 @@ bool test_guest_ram_access(guest_memory_t* memory)
void cpuTest()
{
vcpu_state_t vcpu_states[CPU_CORES] = {};
memory::arena_t guest_memory_arena = memory::arena_init(GUEST_RAM_SIZE);
pound::memory::arena_t guest_memory_arena = pound::memory::arena_init(GUEST_RAM_SIZE);
ASSERT(nullptr != guest_memory_arena.data);
guest_memory_t guest_ram = {};
pound::aarch64::memory::guest_memory_t guest_ram = {};
guest_ram.base = static_cast<uint8_t*>(guest_memory_arena.data);
guest_ram.size = guest_memory_arena.capacity;

View file

@ -19,18 +19,34 @@ namespace pound::aarch64
#define GUEST_RAM_SIZE 10240 // 10KiB
#define CPU_CORES 8
/* Data Abort exception taken without a change in Exception level. */
#define EC_DATA_ABORT 0b100101
/* Data Abort exception from a lower Exception level. */
#define EC_DATA_ABORT_LOWER_EL 0b100100
/*
* vcpu_state_t - Holds the architectural and selected system-register state for an emulated vCPU.
* @v: 128-bit SIMD/FP vector registers V0V31.
* @r: General-purpose registers X0X31 (X31 as SP/ZR as appropriate).
* @pc: Program Counter.
* @pstate: Process State Register (NZCV, DAIF, EL, etc.).
*
* System registers (subset mirrored for fast-path emulation at EL0):
* - ctr_el0, dczid_el0: Cache/type identification.
* - tpidrro_el0, tpidr_el0: Thread pointers (host-mapped TLS pointers).
* - cntfrq_el0, cntpct_el0, cntvct_el0, cntv_ctl_el0, cntv_cval_el0: Generic timers/counters.
* - pmccntr_el0, pmcr_el0: PMU cycle counter and control.
* @v: 128-bit SIMD/FP vector registers V0V31.
* @r: General-purpose registers X0X31 (X31 as SP/ZR as appropriate).
* @pc: Program Counter.
* @cntfreq_el0: Counter Frequency.
* @cntpct_el0: Physical Counter.
* @cntvct_el0: Virtual Counter - CRITICAL for timing.
* @cntv_cval_el0: Virtual Timer Compare Value.
* @pmccntr_el0: Cycle Counter.
* @tpidr_el0: Thread Pointer ID Register.
* @tpidrro_el0: Thread Pointer ID, read-only.
* @elr_el1: Exception Link Register.
* @esr_el1: Exception Syndrome Register.
* @far_el1: Fault Address Register.
* @vbar_el1: Vector Base Address Register.
* @spsr_el1: Saved Program Status Register.
* @ctr_el0: Cache-Type.
* @cntv_ctl_el0: Virtual Timer Control.
* @dczid_el0: Data Cache Zero ID.
* @pmcr_el0: Performance Monitor Counter.
* @pstate: Process State Register (NZCV, DAIF, EL, etc.).
*
* This structure is aligned to the L1 cache line size to prevent false sharing
* when multiple host threads are emulating vCPUs on different physical cores.
@ -40,182 +56,68 @@ typedef struct alignas(CACHE_LINE_SIZE)
unsigned __int128 v[FP_REGISTERS]; /* 128-bit SIMD/FP vector registers V0-V31. */
uint64_t r[GP_REGISTERS];          /* General-purpose registers X0-X31.       */
uint64_t pc;                       /* Program Counter.                        */
/* EL0 counter/timer registers. */
uint64_t cntfreq_el0;   /* Counter frequency. */
uint64_t cntpct_el0;    /* Physical counter. */
uint64_t cntvct_el0;    /* Virtual counter - CRITICAL for timing. */
uint64_t cntv_cval_el0; /* Virtual timer compare value. */
uint64_t pmccntr_el0;   /* PMU cycle counter. */
uint64_t tpidr_el0;     /* Thread pointer ID register. */
uint64_t tpidrro_el0;   /* Thread pointer ID register, read-only. */
/*
 * Stores the Program Counter of the instruction that was interrupted.
 * For a synchronous fault, it's the address of the faulting instruction
 * itself.
 */
uint64_t elr_el1;
/*
 * Tells the guest OS why the exception happened. It contains a high
 * level Exception Class (EC) (eg, Data Abort) and a low level
 * Instruction Specific Syndrome (ISS) with fine-grained details
 * (eg, it was an alignment fault caused by a write operation).
 */
uint64_t esr_el1;
/* The memory address that caused a Data Abort exception. */
uint64_t far_el1;
/*
 * A snapshot of the current PSTATE register before the exception.
 * This is for restoring the program's state when returning from an
 * exception.
 */
uint64_t spsr_el1;
/*
 * The base address in guest memory where the Exception Vector Table
 * can be found.
 */
uint64_t vbar_el1;
uint32_t ctr_el0;      /* Cache-type register. */
uint32_t cntv_ctl_el0; /* Virtual timer control. */
uint32_t dczid_el0;    /* Data cache zero ID. */
uint32_t pmcr_el0;     /* Performance monitor control. */
uint32_t pstate;       /* Process State (NZCV, DAIF, EL, etc.). */
/* Fix: removed the stale "System Registers" member block that redeclared
 * cntvct_el0, cntpct_el0, cntv_ctl_el0, cntv_cval_el0, pmccntr_el0 and
 * pmcr_el0 (duplicate members are ill-formed) and carried typo'd names
 * (ctr_elo, dczid_elo, tpidrro_e10, tpidr_e10, cntfreq_elo). */
} vcpu_state_t;
/*
* guest_memory_t - Describes a contiguous block of guest physical RAM.
* @base: Pointer to the start of the host-allocated memory block.
* @size: The size of the memory block in bytes.
*/
typedef struct
{
uint8_t* base;
uint64_t size;
} guest_memory_t;
/*
* gpa_to_hva() - Translate a Guest Physical Address to a Host Virtual Address.
* @memory: The guest memory region to translate within.
* @gpa: The Guest Physical Address (offset) to translate.
* take_synchronous_exception() - Emulates the hardware process of taking a synchronous exception to EL1.
*
* This function provides a fast, direct translation for a flat guest memory
* model. It relies on the critical pre-condition that the guest's physical
* RAM is backed by a single, contiguous block of virtual memory in the host's
* userspace (typically allocated with mmap()).
* @vcpu: A pointer to the vCPU state to be modified.
* @exception_class: The high-level Exception Class (EC) code for ESR_EL1.
* @iss: The low-level Instruction Specific Syndrome (ISS) code for ESR_EL1.
* @faulting_address: The faulting address, to be written to FAR_EL1. Only valid for Data/Instruction Aborts. Pass 0 for other exception types.
*
* In this model, memory->base is the Host Virtual Address (HVA) of the start of
* the backing host memory. The provided Guest Physical Address (gpa) is not
* treated as a pointer, but as a simple byte offset from the start of the guest's
* physical address space (PAS).
 * This function modifies the vCPU state according to the rules for taking a
 * synchronous exception from a lower or same exception level that is targeting EL1.
 * It saves the necessary return state, populates the syndrome registers, and
 * updates the processor state for entry into EL1. Updating the program counter
 * to the appropriate offset in the EL1 vector table (via VBAR_EL1) is not yet
 * implemented; it is stubbed out until the instruction decoder is finished.
*
* The translation is therefore a single pointer-offset calculation. This establishes
* a direct 1:1 mapping between the guest's PAS and the host's virtual memory block.
*
* The function asserts that GPA is within bounds. The caller is responsible for
* ensuring the validity of the GPA prior to calling.
*
* Return: A valid host virtual address pointer corresponding to the GPA.
*/
static inline uint8_t* gpa_to_hva(guest_memory_t* memory, uint64_t gpa);
/*
* ============================================================================
* Guest Memory Read Functions
* ============================================================================
*/
/**
* guest_mem_readb() - Read one byte from guest memory.
* @memory: The guest memory region.
* @gpa: The Guest Physical Address to read from.
* Returns the 8-bit value read from memory.
*/
static inline uint8_t guest_mem_readb(guest_memory_t* memory, uint64_t gpa);
/**
* guest_mem_readw() - Read a 16-bit word from guest memory.
* @memory: The guest memory region.
* @gpa: The Guest Physical Address to read from (must be 2-byte aligned).
* Returns the 16-bit value, corrected for host endianness.
*/
static inline uint16_t guest_mem_readw(guest_memory_t* memory, uint64_t gpa);
/**
* guest_mem_readl() - Read a 32-bit long-word from guest memory.
* @memory: The guest memory region.
* @gpa: The Guest Physical Address to read from (must be 4-byte aligned).
* Returns the 32-bit value, corrected for host endianness.
*/
static inline uint32_t guest_mem_readl(guest_memory_t* memory, uint64_t gpa);
/**
* guest_mem_readq() - Read a 64-bit quad-word from guest memory.
* @memory: The guest memory region.
* @gpa: The Guest Physical Address to read from (must be 8-byte aligned).
* Returns the 64-bit value, corrected for host endianness.
*/
static inline uint64_t guest_mem_readq(guest_memory_t* memory, uint64_t gpa);
/*
* ============================================================================
* Guest Memory Write Functions
* ============================================================================
*/
/**
* guest_mem_writeb() - Write one byte to guest memory.
* @memory: The guest memory region.
* @gpa: The Guest Physical Address to write to.
* @val: The 8-bit value to write.
*/
static inline void guest_mem_writeb(guest_memory_t* memory, uint64_t gpa, uint8_t val);
/**
* guest_mem_writew() - Write a 16-bit word to guest memory.
* @memory: The guest memory region.
* @gpa: The Guest Physical Address to write to (must be 2-byte aligned).
* @val: The 16-bit value to write (will be converted to guest endianness).
*/
static inline void guest_mem_writew(guest_memory_t* memory, uint64_t gpa, uint16_t val);
/**
* guest_mem_writel() - Write a 32-bit long-word to guest memory.
* @memory: The guest memory region.
* @gpa: The Guest Physical Address to write to (must be 4-byte aligned).
* @val: The 32-bit value to write.
*/
static inline void guest_mem_writel(guest_memory_t* memory, uint64_t gpa, uint32_t val);
/**
* guest_mem_writeq() - Write a 64-bit quad-word to guest memory.
* @memory: The guest memory region.
* @gpa: The Guest Physical Address to write to (must be 8-byte aligned).
* @val: The 64-bit value to write.
*/
static inline void guest_mem_writeq(guest_memory_t* memory, uint64_t gpa, uint64_t val);
void take_synchronous_exception(vcpu_state_t* vcpu, uint8_t exception_class, uint32_t iss, uint64_t faulting_address);
void cpuTest();
} // namespace pound::aarch64
//=========================================================
// OUTDATED CODE
//=========================================================
/*
 * CPU - Legacy mock CPU state used by the outdated JIT prototype.
 *
 * Holds 31 general-purpose registers, a program counter, and a flat 64 KiB
 * byte-addressed memory array.
 */
struct CPU
{
    u64 regs[31] = {0}; // X0-X30
    u64 pc = 0;
    static constexpr size_t MEM_SIZE = 64 * 1024;
    u8 memory[MEM_SIZE];
    CPU() { std::memset(memory, 0, MEM_SIZE); }
    // Accessor for general-purpose register Xi.
    u64& x(int i) { return regs[i]; }
    // Read one byte of mock memory. Fix: an out-of-bounds address was
    // logged but memory[addr] was still indexed past the array (undefined
    // behavior); now it returns 0 instead.
    u8 read_byte(u64 addr)
    {
        if (addr >= MEM_SIZE)
        {
            LOG_INFO(ARM, "{} out of bounds", addr);
            return 0;
        }
        return memory[addr];
    }
    // Write one byte of mock memory. Fix: an out-of-bounds address was
    // logged but the write still went through past the array (undefined
    // behavior); now the write is dropped.
    void write_byte(u64 addr, u8 byte)
    {
        if (addr >= MEM_SIZE)
        {
            LOG_INFO(ARM, "{} out of bounds", addr);
            return;
        }
        memory[addr] = byte;
    }
    // Dump PC and all general-purpose registers to the log.
    void print_debug_information()
    {
        LOG_INFO(ARM, "PC = {}", pc);
        for (int reg = 0; reg < 31; reg++)
        {
            uint64_t regis = x(reg);
            LOG_INFO(ARM, "X{} = {}", reg, regis); // X0 = 0..
        }
    }
};

7
core/aarch64/memory.cpp Normal file
View file

@ -0,0 +1,7 @@
#include "memory.h"
#include "Base/Assert.h"
namespace pound::aarch64::memory
{
} // namespace pound::aarch64::memory

195
core/aarch64/memory.h Normal file
View file

@ -0,0 +1,195 @@
#pragma once
#include "Base/Assert.h"
namespace pound::aarch64::memory
{
/*
* guest_memory_t - Describes a contiguous block of guest physical RAM.
* @base: Pointer to the start of the host-allocated memory block.
* @size: The size of the memory block in bytes.
*/
typedef struct
{
uint8_t* base; /* Host virtual address of the start of the backing block. */
uint64_t size; /* Size of the guest RAM block in bytes. */
} guest_memory_t;
/*
* gpa_to_hva() - Translate a Guest Physical Address to a Host Virtual Address.
* @memory: The guest memory region to translate within.
* @gpa: The Guest Physical Address (offset) to translate.
*
* This function provides a fast, direct translation for a flat guest memory
* model. It relies on the critical pre-condition that the guest's physical
* RAM is backed by a single, contiguous block of virtual memory in the host's
* userspace (typically allocated with mmap()).
*
* In this model, memory->base is the Host Virtual Address (HVA) of the start of
* the backing host memory. The provided Guest Physical Address (gpa) is not
* treated as a pointer, but as a simple byte offset from the start of the guest's
* physical address space (PAS).
*
* The translation is therefore a single pointer-offset calculation. This establishes
* a direct 1:1 mapping between the guest's PAS and the host's virtual memory block.
*
* The function asserts that GPA is within bounds. The caller is responsible for
* ensuring the validity of the GPA prior to calling.
*
* Return: A valid host virtual address pointer corresponding to the GPA.
*/
static inline uint8_t* gpa_to_hva(guest_memory_t* memory, uint64_t gpa)
{
    ASSERT(nullptr != memory);
    ASSERT(nullptr != memory->base);
    ASSERT(gpa < memory->size);
    /* Flat 1:1 model: the GPA is a plain byte offset into the host block. */
    return memory->base + gpa;
}
// TODO(GloriousTacoo:aarch64) Implement big to little endian conversion for guest_mem read and write functions.
/*
* ============================================================================
* Guest Memory Read Functions
* ============================================================================
*/
/*
* guest_mem_readb() - Read one byte from guest memory.
* @memory: The guest memory region.
* @gpa: The Guest Physical Address to read from.
* Returns the 8-bit value read from memory.
*/
static inline uint8_t guest_mem_readb(guest_memory_t* memory, uint64_t gpa)
{
    ASSERT(nullptr != memory);
    ASSERT(nullptr != memory->base);
    /* Fix: the bound was `gpa <= memory->size`, which admits gpa == size
     * (one past the end). Use the strict bound, matching gpa_to_hva. */
    ASSERT(gpa < memory->size);
    uint8_t* hva = gpa_to_hva(memory, gpa);
    return *hva;
}
/*
* guest_mem_readw() - Read a 16-bit word from guest memory.
* @memory: The guest memory region.
* @gpa: The Guest Physical Address to read from (must be 2-byte aligned).
* Returns the 16-bit value, corrected for host endianness.
*/
static inline uint16_t guest_mem_readw(guest_memory_t* memory, uint64_t gpa)
{
    ASSERT(nullptr != memory);
    ASSERT(nullptr != memory->base);
    ASSERT((gpa + sizeof(uint16_t)) <= memory->size);
    /* gpa must be aligned to 2 bytes. */
    ASSERT(0 == (gpa & 1));
    return *(uint16_t*)gpa_to_hva(memory, gpa);
}
/*
* guest_mem_readl() - Read a 32-bit long-word from guest memory.
* @memory: The guest memory region.
* @gpa: The Guest Physical Address to read from (must be 4-byte aligned).
* Returns the 32-bit value, corrected for host endianness.
*/
static inline uint32_t guest_mem_readl(guest_memory_t* memory, uint64_t gpa)
{
    ASSERT(nullptr != memory);
    ASSERT(nullptr != memory->base);
    ASSERT((gpa + sizeof(uint32_t)) <= memory->size);
    /* gpa must be aligned to 4 bytes. */
    ASSERT(0 == (gpa & 3));
    return *(uint32_t*)gpa_to_hva(memory, gpa);
}
/*
* guest_mem_readq() - Read a 64-bit quad-word from guest memory.
* @memory: The guest memory region.
* @gpa: The Guest Physical Address to read from (must be 8-byte aligned).
* Returns the 64-bit value, corrected for host endianness.
*/
static inline uint64_t guest_mem_readq(guest_memory_t* memory, uint64_t gpa)
{
    ASSERT(nullptr != memory);
    ASSERT(nullptr != memory->base);
    ASSERT((gpa + sizeof(uint64_t)) <= memory->size);
    /* gpa must be aligned to 8 bytes. */
    ASSERT(0 == (gpa & 7));
    return *(uint64_t*)gpa_to_hva(memory, gpa);
}
/*
* ============================================================================
* Guest Memory Write Functions
* ============================================================================
*/
/*
* guest_mem_writeb() - Write one byte to guest memory.
* @memory: The guest memory region.
* @gpa: The Guest Physical Address to write to.
* @val: The 8-bit value to write.
*/
static inline void guest_mem_writeb(guest_memory_t* memory, uint64_t gpa, uint8_t val)
{
    ASSERT(nullptr != memory);
    ASSERT(nullptr != memory->base);
    /* Fix: the bound was `gpa <= memory->size`, which admits gpa == size
     * (one past the end). Use the strict bound, matching gpa_to_hva. */
    ASSERT(gpa < memory->size);
    uint8_t* hva = gpa_to_hva(memory, gpa);
    *hva = val;
}
/*
* guest_mem_writew() - Write a 16-bit word to guest memory.
* @memory: The guest memory region.
* @gpa: The Guest Physical Address to write to (must be 2-byte aligned).
* @val: The 16-bit value to write (will be converted to guest endianness).
*/
static inline void guest_mem_writew(guest_memory_t* memory, uint64_t gpa, uint16_t val)
{
    ASSERT(nullptr != memory);
    ASSERT(nullptr != memory->base);
    ASSERT((gpa + sizeof(uint16_t)) <= memory->size);
    /* gpa must be aligned to 2 bytes. */
    ASSERT(0 == (gpa & 1));
    *(uint16_t*)gpa_to_hva(memory, gpa) = val;
}
/*
* guest_mem_writel() - Write a 32-bit long-word to guest memory.
* @memory: The guest memory region.
* @gpa: The Guest Physical Address to write to (must be 4-byte aligned).
* @val: The 32-bit value to write.
*/
static inline void guest_mem_writel(guest_memory_t* memory, uint64_t gpa, uint32_t val)
{
    /* Fix: the null check on `memory` itself was missing (every sibling
     * accessor has it), so a null `memory` would be dereferenced by the
     * memory->base assertion below. */
    ASSERT(nullptr != memory);
    ASSERT(nullptr != memory->base);
    ASSERT((gpa + sizeof(uint32_t)) <= memory->size);
    // Check if gpa is aligned to 4 bytes.
    ASSERT((gpa & 3) == 0);
    uint32_t* hva = (uint32_t*)gpa_to_hva(memory, gpa);
    *hva = val;
}
/*
* guest_mem_writeq() - Write a 64-bit quad-word to guest memory.
* @memory: The guest memory region.
* @gpa: The Guest Physical Address to write to (must be 8-byte aligned).
* @val: The 64-bit value to write.
*/
static inline void guest_mem_writeq(guest_memory_t* memory, uint64_t gpa, uint64_t val)
{
    ASSERT(nullptr != memory);
    ASSERT(nullptr != memory->base);
    ASSERT((gpa + sizeof(uint64_t)) <= memory->size);
    /* gpa must be aligned to 8 bytes. */
    ASSERT(0 == (gpa & 7));
    *(uint64_t*)gpa_to_hva(memory, gpa) = val;
}
} // namespace pound::aarch64::memory

View file

@ -5,8 +5,8 @@
#include <thread>
#include "Base/Config.h"
#include "Base/Logging/Log.h"
#include "Base/Logging/Backend.h"
#include "JIT/jit.h"
#include "gui/gui.h"
#include "memory/arena.h"
@ -18,10 +18,6 @@
int main()
{
// This is meant to replace malloc() and its related functions.
// TODO(GloriousTaco:memory): Implement std::allocator for this custom allocator which allows it to manage the memory of C++ standard types like std::vector.
memory::arena_t arena = memory::arena_init(1024);
Base::Log::Initialize();
Base::Log::Start();