Mirror of https://github.com/pound-emu/pound.git, synced 2025-12-11 07:36:57 +00:00

Remove PVM folder

The folder will be rewritten once the JIT compiler is completed.

Signed-off-by: Ronald Caesar <github43132@proton.me>
This commit is contained in:
parent d6c29e412a
commit 21c0f0bdef

11 changed files with 7 additions and 1624 deletions
@@ -137,13 +137,13 @@ foreach(TARGET ${POUND_PROJECT_TARGETS})
     endif()

     # Set compile time log level for all targets.
-    # 1: Trace
-    # 2: Debug
-    # 3: Info
-    # 4: Warning
-    # 5: Error
-    # 6: Fatal
-    target_compile_definitions(${TARGET} PRIVATE COMPILE_TIME_LOG_LEVEL=1)
+    # 0: Trace
+    # 1: Debug
+    # 2: Info
+    # 3: Warning
+    # 4: Error
+    # 5: Fatal
+    target_compile_definitions(${TARGET} PRIVATE COMPILE_TIME_LOG_LEVEL=0)
 endforeach()

 # Optimizations
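For orientation, a compile-time level like this is normally consumed by the logging macros so that levels below the threshold compile away entirely. A minimal sketch of that pattern follows; common/logging.h is not part of this diff, so LOG_TRACE and log_write below are illustrative placeholders, and the sketch assumes the new 0-based numbering set above.

/* Hedged sketch only; the real macros live in common/logging.h. */
#define LOG_LEVEL_TRACE 0

#if COMPILE_TIME_LOG_LEVEL <= LOG_LEVEL_TRACE
/* log_write() is a hypothetical sink, not a Pound API. */
#define LOG_TRACE(...) log_write(LOG_LEVEL_TRACE, LOG_MODULE, __VA_ARGS__)
#else
#define LOG_TRACE(...) ((void)0) /* compiled out below the threshold */
#endif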
@@ -1,15 +0,0 @@
add_library(pvm STATIC)

target_sources(pvm PRIVATE
    mmu.cpp
    mmio.cpp
    pvm.cpp
    guest.cpp
)

target_link_libraries(pvm PRIVATE common host)

target_include_directories(pvm PUBLIC
    ${CMAKE_CURRENT_SOURCE_DIR}
    ${CMAKE_CURRENT_SOURCE_DIR}/..
)
@@ -1,26 +0,0 @@
#ifndef POUND_KVM_ENDIAN_H
#define POUND_KVM_ENDIAN_H

#define GUEST_IS_LITTLE_ENDIAN 1

#ifdef _WIN32

#include <stdlib.h>
#define bswap_16(x) _byteswap_ushort(x)
#define bswap_32(x) _byteswap_ulong(x)
#define bswap_64(x) _byteswap_uint64(x)

#elif defined(__APPLE__)

#include <libkern/OSByteOrder.h>
#define bswap_16(x) OSSwapInt16(x)
#define bswap_32(x) OSSwapInt32(x)
#define bswap_64(x) OSSwapInt64(x)

#else

#include <byteswap.h>

#endif

#endif // POUND_KVM_ENDIAN_H
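A quick sketch of how these macros combine with the endianness flags. Note that HOST_IS_LITTLE_ENDIAN is consumed by guest.h below but is not defined in this header, so this assumes the build provides it elsewhere.

#include <cstdint>
#include <cstdio>

int main()
{
    uint32_t le_value = 0x11223344u; /* value as stored by the guest */
#if HOST_IS_LITTLE_ENDIAN != GUEST_IS_LITTLE_ENDIAN
    /* On a mismatched host, 0x11223344 must become 0x44332211. */
    le_value = bswap_32(le_value);
#endif
    printf("%08x\n", le_value);
    return 0;
}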
@@ -1,35 +0,0 @@
#include "guest.h"
#include "common/passert.h"

namespace pound::pvm::memory
{
guest_memory_t* guest_memory_create(pound::host::memory::arena_t* arena)
{
    PVM_ASSERT(nullptr != arena);
    PVM_ASSERT(nullptr != arena->data);

    guest_memory_t* memory = (guest_memory_t*)pound::host::memory::arena_allocate(arena, sizeof(guest_memory_t));
    size_t ram_size = arena->capacity - arena->size;
    uint8_t* ram_block = (uint8_t*)pound::host::memory::arena_allocate(arena, ram_size);

    /*
     * This requires casting away the 'const' qualifier, which is generally unsafe.
     * However, it is safe in this specific context because:
     *
     * a) We are operating on a newly allocated heap object (`memory`).
     * b) No other part of the system has a reference to this object yet.
     * c) This is a one-time initialization; the const contract will be
     *    honored for the rest of the object's lifetime after this function
     *    returns.
     *
     * This allows us to create an immutable descriptor object on the
     * heap.
     */
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
    *(uint8_t**)&memory->base = ram_block;
    *(uint64_t*)&memory->size = ram_size;
#pragma GCC diagnostic pop
    return memory;
}
} // namespace pound::pvm::memory
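Usage mirrors the (disabled) cpuTest() path in pvm.cpp later in this diff; a minimal sketch, assuming arena_init() allocates the backing buffer:

pound::host::memory::arena_t arena = pound::host::memory::arena_init(GUEST_RAM_SIZE);
PVM_ASSERT(nullptr != arena.data);

/* The descriptor is carved from the arena first; everything left
 * over becomes guest RAM, so dedicate this arena to the guest. */
pound::pvm::memory::guest_memory_t* ram = pound::pvm::memory::guest_memory_create(&arena);
/* ram->base and ram->size are now immutable for the object's lifetime. */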
src/pvm/guest.h (459 lines deleted)
@@ -1,459 +0,0 @@
#ifndef POUND_pvm_GUEST_H
#define POUND_pvm_GUEST_H

#include <cassert>
#include <stdint.h>
#include <string.h>

#include "endian.h"

#include "host/memory/arena.h"

namespace pound::pvm::memory
{

/*
 * guest_memory_t - A non-owning descriptor for a block of guest physical RAM.
 * @base: Pointer to the start of the host-allocated memory block.
 * @size: The size of the memory block in bytes.
 *
 * This structure describes a contiguous block of guest physical memory. It acts
 * as a handle or a "view" into a region of host memory, but it does not manage
 * the lifetime of that memory itself.
 *
 * --- Ownership ---
 * The guest_memory_t struct does NOT own the memory block pointed to by @base.
 * Ownership of the underlying memory buffer is retained by the host memory
 * arena from which it was allocated. The party responsible for creating the
 * arena is also responsible for ultimately freeing it. This struct is merely a
 * descriptor and can be safely passed by value or pointer without transferring
 * ownership.
 *
 * --- Lifetime ---
 * An instance of this struct should be considered valid only for as long as the
 * backing memory arena is valid. Typically, this means it is created once
 * during virtual machine initialization and lives for the entire duration of
 * the emulation session. Its lifetime is tied to the lifetime of the parent
 * pvm instance.
 *
 * --- Invariants ---
 * Both fields of this struct are declared `const`. This establishes the
 * invariant that once a guest_memory_t descriptor is created and initialized
 * by guest_memory_create(), its size and base address are immutable for the
 * lifetime of the object. This prevents accidental resizing or repointing of
 * the guest's physical RAM.
 */
typedef struct
{
    uint8_t* const base;
    const uint64_t size;
} guest_memory_t;

/*
 * guest_memory_create() - Allocates and initializes a guest memory region from
 * an arena.
 * @arena: A pointer to a host memory arena that will be the source for all
 *         allocations.
 *
 * This function sets up the primary guest RAM block. It uses a provided host
 * memory arena as the backing store for both the guest_memory_t descriptor
 * struct and the guest RAM itself.
 *
 * The function first allocates a small chunk from the arena for the guest_memory_t
 * struct. It then dedicates the *entire remaining capacity* of the arena to be
 * the main guest RAM block.
 *
 * Preconditions:
 * - @arena must be a valid, non-NULL pointer to an initialized host arena.
 * - @arena->data must point to a valid, host-allocated memory buffer.
 * - The arena provided should be dedicated solely to this guest memory block;
 *   its entire remaining capacity will be consumed.
 *
 * Return: A pointer to a fully initialized guest_memory_t struct. The `base`
 * pointer will point to the start of the guest RAM block within the arena,
 * and `size` will reflect the size of that block.
 */
guest_memory_t* guest_memory_create(pound::host::memory::arena_t* arena);

/*
 * guest_mem_access_result_t - Defines the set of possible outcomes for a guest
 * memory access operation.
 * @GUEST_MEM_ACCESS_OK:              The memory operation completed
 *                                    successfully.
 * @GUEST_MEM_ACCESS_FAULT_UNALIGNED: The access was unaligned, and the
 *                                    emulated CPU requires an Alignment
 *                                    Fault to be raised. The operation was
 *                                    NOT completed. The host must inject a
 *                                    Data Abort into the guest.
 * @GUEST_MEM_ACCESS_FAULT_BOUNDARY:  An access fell outside the bounds of
 *                                    the defined memory region. The
 *                                    operation was NOT completed. The host
 *                                    must inject a Data Abort for a
 *                                    translation/permission fault into the
 *                                    guest.
 * @GUEST_MEM_ACCESS_ERROR_INTERNAL:  An unrecoverable internal error occurred
 *                                    within the memory subsystem. This
 *                                    indicates a fatal host bug, not a
 *                                    guest-induced fault.
 */
typedef enum
{
    GUEST_MEM_ACCESS_OK = 0,
    GUEST_MEM_ACCESS_FAULT_UNALIGNED,
    GUEST_MEM_ACCESS_FAULT_BOUNDARY,
    GUEST_MEM_ACCESS_ERROR_INTERNAL,
} guest_mem_access_result_t;

/*
 * ============================================================================
 * Guest Memory Read Functions
 * ============================================================================
 */

/*
 * guest_mem_readb() - Read one byte from guest physical memory.
 * @memory:  A pointer to the guest memory region.
 * @gpa:     The guest physical address to read from.
 * @out_val: A pointer to a uint8_t where the result will be stored.
 *
 * This function safely reads a single 8-bit value from the guest's physical
 * RAM. It performs a bounds check to ensure the access is within the allocated
 * memory region.
 *
 * Preconditions:
 * - @memory and @out_val must be valid, non-NULL pointers.
 * - @memory->base must point to a valid, host-allocated memory buffer.
 *
 * Return:
 * %GUEST_MEM_ACCESS_OK on success.
 * %GUEST_MEM_ACCESS_FAULT_BOUNDARY if the @gpa is outside the valid memory
 * range.
 */
inline guest_mem_access_result_t guest_mem_readb(guest_memory_t* memory, uint64_t gpa, uint8_t* out_val)
{
    assert(nullptr != memory);
    assert(nullptr != memory->base);
    assert(nullptr != out_val);

    if (gpa >= memory->size)
    {
        return GUEST_MEM_ACCESS_FAULT_BOUNDARY;
    }

    uint8_t* hva = memory->base + gpa;
    *out_val = *hva;

    return GUEST_MEM_ACCESS_OK;
}

/*
 * guest_mem_readw() - Read a 16-bit word from guest physical memory.
 * @memory:  A pointer to the guest memory region.
 * @gpa:     The guest physical address to read from.
 * @out_val: A pointer to a uint16_t where the result will be stored.
 *
 * This function safely reads a 16-bit little-endian value from guest RAM.
 * It performs both boundary and alignment checks before the access.
 * It will also perform a byte swap if the host system is not little-endian.
 *
 * Preconditions:
 * - @memory and @out_val must be valid, non-NULL pointers.
 * - @memory->base must point to a valid, host-allocated memory buffer.
 * - The guest address @gpa must be 2-byte aligned.
 *
 * Return:
 * %GUEST_MEM_ACCESS_OK on success.
 * %GUEST_MEM_ACCESS_FAULT_BOUNDARY on an out-of-bounds access or
 * %GUEST_MEM_ACCESS_FAULT_UNALIGNED if @gpa is not 2-byte aligned.
 */
inline guest_mem_access_result_t guest_mem_readw(guest_memory_t* memory, uint64_t gpa, uint16_t* out_val)
{
    assert(nullptr != memory);
    assert(nullptr != memory->base);
    assert(nullptr != out_val);

    if (gpa > (memory->size - sizeof(uint16_t)))
    {
        return GUEST_MEM_ACCESS_FAULT_BOUNDARY;
    }

    if ((gpa & 1) != 0)
    {
        return GUEST_MEM_ACCESS_FAULT_UNALIGNED;
    }

    uint8_t* hva = memory->base + gpa;
    memcpy(out_val, hva, sizeof(uint16_t));

#if HOST_IS_LITTLE_ENDIAN != GUEST_IS_LITTLE_ENDIAN
    *out_val = bswap_16(*out_val);
#endif
    return GUEST_MEM_ACCESS_OK;
}

/*
 * guest_mem_readl() - Read a 32-bit long-word from guest physical memory.
 * @memory:  A pointer to the guest memory region.
 * @gpa:     The guest physical address to read from.
 * @out_val: A pointer to a uint32_t where the result will be stored.
 *
 * This function safely reads a 32-bit little-endian value from guest RAM.
 * It performs both boundary and alignment checks before the access.
 * It will also perform a byte swap if the host system is not little-endian.
 *
 * Preconditions:
 * - @memory and @out_val must be valid, non-NULL pointers.
 * - @memory->base must point to a valid, host-allocated memory buffer.
 * - The guest address @gpa must be 4-byte aligned.
 *
 * Return:
 * %GUEST_MEM_ACCESS_OK on success.
 * %GUEST_MEM_ACCESS_FAULT_BOUNDARY on an out-of-bounds access or
 * %GUEST_MEM_ACCESS_FAULT_UNALIGNED if @gpa is not 4-byte aligned.
 */
inline guest_mem_access_result_t guest_mem_readl(guest_memory_t* memory, uint64_t gpa, uint32_t* out_val)
{
    assert(nullptr != memory);
    assert(nullptr != memory->base);
    assert(nullptr != out_val);

    if (gpa > (memory->size - sizeof(uint32_t)))
    {
        return GUEST_MEM_ACCESS_FAULT_BOUNDARY;
    }

    if ((gpa & 3) != 0)
    {
        return GUEST_MEM_ACCESS_FAULT_UNALIGNED;
    }

    uint8_t* hva = memory->base + gpa;
    memcpy(out_val, hva, sizeof(uint32_t));

#if HOST_IS_LITTLE_ENDIAN != GUEST_IS_LITTLE_ENDIAN
    *out_val = bswap_32(*out_val);
#endif
    return GUEST_MEM_ACCESS_OK;
}

/*
 * guest_mem_readq() - Read a 64-bit quad-word from guest physical memory.
 * @memory:  A pointer to the guest memory region.
 * @gpa:     The guest physical address to read from.
 * @out_val: A pointer to a uint64_t where the result will be stored.
 *
 * This function safely reads a 64-bit little-endian value from guest RAM.
 * It performs both boundary and alignment checks before the access.
 * It will also perform a byte swap if the host system is not little-endian.
 *
 * Preconditions:
 * - @memory and @out_val must be valid, non-NULL pointers.
 * - @memory->base must point to a valid, host-allocated memory buffer.
 * - The guest address @gpa must be 8-byte aligned.
 *
 * Return:
 * %GUEST_MEM_ACCESS_OK on success.
 * %GUEST_MEM_ACCESS_FAULT_BOUNDARY on an out-of-bounds access or
 * %GUEST_MEM_ACCESS_FAULT_UNALIGNED if @gpa is not 8-byte aligned.
 */
inline guest_mem_access_result_t guest_mem_readq(guest_memory_t* memory, uint64_t gpa, uint64_t* out_val)
{
    assert(nullptr != memory);
    assert(nullptr != memory->base);
    assert(nullptr != out_val);

    if (gpa > (memory->size - sizeof(uint64_t)))
    {
        return GUEST_MEM_ACCESS_FAULT_BOUNDARY;
    }

    if ((gpa & 7) != 0)
    {
        return GUEST_MEM_ACCESS_FAULT_UNALIGNED;
    }

    uint8_t* hva = memory->base + gpa;
    memcpy(out_val, hva, sizeof(uint64_t));

#if HOST_IS_LITTLE_ENDIAN != GUEST_IS_LITTLE_ENDIAN
    *out_val = bswap_64(*out_val);
#endif
    return GUEST_MEM_ACCESS_OK;
}

/*
 * ============================================================================
 * Guest Memory Write Functions
 * ============================================================================
 */

/*
 * guest_mem_writeb() - Write one byte to guest physical memory.
 * @memory: A pointer to the guest memory region.
 * @gpa:    The guest physical address to write to.
 * @val:    The 8-bit value to write.
 *
 * This function safely writes a single 8-bit value to the guest's physical
 * RAM. It performs a bounds check to ensure the access is within the allocated
 * memory region before performing the write.
 *
 * Preconditions:
 * - @memory must be a valid, non-NULL pointer.
 * - @memory->base must point to a valid, host-allocated memory buffer.
 *
 * Return:
 * %GUEST_MEM_ACCESS_OK on success.
 * %GUEST_MEM_ACCESS_FAULT_BOUNDARY if the @gpa is outside the valid memory
 * range.
 */
inline guest_mem_access_result_t guest_mem_writeb(guest_memory_t* memory, uint64_t gpa, uint8_t val)
{
    assert(nullptr != memory);
    assert(nullptr != memory->base);

    if (gpa >= memory->size)
    {
        return GUEST_MEM_ACCESS_FAULT_BOUNDARY;
    }

    uint8_t* hva = memory->base + gpa;
    *hva = val;
    return GUEST_MEM_ACCESS_OK;
}

/*
 * guest_mem_writew() - Write a 16-bit word to guest physical memory.
 * @memory: A pointer to the guest memory region.
 * @gpa:    The guest physical address to write to.
 * @val:    The 16-bit value to write.
 *
 * This function safely writes a 16-bit little-endian value to guest RAM.
 * It performs both boundary and alignment checks before the access.
 * It will also perform a byte swap if the host system is not little-endian.
 *
 * Preconditions:
 * - @memory must be a valid, non-NULL pointer.
 * - @memory->base must point to a valid, host-allocated memory buffer.
 * - The guest address @gpa must be 2-byte aligned.
 *
 * Return:
 * %GUEST_MEM_ACCESS_OK on success.
 * %GUEST_MEM_ACCESS_FAULT_BOUNDARY on an out-of-bounds access or
 * %GUEST_MEM_ACCESS_FAULT_UNALIGNED if @gpa is not 2-byte aligned.
 */
inline guest_mem_access_result_t guest_mem_writew(guest_memory_t* memory, uint64_t gpa, uint16_t val)
{
    assert(nullptr != memory);
    assert(nullptr != memory->base);

    if (gpa > (memory->size - sizeof(uint16_t)))
    {
        return GUEST_MEM_ACCESS_FAULT_BOUNDARY;
    }

    if ((gpa & 1) != 0)
    {
        return GUEST_MEM_ACCESS_FAULT_UNALIGNED;
    }

    uint8_t* hva = memory->base + gpa;

#if HOST_IS_LITTLE_ENDIAN != GUEST_IS_LITTLE_ENDIAN
    val = bswap_16(val);
#endif

    memcpy(hva, &val, sizeof(uint16_t));
    return GUEST_MEM_ACCESS_OK;
}

/*
 * guest_mem_writel() - Write a 32-bit long-word to guest physical memory.
 * @memory: A pointer to the guest memory region.
 * @gpa:    The guest physical address to write to.
 * @val:    The 32-bit value to write.
 *
 * This function safely writes a 32-bit little-endian value to guest RAM.
 * It performs both boundary and alignment checks before the access.
 * It will also perform a byte swap if the host system is not little-endian.
 *
 * Preconditions:
 * - @memory must be a valid, non-NULL pointer.
 * - @memory->base must point to a valid, host-allocated memory buffer.
 * - The guest address @gpa must be 4-byte aligned.
 *
 * Return:
 * %GUEST_MEM_ACCESS_OK on success.
 * %GUEST_MEM_ACCESS_FAULT_BOUNDARY on an out-of-bounds access or
 * %GUEST_MEM_ACCESS_FAULT_UNALIGNED if @gpa is not 4-byte aligned.
 */
inline guest_mem_access_result_t guest_mem_writel(guest_memory_t* memory, uint64_t gpa, uint32_t val)
{
    assert(nullptr != memory);
    assert(nullptr != memory->base);

    if (gpa > (memory->size - sizeof(uint32_t)))
    {
        return GUEST_MEM_ACCESS_FAULT_BOUNDARY;
    }

    if ((gpa & 3) != 0)
    {
        return GUEST_MEM_ACCESS_FAULT_UNALIGNED;
    }

    uint8_t* hva = memory->base + gpa;

#if HOST_IS_LITTLE_ENDIAN != GUEST_IS_LITTLE_ENDIAN
    val = bswap_32(val);
#endif

    memcpy(hva, &val, sizeof(uint32_t));
    return GUEST_MEM_ACCESS_OK;
}

/*
 * guest_mem_writeq() - Write a 64-bit quad-word to guest physical memory.
 * @memory: A pointer to the guest memory region.
 * @gpa:    The guest physical address to write to.
 * @val:    The 64-bit value to write.
 *
 * This function safely writes a 64-bit little-endian value to guest RAM.
 * It performs both boundary and alignment checks before the access.
 * It will also perform a byte swap if the host system is not little-endian.
 *
 * Preconditions:
 * - @memory must be a valid, non-NULL pointer.
 * - @memory->base must point to a valid, host-allocated memory buffer.
 * - The guest address @gpa must be 8-byte aligned.
 *
 * Return:
 * %GUEST_MEM_ACCESS_OK on success.
 * %GUEST_MEM_ACCESS_FAULT_BOUNDARY on an out-of-bounds access or
 * %GUEST_MEM_ACCESS_FAULT_UNALIGNED if @gpa is not 8-byte aligned.
 */
inline guest_mem_access_result_t guest_mem_writeq(guest_memory_t* memory, uint64_t gpa, uint64_t val)
{
    assert(nullptr != memory);
    assert(nullptr != memory->base);

    if (gpa > (memory->size - sizeof(uint64_t)))
    {
        return GUEST_MEM_ACCESS_FAULT_BOUNDARY;
    }

    if ((gpa & 7) != 0)
    {
        return GUEST_MEM_ACCESS_FAULT_UNALIGNED;
    }

    uint8_t* hva = memory->base + gpa;

#if HOST_IS_LITTLE_ENDIAN != GUEST_IS_LITTLE_ENDIAN
    val = bswap_64(val);
#endif

    memcpy(hva, &val, sizeof(uint64_t));
    return GUEST_MEM_ACCESS_OK;
}
} // namespace pound::pvm::memory
#endif // POUND_pvm_GUEST_H
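The accessors above are meant to be called with explicit result checks; a minimal sketch of the intended pattern, reusing the `ram` descriptor from the guest.cpp sketch earlier:

uint64_t gpa = 0x1000;
uint32_t value = 0;

switch (guest_mem_readl(ram, gpa, &value))
{
    case GUEST_MEM_ACCESS_OK:
        break;
    case GUEST_MEM_ACCESS_FAULT_BOUNDARY:
    case GUEST_MEM_ACCESS_FAULT_UNALIGNED:
        /* The caller, not the accessor, injects the Data Abort,
         * e.g. via take_synchronous_exception() in pvm.cpp. */
        break;
    default:
        PVM_ASSERT_MSG(false, "internal memory subsystem error");
}

/* Writes follow the same shape; a 4-byte store to the same address: */
(void)guest_mem_writel(ram, gpa, 0xDEADBEEFu);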
src/pvm/mmio.cpp (141 lines deleted)
@@ -1,141 +0,0 @@
#include "mmio.h"
#include "common/passert.h"
#include <algorithm>

namespace pound::pvm::memory
{
/*
 * This function implements a strict weak ordering comparison on two
 * MMIO ranges, based solely on their starting guest physical address.
 *
 * It is designed to be used with std::lower_bound.
 */
bool mmio_compare_ranges(const mmio_range_t& a, const mmio_range_t& b)
{
    return a.gpa_base < b.gpa_base;
}

int8_t mmio_db_register(mmio_db_t* db, const mmio_range_t range, const mmio_handler_t handler)
{
    PVM_ASSERT(nullptr != db);
    PVM_ASSERT((db->address_ranges.size() + 1) <= MMIO_REGIONS);

    auto it = std::lower_bound(db->address_ranges.begin(), db->address_ranges.end(), range, mmio_compare_ranges);
    auto i = it - db->address_ranges.begin();

    /*
     * Scenario: UART is a current region, TIMER is a new region being
     * registered.
     *
     *   [-- UART --]
     *   0x9000    0x9004
     *         [---- TIMER ----]   <-- CONFLICT!
     *         0x9002        0x900A
     */
    if (i > 0)
    {
        if (range.gpa_base < db->address_ranges[(size_t)i - 1].gpa_end)
        {
            return EADDRESS_OVERLAP;
        }
    }

    /*
     * Scenario: UART is a current region, TIMER is a new region being
     * registered.
     *
     *   [---- TIMER ----]   <-- CONFLICT!
     *   0x9000        0x9004
     *         [-- UART --]
     *         0x9002    0x900A
     */
    if (i < db->address_ranges.size())
    {
        if (db->address_ranges[(size_t)i].gpa_base < range.gpa_end)
        {
            return EADDRESS_OVERLAP;
        }
    }

    db->address_ranges.insert(it, range);
    db->handlers.insert(db->handlers.begin() + i, handler);
    return MMIO_SUCCESS;
}

bool mmio_compare_addresses(const mmio_range_t& a, const mmio_range_t& b)
{
    return a.gpa_base < b.gpa_base;
}

int8_t mmio_db_dispatch_write(mmio_db_t* db, pvm_t* pvm, uint64_t gpa, uint8_t* data, size_t len)
{
    PVM_ASSERT(nullptr != db);
    PVM_ASSERT(nullptr != pvm);
    PVM_ASSERT(nullptr != data);
    PVM_ASSERT(len > 0);

    mmio_range_t search_key = {.gpa_base = gpa, .gpa_end = 0};
    /* Find the first region that starts after the target gpa */
    auto it =
        std::upper_bound(db->address_ranges.begin(), db->address_ranges.end(), search_key, mmio_compare_addresses);

    /* If `it` is the beginning, then the gpa is smaller than all known regions. */
    if (db->address_ranges.begin() == it)
    {
        return ENOT_HANDLED;
    }

    mmio_range_t candidate = *(it - 1);
    /* base <= gpa < end */
    if ((candidate.gpa_base <= gpa) && (gpa < candidate.gpa_end))
    {
        auto i = (it - 1) - db->address_ranges.begin();
        if (nullptr == db->handlers[(size_t)i].write)
        {
            return EACCESS_DENIED;
        }

        db->handlers[(size_t)i].write(pvm, gpa, data, len);
        return MMIO_SUCCESS;
    }

    /* The gpa is not in any mmio region. */
    return ENOT_HANDLED;
}

int8_t mmio_db_dispatch_read(mmio_db_t* db, pvm_t* pvm, uint64_t gpa, uint8_t* data, size_t len)
{
    PVM_ASSERT(nullptr != db);
    PVM_ASSERT(nullptr != pvm);
    PVM_ASSERT(nullptr != data);
    PVM_ASSERT(len > 0);

    mmio_range_t search_key = {.gpa_base = gpa, .gpa_end = 0};
    /* Find the first region that starts after the target gpa */
    auto it =
        std::upper_bound(db->address_ranges.begin(), db->address_ranges.end(), search_key, mmio_compare_addresses);

    /* If `it` is the beginning, then the gpa is smaller than all known regions. */
    if (db->address_ranges.begin() == it)
    {
        return ENOT_HANDLED;
    }

    mmio_range_t candidate = *(it - 1);
    /* base <= gpa < end */
    if ((candidate.gpa_base <= gpa) && (gpa < candidate.gpa_end))
    {
        auto i = (it - 1) - db->address_ranges.begin();
        if (nullptr == db->handlers[(size_t)i].read)
        {
            return EACCESS_DENIED;
        }

        db->handlers[(size_t)i].read(pvm, gpa, data, len);
        return MMIO_SUCCESS;
    }

    /* The gpa is not in any mmio region. */
    return ENOT_HANDLED;
}
} // namespace pound::pvm::memory
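To make the overlap rejection concrete, here is a sketch of the UART/TIMER conflict from the comments above. The `db` instance is assumed to be already constructed with its arena-backed vectors; handlers are left empty, which mmio_db_register() does not itself reject.

mmio_range_t uart  = {.gpa_base = 0x9000, .gpa_end = 0x9004};
mmio_range_t timer = {.gpa_base = 0x9002, .gpa_end = 0x900A};

int8_t rc = mmio_db_register(&db, uart, mmio_handler_t{});
/* rc == MMIO_SUCCESS */

rc = mmio_db_register(&db, timer, mmio_handler_t{});
/* rc == EADDRESS_OVERLAP: 0x9002 is below uart.gpa_end (0x9004),
 * so the new range starts inside [0x9000, 0x9004). */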
src/pvm/mmio.h (191 lines deleted)
@@ -1,191 +0,0 @@
#pragma once

#include <cstdint>
#include <vector>
#include "host/memory/arena_stl.h"
#include "pvm.h"

namespace pound::pvm::memory
{
/*
 * MMIO_REGIONS - The maximum number of distinct MMIO regions supported.
 *
 * It sets a hard limit on how many separate hardware device regions
 * can be registered at boot time.
 */
#define MMIO_REGIONS 20

/* MMIO_SUCCESS - Return code for a successful MMIO operation. */
#define MMIO_SUCCESS 0

/* EADDRESS_OVERLAP - Error code for an MMIO address space conflict. */
#define EADDRESS_OVERLAP (-1)

#define ENOT_HANDLED (-2)

#define EACCESS_DENIED (-3)

/*
 * typedef mmio - Function pointer type for an MMIO access handler.
 * @pvm:  A pointer to the pvm instance.
 * @gpa:  The guest physical address of the access.
 * @data: A pointer to the data buffer. For reads, this buffer
 *        should be filled by the handler. For writes, this buffer
 *        contains the data written by the guest.
 * @len:  The size of the access in bytes.
 *
 * This function pointer defines the contract for all MMIO read
 * and write handler functions. Handlers are responsible for emulating
 * the hardware's response to a memory access at a specific register
 * address.
 *
 * Returns: MMIO_SUCCESS on success, negative errno code on failure.
 */
typedef int8_t (*mmio)(pvm_t* pvm, uint64_t gpa, uint8_t* data, size_t len);

/*
 * mmio_handler_t - A pair of handlers for an MMIO region.
 * @read:  A function pointer to be called for read access within the
 *         region. Can be NULL if the region is write-only.
 * @write: A function pointer to be called for write access within the
 *         region. Can be NULL if the region is read-only.
 *
 * This structure stores the read and write operations for a single
 * hardware device or memory region.
 */
typedef struct
{
    mmio read;
    mmio write;
} mmio_handler_t;

/*
 * mmio_range_t - Defines a half-open guest physical address range.
 * @gpa_base: The starting (inclusive) guest physical address of
 *            the region.
 * @gpa_end:  The ending (exclusive) guest physical address of the region.
 *
 * This structure defines a contiguous block of guest physical address
 * space, [gpa_base, gpa_end). The use of an exclusive end address
 * simplifies range and adjacency calculations.
 */
typedef struct
{
    uint64_t gpa_base;
    uint64_t gpa_end;
} mmio_range_t;

/*
 * mmio_db_t - A data-oriented database for MMIO dispatch.
 * @handlers:       A vector of MMIO handler pairs.
 * @address_ranges: A vector of physical address ranges, sorted by GPA base.
 *
 * This structure manages all registered Memory-Mapped I/O regions for a
 * virtual machine. It is designed with a "Structure of Arrays" layout
 * to maximize host CPU cache efficiency during lookups.
 */
typedef struct
{
    /*
     * This uses a custom arena allocator to ensure that all handler nodes
     * are allocated from a single, pre-allocated memory block.
     *
     * This is a parallel array to @address_ranges.
     */
    std::vector<mmio_handler_t, pound::host::memory::arena_allocator<mmio_handler_t>> handlers;

    /*
     * This vector is the primary target for the binary search lookup
     * in the MMIO dispatcher. Maintaining its sort order is critical
     * for the performance of the system.
     */
    std::vector<mmio_range_t, pound::host::memory::arena_allocator<mmio_range_t>> address_ranges;
} mmio_db_t;

/*
 * mmio_db_register() - Registers a new MMIO region into the database.
 * @db:      A pointer to the MMIO database to be modified.
 * @range:   The new region's address space.
 * @handler: The read and write callbacks.
 *
 * This function safely inserts a new MMIO region into the database.
 *
 * Returns:
 * MMIO_SUCCESS on successful registration.
 * EADDRESS_OVERLAP if the new @range conflicts with any existing region.
 */
int8_t mmio_db_register(mmio_db_t* db, const mmio_range_t range, const mmio_handler_t handler);

/*
 * mmio_db_dispatch_write - Dispatches a guest physical write to a registered MMIO handler.
 * @db:   A pointer to the MMIO database to be queried.
 * @pvm:  A pointer to the pvm instance.
 * @gpa:  The guest physical address of the memory write.
 * @data: A pointer to the buffer containing the data written by the guest.
 * @len:  The size of the write access in bytes.
 *
 * This function is on the critical path ("hot path") of the emulator. It
 * performs a high-performance binary search to determine if the target @gpa
 * falls within any of the registered MMIO regions.
 *
 * The logic is a two-stage process:
 * 1. An approximate search using std::upper_bound finds the first region that
 *    starts *after* the target @gpa. The actual candidate region must be the
 *    one immediately preceding this result.
 * 2. A precise check verifies if the @gpa is contained within the candidate
 *    region's half-open interval [base, end).
 *
 * If a match is found, the corresponding write handler is invoked. If not, the
 * function signals that the access is not handled by the MMIO system and
 * should be treated as a normal RAM access.
 *
 * --- Visual Scenario ---
 *
 * Database Ranges:  [-- R1 --)      [---- R2 ----)      [--- R3 ---)
 * Address Space:    0x1000  0x1010  0x4000    0x4080    0x9000  0x9010
 *
 * Search for GPA = 0x4020:
 *
 * 1. upper_bound() finds the first region starting > 0x4020, which is R3.
 *    The iterator 'it' points to R3 at index 2.
 *
 *    [-- R1 --)      [---- R2 ----)      [--- R3 ---)
 *                                        ^
 *                                        |
 *                                       'it'
 *
 * 2. The candidate for the search is the region before 'it', which is R2.
 *
 *    [-- R1 --)      [---- R2 ----)      [--- R3 ---)
 *                    ^
 *                    |
 *                 candidate
 *
 * 3. Final check: Is 0x4020 >= R2.base (0x4000) AND < R2.end (0x4080)? Yes.
 *    Result: Match found. Dispatch to handler for R2.
 *
 * Return:
 * MMIO_SUCCESS if the write was handled by a registered device. Returns
 * ENOT_HANDLED if the @gpa does not map to any MMIO region.
 * EACCESS_DENIED if the MMIO region has no write function pointer.
 */
int8_t mmio_db_dispatch_write(mmio_db_t* db, pvm_t* pvm, uint64_t gpa, uint8_t* data, size_t len);

/*
 * mmio_db_dispatch_read - Dispatches a guest physical read to a registered MMIO handler.
 * @db:   A pointer to the MMIO database to be queried.
 * @pvm:  A pointer to the pvm instance.
 * @gpa:  The guest physical address of the memory read.
 * @data: A pointer to the buffer to be filled by the device handler.
 * @len:  The size of the read access in bytes.
 *
 * See @mmio_db_dispatch_write() for a full explanation of the lookup.
 *
 * Return:
 * MMIO_SUCCESS if the read was handled by a registered device. Returns
 * ENOT_HANDLED if the @gpa does not map to any MMIO region.
 * EACCESS_DENIED if the MMIO region has no read function pointer.
 */
int8_t mmio_db_dispatch_read(mmio_db_t* db, pvm_t* pvm, uint64_t gpa, uint8_t* data, size_t len);
} // namespace pound::pvm::memory
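A sketch of how a device might plug into this dispatch table. The uart_read/uart_write callbacks are hypothetical names, not part of this diff, and the snippet assumes <cstdio> and <cstring> plus an already-constructed `db`.

/* Hypothetical device callbacks matching the `mmio` typedef. */
int8_t uart_read(pvm_t* pvm, uint64_t gpa, uint8_t* data, size_t len)
{
    (void)pvm;
    (void)gpa;
    memset(data, 0, len); /* pretend the receive FIFO is empty */
    return MMIO_SUCCESS;
}

int8_t uart_write(pvm_t* pvm, uint64_t gpa, uint8_t* data, size_t len)
{
    (void)pvm;
    (void)gpa;
    fwrite(data, 1, len, stdout); /* echo guest bytes to the host console */
    return MMIO_SUCCESS;
}

/* Registration at machine-init time (e.g. inside a pvm_ops_t::init): */
mmio_range_t   uart_range = {.gpa_base = 0x9000, .gpa_end = 0x9004};
mmio_handler_t uart_ops   = {.read = uart_read, .write = uart_write};
(void)mmio_db_register(&db, uart_range, uart_ops);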
src/pvm/mmu.cpp (397 lines deleted)
@@ -1,397 +0,0 @@
#include "mmu.h"
#include "pvm.h"
#include "common/passert.h"
#include <limits.h>

namespace pound::pvm::memory
{
#define GRANULE_4KB (1ULL << 12)
#define GRANULE_16KB (1ULL << 14)
#define GRANULE_64KB (1ULL << 16)

/*
 * COUNT_TRAILING_ZEROS - Get the number of trailing zero bits in a u64
 * @x: A 64-bit value, which must be non-zero.
 *
 * Provides a portable wrapper around compiler-specific intrinsics for the
 * "Count Trailing Zeros" operation. This is equivalent to finding the bit
 * index of the least significant bit (LSB).
 *
 * Note: The behavior for an input of zero is undefined for __builtin_ctzll.
 * Callers must ensure the argument is non-zero. The MSVC wrapper handles
 * this by returning 64, but we should not rely on this behavior.
 */
#if defined(__GNUC__) || defined(__clang__)
#define COUNT_TRAILING_ZEROS(x) (uint8_t)__builtin_ctzll(x)
#elif defined(_MSC_VER)
#include <intrin.h>
/* MSVC's intrinsic is a bit more complex to use safely */
static inline uint8_t msvc_ctzll(unsigned long long val)
{
    unsigned long index = 0;
    if (_BitScanForward64(&index, val))
    {
        return (uint8_t)index;
    }
    return 64;
}
#define COUNT_TRAILING_ZEROS(x) msvc_ctzll(x)
#else
#error "Compiler not supported for CTZ intrinsic. Please add a fallback."
#endif

/* Define the size of a page table entry (descriptor) */
#define PAGE_TABLE_ENTRY_SHIFT 3 /* log2(8 bytes) */

int mmu_gva_to_gpa(pound::pvm::pvm_vcpu_t* vcpu, guest_memory_t* memory, uint64_t gva, uint64_t* out_gpa)
{
    const uint8_t SCTLR_EL1_M_BIT = (1 << 0);
    if (0 == (vcpu->sctlr_el1 & SCTLR_EL1_M_BIT))
    {
        *out_gpa = gva;
        return 0;
    }

    /* Extract T0SZ (bits [5:0]) and T1SZ (bits [21:16]) from TCR_EL1.
     * Both are 6-bit fields. */
    const uint64_t TxSZ_WIDTH = 6;
    const uint64_t TxSZ_MASK = (1ULL << TxSZ_WIDTH) - 1;

    const uint8_t T0SZ = vcpu->tcr_el1 & TxSZ_MASK;
    const uint8_t T1SZ = (vcpu->tcr_el1 >> 16) & TxSZ_MASK;

    /* The virtual address size in bits. */
    uint8_t virtual_address_size = 0;

    bool is_ttbr0 = false;
    bool is_ttbr1 = false;

    /*
     * Before starting a page table walk, the hardware must perform two checks:
     * 1. Classify the GVA as belonging to the lower half (user, TTBR0) or
     *    upper half (kernel, TTBR1) of the virtual address space.
     * 2. Validate that the GVA is correct for the configured VA size.
     *
     * The size of the VA space is configured by the TxSZ fields in TCR_EL1.
     * A TxSZ value of N implies a (64 - N)-bit address space. For any valid
     * address in this space, the top N bits must be a sign-extension of
     * bit (63 - N).
     *
     * For example, in a 48-bit space (TxSZ=16), bit 47 is the top bit.
     * - For a lower-half address, bits [63:47] must all be 0.
     * - For an upper-half address, bits [63:47] must all be 1.
     *
     * This sign-extension rule means that bit 63 will always have the same
     * value as bit (63 - N) for any valid address. We can therefore use a
     * simple check of bit 63 as an efficient shortcut to classify the
     * address. The full canonical check that follows will then catch any
     * invalid (non-sign-extended) addresses.
     *
     * Example Scenario:
     *
     * Kernel sets TCR_EL1.T0SZ = 16. This means it's using a 48-bit VA
     * space (64 - 16 = 48). The top 16 bits of any valid user-space
     * GVA must be 0.
     *
     * A GVA of 0x0001_0000_0000_0000 comes in.
     *
     * The top 16 bits are not all zero. An address translation fault is
     * generated and the page table walk is aborted.
     */
    if ((gva >> 63) & 1)
    {
        /* Address appears to be in the Upper (Kernel) Half */

        virtual_address_size = 64 - T1SZ;
        const uint64_t top_bits_mask = (~0ULL << virtual_address_size);
        const uint64_t gva_tag = gva & top_bits_mask;
        const uint64_t ttbr1_tag = vcpu->ttbr1_el1 & top_bits_mask;

        if (gva_tag != ttbr1_tag)
        {
            /* TODO(GloriousTacoo:memory): Generate address translation fault */
            return -1;
        }
        is_ttbr1 = true;
    }
    else
    {
        /* Address appears to be in the Lower (User) Half */

        virtual_address_size = 64 - T0SZ;
        const uint64_t top_bits_mask = (~0ULL << virtual_address_size);
        if (0 != (gva & top_bits_mask))
        {
            /* TODO(GloriousTacoo:memory): Generate address translation fault */
            return -1;
        }
        is_ttbr0 = true;
    }

    /*
     * The preceding logic determined which address space (and thus
     * which TTBR) we're dealing with. Now we get the page size
     * in bytes from the correct TGx field.
     */
    uint64_t granule_size = 0;
    PVM_ASSERT((true == is_ttbr0) || (true == is_ttbr1));
    if (true == is_ttbr0)
    {
        /*
         * We're in userspace. We need to decode TCR_EL1.TG0, which is
         * at bits [15:14].
         *
         * Encoding for TG0:
         * 0b00: 4KB granule
         * 0b01: 64KB granule
         * 0b10: 16KB granule
         * 0b11: Reserved, will cause a fault.
         */
        const uint8_t TG0_SHIFT = 14;
        const uint8_t TG0_MASK = 0b11;
        const uint8_t TG0 = (vcpu->tcr_el1 >> TG0_SHIFT) & TG0_MASK;
        switch (TG0)
        {
            case 0b00:
                granule_size = GRANULE_4KB;
                break;
            case 0b01:
                granule_size = GRANULE_64KB;
                break;
            case 0b10:
                granule_size = GRANULE_16KB;
                break;
            default:
                /*
                 * This is an illegal configuration. The hardware will fault.
                 * For now, an assert will catch bad guest OS behaviour.
                 */
                PVM_ASSERT_MSG(false, "Invalid TG0 value in TCR_EL1");
        }
    }
    else
    {
        /*
         * We're in kernel space. We decode TCR_EL1.TG1, which is at
         * bits [31:30]. Note that the encoding values are different
         * from TG0. Don't get caught out.
         *
         * Encoding for TG1:
         * 0b01: 16KB granule
         * 0b10: 4KB granule
         * 0b11: 64KB granule
         * 0b00: Reserved, will cause a fault.
         */
        const uint8_t TG1_SHIFT = 30;
        const uint8_t TG1_MASK = 0b11;
        const uint8_t TG1 = (vcpu->tcr_el1 >> TG1_SHIFT) & TG1_MASK;
        switch (TG1)
        {
            case 0b01:
                /* 16KB page size */
                granule_size = GRANULE_16KB;
                break;
            case 0b10:
                /* 4KB page size */
                granule_size = GRANULE_4KB;
                break;
            case 0b11:
                /* 64KB page size */
                granule_size = GRANULE_64KB;
                break;
            default:
                PVM_ASSERT_MSG(false, "Invalid TG1 value in TCR_EL1");
                break;
        }
    }

    /*
     * In hardware, everything is a power of two. A 4096-byte page isn't
     * a magic number; it's 2^12. This means you need exactly 12 bits to
     * address every single byte within that page.
     *
     * The naive way to get 12 from 4096 is to calculate log2(4096), but
     * that's computationally expensive. A much faster way, and how the
     * hardware thinks, is to find the position of the one set bit.
     *
     * 4096 in binary is: 0001 0000 0000 0000 (Bit 12 is set, followed
     * by 12 zeroes).
     *
     * The number of trailing zeroes in a binary number is its
     * logarithm base 2. The COUNT_TRAILING_ZEROS() macro
     * is a compiler intrinsic that typically boils down to
     * a single CPU instruction (like TZCNT on x86).
     */
    const uint8_t offset_bits = COUNT_TRAILING_ZEROS(granule_size);

    /*
     * We now need to figure out how many bits are for the index at this
     * level in the page table.
     *
     * A page table is just a big array of 8-byte entries (descriptors).
     * The table itself has to fit perfectly into a page of memory (a granule).
     * So a 4KB page holds a 4KB table.
     *
     * The number of entries in that table is: Granule Size / Entry Size.
     * For a 4KB granule: 4096 bytes / 8 bytes = 512 entries.
     *
     * To index an array of 512 entries we need 9 bits (since 2^9 = 512).
     *
     * log2(Num Entries) = log2(Granule Size / Entry Size)
     * log2(Num Entries) = log2(Granule Size) - log2(Entry Size)
     *
     * We already have log2(Granule Size); that's our `offset_bits`.
     * The `PAGE_TABLE_ENTRY_SHIFT` is a constant for log2(Entry Size).
     * An entry is 8 bytes, and 8 is 2^3, so its log2 is 3.
     *
     * For a 4KB granule:
     * 12 offset bits - 3 bits = 9 index bits.
     */
    const uint8_t page_table_index_bits = offset_bits - PAGE_TABLE_ENTRY_SHIFT;

    /*
     * Next we determine the page table starting level and walk depth based on the
     * virtual address size. The intent is to find the highest table level required
     * to map the address space. A larger VA size requires a deeper walk.
     */
    const uint8_t l3_shift = offset_bits;
    const uint8_t l2_shift = l3_shift + page_table_index_bits;
    const uint8_t l1_shift = l2_shift + page_table_index_bits;
    const uint8_t l0_shift = l1_shift + page_table_index_bits;
    uint8_t page_table_levels = 0;
    uint8_t starting_level = 0;
    switch (granule_size)
    {
        case GRANULE_4KB:
            /* A 4KB granule supports up to a 4-level walk starting at L0. */
            page_table_levels = 3; /* 0..3 inclusive */
            if (virtual_address_size > l0_shift)
            {
                starting_level = 0;
            }
            else if (virtual_address_size > l1_shift)
            {
                starting_level = 1;
            }
            else
            {
                starting_level = 2;
            }
            break;
        case GRANULE_16KB:
        case GRANULE_64KB:
            /* 16KB and 64KB granules support up to a 3-level walk starting at L1. */
            page_table_levels = 3; /* 1..3 inclusive */
            if (virtual_address_size > l1_shift)
            {
                starting_level = 1;
            }
            else
            {
                starting_level = 2;
            }
            break;
        default:
            /* This granule size is not supported by the architecture. */
            return -1;
    }

    uint64_t table_address = 0x0;
    if (true == is_ttbr0)
    {
        table_address = vcpu->ttbr0_el1;
    }
    else
    {
        table_address = vcpu->ttbr1_el1;
    }

    /*
     * Begin the multi-level page table walk.
     *
     * The walk starts from the base address of the initial table (L0 or L1,
     * depending on the VA size) and descends level by level. At each level,
     * we extract an index from the GVA, use it to find a descriptor in the
     * current table, and then interpret that descriptor. The descriptor
     * will either point to the next table in the hierarchy, describe
     * the final physical page (a page descriptor), or indicate a fault.
     */
    uint64_t level_index = 0;
    const uint64_t page_table_index_mask = (1ULL << page_table_index_bits) - 1;
    const uint8_t page_table_entry_size = 8;
    for (uint8_t level = starting_level; level <= page_table_levels; ++level)
    {
        switch (level)
        {
            case 0:
                level_index = (gva >> l0_shift) & page_table_index_mask;
                break;
            case 1:
                level_index = (gva >> l1_shift) & page_table_index_mask;
                break;
            case 2:
                level_index = (gva >> l2_shift) & page_table_index_mask;
                break;
            case 3:
                level_index = (gva >> l3_shift) & page_table_index_mask;
                break;
            default:
                PVM_ASSERT_MSG(false, "Invalid page table configuration!");
        }

        const uint64_t level_entry_address = table_address + (level_index * page_table_entry_size);
        uint64_t descriptor = 0;
        guest_mem_readq(memory, level_entry_address, &descriptor);
        uint64_t offset_mask = (1ULL << offset_bits) - 1;
        uint64_t page_offset = gva & offset_mask;
        uint64_t page_address_mask = ~offset_mask;

        /*
         * Is the descriptor valid? Bit [0] of every descriptor is the "valid"
         * bit. If it's 0, the entry is invalid, and the mapping does not exist.
         */
        if (0b0 == (descriptor & 0b1))
        {
            // TODO(GloriousTacoo:arm64): generate page fault.
            return -1;
        }
        /*
         * At the final level, the only valid descriptor is a Page Descriptor,
         * identified by bits [1:0] being 0b11.
         */
        else if ((level == page_table_levels) && (0b11 == (descriptor & 0b11)))
        {
            /*
             * The descriptor's upper bits [virtual_address_size:offset_bits]
             * contain the physical base address of the page. We mask out
             * the lower attribute bits to isolate this address.
             */
            uint64_t page_base_address = descriptor & page_address_mask;
            *out_gpa = page_base_address | page_offset;
            return 0;
        }
        /*
         * If this is not the final level, we expect a Table Descriptor, also
         * identified by bits [1:0] being 0b11. This descriptor points to the
         * table for the next level of the walk.
         */
        else if (0b11 == (descriptor & 0b11))
        {
            const uint64_t next_table_mask = ~((1ULL << offset_bits) - 1);
            table_address = descriptor & next_table_mask;
        }
        /*
         * If bits [1:0] are '01', it's a Block Descriptor. These descriptors
         * terminate the walk early, mapping a large, contiguous block of
         * memory (e.g., 2MB at L2). This implementation does not yet
         * support them.
         */
        else if (0b01 == (descriptor & 0b11))
        {
            PVM_ASSERT_MSG(false, "Block descriptors are not supported");
        }
    }
    return -1;
}
} // namespace pound::pvm::memory
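For a concrete instance of the shift arithmetic in mmu_gva_to_gpa(), here are the derived constants for the 4KB-granule, 48-bit-VA configuration the comments use as their running example:

/* 4KB granule, 48-bit VA (T0SZ = 16):
 *   offset_bits           = ctz(4096)  = 12  -> VA[11:0] is the page offset
 *   page_table_index_bits = 12 - 3     = 9   -> 512 entries per table
 *   l3_shift = 12, l2_shift = 21, l1_shift = 30, l0_shift = 39
 *   virtual_address_size (48) > l0_shift (39) -> the walk starts at level 0
 *   index at level N = (gva >> lN_shift) & 0x1FF
 */
static_assert(GRANULE_4KB == (1ULL << 12), "4KB granule has 12 offset bits");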
@@ -1,40 +0,0 @@
#pragma once

#include "guest.h"
#include "pvm.h"

namespace pound::pvm::memory
{
/*
 * mmu_gva_to_gpa() - Translate a Guest Virtual Address to a Guest Physical Address.
 * @vcpu:    A pointer to the vCPU state.
 * @memory:  A pointer to the guest's memory.
 * @gva:     The Guest Virtual Address to translate.
 * @out_gpa: A pointer to a uint64_t where the resulting Guest Physical Address
 *           will be stored on success.
 *
 * This function is the primary entry point for the emulated AArch64 Stage 1
 * MMU. It is responsible for resolving a virtual address used by the guest
 * into a physical address within the guest's physical address space.
 *
 * The translation behavior is dependent on the state of the emulated MMU,
 * primarily controlled by the SCTLR_EL1.M bit (MMU enable).
 *
 * If the MMU is disabled, this function performs an identity mapping, where
 * the GPA is identical to the GVA. This correctly models the processor's
 * behavior on reset and is the initial "stub" implementation.
 *
 * If the MMU is enabled, this function will perform a full, multi-level page
 * table walk, starting from the base address in TTBR0_EL1 or TTBR1_EL1. It
 * will parse translation table descriptors, check for permissions, and handle
 * different page sizes as configured in TCR_EL1.
 *
 * A failed translation will result in a fault. The caller is responsible for
 * checking the return value and initiating a synchronous exception if a fault
 * occurs. The contents of @out_gpa are undefined on failure.
 *
 * Return: 0 on successful translation. A negative error code on a translation
 * fault (e.g., for a page fault, permission error, or alignment fault).
 */
int mmu_gva_to_gpa(pound::pvm::pvm_vcpu_t* vcpu, guest_memory_t* memory, uint64_t gva, uint64_t* out_gpa);
} // namespace pound::pvm::memory
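Call sites follow the fault contract described above; a minimal sketch, assuming `vm` is a pvm_t instance and `gva` is the address to translate:

uint64_t gpa = 0;
if (0 != pound::pvm::memory::mmu_gva_to_gpa(&vm.vcpu, &vm.memory, gva, &gpa))
{
    /* Translation fault: the caller raises the synchronous exception;
     * gpa is undefined here. With SCTLR_EL1.M clear this branch is
     * unreachable, because translation degenerates to gpa == gva. */
}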
@@ -1,83 +0,0 @@
#include "pvm.h"
#include "guest.h"
#include "common/passert.h"
#include "host/memory/arena.h"

#define LOG_MODULE "pvm"
#include "common/logging.h"

namespace pound::pvm
{

uint8_t pvm_probe(pvm_t* pvm, enum target_type type)
{
    if (type != pvm_TARGET_SWITCH1)
    {
        PVM_ASSERT_MSG(false, "Only Switch 1 is supported");
    }
    pvm->ops = s1_ops;
    /* Go to targets/switch1/hardware/probe.cpp */
    (void)pvm->ops.init(pvm);
    return 0;
}

void take_synchronous_exception(pvm_vcpu_t* vcpu, uint8_t exception_class, uint32_t iss, uint64_t faulting_address)
{
    PVM_ASSERT(nullptr != vcpu);
    /* An EC holds 6 bits. */
    PVM_ASSERT(0 == (exception_class & 0b11000000));
    /* An ISS holds 25 bits */
    PVM_ASSERT(0 == (iss & 0xFE000000));

    vcpu->elr_el1 = vcpu->pc;
    vcpu->spsr_el1 = vcpu->pstate;
    vcpu->esr_el1 = 0;

    /* Bits [31:26] are the Exception Class (EC). */
    /* Bit  [25]    is the Instruction Length (IL), 1 for a 32-bit instruction. */
    /* Bits [24:0]  are the Instruction Specific Syndrome (ISS). */
    const uint64_t esr_il_bit = (1ULL << 25);
    vcpu->esr_el1 = ((uint64_t)exception_class << 26) | esr_il_bit | iss;

    if ((exception_class == EC_DATA_ABORT) || (exception_class == EC_DATA_ABORT_LOWER_EL))
    {
        vcpu->far_el1 = faulting_address;
    }

    /* The CPU state must be changed to a known safe state for handling */
    vcpu->pstate &= ~0xF0000000;

    /* Mask asynchronous exceptions (IRQ, FIQ, SError). We don't want the
     * exception handler to be interrupted by a less important event. */
    const uint32_t PSTATE_IRQ_BIT = (1 << 7);
    const uint32_t PSTATE_FIQ_BIT = (1 << 6);
    const uint32_t PSTATE_SERROR_BIT = (1 << 8);
    vcpu->pstate |= (PSTATE_IRQ_BIT | PSTATE_FIQ_BIT | PSTATE_SERROR_BIT);

    /* Set the target exception level to EL1. The mode field M[3:0] is set
     * to 0b0101 for EL1h (using SP_EL1). (page 913 in manual) */
    const uint32_t PSTATE_EL_MASK = 0b1111;
    vcpu->pstate &= ~PSTATE_EL_MASK;
    vcpu->pstate |= PSTATE_EL1H;

    /* TODO(GloriousTacoo:arm): DO NOT IMPLEMENT UNTIL THE INSTRUCTION
     * DECODER IS FINISHED.
     *
     * Create an Exception Vector Table, determine
     * the address of the exception handler, then update the PC.
     *
     * vcpu->pc = vcpu->vbar_el1 + offset; */
}

void cpuTest()
{
#if 0
    pound::host::memory::arena_t guest_memory_arena = pound::host::memory::arena_init(GUEST_RAM_SIZE);
    PVM_ASSERT(nullptr != guest_memory_arena.data);

    memory::guest_memory_t* guest_ram = memory::guest_memory_create(&guest_memory_arena);

    //(void)test_guest_ram_access(guest_ram);
#endif
}
} // namespace pound::pvm
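A worked check of the ESR_EL1 packing above, using EC_DATA_ABORT from pvm.h (pure arithmetic, verifiable by hand):

#include <cassert>
#include <cstdint>

int main()
{
    const uint64_t ec  = 0b100101;     /* EC_DATA_ABORT (0x25)          */
    const uint64_t il  = (1ULL << 25); /* 32-bit instruction length bit */
    const uint64_t iss = 0;            /* syndrome details, zero here   */

    /* (0x25 << 26) = 0x94000000; adding the IL bit gives 0x96000000. */
    assert(((ec << 26) | il | iss) == 0x96000000ULL);
    return 0;
}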
src/pvm/pvm.h (230 lines deleted)
@@ -1,230 +0,0 @@
// Copyright 2025 Pound Emulator Project. All rights reserved.

#pragma once
#include "guest.h"

#include <cstdint>
#include <cstdlib>
#include <cstring>

namespace pound::pvm
{
/* AArch64 R0-R31 */
#define GP_REGISTERS 32

#define CACHE_LINE_SIZE 64
#define GUEST_RAM_SIZE 10240 // 10KiB
#define CPU_CORES 8

/* Data Abort exception taken without a change in Exception level. */
#define EC_DATA_ABORT 0b100101

/* Data Abort exception from a lower Exception level. */
#define EC_DATA_ABORT_LOWER_EL 0b100100

/* Set the PSTATE exception level. (page 913 in manual) */
#define PSTATE_EL0 0b0000
#define PSTATE_EL1T 0b0100
#define PSTATE_EL1H 0b0101

/*
 * pvm_vcpu_t - Holds the architectural and selected system-register state for an emulated vCPU.
 * @r:             General-purpose registers X0–X31 (X31 as SP/ZR as appropriate).
 * @pc:            Program Counter.
 * @cntfreq_el0:   Counter Frequency.
 * @cntpct_el0:    Physical Counter.
 * @cntvct_el0:    Virtual Counter - CRITICAL for timing.
 * @cntv_cval_el0: Virtual Timer Compare Value.
 * @pmccntr_el0:   Cycle Counter.
 * @tpidr_el0:     Thread Pointer ID Register.
 * @tpidrro_el0:   Thread Pointer ID, read-only.
 * @elr_el1:       Exception Link Register.
 * @esr_el1:       Exception Syndrome Register.
 * @far_el1:       Fault Address Register.
 * @sctlr_el1:     System Control Register.
 * @spsr_el1:      Saved Program Status Register.
 * @tcr_el1:       Translation Control Register.
 * @ttbr0_el1:     Translation Table Base Register 0.
 * @ttbr1_el1:     Translation Table Base Register 1.
 * @vbar_el1:      Vector Base Address Register.
 * @ctr_el0:       Cache-Type.
 * @cntv_ctl_el0:  Virtual Timer Control.
 * @dczid_el0:     Data Cache Zero ID.
 * @pmcr_el0:      Performance Monitor Counter.
 * @pstate:        Process State Register (NZCV, DAIF, EL, etc.).
 *
 * This structure is aligned to the L1 cache line size to prevent false sharing
 * when multiple host threads are emulating vCPUs on different physical cores.
 */
typedef struct alignas(CACHE_LINE_SIZE)
{
    uint64_t r[GP_REGISTERS];
    uint64_t pc;
    uint64_t cntfreq_el0;
    uint64_t cntpct_el0;
    uint64_t cntvct_el0;
    uint64_t cntv_cval_el0;
    uint64_t pmccntr_el0;
    uint64_t tpidr_el0;
    uint64_t tpidrro_el0;

    /*
     * Stores the Program Counter of the instruction that was interrupted.
     * For a synchronous fault, it's the address of the faulting instruction
     * itself.
     */
    uint64_t elr_el1;

    /*
     * Tells the guest OS why the exception happened. It contains a high-
     * level Exception Class (EC) (e.g., Data Abort) and a low-level
     * Instruction Specific Syndrome (ISS) with fine-grained details
     * (e.g., it was an alignment fault caused by a write operation).
     */
    uint64_t esr_el1;

    /* The memory address that caused a Data Abort exception. */
    uint64_t far_el1;

    /* Bit [0] enables the MMU. */
    uint64_t sctlr_el1;

    /*
     * A snapshot of the current PSTATE register before the exception.
     * This is for restoring the program's state when returning from an
     * exception.
     */
    uint64_t spsr_el1;

    /* Bits [5:0], T0SZ, specifies the size of the bottom half of the
     * virtual address space (the one controlled by TTBR0).
     *
     * Bits [21:16], T1SZ, does the same for the top half of the virtual
     * address space (controlled by TTBR1). */
    uint64_t tcr_el1;

    /*
     * Holds the 64-bit base physical address of the initial page table
     * used for translating virtual addresses in the lower half of the
     * virtual address space (typically userspace). The top bit of the VA
     * (bit 63) being 0 selects TTBR0 for the page table walk.
     */
    uint64_t ttbr0_el1;

    /*
     * Holds the 64-bit base physical address of the initial page table
     * used for translating virtual addresses in the upper half of the
     * virtual address space (typically kernel space). The top bit of the VA
     * (bit 63) being 1 selects TTBR1 for the page table walk.
     */
    uint64_t ttbr1_el1;

    /*
     * The base address in guest memory where the Exception Vector Table
     * can be found.
     */
    uint64_t vbar_el1;

    uint32_t ctr_el0;
    uint32_t cntv_ctl_el0;
    uint32_t dczid_el0;
    uint32_t pmcr_el0;
    uint32_t pstate;
} pvm_vcpu_t;

/* This is here to break the circular dependency between pvm_t and pvm_ops_t. */
typedef struct pvm_s pvm_t;

/*
 * pvm_ops_t - A table of machine-specific operations.
 * @init:    Function pointer to initialize the target machine's state.
 *           Called once by pvm_probe(). It is responsible for setting up
 *           the guest memory map, loading firmware, and registering all
 *           MMIO device handlers.
 * @destroy: Function pointer to clean up and deallocate all resources
 *           associated with the machine instance. Called on VM shutdown.
 *
 * This structure acts as a "virtual table" in C, allowing the generic pvm
 * core to call target-specific code (e.g., for a Switch 1 or Switch 2)
 * without needing to know the implementation details. Each supported target
 * machine must provide a globally visible instance of this struct.
 */
typedef struct
{
    /* Initialize the machine state */
    int8_t (*init)(pvm_t* pvm);

    /* Clean up on shutdown */
    void (*destroy)(pvm_t* pvm);
} pvm_ops_t;

/*
 * pvm_s - The main pvm instance structure.
 * @vcpu:   The state of the emulated virtual CPU core. Contains all guest-
 *          visible registers.
 * @memory: A structure representing the guest's physical RAM.
 * @ops:    A table of function pointers to the machine-specific implementation
 *          for this pvm instance. This should only be set by pvm_probe().
 *
 * This structure represents a single virtual machine instance.
 */
struct pvm_s
{
    pound::pvm::pvm_vcpu_t vcpu;
    pound::pvm::memory::guest_memory_t memory;
    pvm_ops_t ops;
};

/*
 * s1_ops - The machine-specific operations for the "Switch 1" target.
 *
 * This is the global instance of the operations table for the Switch 1.
 * It is defined in the target-specific source file
 * (targets/switch1/hardware/probe.cpp) and provides the implementations
 * for initializing and running the emulated Switch 1 hardware.
 */
extern const pvm_ops_t s1_ops;

enum target_type
{
    pvm_TARGET_SWITCH1 = 0,
    pvm_TARGET_SWITCH2 = 1,
};

/*
 * pvm_probe - Probes for and initializes a target machine configuration.
 * @pvm:  A pointer to the pvm instance to be initialized. This function will
 *        populate the fields of this struct.
 * @type: The type of target machine to initialize.
 *
 * This function is the primary factory for creating a virtual machine. It
 * looks up the requested machine @type, attaches the corresponding operations
 * table (e.g., s1_ops) to the @pvm instance, and calls the machine-specific
 * init() function.
 *
 * On successful return, the @pvm struct will be fully configured and ready
 * for execution.
 *
 * Return: 0 on success, or a negative errno code on failure.
 */
uint8_t pvm_probe(pvm_t* pvm, enum target_type type);

/*
 * take_synchronous_exception() - Emulates the hardware process of taking a synchronous exception to EL1.
 * @vcpu:             A pointer to the vCPU state to be modified.
 * @exception_class:  The high-level Exception Class (EC) code for ESR_EL1.
 * @iss:              The low-level Instruction Specific Syndrome (ISS) code for ESR_EL1.
 * @faulting_address: The faulting address, to be written to FAR_EL1. Only valid
 *                    for Data/Instruction Aborts. Pass 0 for other exception types.
 *
 * This function modifies the vCPU state according to the rules for taking a
 * synchronous exception from a lower or same exception level that is targeting EL1.
 * It saves the necessary return state, populates the syndrome registers,
 * updates the processor state for entry into EL1, and calculates the new
 * program counter by branching to the appropriate offset in the EL1 vector table.
 */
void take_synchronous_exception(pvm_vcpu_t* vcpu, uint8_t exception_class, uint32_t iss, uint64_t faulting_address);

void cpuTest();
} // namespace pound::pvm
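Putting the pieces together, machine bring-up as this header intends it; a sketch (s1_ops.init() performs the real memory-map and MMIO setup in targets/switch1/hardware/probe.cpp):

pound::pvm::pvm_t vm = {};

/* Attaches s1_ops and runs the Switch 1 init() path. */
(void)pound::pvm::pvm_probe(&vm, pound::pvm::pvm_TARGET_SWITCH1);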