waterboxhost refactor

Set up a second mirror of guest memory; easily accomplished because we were already using memfd_create / CreateFileMappingW.
This lets us simplify a lot of host code that has to access guest memory which may not be active right now, or which may have been mprotect()ed to something weird. Activate is now only needed to run guest code, or when the C# side wants to peer into guest memory for memory domains and such (waterboxhost does not share the mirror address with the C# side).
nattthebear 2020-07-13 17:46:06 -04:00
parent 40e19e64fe
commit 175556529e
13 changed files with 472 additions and 497 deletions
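The core trick, sketched below for the Linux path as a standalone illustration (this uses the libc crate directly and made-up names, not waterboxhost's actual code): one memfd backs two views, a primary mapping at the fixed guest address whose protections change as the guest runs, and a permanently-RW mirror wherever the OS puts it.

use std::io::Error;

// One shared handle backs both views, as with memfd_create in the crate.
// Returns (primary view at the fixed guest address, always-RW mirror).
unsafe fn make_mirrored(guest_addr: usize, size: usize) -> Result<(*mut u8, *mut u8), Error> {
    let fd = libc::memfd_create(b"wbx\0".as_ptr() as *const _, 0);
    if fd < 0 { return Err(Error::last_os_error()); }
    if libc::ftruncate(fd, size as libc::off_t) != 0 { return Err(Error::last_os_error()); }
    // primary view: fixed guest address, protections toggled while the guest runs
    let primary = libc::mmap(guest_addr as *mut _, size,
        libc::PROT_READ | libc::PROT_WRITE, libc::MAP_SHARED | libc::MAP_FIXED, fd, 0);
    // mirror view: OS-chosen address, stays RW for the host forever
    let mirror = libc::mmap(std::ptr::null_mut(), size,
        libc::PROT_READ | libc::PROT_WRITE, libc::MAP_SHARED, fd, 0);
    if primary == libc::MAP_FAILED || mirror == libc::MAP_FAILED {
        return Err(Error::last_os_error());
    }
    Ok((primary as *mut u8, mirror as *mut u8))
}

Because both views are MAP_SHARED over the same file, the host can read or write through the mirror even while the primary view is PROT_NONE or mid-mprotect(); the flip side is that mirror writes bypass fault-based dirty tracking, which is why copy_from_external in the diff below marks pages dirty by hand.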



@ -71,7 +71,6 @@ namespace BizHawk.Emulation.Cores.Waterbox
public unsafe class WaterboxHost : IMonitor, IImportResolver, IBinaryStateable, IDisposable, ICallbackAdjuster
{
private IntPtr _nativeHost;
private IntPtr _activatedNativeHost;
private int _enterCount;
private object _keepAliveDelegate;
@ -132,12 +131,9 @@ namespace BizHawk.Emulation.Cores.Waterbox
public IntPtr GetProcAddrOrZero(string entryPoint)
{
using (this.EnterExit())
{
var retobj = new ReturnData();
NativeImpl.wbx_get_proc_addr_raw(_activatedNativeHost, entryPoint, retobj);
return retobj.GetDataOrThrow();
}
var retobj = new ReturnData();
NativeImpl.wbx_get_proc_addr_raw(_nativeHost, entryPoint, retobj);
return retobj.GetDataOrThrow();
}
public IntPtr GetProcAddrOrThrow(string entryPoint)
@ -155,32 +151,23 @@ namespace BizHawk.Emulation.Cores.Waterbox
public IntPtr GetCallbackProcAddr(IntPtr exitPoint, int slot)
{
using (this.EnterExit())
{
var retobj = new ReturnData();
NativeImpl.wbx_get_callback_addr(_activatedNativeHost, exitPoint, slot, retobj);
return retobj.GetDataOrThrow();
}
var retobj = new ReturnData();
NativeImpl.wbx_get_callback_addr(_nativeHost, exitPoint, slot, retobj);
return retobj.GetDataOrThrow();
}
public IntPtr GetCallinProcAddr(IntPtr entryPoint)
{
using (this.EnterExit())
{
var retobj = new ReturnData();
NativeImpl.wbx_get_callin_addr(_activatedNativeHost, entryPoint, retobj);
return retobj.GetDataOrThrow();
}
var retobj = new ReturnData();
NativeImpl.wbx_get_callin_addr(_nativeHost, entryPoint, retobj);
return retobj.GetDataOrThrow();
}
public void Seal()
{
using (this.EnterExit())
{
var retobj = new ReturnData();
NativeImpl.wbx_seal(_activatedNativeHost, retobj);
retobj.GetDataOrThrow();
}
var retobj = new ReturnData();
NativeImpl.wbx_seal(_nativeHost, retobj);
retobj.GetDataOrThrow();
Console.WriteLine("WaterboxHost Sealed!");
}
@ -191,12 +178,9 @@ namespace BizHawk.Emulation.Cores.Waterbox
/// <param name="name">the filename that the unmanaged core will access the file by</param>
public void AddReadonlyFile(byte[] data, string name)
{
using (this.EnterExit())
{
var retobj = new ReturnData();
NativeImpl.wbx_mount_file(_activatedNativeHost, name, Reader(new MemoryStream(data, false)), IntPtr.Zero, false, retobj);
retobj.GetDataOrThrow();
}
var retobj = new ReturnData();
NativeImpl.wbx_mount_file(_nativeHost, name, Reader(new MemoryStream(data, false)), IntPtr.Zero, false, retobj);
retobj.GetDataOrThrow();
}
/// <summary>
@ -205,12 +189,9 @@ namespace BizHawk.Emulation.Cores.Waterbox
/// </summary>
public void RemoveReadonlyFile(string name)
{
using (this.EnterExit())
{
var retobj = new ReturnData();
NativeImpl.wbx_unmount_file(_activatedNativeHost, name, null, IntPtr.Zero, retobj);
retobj.GetDataOrThrow();
}
var retobj = new ReturnData();
NativeImpl.wbx_unmount_file(_nativeHost, name, null, IntPtr.Zero, retobj);
retobj.GetDataOrThrow();
}
/// <summary>
@ -219,12 +200,9 @@ namespace BizHawk.Emulation.Cores.Waterbox
/// </summary>
public void AddTransientFile(byte[] data, string name)
{
using (this.EnterExit())
{
var retobj = new ReturnData();
NativeImpl.wbx_mount_file(_activatedNativeHost, name, Reader(new MemoryStream(data, false)), IntPtr.Zero, true, retobj);
retobj.GetDataOrThrow();
}
var retobj = new ReturnData();
NativeImpl.wbx_mount_file(_nativeHost, name, Reader(new MemoryStream(data, false)), IntPtr.Zero, true, retobj);
retobj.GetDataOrThrow();
}
/// <summary>
@ -233,14 +211,11 @@ namespace BizHawk.Emulation.Cores.Waterbox
/// <returns>The state of the file when it was removed</returns>
public byte[] RemoveTransientFile(string name)
{
using (this.EnterExit())
{
var retobj = new ReturnData();
var ms = new MemoryStream();
NativeImpl.wbx_unmount_file(_activatedNativeHost, name, Writer(ms), IntPtr.Zero, retobj);
NativeImpl.wbx_unmount_file(_nativeHost, name, Writer(ms), IntPtr.Zero, retobj);
retobj.GetDataOrThrow();
return ms.ToArray();
}
}
// public class MissingFileResult
@ -284,22 +259,15 @@ namespace BizHawk.Emulation.Cores.Waterbox
public void SaveStateBinary(BinaryWriter bw)
{
using (this.EnterExit())
{
var retobj = new ReturnData();
NativeImpl.wbx_save_state(_activatedNativeHost, Writer(bw.BaseStream), IntPtr.Zero, retobj);
retobj.GetDataOrThrow();
}
var retobj = new ReturnData();
NativeImpl.wbx_save_state(_nativeHost, Writer(bw.BaseStream), IntPtr.Zero, retobj);
retobj.GetDataOrThrow();
}
public void LoadStateBinary(BinaryReader br)
{
using (this.EnterExit())
{
var retobj = new ReturnData();
NativeImpl.wbx_load_state(_activatedNativeHost, Reader(br.BaseStream), IntPtr.Zero, retobj);
retobj.GetDataOrThrow();
}
var retobj = new ReturnData();
NativeImpl.wbx_load_state(_nativeHost, Reader(br.BaseStream), IntPtr.Zero, retobj);
retobj.GetDataOrThrow();
}
public void Enter()
@ -308,7 +276,7 @@ namespace BizHawk.Emulation.Cores.Waterbox
{
var retobj = new ReturnData();
NativeImpl.wbx_activate_host(_nativeHost, retobj);
_activatedNativeHost = retobj.GetDataOrThrow();
retobj.GetDataOrThrow();
}
_enterCount++;
}
@ -322,9 +290,8 @@ namespace BizHawk.Emulation.Cores.Waterbox
else if (_enterCount == 1)
{
var retobj = new ReturnData();
NativeImpl.wbx_deactivate_host(_activatedNativeHost, retobj);
NativeImpl.wbx_deactivate_host(_nativeHost, retobj);
retobj.GetDataOrThrow();
_activatedNativeHost = IntPtr.Zero;
}
_enterCount--;
}
@ -334,11 +301,10 @@ namespace BizHawk.Emulation.Cores.Waterbox
if (_nativeHost != IntPtr.Zero)
{
var retobj = new ReturnData();
if (_activatedNativeHost != IntPtr.Zero)
if (_enterCount != 0)
{
NativeImpl.wbx_deactivate_host(_activatedNativeHost, retobj);
NativeImpl.wbx_deactivate_host(_nativeHost, retobj);
Console.Error.WriteLine("Warn: Disposed of WaterboxHost which was active");
_activatedNativeHost = IntPtr.Zero;
}
NativeImpl.wbx_destroy_host(_nativeHost, retobj);
_enterCount = 0;

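The Enter/Exit logic above in miniature: since activation is now idempotent on the single host object, the C# monitor only needs to count nested entries, activating on the first Enter and deactivating on the last Exit. A standalone sketch of that pattern (illustrative types, not the shipped code):

struct Host { active: bool }

impl Host {
    fn activate(&mut self) { if !self.active { /* swap in, take 4GiB-slice mutex */ self.active = true; } }
    fn deactivate(&mut self) { if self.active { /* swap out, release mutex */ self.active = false; } }
}

struct Monitor { enter_count: u32 }

impl Monitor {
    fn enter(&mut self, host: &mut Host) {
        if self.enter_count == 0 { host.activate(); } // first nested Enter activates
        self.enter_count += 1;
    }
    fn exit(&mut self, host: &mut Host) {
        assert!(self.enter_count > 0, "unbalanced Exit");
        self.enter_count -= 1;
        if self.enter_count == 0 { host.deactivate(); } // last Exit deactivates
    }
}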

@ -127,7 +127,7 @@ namespace BizHawk.Emulation.Cores.Waterbox
public abstract void wbx_create_host(MemoryLayoutTemplate layout, string moduleName, ReadCallback wbx, IntPtr userdata, ReturnData /*WaterboxHost*/ ret);
/// <summary>
/// Tear down a host environment. May not be called while the environment is active.
/// Tear down a host environment. If called while the environment is active, will deactivate it first.
/// </summary>
[BizImport(CallingConvention.Cdecl)]
public abstract void wbx_destroy_host(IntPtr /*WaterboxHost*/ obj, ReturnData /*void*/ ret);
@ -136,23 +136,24 @@ namespace BizHawk.Emulation.Cores.Waterbox
/// Activate a host environment. This swaps it into memory and makes it available for use.
/// Pointers to inside the environment are only valid while active. Uses a mutex internally
/// so as to not stomp over other host environments in the same 4GiB slice.
/// Returns a pointer to the activated object, used to do most other functions.
/// Ignored if host is already active.
/// </summary>
[BizImport(CallingConvention.Cdecl)]
public abstract void wbx_activate_host(IntPtr /*WaterboxHost*/ obj, ReturnData /*ActivatedWaterboxHost*/ ret);
public abstract void wbx_activate_host(IntPtr /*WaterboxHost*/ obj, ReturnData /*void*/ ret);
/// <summary>
/// Deactivates a host environment, and releases the mutex.
/// Ignored if host is not active.
/// </summary>
[BizImport(CallingConvention.Cdecl)]
public abstract void wbx_deactivate_host(IntPtr /*ActivatedWaterboxHost*/ obj, ReturnData /*void*/ ret);
public abstract void wbx_deactivate_host(IntPtr /*WaterboxHost*/ obj, ReturnData /*void*/ ret);
/// <summary>
/// Returns a thunk suitable for calling an exported function from the guest executable. This pointer is only valid
/// while the host is active. A missing proc is not an error and simply returns 0. The guest function must be,
/// and the returned callback will be, sysv abi, and will only pass up to 6 int/ptr args and no other arg types.
/// </summary>
[BizImport(CallingConvention.Cdecl)]
public abstract void wbx_get_proc_addr(IntPtr /*ActivatedWaterboxHost*/ obj, string name, ReturnData /*UIntPtr*/ ret);
public abstract void wbx_get_proc_addr(IntPtr /*WaterboxHost*/ obj, string name, ReturnData /*UIntPtr*/ ret);
/// <summary>
/// Returns a thunk suitable for calling an arbitrary entry point into the guest executable. This pointer is only valid
/// while the host is active. wbx_get_proc_addr already calls this internally on pointers it returns, so this call is
@ -160,14 +161,14 @@ namespace BizHawk.Emulation.Cores.Waterbox
/// a pointer to another function).
/// </summary>
[BizImport(CallingConvention.Cdecl)]
public abstract void wbx_get_callin_addr(IntPtr /*ActivatedWaterboxHost*/ obj, IntPtr ptr, ReturnData /*UIntPtr*/ ret);
public abstract void wbx_get_callin_addr(IntPtr /*WaterboxHost*/ obj, IntPtr ptr, ReturnData /*UIntPtr*/ ret);
/// <summary>
/// Returns the raw address of a function exported from the guest. `wbx_get_proc_addr()` is equivalent to
/// `wbx_get_callin_addr(wbx_get_proc_addr_raw())`. Most things should not use this directly, as the returned
/// pointer will not have proper stack hygiene and will crash on syscalls from the guest.
/// </summary>
[BizImport(CallingConvention.Cdecl)]
public abstract void wbx_get_proc_addr_raw(IntPtr /*ActivatedWaterboxHost*/ obj, string name, ReturnData /*UIntPtr*/ ret);
public abstract void wbx_get_proc_addr_raw(IntPtr /*WaterboxHost*/ obj, string name, ReturnData /*UIntPtr*/ ret);
/// <summary>
/// Returns a function pointer suitable for passing to the guest to allow it to call back while active.
/// Slot number is an integer that is used to keep pointers consistent across runs: If the host is loaded
@ -176,12 +177,12 @@ namespace BizHawk.Emulation.Cores.Waterbox
/// The returned thunk will be, and the callback must be, sysv abi and will only pass up to 6 int/ptr args and no other arg types.
/// </summary>
[BizImport(CallingConvention.Cdecl)]
public abstract void wbx_get_callback_addr(IntPtr /*ActivatedWaterboxHost*/ obj, IntPtr callback, int slot, ReturnData /*UIntPtr*/ ret);
public abstract void wbx_get_callback_addr(IntPtr /*WaterboxHost*/ obj, IntPtr callback, int slot, ReturnData /*UIntPtr*/ ret);
/// <summary>
/// Calls the seal operation, which is a one time action that prepares the host to save states.
/// </summary>
[BizImport(CallingConvention.Cdecl)]
public abstract void wbx_seal(IntPtr /*ActivatedWaterboxHost*/ obj, ReturnData /*void*/ ret);
public abstract void wbx_seal(IntPtr /*WaterboxHost*/ obj, ReturnData /*void*/ ret);
/// <summary>
/// Mounts a file in the environment. All data will be immediately consumed from the reader, which will not be used after this call.
@ -190,14 +191,14 @@ namespace BizHawk.Emulation.Cores.Waterbox
/// but it must exist in every savestate and the exact sequence of add_file calls must be consistent from savestate to savestate.
/// </summary>
[BizImport(CallingConvention.Cdecl)]
public abstract void wbx_mount_file(IntPtr /*ActivatedWaterboxHost*/ obj, string name, ReadCallback reader, IntPtr userdata, bool writable, ReturnData /*void*/ ret);
public abstract void wbx_mount_file(IntPtr /*WaterboxHost*/ obj, string name, ReadCallback reader, IntPtr userdata, bool writable, ReturnData /*void*/ ret);
/// <summary>
/// Remove a file previously added. Writer is optional; if provided, the contents of the file at time of removal will be dumped to it.
/// It is an error to remove a file which is currently open in the guest.
/// </summary>
[BizImport(CallingConvention.Cdecl)]
public abstract void wbx_unmount_file(IntPtr /*ActivatedWaterboxHost*/ obj, string name, WriteCallback writer, IntPtr userdata, ReturnData /*void*/ ret);
public abstract void wbx_unmount_file(IntPtr /*WaterboxHost*/ obj, string name, WriteCallback writer, IntPtr userdata, ReturnData /*void*/ ret);
#if false
/// <summary>
@ -208,7 +209,7 @@ namespace BizHawk.Emulation.Cores.Waterbox
/// as wbx_mount_file(). You may free resources associated with the MissingFileResult whenever control next returns to your code.
/// </summary>
[BizImport(CallingConvention.Cdecl)]
public abstract void wbx_set_missing_file_callback(IntPtr /*ActivatedWaterboxHost*/ obj, MissingFileCallback mfc_o);
public abstract void wbx_set_missing_file_callback(IntPtr /*WaterboxHost*/ obj, MissingFileCallback mfc_o);
#endif
/// <summary>
@ -216,7 +217,7 @@ namespace BizHawk.Emulation.Cores.Waterbox
/// Must always be called with the same sequence and contents of readonly files.
/// </summary>
[BizImport(CallingConvention.Cdecl)]
public abstract void wbx_save_state(IntPtr /*ActivatedWaterboxHost*/ obj, WriteCallback writer, IntPtr userdata, ReturnData /*void*/ ret);
public abstract void wbx_save_state(IntPtr /*WaterboxHost*/ obj, WriteCallback writer, IntPtr userdata, ReturnData /*void*/ ret);
/// <summary>
/// Load state. Must not be called before seal. Must not be called with any writable files mounted.
@ -225,7 +226,7 @@ namespace BizHawk.Emulation.Cores.Waterbox
/// Errors generally poison the environment; sorry!
/// </summary>
[BizImport(CallingConvention.Cdecl)]
public abstract void wbx_load_state(IntPtr /*ActivatedWaterboxHost*/ obj, ReadCallback reader, IntPtr userdata, ReturnData /*void*/ ret);
public abstract void wbx_load_state(IntPtr /*WaterboxHost*/ obj, ReadCallback reader, IntPtr userdata, ReturnData /*void*/ ret);
/// <summary>
/// Control whether the host automatically evicts blocks from memory when they are not active. For the best performance,


@ -67,18 +67,6 @@ dependencies = [
"version_check",
]
[[package]]
name = "getset"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "24b328c01a4d71d2d8173daa93562a73ab0fe85616876f02500f53d82948c504"
dependencies = [
"proc-macro-error",
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "goblin"
version = "0.2.3"
@ -175,32 +163,6 @@ version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b4596b6d070b27117e987119b4dac604f3c58cfb0b191112e24771b2faeac1a6"
[[package]]
name = "proc-macro-error"
version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "98e9e4b82e0ef281812565ea4751049f1bdcdfccda7d3f459f2e138a40c08678"
dependencies = [
"proc-macro-error-attr",
"proc-macro2",
"quote",
"syn",
"version_check",
]
[[package]]
name = "proc-macro-error-attr"
version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4f5444ead4e9935abd7f27dc51f7e852a0569ac888096d5ec2499470794e2e53"
dependencies = [
"proc-macro2",
"quote",
"syn",
"syn-mid",
"version_check",
]
[[package]]
name = "proc-macro2"
version = "1.0.18"
@ -281,17 +243,6 @@ dependencies = [
"unicode-xid",
]
[[package]]
name = "syn-mid"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7be3539f6c128a931cf19dcee741c1af532c7fd387baa739c03dd2e96479338a"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "typenum"
version = "1.12.0"
@ -316,7 +267,6 @@ version = "0.1.0"
dependencies = [
"anyhow",
"bitflags",
"getset",
"goblin",
"itertools",
"lazy_static",


@ -16,7 +16,7 @@ lto = true
bitflags = "1.2.1"
page_size = "0.4.2"
lazy_static = "1.4.0"
getset = "0.1.1"
#getset = "0.1.1"
parking_lot = "0.10.2"
itertools = "0.9.0"
goblin = { version = "0.2.3", features = ["elf64", "std"] }


@ -1,5 +1,5 @@
use crate::*;
use host::{ActivatedWaterboxHost, WaterboxHost};
use host::{WaterboxHost};
use std::{os::raw::c_char, io, ffi::{/*CString, */CStr}};
use context::ExternalCallback;
@ -163,14 +163,11 @@ pub extern fn wbx_create_host(layout: &MemoryLayoutTemplate, module_name: *const
ret.put(res.map(|boxed| Box::into_raw(boxed)));
}
/// Tear down a host environment. May not be called while the environment is active.
/// Tear down a host environment. If called while the environment is active, will deactivate it first.
#[no_mangle]
pub extern fn wbx_destroy_host(obj: *mut WaterboxHost, ret: &mut Return<()>) {
let res = (|| {
unsafe {
if (*obj).active() {
return Err(anyhow!("WaterboxHost is still active!"))
}
Box::from_raw(obj);
Ok(())
}
@ -179,26 +176,23 @@ pub extern fn wbx_destroy_host(obj: *mut WaterboxHost, ret: &mut Return<()>) {
}
/// Activate a host environment. This swaps it into memory and makes it available for use.
/// Pointers to inside the environment are only valid while active. Uses a mutex internally
/// so as to not stomp over other host environments in the same 4GiB slice.
/// Returns a pointer to the activated object, used to do most other functions.
/// Pointers to inside the environment are only valid while active. Callbacks into the environment can only be called
/// while active. Uses a mutex internally so as to not stomp over other host environments in the same 4GiB slice.
/// Ignored if host is already active.
#[no_mangle]
pub extern fn wbx_activate_host(obj: *mut WaterboxHost, ret: &mut Return<*mut ActivatedWaterboxHost>) {
pub extern fn wbx_activate_host(obj: &mut WaterboxHost, ret: &mut Return<()>) {
let res = (|| {
unsafe {
if (*obj).active() {
return Err(anyhow!("WaterboxHost is already active!"))
}
Ok((&mut (*obj)).activate())
}
obj.activate();
Ok(())
})();
ret.put(res.map(|boxed| Box::into_raw(boxed)));
ret.put(res);
}
/// Deactivates a host environment, and releases the mutex.
/// Ignored if host is not active.
#[no_mangle]
pub extern fn wbx_deactivate_host(obj: *mut ActivatedWaterboxHost, ret: &mut Return<()>) {
unsafe { Box::from_raw(obj); }
pub extern fn wbx_deactivate_host(obj: &mut WaterboxHost, ret: &mut Return<()>) {
obj.deactivate();
ret.put(Ok(()));
}
@ -206,7 +200,7 @@ pub extern fn wbx_deactivate_host(obj: *mut ActivatedWaterboxHost, ret: &mut Ret
/// while the host is active. A missing proc is not an error and simply returns 0. The guest function must be,
/// and the returned callback will be, sysv abi, and will only pass up to 6 int/ptr args and no other arg types.
#[no_mangle]
pub extern fn wbx_get_proc_addr(obj: &mut ActivatedWaterboxHost, name: *const c_char, ret: &mut Return<usize>) {
pub extern fn wbx_get_proc_addr(obj: &mut WaterboxHost, name: *const c_char, ret: &mut Return<usize>) {
match arg_to_str(name) {
Ok(s) => {
ret.put(obj.get_proc_addr(&s));
@ -221,14 +215,14 @@ pub extern fn wbx_get_proc_addr(obj: &mut ActivatedWaterboxHost, name: *const c_
/// only needed if the guest exposes callin pointers that aren't named exports (for instance, if a function returns
/// a pointer to another function).
#[no_mangle]
pub extern fn wbx_get_callin_addr(obj: &mut ActivatedWaterboxHost, ptr: usize, ret: &mut Return<usize>) {
pub extern fn wbx_get_callin_addr(obj: &mut WaterboxHost, ptr: usize, ret: &mut Return<usize>) {
ret.put(obj.get_external_callin_ptr(ptr));
}
/// Returns the raw address of a function exported from the guest. `wbx_get_proc_addr()` is equivalent to
/// `wbx_get_callin_addr(wbx_get_proc_addr_raw())`. Most things should not use this directly, as the returned
/// pointer will not have proper stack hygiene and will crash on syscalls from the guest.
#[no_mangle]
pub extern fn wbx_get_proc_addr_raw(obj: &mut ActivatedWaterboxHost, name: *const c_char, ret: &mut Return<usize>) {
pub extern fn wbx_get_proc_addr_raw(obj: &mut WaterboxHost, name: *const c_char, ret: &mut Return<usize>) {
match arg_to_str(name) {
Ok(s) => {
ret.put(obj.get_proc_addr_raw(&s));
@ -245,13 +239,13 @@ pub extern fn wbx_get_proc_addr_raw(obj: &mut ActivatedWaterboxHost, name: *cons
/// in the guest because `foo` was bound to the same slot and a particular slot gives a consistent pointer.
/// The returned thunk will be, and the callback must be, sysv abi and will only pass up to 6 int/ptr args and no other arg types.
#[no_mangle]
pub extern fn wbx_get_callback_addr(obj: &mut ActivatedWaterboxHost, callback: ExternalCallback, slot: usize, ret: &mut Return<usize>) {
pub extern fn wbx_get_callback_addr(obj: &mut WaterboxHost, callback: ExternalCallback, slot: usize, ret: &mut Return<usize>) {
ret.put(obj.get_external_callback_ptr(callback, slot));
}
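The slot mechanism in a standalone sketch (made-up table type and slot count; the real CALLBACK_SLOTS lives in the crate): each slot owns a fixed trampoline address, so rebinding the same callback to the same slot on every run reproduces the same guest-visible pointer, which is what lets function pointers survive inside savestates.

type ExternalCallback = extern "sysv64" fn(usize, usize, usize, usize, usize, usize) -> usize;

const CALLBACK_SLOTS: usize = 64; // illustrative; the crate defines its own count

struct CallbackTable {
    slots: [Option<ExternalCallback>; CALLBACK_SLOTS], // host-side targets, freely rebound
    thunk_addrs: [usize; CALLBACK_SLOTS],              // fixed per-slot trampoline addresses
}

impl CallbackTable {
    fn bind(&mut self, slot: usize, cb: ExternalCallback) -> Result<usize, String> {
        if slot >= CALLBACK_SLOTS {
            return Err(format!("slot must be less than {}", CALLBACK_SLOTS));
        }
        self.slots[slot] = Some(cb); // the target may change between runs...
        Ok(self.thunk_addrs[slot])   // ...but the pointer handed to the guest does not
    }
}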
/// Calls the seal operation, which is a one time action that prepares the host to save states.
#[no_mangle]
pub extern fn wbx_seal(obj: &mut ActivatedWaterboxHost, ret: &mut Return<()>) {
pub extern fn wbx_seal(obj: &mut WaterboxHost, ret: &mut Return<()>) {
ret.put(obj.seal());
}
@ -260,7 +254,7 @@ pub extern fn wbx_seal(obj: &mut ActivatedWaterboxHost, ret: &mut Return<()>) {
/// when save_state is called, and can only be used for transient operations. If a file is readable, it can appear in savestates,
/// but it must exist in every savestate and the exact sequence of add_file calls must be consistent from savestate to savestate.
#[no_mangle]
pub extern fn wbx_mount_file(obj: &mut ActivatedWaterboxHost, name: *const c_char, callback: ReadCallback, userdata: usize, writable: bool, ret: &mut Return<()>) {
pub extern fn wbx_mount_file(obj: &mut WaterboxHost, name: *const c_char, callback: ReadCallback, userdata: usize, writable: bool, ret: &mut Return<()>) {
let mut reader = CReader {
userdata,
callback
@ -275,7 +269,7 @@ pub extern fn wbx_mount_file(obj: &mut ActivatedWaterboxHost, name: *const c_cha
/// Remove a file previously added. Writer is optional; if provided, the contents of the file at time of removal will be dumped to it.
/// It is an error to remove a file which is currently open in the guest.
#[no_mangle]
pub extern fn wbx_unmount_file(obj: &mut ActivatedWaterboxHost, name: *const c_char, callback_opt: Option<WriteCallback>, userdata: usize, ret: &mut Return<()>) {
pub extern fn wbx_unmount_file(obj: &mut WaterboxHost, name: *const c_char, callback_opt: Option<WriteCallback>, userdata: usize, ret: &mut Return<()>) {
let res: anyhow::Result<()> = (|| {
let data = obj.unmount_file(&arg_to_str(name)?)?;
if let Some(callback) = callback_opt {
@ -296,7 +290,7 @@ pub extern fn wbx_unmount_file(obj: &mut ActivatedWaterboxHost, name: *const c_c
/// in the callback. If the MissingFileResult is provided, it will be consumed immediately and will have the same effect
/// as wbx_mount_file(). You may free resources associated with the MissingFileResult whenever control next returns to your code.
// #[no_mangle]
// pub extern fn wbx_set_missing_file_callback(obj: &mut ActivatedWaterboxHost, mfc_o: Option<&MissingFileCallback>) {
// pub extern fn wbx_set_missing_file_callback(obj: &mut WaterboxHost, mfc_o: Option<&MissingFileCallback>) {
// match mfc_o {
// None => obj.set_missing_file_callback(None),
// Some(mfc) => {
@ -326,7 +320,7 @@ pub extern fn wbx_unmount_file(obj: &mut ActivatedWaterboxHost, name: *const c_c
/// Save state. Must not be called before seal. Must not be called with any writable files mounted.
/// Must always be called with the same sequence and contents of readonly files.
#[no_mangle]
pub extern fn wbx_save_state(obj: &mut ActivatedWaterboxHost, callback: WriteCallback, userdata: usize, ret: &mut Return<()>) {
pub extern fn wbx_save_state(obj: &mut WaterboxHost, callback: WriteCallback, userdata: usize, ret: &mut Return<()>) {
let mut writer = CWriter {
userdata,
callback
@ -342,7 +336,7 @@ pub extern fn wbx_save_state(obj: &mut ActivatedWaterboxHost, callback: WriteCal
/// Must be called with the same wbx executable and memory layout as in the savestate.
/// Errors generally poison the environment; sorry!
#[no_mangle]
pub extern fn wbx_load_state(obj: &mut ActivatedWaterboxHost, callback: ReadCallback, userdata: usize, ret: &mut Return<()>) {
pub extern fn wbx_load_state(obj: &mut WaterboxHost, callback: ReadCallback, userdata: usize, ret: &mut Return<()>) {
let mut reader = CReader {
userdata,
callback


@ -1,7 +1,7 @@
use lazy_static::lazy_static;
use crate::*;
use memory_block::{Protection, pal};
use host::{ActivatedWaterboxHost};
use host::{WaterboxHost};
use syscall_defs::SyscallNumber;
pub mod thunks;
@ -57,7 +57,7 @@ pub type ExternalCallback = extern "sysv64" fn(
a1: usize, a2: usize, a3: usize, a4: usize, a5: usize, a6: usize) -> usize;
/// Allowed type of the syscall service function
pub type SyscallCallback = extern "sysv64" fn(
a1: usize, a2: usize, a3: usize, a4: usize, a5: usize, a6: usize, nr: SyscallNumber, h: &mut ActivatedWaterboxHost) -> SyscallReturn;
a1: usize, a2: usize, a3: usize, a4: usize, a5: usize, a6: usize, nr: SyscallNumber, h: &mut WaterboxHost) -> SyscallReturn;
/// Structure used to track information for calls into waterbox code
/// Layout must be synced with interop.s


@ -1,9 +1,9 @@
use goblin;
use goblin::{elf::Elf, elf64::{sym::*, section_header::*}};
use crate::*;
use crate::memory_block::ActivatedMemoryBlock;
use crate::memory_block::Protection;
use std::collections::HashMap;
use memory_block::MemoryBlock;
/// Special system import area
const INFO_OBJECT_NAME: &str = "__wbxsysinfo";
@ -45,7 +45,7 @@ impl ElfLoader {
pub fn new(wbx: &Elf, data: &[u8],
module_name: &str,
layout: &WbxSysLayout,
b: &mut ActivatedMemoryBlock
b: &mut MemoryBlock
) -> anyhow::Result<ElfLoader> {
println!("Mouting `{}` @{:x}", module_name, layout.elf.start);
println!(" Sections:");
@ -147,11 +147,7 @@ impl ElfLoader {
);
// TODO: Using no_replace false here because the linker puts eh_frame_hdr in a separate segment that overlaps the other RO segment???
b.mmap_fixed(prot_addr, Protection::RW, false)?;
unsafe {
let src = &data[segment.file_range()];
let dst = AddressRange { start: addr.start, size: segment.file_range().end - segment.file_range().start }.slice_mut();
dst.copy_from_slice(src);
}
b.copy_from_external(&data[segment.file_range()], addr.start)?;
b.mprotect(prot_addr, prot)?;
}
@ -160,7 +156,10 @@ impl ElfLoader {
if i.size != std::mem::size_of::<WbxSysLayout>() {
return Err(anyhow!("Symbol {} is the wrong size", INFO_OBJECT_NAME))
}
unsafe { *(i.start as *mut WbxSysLayout) = *layout; }
unsafe {
let src = std::slice::from_raw_parts(layout as *const WbxSysLayout as *const u8, i.size);
b.copy_from_external(src, i.start)?;
}
},
// info area can legally be missing if the core calls no emulibc functions
// None => return Err(anyhow!("Symbol {} is missing", INFO_OBJECT_NAME))
@ -179,7 +178,7 @@ impl ElfLoader {
hash: bin::hash(data)
})
}
pub fn seal(&mut self, b: &mut ActivatedMemoryBlock) {
pub fn seal(&mut self, b: &mut MemoryBlock) {
for section in self.sections.iter() {
if section_name_is_readonly(section.name.as_str()) {
b.mprotect(section.addr.align_expand(), Protection::R).unwrap();


@ -1,5 +1,5 @@
use crate::*;
use crate::{memory_block::ActivatedMemoryBlock, syscall_defs::*};
use crate::{syscall_defs::*};
use memory_block::{MemoryBlock, Protection};
use std::{os::raw::c_char, ffi::CStr};
use fs::{FileDescriptor, FileSystem/*, MissingFileCallback*/};
@ -27,10 +27,9 @@ impl WaterboxHost {
let elf_addr = ElfLoader::elf_addr(&wbx);
let layout = layout_template.make_layout(elf_addr)?;
let mut memory_block = MemoryBlock::new(layout.all());
let mut b = memory_block.enter();
let elf = ElfLoader::new(&wbx, &image_file[..], module_name, &layout, &mut b)?;
let elf = ElfLoader::new(&wbx, &image_file[..], module_name, &layout, &mut memory_block)?;
let fs = FileSystem::new();
drop(b);
unsafe { gdb::register(&image_file[..]) }
let mut res = Box::new(WaterboxHost {
fs,
@ -44,11 +43,10 @@ impl WaterboxHost {
context: Context::new(layout.main_thread.end(), syscall),
thunks,
});
let mut active = res.activate();
res.activate();
println!("Calling _start()");
active.run_guest_simple(active.h.elf.entry_point());
drop(active);
res.run_guest_simple(res.elf.entry_point());
res.deactivate();
Ok(res)
}
@ -57,74 +55,72 @@ impl WaterboxHost {
self.active
}
pub fn activate(&mut self) -> Box<ActivatedWaterboxHost> {
context::prepare_thread();
let h = unsafe { &mut *(self as *mut WaterboxHost) };
let b = self.memory_block.enter();
let mut res = Box::new(ActivatedWaterboxHost {
h,
b,
});
res.h.active = true;
res.h.context.host_ptr = res.as_mut() as *mut ActivatedWaterboxHost as usize;
res
pub fn activate(&mut self) {
if !self.active {
context::prepare_thread();
self.context.host_ptr = self as *mut WaterboxHost as usize;
self.memory_block.activate();
self.active = true;
}
}
pub fn deactivate(&mut self) {
if self.active {
self.context.host_ptr = 0;
self.memory_block.deactivate();
self.active = false;
}
}
}
impl Drop for WaterboxHost {
fn drop(&mut self) {
self.deactivate();
unsafe { gdb::deregister(&self.image_file[..]) }
}
}
pub struct ActivatedWaterboxHost<'a> {
h: &'a mut WaterboxHost,
b: ActivatedMemoryBlock<'a>,
}
impl<'a> Drop for ActivatedWaterboxHost<'a> {
fn drop(&mut self) {
self.h.active = false;
self.h.context.host_ptr = 0;
}
}
impl<'a> ActivatedWaterboxHost<'a> {
impl WaterboxHost {
pub fn get_external_callback_ptr(&mut self, callback: ExternalCallback, slot: usize) -> anyhow::Result<usize> {
if slot >= CALLBACK_SLOTS {
Err(anyhow!("slot must be less than {}", CALLBACK_SLOTS))
} else {
self.h.context.extcall_slots[slot] = Some(callback);
self.context.extcall_slots[slot] = Some(callback);
Ok(context::get_callback_ptr(slot))
}
}
pub fn get_external_callin_ptr(&mut self, ptr: usize) -> anyhow::Result<usize> {
self.h.thunks.get_thunk_for_proc(ptr, &mut self.h.context as *mut Context)
self.thunks.get_thunk_for_proc(ptr, &mut self.context as *mut Context)
}
pub fn get_proc_addr(&mut self, name: &str) -> anyhow::Result<usize> {
let ptr = self.h.elf.get_proc_addr(name);
let ptr = self.elf.get_proc_addr(name);
if ptr == 0 {
Ok(0)
} else {
self.h.thunks.get_thunk_for_proc(ptr, &mut self.h.context as *mut Context)
self.thunks.get_thunk_for_proc(ptr, &mut self.context as *mut Context)
}
}
pub fn get_proc_addr_raw(&mut self, name: &str) -> anyhow::Result<usize> {
let ptr = self.h.elf.get_proc_addr(name);
let ptr = self.elf.get_proc_addr(name);
Ok(ptr)
}
fn check_sealed(&self) -> anyhow::Result<()> {
if !self.h.sealed {
if !self.sealed {
Err(anyhow!("Not sealed!"))
} else {
Ok(())
}
}
pub fn seal(&mut self) -> anyhow::Result<()> {
if self.h.sealed {
if self.sealed {
return Err(anyhow!("Already sealed!"))
}
fn run_proc(h: &mut ActivatedWaterboxHost, name: &str) {
match h.h.elf.get_proc_addr(name) {
let was_active = self.active;
self.activate();
fn run_proc(h: &mut WaterboxHost, name: &str) {
match h.elf.get_proc_addr(name) {
0 => (),
ptr => {
println!("Calling {}()", name);
@ -135,47 +131,51 @@ impl<'a> ActivatedWaterboxHost<'a> {
run_proc(self, "co_clean");
run_proc(self, "ecl_seal");
self.h.elf.seal(&mut self.b);
self.b.seal();
self.h.sealed = true;
self.elf.seal(&mut self.memory_block);
self.memory_block.seal()?;
if was_active {
self.deactivate();
}
self.sealed = true;
Ok(())
}
pub fn mount_file(&mut self, name: String, data: Vec<u8>, writable: bool) -> anyhow::Result<()> {
self.h.fs.mount(name, data, writable)
self.fs.mount(name, data, writable)
}
pub fn unmount_file(&mut self, name: &str) -> anyhow::Result<Vec<u8>> {
self.h.fs.unmount(name)
self.fs.unmount(name)
}
// pub fn set_missing_file_callback(&mut self, cb: Option<MissingFileCallback>) {
// self.h.fs.set_missing_file_callback(cb);
// self.fs.set_missing_file_callback(cb);
// }
/// Run a guest entry point that takes no arguments
pub fn run_guest_simple(&mut self, entry_point: usize) {
context::call_guest_simple(entry_point, &mut self.h.context);
context::call_guest_simple(entry_point, &mut self.context);
}
}
const SAVE_START_MAGIC: &str = "ActivatedWaterboxHost_v1";
const SAVE_END_MAGIC: &str = "ʇsoHxoqɹǝʇɐMpǝʇɐʌᴉʇɔ∀";
impl<'a> IStateable for ActivatedWaterboxHost<'a> {
impl IStateable for WaterboxHost {
fn save_state(&mut self, stream: &mut dyn Write) -> anyhow::Result<()> {
self.check_sealed()?;
bin::write_magic(stream, SAVE_START_MAGIC)?;
self.h.fs.save_state(stream)?;
bin::write(stream, &self.h.program_break)?;
self.h.elf.save_state(stream)?;
self.b.save_state(stream)?;
self.fs.save_state(stream)?;
bin::write(stream, &self.program_break)?;
self.elf.save_state(stream)?;
self.memory_block.save_state(stream)?;
bin::write_magic(stream, SAVE_END_MAGIC)?;
Ok(())
}
fn load_state(&mut self, stream: &mut dyn Read) -> anyhow::Result<()> {
self.check_sealed()?;
bin::verify_magic(stream, SAVE_START_MAGIC)?;
self.h.fs.load_state(stream)?;
bin::read(stream, &mut self.h.program_break)?;
self.h.elf.load_state(stream)?;
self.b.load_state(stream)?;
self.fs.load_state(stream)?;
bin::read(stream, &mut self.program_break)?;
self.elf.load_state(stream)?;
self.memory_block.load_state(stream)?;
bin::verify_magic(stream, SAVE_END_MAGIC)?;
Ok(())
}
@ -228,7 +228,7 @@ fn arg_to_statbuff<'a>(arg: usize) -> &'a mut KStat {
extern "sysv64" fn syscall(
a1: usize, a2: usize, a3: usize, a4: usize, _a5: usize, _a6: usize,
nr: SyscallNumber, h: &mut ActivatedWaterboxHost
nr: SyscallNumber, h: &mut WaterboxHost
) -> SyscallReturn {
match nr {
NR_MMAP => {
@ -252,43 +252,43 @@ extern "sysv64" fn syscall(
}
}
let no_replace = flags & MAP_FIXED_NOREPLACE != 0;
let arena_addr = h.h.layout.mmap;
let res = h.b.mmap(AddressRange { start: a1, size: a2 }, prot, arena_addr, no_replace)?;
let arena_addr = h.layout.mmap;
let res = h.memory_block.mmap(AddressRange { start: a1, size: a2 }, prot, arena_addr, no_replace)?;
syscall_ok(res)
},
NR_MREMAP => {
let arena_addr = h.h.layout.mmap;
let res = h.b.mremap(AddressRange { start: a1, size: a2 }, a3, arena_addr)?;
let arena_addr = h.layout.mmap;
let res = h.memory_block.mremap(AddressRange { start: a1, size: a2 }, a3, arena_addr)?;
syscall_ok(res)
},
NR_MPROTECT => {
let prot = arg_to_prot(a3)?;
let res = h.b.mprotect(AddressRange { start: a1, size: a2 }, prot);
let res = h.memory_block.mprotect(AddressRange { start: a1, size: a2 }, prot);
syscall_ret(res)
},
NR_MUNMAP => syscall_ret(h.b.munmap(AddressRange { start: a1, size: a2 })),
NR_MUNMAP => syscall_ret(h.memory_block.munmap(AddressRange { start: a1, size: a2 })),
NR_MADVISE => {
match a3 {
MADV_DONTNEED => syscall_ret(h.b.madvise_dontneed(AddressRange { start: a1, size: a2 })),
MADV_DONTNEED => syscall_ret(h.memory_block.madvise_dontneed(AddressRange { start: a1, size: a2 })),
_ => syscall_ok(0),
}
},
NR_STAT => {
let name = arg_to_str(a1)?;
syscall_ret(h.h.fs.stat(&name, arg_to_statbuff(a2)))
syscall_ret(h.fs.stat(&name, arg_to_statbuff(a2)))
},
NR_FSTAT => {
syscall_ret(h.h.fs.fstat(arg_to_fd(a1)?, arg_to_statbuff(a2)))
syscall_ret(h.fs.fstat(arg_to_fd(a1)?, arg_to_statbuff(a2)))
},
NR_IOCTL => syscall_ok(0),
NR_READ => {
unsafe {
syscall_ret_i64(h.h.fs.read(arg_to_fd(a1)?, std::slice::from_raw_parts_mut(a2 as *mut u8, a3)))
syscall_ret_i64(h.fs.read(arg_to_fd(a1)?, std::slice::from_raw_parts_mut(a2 as *mut u8, a3)))
}
},
NR_WRITE => {
unsafe {
syscall_ret_i64(h.h.fs.write(arg_to_fd(a1)?, std::slice::from_raw_parts(a2 as *const u8, a3)))
syscall_ret_i64(h.fs.write(arg_to_fd(a1)?, std::slice::from_raw_parts(a2 as *const u8, a3)))
}
},
NR_READV => {
@ -298,7 +298,7 @@ extern "sysv64" fn syscall(
let iov = std::slice::from_raw_parts_mut(a2 as *mut Iovec, a3);
for io in iov {
if io.iov_base != 0 {
ret += h.h.fs.read(fd, io.slice_mut())?;
ret += h.fs.read(fd, io.slice_mut())?;
}
}
syscall_ok(ret as usize)
@ -311,19 +311,19 @@ extern "sysv64" fn syscall(
let iov = std::slice::from_raw_parts(a2 as *const Iovec, a3);
for io in iov {
if io.iov_base != 0 {
ret += h.h.fs.write(fd, io.slice())?;
ret += h.fs.write(fd, io.slice())?;
}
}
syscall_ok(ret as usize)
}
},
NR_OPEN => {
syscall_ret_val(h.h.fs.open(&arg_to_str(a1)?, a2 as i32, a3 as i32).map(|x| x.0 as usize))
syscall_ret_val(h.fs.open(&arg_to_str(a1)?, a2 as i32, a3 as i32).map(|x| x.0 as usize))
},
NR_CLOSE => syscall_ret(h.h.fs.close(arg_to_fd(a1)?)),
NR_LSEEK => syscall_ret_i64(h.h.fs.seek(arg_to_fd(a1)?, a2 as i64, a3 as i32)),
NR_TRUNCATE => syscall_ret(h.h.fs.truncate(&arg_to_str(a1)?, a2 as i64)),
NR_FTRUNCATE => syscall_ret(h.h.fs.ftruncate(arg_to_fd(a1)?, a2 as i64)),
NR_CLOSE => syscall_ret(h.fs.close(arg_to_fd(a1)?)),
NR_LSEEK => syscall_ret_i64(h.fs.seek(arg_to_fd(a1)?, a2 as i64, a3 as i32)),
NR_TRUNCATE => syscall_ret(h.fs.truncate(&arg_to_str(a1)?, a2 as i64)),
NR_FTRUNCATE => syscall_ret(h.fs.ftruncate(arg_to_fd(a1)?, a2 as i64)),
// TODO: 99% sure nothing calls this
NR_SET_THREAD_AREA => syscall_err(ENOSYS),
// TODO: What calls this?
@ -338,8 +338,8 @@ extern "sysv64" fn syscall(
},
NR_BRK => {
// TODO: This could be done on the C side
let addr = h.h.layout.sbrk;
let old = h.h.program_break;
let addr = h.layout.sbrk;
let old = h.program_break;
let res = if a1 != align_down(a1) {
old
} else if a1 < addr.start {
@ -351,13 +351,13 @@ extern "sysv64" fn syscall(
eprintln!("Failed to satisfy allocation of {} bytes on sbrk heap", a1 - old);
old
} else if a1 > old {
h.b.mmap_fixed(AddressRange { start: old, size: a1 - old }, Protection::RW, true).unwrap();
h.memory_block.mmap_fixed(AddressRange { start: old, size: a1 - old }, Protection::RW, true).unwrap();
println!("Allocated {} bytes on sbrk heap, usage {}/{}", a1 - old, a1 - addr.start, addr.size);
a1
} else {
old
};
h.h.program_break = res;
h.program_break = res;
syscall_ok(res)
},
_ => syscall_ret(unimp(nr)),


@ -7,7 +7,6 @@ use std::sync::MutexGuard;
use std::ops::DerefMut;
use pageblock::PageBlock;
use crate::*;
use getset::Getters;
use crate::syscall_defs::*;
use itertools::Itertools;
use std::sync::atomic::AtomicU32;
@ -137,6 +136,8 @@ impl Page {
/// Used internally to talk about regions of memory together with their allocation status
struct PageRange<'a> {
pub start: usize,
pub mirror_start: usize,
pub active: bool,
pub pages: &'a mut [Page]
}
impl<'a> PageRange<'a> {
@ -146,15 +147,25 @@ impl<'a> PageRange<'a> {
size: self.pages.len() << PAGESHIFT
}
}
pub fn split_at_size(&mut self, size: usize) -> (PageRange, PageRange) {
pub fn mirror_addr(&self) -> AddressRange {
AddressRange {
start: self.mirror_start,
size: self.pages.len() << PAGESHIFT
}
}
pub fn split_at_size(self, size: usize) -> (PageRange<'a>, PageRange<'a>) {
let (sl, sr) = self.pages.split_at_mut(size >> PAGESHIFT);
(
PageRange {
start: self.start,
mirror_start: self.mirror_start,
active: self.active,
pages: sl
},
PageRange {
start: self.start + size,
mirror_start: self.mirror_start + size,
active: self.active,
pages: sr
}
)
@ -181,6 +192,14 @@ impl<'a> PageRange<'a> {
(AddressRange { start: page_start, size: PAGESIZE}, p)
})
}
pub fn iter_mut_with_mirror_addr(&mut self) -> impl Iterator<Item = (AddressRange, &mut Page)> {
let mut start = self.mirror_start;
self.pages.iter_mut().map(move |p| {
let page_start = start;
start += PAGESIZE;
(AddressRange { start: page_start, size: PAGESIZE}, p)
})
}
/// fuse two adjacent ranges. panics if they do not exactly touch
pub fn fuse(left: Self, right: Self) -> PageRange<'a> {
unsafe {
@ -189,6 +208,8 @@ impl<'a> PageRange<'a> {
assert_eq!(lp.add(left.pages.len()), rp);
PageRange {
start: left.start,
mirror_start: left.mirror_start,
active: left.active,
pages: std::slice::from_raw_parts_mut(lp, left.pages.len() + right.pages.len())
}
}
@ -197,40 +218,25 @@ impl<'a> PageRange<'a> {
static NEXT_DEBUG_ID: AtomicU32 = AtomicU32::new(0);
#[derive(Getters)]
#[derive(Debug)]
pub struct MemoryBlock {
#[get]
pages: Vec<Page>,
#[get]
addr: AddressRange,
#[get]
/// An always-visible second mirror of the address space with RW permissions
/// Writes here will not trip dirty detection!
mirror: AddressRange,
sealed: bool,
#[get]
hash: Vec<u8>,
lock_index: u32,
handle: pal::Handle,
debug_id: u32,
active: bool,
active_guard: Option<BlockGuard>,
}
type BlockGuard = MutexGuard<'static, Option<MemoryBlockRef>>;
pub struct ActivatedMemoryBlock<'block> {
b: &'block mut MemoryBlock,
mutex_guard: Option<BlockGuard>,
}
impl<'block> Drop for ActivatedMemoryBlock<'block> {
fn drop(&mut self) {
unsafe {
let guard = std::mem::replace(&mut self.mutex_guard, None);
self.b.deactivate(guard.unwrap());
}
}
}
impl MemoryBlock {
pub fn new(addr: AddressRange) -> Box<MemoryBlock> {
if addr.start != align_down(addr.start) || addr.size != align_down(addr.size) {
@ -255,11 +261,14 @@ impl MemoryBlock {
let lock_index = (addr.start >> 32) as u32;
// add the lock_index stuff now, so we won't have to check for it later on activate / drop
lock_list::maybe_add(lock_index);
let mirror = pal::map_handle(&handle, AddressRange { start: 0, size: addr.size }).unwrap();
unsafe { pal::protect(mirror, Protection::RW).unwrap(); }
let debug_id = NEXT_DEBUG_ID.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
let res = Box::new(MemoryBlock {
pages,
addr,
mirror,
sealed: false,
hash: Vec::new(),
@ -267,7 +276,7 @@ impl MemoryBlock {
handle,
debug_id,
active: false,
active_guard: None,
});
// res.trace("new");
res
@ -280,65 +289,71 @@ impl MemoryBlock {
name, self.debug_id, ptr, self.lock_index, tid)
}
pub fn enter(&mut self) -> ActivatedMemoryBlock {
unsafe {
let mutex_guard = self.activate();
ActivatedMemoryBlock {
b: self,
mutex_guard: Some(mutex_guard),
}
pub fn active(&self) -> bool {
match self.active_guard {
Some(_) => true,
None => false
}
}
/// lock memory region and potentially swap this block into memory
unsafe fn activate(&mut self) -> BlockGuard {
// self.trace("activate");
assert!(!self.active);
let area = lock_list::get(self.lock_index);
let mut guard = area.lock().unwrap();
let other_opt = guard.deref_mut();
match *other_opt {
Some(MemoryBlockRef(other)) => {
if other != self {
assert!(!(*other).active);
(*other).swapout();
self.swapin();
*other_opt = Some(MemoryBlockRef(self));
}
},
None => {
self.swapin();
*other_opt = Some(MemoryBlockRef(self));
pub fn activate(&mut self) {
unsafe {
// self.trace("activate");
if self.active() {
return
}
}
self.active = true;
guard
}
/// unlock memory region, and potentially swap this block out of memory
#[allow(unused_variables)] // unused stuff in release mode only
#[allow(unused_mut)]
unsafe fn deactivate(&mut self, mut guard: BlockGuard) {
// self.trace("deactivate");
assert!(self.active);
if ALWAYS_EVICT_BLOCKS {
// in debug mode, forcibly evict to catch dangling pointers
let area = lock_list::get(self.lock_index);
let mut guard = area.lock().unwrap();
let other_opt = guard.deref_mut();
match *other_opt {
Some(MemoryBlockRef(other)) => {
if other != self {
panic!();
assert!(!(*other).active());
(*other).swapout();
self.swapin();
*other_opt = Some(MemoryBlockRef(self));
}
self.swapout();
*other_opt = None;
},
None => {
panic!()
self.swapin();
*other_opt = Some(MemoryBlockRef(self));
}
}
self.active_guard = Some(guard);
}
}
/// unlock memory region, and potentially swap this block out of memory
#[allow(unused_variables)] // unused stuff in release mode only
#[allow(unused_mut)]
pub fn deactivate(&mut self) {
// self.trace("deactivate");
if !self.active() {
return
}
let mut guard = std::mem::replace(&mut self.active_guard, None).unwrap();
unsafe {
if ALWAYS_EVICT_BLOCKS {
// in debug mode, forcibly evict to catch dangling pointers
let other_opt = guard.deref_mut();
match *other_opt {
Some(MemoryBlockRef(other)) => {
if other != self {
panic!();
}
self.swapout();
*other_opt = None;
},
None => {
panic!()
}
}
}
}
self.active = false;
}
unsafe fn swapin(&mut self) {
@ -357,7 +372,9 @@ impl MemoryBlock {
fn page_range(&mut self) -> PageRange {
PageRange {
start: self.addr.start,
pages: &mut self.pages[..]
mirror_start: self.mirror.start,
active: self.active(),
pages: &mut self.pages[..],
}
}
@ -369,10 +386,13 @@ impl MemoryBlock {
|| addr.size != align_down(addr.size) {
Err(EINVAL)
} else {
let pstart = (addr.start - self.addr.start) >> PAGESHIFT;
let offset = addr.start - self.addr.start;
let pstart = offset >> PAGESHIFT;
let psize = (addr.size) >> PAGESHIFT;
Ok(PageRange {
start: addr.start,
mirror_start: self.mirror.start + offset,
active: self.active(),
pages: &mut self.pages[pstart..pstart + psize]
})
}
@ -381,6 +401,9 @@ impl MemoryBlock {
/// Refresh the correct protections in underlying host RAM on a page range. Use after
/// temporary pal::protect(...) modifications, or to apply the effect of a dirty/prot change on the page
fn refresh_protections(range: &PageRange) {
if !range.active {
return
}
struct Chunk {
addr: AddressRange,
prot: Protection,
@ -424,12 +447,10 @@ impl MemoryBlock {
#[cfg(windows)]
if status == PageAllocation::Allocated(Protection::RWStack) {
// have to precapture snapshots here
let mut addr = range.start;
for p in range.iter_mut() {
for (maddr, p) in range.iter_mut_with_mirror_addr() {
unsafe {
p.maybe_snapshot(addr);
p.maybe_snapshot(maddr.start)
}
addr += PAGESIZE;
}
}
}
@ -439,6 +460,9 @@ impl MemoryBlock {
fn get_stack_dirty(&mut self) {
#[cfg(windows)]
unsafe {
if !self.active() {
return
}
let mut start = self.addr.start;
let mut pindex = 0;
while start < self.addr.end() {
@ -464,6 +488,8 @@ impl MemoryBlock {
impl Drop for MemoryBlock {
fn drop(&mut self) {
// self.trace("drop");
self.deactivate();
let area = lock_list::get(self.lock_index);
let mut guard = area.lock().unwrap();
let other_opt = guard.deref_mut();
@ -476,22 +502,30 @@ impl Drop for MemoryBlock {
},
None => ()
}
unsafe { let _ = pal::unmap_handle(self.mirror); }
let h = std::mem::replace(&mut self.handle, pal::bad());
unsafe { let _ = pal::close_handle(h); }
}
}
impl<'block> ActivatedMemoryBlock<'block> {
impl MemoryBlock {
/// Looks for some free pages inside an arena
fn find_free_pages<'a>(arena: &'a mut PageRange<'a>, npages: usize) -> Result<PageRange<'a>, SyscallError> {
let active = arena.active;
struct Chunk<'a> {
range: PageRange<'a>,
free: bool,
}
let disp = arena.mirror_start.wrapping_sub(arena.start);
let range = arena.iter_mut_with_addr()
.map(|(a, p)| Chunk {
free: p.status == PageAllocation::Free,
range: PageRange { start: a.start, pages: std::slice::from_mut(p) },
range: PageRange {
start: a.start,
mirror_start: a.start.wrapping_add(disp),
active,
pages: std::slice::from_mut(p),
},
})
.coalesce(|x, y| {
if x.free == y.free {
@ -514,7 +548,9 @@ impl<'block> ActivatedMemoryBlock<'block> {
} else {
Ok(PageRange {
start: r.start,
pages: &mut r.pages[0..npages]
mirror_start: r.mirror_start,
active,
pages: &mut r.pages[0..npages],
})
}
},
@ -527,8 +563,8 @@ impl<'block> ActivatedMemoryBlock<'block> {
if size != align_down(size) {
return Err(EINVAL)
}
let mut arena = self.b.validate_range(arena_addr).unwrap();
match ActivatedMemoryBlock::find_free_pages(&mut arena, size >> PAGESHIFT) {
let mut arena = self.validate_range(arena_addr).unwrap();
match MemoryBlock::find_free_pages(&mut arena, size >> PAGESHIFT) {
Ok(mut range) => {
MemoryBlock::set_protections(&mut range, PageAllocation::Allocated(prot));
Ok(range.start)
@ -539,7 +575,7 @@ impl<'block> ActivatedMemoryBlock<'block> {
/// implements a subset of mmap(2) for anonymous, fixed address mappings
pub fn mmap_fixed(&mut self, addr: AddressRange, prot: Protection, no_replace: bool) -> SyscallResult {
let mut range = self.b.validate_range(addr)?;
let mut range = self.validate_range(addr)?;
if no_replace && range.iter().any(|p| p.status != PageAllocation::Free) {
return Err(EEXIST)
}
@ -549,10 +585,10 @@ impl<'block> ActivatedMemoryBlock<'block> {
/// implements a subset of mremap(2) when MREMAP_MAYMOVE is not set, and MREMAP_FIXED is not
fn mremap_nomove(&mut self, addr: AddressRange, new_size: usize) -> SyscallResult {
self.b.get_stack_dirty();
self.get_stack_dirty();
if new_size > addr.size {
let full_addr = AddressRange { start: addr.start, size: new_size };
let mut range = self.b.validate_range(full_addr)?;
let range = self.validate_range(full_addr)?;
let (old_range, mut new_range) = range.split_at_size(addr.size);
if old_range.iter().any(|p| p.status == PageAllocation::Free) {
return Err(EINVAL)
@ -563,7 +599,7 @@ impl<'block> ActivatedMemoryBlock<'block> {
MemoryBlock::set_protections(&mut new_range, old_range.pages[0].status);
Ok(())
} else {
let range = self.b.validate_range(addr)?;
let range = self.validate_range(addr)?;
if range.iter().any(|p| p.status == PageAllocation::Free) {
return Err(EINVAL)
}
@ -575,32 +611,32 @@ impl<'block> ActivatedMemoryBlock<'block> {
fn mremap_maymove(&mut self, addr: AddressRange, new_size: usize, arena_addr: AddressRange) -> Result<usize, SyscallError> {
// This could be a lot more clever, but it's a difficult problem and doesn't come up often.
// So I use a "simple" solution here.
self.b.get_stack_dirty();
self.get_stack_dirty();
if new_size != align_down(new_size) {
return Err(EINVAL)
}
// save a copy of src, and unmap
let mut src = self.b.validate_range(addr)?;
let mut src = self.validate_range(addr)?;
if src.iter().any(|p| p.status == PageAllocation::Free) {
return Err(EINVAL)
}
let src_addr = src.addr();
let src_maddr = src.mirror_addr();
let mut old_status = Vec::new();
old_status.reserve_exact(src.pages.len());
let mut old_data = vec![0u8; src_addr.size];
let mut old_data = vec![0u8; src_maddr.size];
for p in src.iter() {
old_status.push(p.status);
}
unsafe {
pal::protect(src_addr, Protection::R).unwrap();
old_data.copy_from_slice(src_addr.slice());
old_data.copy_from_slice(src_maddr.slice());
}
ActivatedMemoryBlock::free_pages_impl(&mut src, false);
MemoryBlock::free_pages_impl(&mut src, false);
// find new location to map to, and copy into there
let mut arena = self.b.validate_range(arena_addr).unwrap();
let mut dest = match ActivatedMemoryBlock::find_free_pages(&mut arena, new_size >> PAGESHIFT) {
let mut arena = self.validate_range(arena_addr).unwrap();
let mut dest = match MemoryBlock::find_free_pages(&mut arena, new_size >> PAGESHIFT) {
Ok(r) => r,
Err(_) => {
// woops! reallocate at the old address.
@ -611,8 +647,7 @@ impl<'block> ActivatedMemoryBlock<'block> {
let nbcopy = std::cmp::min(addr.size, new_size);
let npcopy = nbcopy >> PAGESHIFT;
unsafe {
pal::protect(dest.addr(), Protection::RW).unwrap();
dest.addr().slice_mut()[0..nbcopy].copy_from_slice(&old_data[0..nbcopy]);
dest.mirror_addr().slice_mut()[0..nbcopy].copy_from_slice(&old_data[0..nbcopy]);
}
for (status, pdst) in old_status.iter().zip(dest.iter_mut()) {
pdst.status = *status;
@ -629,8 +664,8 @@ impl<'block> ActivatedMemoryBlock<'block> {
/// implements a subset of mprotect(2)
pub fn mprotect(&mut self, addr: AddressRange, prot: Protection) -> SyscallResult {
self.b.get_stack_dirty();
let mut range = self.b.validate_range(addr)?;
self.get_stack_dirty();
let mut range = self.validate_range(addr)?;
if range.iter().any(|p| p.status == PageAllocation::Free) {
return Err(ENOMEM)
}
@ -669,12 +704,11 @@ impl<'block> ActivatedMemoryBlock<'block> {
/// release pages, assuming the range has been fully validated already
fn free_pages_impl(range: &mut PageRange, advise_only: bool) {
let addr = range.addr();
// we do not save the current state of unmapped pages, and if they are later remapped,
// the expectation is that they will start out as zero filled. accordingly, the most
// sensible way to do this is to zero them now
unsafe {
pal::protect(addr, Protection::RW).unwrap();
let addr = range.mirror_addr();
addr.zero();
// simple state size optimization: we can undirty pages in this case depending on the initial state
#[cfg(not(feature = "no-dirty-detection"))]
@ -694,12 +728,12 @@ impl<'block> ActivatedMemoryBlock<'block> {
/// munmap or MADV_DONTNEED
fn munmap_impl(&mut self, addr: AddressRange, advise_only: bool) -> SyscallResult {
self.b.get_stack_dirty();
let mut range = self.b.validate_range(addr)?;
self.get_stack_dirty();
let mut range = self.validate_range(addr)?;
if range.iter().any(|p| p.status == PageAllocation::Free) {
return Err(EINVAL)
}
ActivatedMemoryBlock::free_pages_impl(&mut range, advise_only);
MemoryBlock::free_pages_impl(&mut range, advise_only);
Ok(())
}
/// Marks an address range as invisible. Its page content will not be saved in states (but
@ -710,8 +744,8 @@ impl<'block> ActivatedMemoryBlock<'block> {
// The limitations on this method are mostly because we want to not need a snapshot or dirty
// tracking for invisible pages. But if we didn't have one and later the pages became visible,
// we'd need one and wouldn't be able to reconstruct one.
assert!(!self.b.sealed);
let mut range = self.b.validate_range(addr)?;
assert!(!self.sealed);
let mut range = self.validate_range(addr)?;
for p in range.iter_mut() {
p.dirty = true;
p.invisible = true;
@ -725,29 +759,32 @@ impl<'block> ActivatedMemoryBlock<'block> {
self.munmap_impl(addr, true)
}
pub fn seal(&mut self) {
assert!(!self.b.sealed);
for p in self.b.pages.iter_mut() {
pub fn seal(&mut self) -> anyhow::Result<()> {
if self.sealed {
return Err(anyhow!("Already sealed!"))
}
for p in self.pages.iter_mut() {
if p.dirty && !p.invisible {
p.dirty = false;
p.snapshot = Snapshot::None;
}
}
#[cfg(feature = "no-dirty-detection")]
unsafe {
pal::protect(self.b.addr, Protection::R).unwrap();
for (a, p) in self.b.page_range().iter_mut_with_addr() {
for (a, p) in self.page_range().iter_mut_with_mirror_addr() {
p.dirty = true;
p.maybe_snapshot(a.start);
}
}
self.b.refresh_all_protections();
self.b.sealed = true;
self.b.hash = {
self.refresh_all_protections();
self.sealed = true;
self.hash = {
let mut hasher = Sha256::new();
bin::write(&mut hasher, &self.b.addr).unwrap();
for p in self.b.pages.iter() {
bin::write(&mut hasher, &self.addr).unwrap();
for p in self.pages.iter() {
match &p.snapshot {
Snapshot::None => bin::writeval(&mut hasher, 1).unwrap(),
Snapshot::ZeroFilled => bin::writeval(&mut hasher, 2).unwrap(),
@ -756,27 +793,50 @@ impl<'block> ActivatedMemoryBlock<'block> {
}
hasher.finalize()[..].to_owned()
};
Ok(())
}
/// Helper to copy bytes into guest memory, to guest address `start`
pub fn copy_from_external(&mut self, src: &[u8], start: usize) -> SyscallResult {
{
let addr = AddressRange {
start,
size: src.len()
};
let mut range = self.validate_range(addr.align_expand())?;
for p in range.iter_mut() {
p.dirty = true;
}
}
let dest = AddressRange {
start: start - self.addr.start + self.mirror.start,
size: src.len()
};
unsafe { dest.slice_mut().copy_from_slice(src) }
Ok(())
}
}
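copy_from_external above leans on one invariant, shown standalone below: the mirror has the same layout as the primary block, so any guest address translates to its mirror twin by a single fixed displacement (simplified stand-in types, not the crate's):

#[derive(Clone, Copy)]
struct AddressRange { start: usize, size: usize }

// Guest address -> equivalent address in the always-RW mirror view.
fn to_mirror(block: AddressRange, mirror: AddressRange, guest: usize) -> usize {
    assert!(guest >= block.start && guest - block.start < block.size);
    guest - block.start + mirror.start
}

fn main() {
    let block  = AddressRange { start: 0x36f0_0000_0000, size: 0x10000 }; // fixed guest view
    let mirror = AddressRange { start: 0x7f12_3456_0000, size: 0x10000 }; // wherever the OS put it
    assert_eq!(to_mirror(block, mirror, block.start + 0x2040), mirror.start + 0x2040);
}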
const MAGIC: &str = "ActivatedMemoryBlock";
impl<'block> IStateable for ActivatedMemoryBlock<'block> {
impl IStateable for MemoryBlock {
fn save_state(&mut self, stream: &mut dyn Write) -> anyhow::Result<()> {
if !self.b.sealed {
if !self.sealed {
return Err(anyhow!("Must seal first"))
}
bin::write_magic(stream, MAGIC)?;
bin::write_hash(stream, &self.b.hash[..])?;
self.b.get_stack_dirty();
self.b.addr.save_state(stream)?;
bin::write_hash(stream, &self.hash[..])?;
self.get_stack_dirty();
self.addr.save_state(stream)?;
unsafe {
let mut statii = Vec::new();
let mut dirtii = Vec::new();
statii.reserve_exact(self.b.pages.len());
dirtii.reserve_exact(self.b.pages.len());
for p in self.b.pages.iter() {
statii.reserve_exact(self.pages.len());
dirtii.reserve_exact(self.pages.len());
for p in self.pages.iter() {
statii.push(p.status);
dirtii.push(p.dirty);
}
@ -784,51 +844,47 @@ impl<'block> IStateable for ActivatedMemoryBlock<'block> {
stream.write_all(std::mem::transmute(&dirtii[..]))?;
}
for (paddr, p) in self.b.page_range().iter_with_addr() {
for (paddr, p) in self.page_range().iter_mut_with_mirror_addr() {
// bin::write(stream, &p.status)?;
if !p.invisible {
// bin::write(stream, &p.dirty)?;
if p.dirty {
unsafe {
if !p.status.readable() {
pal::protect(paddr, Protection::R).unwrap();
}
stream.write_all(paddr.slice())?;
if !p.status.readable() {
pal::protect(paddr, Protection::None).unwrap();
}
}
}
}
}
Ok(())
}
fn load_state(&mut self, stream: &mut dyn Read) -> anyhow::Result<()> {
assert!(self.b.sealed);
if !self.sealed {
return Err(anyhow!("Must seal first"))
}
bin::verify_magic(stream, MAGIC)?;
match bin::verify_hash(stream, &self.b.hash[..]) {
match bin::verify_hash(stream, &self.hash[..]) {
Ok(_) => (),
Err(_) => eprintln!("Unexpected MemoryBlock hash mismatch."),
}
self.b.get_stack_dirty();
self.get_stack_dirty();
{
let mut addr = AddressRange { start:0, size: 0 };
addr.load_state(stream)?;
if addr != self.b.addr {
if addr != self.addr {
return Err(anyhow!("Bad state data (addr) for ActivatedMemoryBlock"))
}
}
unsafe {
pal::protect(self.b.addr, Protection::RW).unwrap();
let mut statii = vec![PageAllocation::Free; self.b.pages.len()];
let mut dirtii = vec![false; self.b.pages.len()];
let mut statii = vec![PageAllocation::Free; self.pages.len()];
let mut dirtii = vec![false; self.pages.len()];
stream.read_exact(std::mem::transmute(&mut statii[..]))?;
stream.read_exact(std::mem::transmute(&mut dirtii[..]))?;
let mut index = 0usize;
for (paddr, p) in self.b.page_range().iter_mut_with_addr() {
for (paddr, p) in self.page_range().iter_mut_with_mirror_addr() {
let status = statii[index];
// let status = bin::readval::<PageAllocation>(stream)?;
if !p.invisible {
@ -859,7 +915,7 @@ impl<'block> IStateable for ActivatedMemoryBlock<'block> {
index += 1;
}
self.b.refresh_all_protections();
self.refresh_all_protections();
}
Ok(())
}
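For reference, the stream written above is: magic string, layout hash, the block's AddressRange, the raw statii and dirtii arrays, then the contents of every visible dirty page in address order. A hedged reader sketch for the two flag arrays, avoiding the transmute by assuming (for illustration only) one byte per page in each array:

use std::io::{Read, Result};

// Hedged sketch: read the per-page status and dirty arrays as plain bytes.
// The real code transmutes &[PageAllocation] / &[bool]; one byte per entry
// is an assumption made here to keep the sketch self-contained.
fn read_page_flags(stream: &mut dyn Read, page_count: usize) -> Result<(Vec<u8>, Vec<bool>)> {
    let mut statii = vec![0u8; page_count];
    stream.read_exact(&mut statii)?;
    let mut dirty_bytes = vec![0u8; page_count];
    stream.read_exact(&mut dirty_bytes)?;
    let dirtii: Vec<bool> = dirty_bytes.iter().map(|&b| b != 0).collect();
    Ok((statii, dirtii))
}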

View File

@ -61,7 +61,9 @@ mod win {
/// Map a handle into an address range
/// Probably shouldn't call with addr.size > handle's allocated size
/// Leaks if not later unmapped
pub fn map_handle(handle: &Handle, addr: AddressRange) -> anyhow::Result<()> {
/// addr.start can be 0, which means the OS chooses a location, or non-zero, which gives fixed address behavior
/// The returned address range will be identical to the request in the non-zero case, or give the actual OS-chosen address in the zero case.
pub fn map_handle(handle: &Handle, addr: AddressRange) -> anyhow::Result<AddressRange> {
unsafe {
let res = MapViewOfFileEx(
handle.0 as *mut c_void,
@ -71,10 +73,10 @@ mod win {
addr.size,
addr.start as *mut c_void
);
if res == addr.start as *mut c_void {
Ok(())
} else {
if res == null_mut() {
Err(error())
} else {
Ok(AddressRange { start: res as usize, size: addr.size })
}
}
}
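The contract change matters for the mirror: the host now maps the same handle twice, once pinned at the guest address and once wherever the OS likes. A hedged usage sketch against the signature above (map_guest_and_mirror itself is hypothetical):

// Map the same file-mapping handle at a fixed guest address and again at an
// OS-chosen base for the mirror. With start != 0 the returned range echoes
// the request; with start == 0 it reports where the view actually landed.
fn map_guest_and_mirror(
    handle: &Handle,
    guest: AddressRange,
) -> anyhow::Result<(AddressRange, AddressRange)> {
    let guest_view = map_handle(handle, guest)?;
    let mirror_view = map_handle(handle, AddressRange { start: 0, size: guest.size })?;
    Ok((guest_view, mirror_view))
}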
@ -97,7 +99,7 @@ mod win {
}
/// Map some anonymous bytes with no fd backing
/// addr.start can be 0, which means the OS chooses a location, or non-zero, which gives fixed behavior like map_handle
/// addr.start can be 0, which means the OS chooses a location, or non-zero, which gives fixed address behavior
/// The returned address range will be identical to the request in the non-zero case, or give the actual OS-chosen address in the zero case.
pub fn map_anon(addr: AddressRange, initial_prot: Protection) -> anyhow::Result<AddressRange> {
unsafe {
@ -199,19 +201,24 @@ mod nix {
/// Map a handle into an address range
/// Probably shouldn't call with addr.size > handle's allocated size
/// Leaks if not later unmapped
pub fn map_handle(handle: &Handle, addr: AddressRange) -> anyhow::Result<()> {
/// addr.start can be 0, which means the OS chooses a location, or non-zero, which gives fixed address behavior
/// The returned address range will be identical to the request in the non-zero case, or give the actual OS-chosen address in the zero case.
pub fn map_handle(handle: &Handle, addr: AddressRange) -> anyhow::Result<AddressRange> {
unsafe {
let res = mmap(addr.start as *mut c_void,
let mut flags = MAP_SHARED;
if addr.start != 0 {
flags |= MAP_FIXED | MAP_FIXED_NOREPLACE;
}
let ptr = mmap(addr.start as *mut c_void,
addr.size,
PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_SHARED | MAP_FIXED,
flags,
handle.0 as i32,
0
);
if res == addr.start as *mut c_void {
Ok(())
} else {
Err(error())
match ptr {
MAP_FAILED => Err(error()),
p => Ok(AddressRange { start: p as usize, size: addr.size })
}
}
}
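Two things are going on in the nix version: start == 0 drops MAP_FIXED entirely so the kernel picks the base, and fixed requests add MAP_FIXED_NOREPLACE (Linux 4.17+) so a collision with an existing mapping fails loudly instead of silently clobbering it. The flag selection in isolation:

use libc::{MAP_FIXED, MAP_FIXED_NOREPLACE, MAP_SHARED};

// Sketch of the flag logic above: a zero start lets the kernel choose; a
// non-zero start demands that exact address, and MAP_FIXED_NOREPLACE turns
// the demand into an error rather than an overwrite when something is
// already mapped there.
fn mmap_flags(start: usize) -> i32 {
    let mut flags = MAP_SHARED;
    if start != 0 {
        flags |= MAP_FIXED | MAP_FIXED_NOREPLACE;
    }
    flags
}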
@ -234,7 +241,7 @@ mod nix {
}
/// Map some anonymous bytes with no fd backing
/// addr.start can be 0, which means the OS chooses a location, or non-zero, which gives fixed behavior like map_handle
/// addr.start can be 0, which means the OS chooses a location, or non-zero, which gives fixed address behavior
/// The returned address range will be identical to the request in the non-zero case, or give the actual OS-chosen address in the zero case.
pub fn map_anon(addr: AddressRange, initial_prot: Protection) -> anyhow::Result<AddressRange> {
unsafe {

View File

@ -13,14 +13,16 @@ fn test_create() {
drop(MemoryBlock::new(AddressRange { start: 0x36b00000000, size: 0x2000 }));
{
let mut b = MemoryBlock::new(AddressRange { start: 0x36100000000, size: 0x65000 });
b.enter();
b.enter();
b.activate();
b.deactivate();
b.activate();
b.deactivate();
}
{
let mut b = MemoryBlock::new(AddressRange { start: 0x36e00000000, size: 0x5000 });
let guard = b.enter();
drop(guard);
b.enter();
b.activate();
b.deactivate();
b.activate();
}
}
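The RAII enter() guard is gone in favor of explicit activate()/deactivate() calls, since most host work now goes through the mirror and never needs activation. If guard semantics are still wanted at a call site, a thin wrapper can restore them; this sketch assumes activate/deactivate take &mut self as the tests suggest, and ActivationGuard is hypothetical:

// Hypothetical drop-guard over the new explicit API: activates on
// construction, deactivates when the guard leaves scope.
struct ActivationGuard<'a>(&'a mut MemoryBlock);

impl<'a> ActivationGuard<'a> {
    fn new(b: &'a mut MemoryBlock) -> Self {
        b.activate();
        ActivationGuard(b)
    }
}

impl<'a> Drop for ActivationGuard<'a> {
    fn drop(&mut self) {
        self.0.deactivate();
    }
}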
@ -30,11 +32,11 @@ fn test_dirty() -> TestResult {
unsafe {
let addr = AddressRange { start: 0x36f00000000, size: 0x10000 };
let mut b = MemoryBlock::new(addr);
let mut g = b.enter();
g.mmap_fixed(addr, Protection::RW, true)?;
let ptr = g.b.addr.slice_mut();
b.activate();
b.mmap_fixed(addr, Protection::RW, true)?;
let ptr = b.addr.slice_mut();
ptr[0x2003] = 5;
assert!(g.b.pages[2].dirty);
assert!(b.pages[2].dirty);
Ok(())
}
}
@ -45,11 +47,11 @@ fn test_offset() -> TestResult {
unsafe {
let addr = AddressRange { start: 0x36f00000000, size: 0x20000 };
let mut b = MemoryBlock::new(addr);
let mut g = b.enter();
g.mmap_fixed(AddressRange { start: 0x36f00003000, size: 0x1000 }, Protection::RW, true)?;
let ptr = g.b.addr.slice_mut();
b.activate();
b.mmap_fixed(AddressRange { start: 0x36f00003000, size: 0x1000 }, Protection::RW, true)?;
let ptr = b.addr.slice_mut();
ptr[0x3663] = 12;
assert!(g.b.pages[3].dirty);
assert!(b.pages[3].dirty);
Ok(())
}
}
@ -60,27 +62,27 @@ fn test_stk_norm() -> TestResult {
unsafe {
let addr = AddressRange { start: 0x36200000000, size: 0x10000 };
let mut b = MemoryBlock::new(addr);
let mut g = b.enter();
g.mmap_fixed(addr, Protection::RWStack, true)?;
let ptr = g.b.addr.slice_mut();
b.activate();
b.mmap_fixed(addr, Protection::RWStack, true)?;
let ptr = b.addr.slice_mut();
ptr[0xeeee] = 0xee;
ptr[0x44] = 0x44;
g.b.get_stack_dirty();
b.get_stack_dirty();
assert!(g.b.pages[0].dirty);
assert!(g.b.pages[14].dirty);
assert!(b.pages[0].dirty);
assert!(b.pages[14].dirty);
assert_eq!(ptr[0x8000], 0);
g.b.get_stack_dirty();
b.get_stack_dirty();
// This is an unfair test, but it's just documenting the current limitations of the system.
// Ideally, page 8 would be clean because we read from it but did not write to it.
// Due to limitations of RWStack tracking on windows, it is dirty.
#[cfg(windows)]
assert!(g.b.pages[8].dirty);
assert!(b.pages[8].dirty);
#[cfg(unix)]
assert!(!g.b.pages[8].dirty);
assert!(!b.pages[8].dirty);
Ok(())
}
@ -93,9 +95,9 @@ fn test_stack() -> TestResult {
unsafe {
let addr = AddressRange { start: 0x36f00000000, size: 0x10000 };
let mut b = MemoryBlock::new(addr);
let mut g = b.enter();
g.mmap_fixed(addr, Protection::RW, true)?;
let ptr = g.b.addr.slice_mut();
b.activate();
b.mmap_fixed(addr, Protection::RW, true)?;
let ptr = b.addr.slice_mut();
let mut i = 0;
ptr[i] = 0x48 ; i += 1; ptr[i] = 0x89 ; i += 1; ptr[i] = 0xe0 ; i += 1; // mov rax,rsp
@ -105,18 +107,18 @@ fn test_stack() -> TestResult {
ptr[i] = 0xb0 ; i += 1; ptr[i] = 0x2a ; i += 1; // mov al,0x2a
ptr[i] = 0xc3 ; // ret
g.mprotect(AddressRange { start: 0x36f00000000, size: 0x1000 }, Protection::RX)?;
g.mprotect(AddressRange { start: 0x36f00008000, size: 0x8000 }, Protection::RWStack)?;
b.mprotect(AddressRange { start: 0x36f00000000, size: 0x1000 }, Protection::RX)?;
b.mprotect(AddressRange { start: 0x36f00008000, size: 0x8000 }, Protection::RWStack)?;
let tmp_rsp = addr.end();
let res = transmute::<usize, extern "sysv64" fn(rsp: usize) -> u8>(addr.start)(tmp_rsp);
assert_eq!(res, 42);
g.b.get_stack_dirty();
b.get_stack_dirty();
assert!(g.b.pages[0].dirty);
assert!(!g.b.pages[1].dirty);
assert!(!g.b.pages[14].dirty);
assert!(g.b.pages[15].dirty);
assert!(b.pages[0].dirty);
assert!(!b.pages[1].dirty);
assert!(!b.pages[14].dirty);
assert!(b.pages[15].dirty);
let real_rsp = isize::from_le_bytes(ptr[addr.size - 8..].try_into().unwrap());
let current_rsp = &real_rsp as *const isize as isize;
@ -130,17 +132,17 @@ fn test_state_basic() -> TestResult {
unsafe {
let addr = AddressRange { start: 0x36c00000000, size: 0x4000 };
let mut b = MemoryBlock::new(addr);
let mut g = b.enter();
let ptr = g.b.addr.slice_mut();
g.mmap_fixed(addr, Protection::RW, true)?;
b.activate();
let ptr = b.addr.slice_mut();
b.mmap_fixed(addr, Protection::RW, true)?;
ptr[0x0000] = 20;
ptr[0x1000] = 40;
ptr[0x2000] = 60;
ptr[0x3000] = 80;
g.seal();
b.seal()?;
let mut state0 = Vec::new();
g.save_state(&mut state0)?;
b.save_state(&mut state0)?;
// no pages should be in the state
assert!(state0.len() < 0x1000);
@ -149,20 +151,20 @@ fn test_state_basic() -> TestResult {
ptr[0x3000] = 44;
let mut state1 = Vec::new();
g.save_state(&mut state1)?;
b.save_state(&mut state1)?;
// two pages should be in the state
assert!(state1.len() > 0x2000);
assert!(state1.len() < 0x3000);
g.load_state(&mut state0.as_slice())?;
b.load_state(&mut state0.as_slice())?;
assert_eq!(ptr[0x0000], 20);
assert_eq!(ptr[0x1000], 40);
assert_eq!(ptr[0x2000], 60);
assert_eq!(ptr[0x3000], 80);
g.load_state(&mut state1.as_slice())?;
b.load_state(&mut state1.as_slice())?;
assert_eq!(ptr[0x0000], 20);
assert_eq!(ptr[0x1000], 100);
@ -178,33 +180,33 @@ fn test_state_unreadable() -> TestResult {
unsafe {
let addr = AddressRange { start: 0x36c00000000, size: 0x1000 };
let mut b = MemoryBlock::new(addr);
let mut g = b.enter();
let ptr = g.b.addr.slice_mut();
g.mmap_fixed(addr, Protection::RW, true)?;
g.seal();
b.activate();
let ptr = b.addr.slice_mut();
b.mmap_fixed(addr, Protection::RW, true)?;
b.seal()?;
ptr[200] = 200;
ptr[500] = 100;
g.mprotect(addr, Protection::None)?;
b.mprotect(addr, Protection::None)?;
let mut state0 = Vec::new();
g.save_state(&mut state0)?;
b.save_state(&mut state0)?;
g.mprotect(addr, Protection::RW)?;
b.mprotect(addr, Protection::RW)?;
ptr[300] = 50;
ptr[600] = 11;
g.mprotect(addr, Protection::None)?;
b.mprotect(addr, Protection::None)?;
let mut state1 = Vec::new();
g.save_state(&mut state1)?;
b.save_state(&mut state1)?;
g.load_state(&mut state0.as_slice())?;
g.mprotect(addr, Protection::R)?;
b.load_state(&mut state0.as_slice())?;
b.mprotect(addr, Protection::R)?;
assert_eq!(ptr[200], 200);
assert_eq!(ptr[500], 100);
assert_eq!(ptr[300], 0);
assert_eq!(ptr[600], 0);
g.load_state(&mut state1.as_slice())?;
g.mprotect(addr, Protection::R)?;
b.load_state(&mut state1.as_slice())?;
b.mprotect(addr, Protection::R)?;
assert_eq!(ptr[200], 200);
assert_eq!(ptr[500], 100);
assert_eq!(ptr[300], 50);
@ -227,13 +229,13 @@ fn test_thready_stack() -> TestResult {
unsafe {
let addr = AddressRange { start: 0x36000000000 + i * 0x100000000, size: PAGESIZE * 2 };
let mut b = MemoryBlock::new(addr);
let mut g = b.enter();
b.activate();
blocker.wait();
g.mmap_fixed(addr, Protection::RWX, true)?;
g.mprotect(AddressRange { start: addr.start + PAGESIZE, size: PAGESIZE }, Protection::RWStack)?;
b.mmap_fixed(addr, Protection::RWX, true)?;
b.mprotect(AddressRange { start: addr.start + PAGESIZE, size: PAGESIZE }, Protection::RWStack)?;
let ptr = g.b.addr.slice_mut();
let ptr = b.addr.slice_mut();
let mut i = 0;
ptr[i] = 0x48 ; i += 1; ptr[i] = 0x89 ; i += 1; ptr[i] = 0xe0 ; i += 1; // mov rax,rsp
@ -243,18 +245,18 @@ fn test_thready_stack() -> TestResult {
ptr[i] = 0xb0 ; i += 1; ptr[i] = 0x2a ; i += 1; // mov al,0x2a
ptr[i] = 0xc3 ; // ret
g.seal();
b.seal()?;
assert!(!g.b.pages[0].dirty);
assert!(!g.b.pages[1].dirty);
assert!(!b.pages[0].dirty);
assert!(!b.pages[1].dirty);
let tmp_rsp = addr.end();
let res = transmute::<usize, extern "sysv64" fn(rsp: usize) -> u8>(addr.start)(tmp_rsp);
assert_eq!(res, 42);
g.b.get_stack_dirty();
b.get_stack_dirty();
assert!(!g.b.pages[0].dirty);
assert!(g.b.pages[1].dirty);
assert!(!b.pages[0].dirty);
assert!(b.pages[1].dirty);
Ok(())
}
@ -275,16 +277,16 @@ fn test_state_invisible() -> TestResult {
unsafe {
let addr = AddressRange { start: 0x36400000000, size: 0x4000 };
let mut b = MemoryBlock::new(addr);
let mut g = b.enter();
let ptr = g.b.addr.slice_mut();
g.mmap_fixed(addr, Protection::RW, true)?;
b.activate();
let ptr = b.addr.slice_mut();
b.mmap_fixed(addr, Protection::RW, true)?;
ptr[0x0055] = 11;
ptr[0x1055] = 22;
g.mark_invisible(AddressRange { start: 0x36400001000, size: 0x2000 })?;
b.mark_invisible(AddressRange { start: 0x36400001000, size: 0x2000 })?;
ptr[0x2055] = 33;
ptr[0x3055] = 44;
g.seal();
b.seal()?;
ptr[0x0055] = 0x11;
ptr[0x1055] = 0x22;
@ -292,7 +294,7 @@ fn test_state_invisible() -> TestResult {
ptr[0x3055] = 0x44;
let mut state0 = Vec::new();
g.save_state(&mut state0)?;
b.save_state(&mut state0)?;
// two pages should be in the state
assert!(state0.len() > 0x2000);
@ -303,7 +305,7 @@ fn test_state_invisible() -> TestResult {
ptr[0x2055] = 0x77;
ptr[0x3055] = 0x88;
g.load_state(&mut state0.as_slice())?;
b.load_state(&mut state0.as_slice())?;
assert_eq!(ptr[0x0055], 0x11);
// Some current cores require this behavior, where the invisible values are actually left untouched.
@ -323,23 +325,23 @@ fn test_dontneed() -> TestResult {
unsafe {
let addr = AddressRange { start: 0x36500000000, size: 0x10000 };
let mut b = MemoryBlock::new(addr);
let mut g = b.enter();
g.seal();
let ptr = g.b.addr.slice_mut();
b.activate();
b.seal()?;
let ptr = b.addr.slice_mut();
g.mmap_fixed(addr, Protection::RW, true)?;
b.mmap_fixed(addr, Protection::RW, true)?;
for i in 0..addr.size {
ptr[i] = i as u8;
}
let addr2 = AddressRange { start: addr.start + 0x3000, size: 0x5000 };
g.madvise_dontneed(addr2)?;
b.madvise_dontneed(addr2)?;
let ptr2 = addr2.slice_mut();
for i in 0..addr2.size {
assert_eq!(ptr2[i], 0);
}
let mut state0 = Vec::new();
g.save_state(&mut state0)?;
b.save_state(&mut state0)?;
assert!(state0.len() < 0xc000);
Ok(())
@ -350,16 +352,16 @@ fn test_dontneed() -> TestResult {
fn test_remap_nomove() -> TestResult {
let addr = AddressRange { start: 0x36600000000, size: 0x10000 };
let mut b = MemoryBlock::new(addr);
let mut g = b.enter();
b.activate();
g.mmap_fixed(AddressRange { start: addr.start, size: 0x4000 }, Protection::RWX, true)?;
g.mremap_nomove(AddressRange { start: addr.start, size: 0x4000 }, 0x6000)?;
assert_eq!(g.b.pages[3].status, PageAllocation::Allocated(Protection::RWX));
assert_eq!(g.b.pages[5].status, PageAllocation::Allocated(Protection::RWX));
g.mremap_nomove(AddressRange { start: addr.start, size: 0x6000 }, 0x3000)?;
assert_eq!(g.b.pages[2].status, PageAllocation::Allocated(Protection::RWX));
assert_eq!(g.b.pages[3].status, PageAllocation::Free);
assert_eq!(g.b.pages[5].status, PageAllocation::Free);
b.mmap_fixed(AddressRange { start: addr.start, size: 0x4000 }, Protection::RWX, true)?;
b.mremap_nomove(AddressRange { start: addr.start, size: 0x4000 }, 0x6000)?;
assert_eq!(b.pages[3].status, PageAllocation::Allocated(Protection::RWX));
assert_eq!(b.pages[5].status, PageAllocation::Allocated(Protection::RWX));
b.mremap_nomove(AddressRange { start: addr.start, size: 0x6000 }, 0x3000)?;
assert_eq!(b.pages[2].status, PageAllocation::Allocated(Protection::RWX));
assert_eq!(b.pages[3].status, PageAllocation::Free);
assert_eq!(b.pages[5].status, PageAllocation::Free);
Ok(())
}
@ -368,14 +370,14 @@ fn test_remap_nomove() -> TestResult {
fn test_mmap_move() -> TestResult {
let addr = AddressRange { start: 0x36700000000, size: 0x10000 };
let mut b = MemoryBlock::new(addr);
let mut g = b.enter();
b.activate();
let p0 = g.mmap_movable(0x10000, Protection::RW, addr)?;
let p0 = b.mmap_movable(0x10000, Protection::RW, addr)?;
assert_eq!(p0, 0x36700000000);
g.munmap(AddressRange { start: 0x36700002000, size: 0x2000 })?;
g.munmap(AddressRange { start: 0x3670000a000, size: 0x1000 })?;
b.munmap(AddressRange { start: 0x36700002000, size: 0x2000 })?;
b.munmap(AddressRange { start: 0x3670000a000, size: 0x1000 })?;
let p1: usize = g.mmap_movable(0x1000, Protection::RW, addr)?;
let p1: usize = b.mmap_movable(0x1000, Protection::RW, addr)?;
assert_eq!(p1, 0x3670000a000); // fit in smallest hole
Ok(())
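The assertion documents best-fit placement: among the free holes large enough for the request, mmap_movable takes the smallest (here the 0x1000 hole at 0x3670000a000 rather than the 0x2000 one). A minimal sketch of that selection rule; best_fit and the (start, size) hole list are illustrative only:

// Best-fit: pick the smallest free hole that still fits the request.
fn best_fit(holes: &[(usize, usize)], want: usize) -> Option<usize> {
    holes
        .iter()
        .filter(|&&(_, size)| size >= want)
        .min_by_key(|&&(_, size)| size)
        .map(|&(start, _)| start)
}

fn main() {
    // Mirrors the test: holes of 0x2000 and 0x1000 remain; a 0x1000
    // request lands in the smaller one.
    let holes = [(0x36700002000, 0x2000), (0x3670000a000, 0x1000)];
    assert_eq!(best_fit(&holes, 0x1000), Some(0x3670000a000));
}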
@ -386,16 +388,16 @@ fn test_mremap_move_expand() -> TestResult {
unsafe {
let addr = AddressRange { start: 0x36800000000, size: 0x4000 };
let mut b = MemoryBlock::new(addr);
let mut g = b.enter();
let ptr = g.b.addr.slice_mut();
b.activate();
let ptr = b.addr.slice_mut();
let initial_addr = AddressRange { start: 0x36800002000, size: 0x1000 };
g.mmap_fixed(initial_addr, Protection::RW, true)?;
b.mmap_fixed(initial_addr, Protection::RW, true)?;
ptr[0x2004] = 11;
let p1 = g.mremap_maymove(initial_addr, 0x2000, addr)?;
let p1 = b.mremap_maymove(initial_addr, 0x2000, addr)?;
assert_eq!(p1, addr.start);
assert_eq!(ptr[4], 11);
g.mmap_fixed(initial_addr, Protection::RW, true)?;
b.mmap_fixed(initial_addr, Protection::RW, true)?;
assert_eq!(ptr[0x2004], 0);
}
Ok(())
@ -406,16 +408,16 @@ fn test_mremap_move_shrink() -> TestResult {
unsafe {
let addr = AddressRange { start: 0x36900000000, size: 0x4000 };
let mut b = MemoryBlock::new(addr);
let mut g = b.enter();
let ptr = g.b.addr.slice_mut();
b.activate();
let ptr = b.addr.slice_mut();
let initial_addr = AddressRange { start: 0x36900001000, size: 0x3000 };
g.mmap_fixed(initial_addr, Protection::RW, true)?;
b.mmap_fixed(initial_addr, Protection::RW, true)?;
ptr[0x1004] = 11;
let p1 = g.mremap_maymove(initial_addr, 0x1000, addr)?;
let p1 = b.mremap_maymove(initial_addr, 0x1000, addr)?;
assert_eq!(p1, addr.start);
assert_eq!(ptr[4], 11);
g.mmap_fixed(initial_addr, Protection::RW, true)?;
b.mmap_fixed(initial_addr, Protection::RW, true)?;
assert_eq!(ptr[0x1004], 0);
}
Ok(())