memoryblock experiments continue

parent 85d140c4b5
commit 6ff2d801e6
@@ -2,20 +2,40 @@ mod pageblock;
 mod pal;
 mod tripguard;
 
+use std::sync::MutexGuard;
 use std::ops::{DerefMut, Deref};
-use parking_lot::ReentrantMutex;
-use std::collections::HashMap;
-use std::sync::Mutex;
 use pageblock::PageBlock;
 use crate::*;
 use getset::Getters;
-use lazy_static::lazy_static;
 use crate::syscall_defs::*;
 use itertools::Itertools;
 use std::io;
+use std::sync::atomic::AtomicU32;
 
-lazy_static! {
-	static ref LOCK_LIST: Mutex<HashMap<u32, ReentrantMutex<Option<MemoryBlockRef>>>> = Mutex::new(HashMap::new());
+/// Tracks one lock for each 4GB memory area
+mod lock_list {
+	use lazy_static::lazy_static;
+	use std::collections::HashMap;
+	use std::sync::Mutex;
+	use super::MemoryBlockRef;
+
+	lazy_static! {
+		static ref LOCK_LIST: Mutex<HashMap<u32, Box<Mutex<Option<MemoryBlockRef>>>>> = Mutex::new(HashMap::new());
+	}
+
+	unsafe fn extend<T>(o: &T) -> &'static T {
+		std::mem::transmute::<&T, &'static T>(o)
+	}
+	pub fn maybe_add(lock_index: u32) {
+		let map = &mut LOCK_LIST.lock().unwrap();
+		map.entry(lock_index).or_insert_with(|| Box::new(Mutex::new(None)));
+	}
+	pub fn get(lock_index: u32) -> &'static Mutex<Option<MemoryBlockRef>> {
+		let map = &mut LOCK_LIST.lock().unwrap();
+		unsafe {
+			extend(map.get(&lock_index).unwrap())
+		}
+	}
 }
 
 fn align_down(p: usize) -> usize {
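The `extend` helper in the new `lock_list` module is only sound because entries are boxed and never removed: the `Box` keeps each inner `Mutex` at a stable heap address even when the `HashMap` rehashes, so a `&'static` reference to it can outlive the map guard. A minimal standalone sketch of the same lifetime-extension pattern (illustrative names, not the crate's own code):

```rust
use std::collections::HashMap;
use std::sync::Mutex;

// Global registry; entries are boxed and never removed.
static REGISTRY: Mutex<Option<HashMap<u32, Box<Mutex<u64>>>>> = Mutex::new(None);

// SAFETY: sound only if the referent is never dropped or moved; here the Box
// pins the inner Mutex on the heap and no entry is ever removed from the map.
unsafe fn extend<T>(o: &T) -> &'static T {
	std::mem::transmute::<&T, &'static T>(o)
}

fn get(key: u32) -> &'static Mutex<u64> {
	let mut guard = REGISTRY.lock().unwrap();
	let map = guard.get_or_insert_with(HashMap::new);
	let boxed = map.entry(key).or_insert_with(|| Box::new(Mutex::new(0)));
	unsafe { extend(&**boxed) }
}

fn main() {
	*get(1).lock().unwrap() += 1;
	assert_eq!(*get(1).lock().unwrap(), 1);
}
```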
@@ -46,7 +66,10 @@ impl PageAllocation {
 	pub fn writable(&self) -> bool {
 		use PageAllocation::*;
 		match self {
-			Allocated(Protection::RW) | Allocated(Protection::RWX) => true,
+			Allocated(a) => match a {
+				Protection::RW | Protection::RWX | Protection::RWStack => true,
+				_ => false
+			},
 			_ => false,
 		}
 	}
@@ -63,7 +86,7 @@ enum Snapshot {
 #[derive(Debug)]
 struct Page {
 	pub status: PageAllocation,
-	/// if true, the page has changed from its ground state
+	/// if true, the page has changed from its original state
 	pub dirty: bool,
 	pub snapshot: Snapshot,
 }
@@ -86,8 +109,23 @@ impl Page {
 			self.snapshot = Snapshot::Data(snapshot);
 		}
 	}
+	/// Compute the appropriate native protection value given this page's current status
+	pub fn native_prot(&self) -> Protection {
+		match self.status {
+			#[cfg(windows)]
+			PageAllocation::Allocated(Protection::RWStack) if self.dirty => Protection::RW,
+			PageAllocation::Allocated(Protection::RW) if !self.dirty => Protection::R,
+			PageAllocation::Allocated(Protection::RWX) if !self.dirty => Protection::RX,
+			#[cfg(unix)]
+			PageAllocation::Allocated(Protection::RWStack) => if self.dirty { Protection::RW } else { Protection::R },
+			PageAllocation::Allocated(x) => x,
+			PageAllocation::Free => Protection::None,
+		}
+	}
 }
 
+static NEXT_DEBUG_ID: AtomicU32 = AtomicU32::new(0);
+
 #[derive(Getters)]
 #[derive(Debug)]
 pub struct MemoryBlock {
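`native_prot` is the heart of the dirty-page tracking: a page whose guest-visible protection is writable but whose dirty bit is still clear is handed to the OS with write access stripped, so the first write traps and the fault handler can record it before restoring full access. A simplified sketch of that mapping, with the platform-specific `RWStack` arms omitted and the types mirrored locally for illustration (not the crate's own definitions):

```rust
#[derive(Clone, Copy, PartialEq, Debug)]
enum Protection { None, R, RW, RX, RWX, RWStack }

// status: None models PageAllocation::Free; Some(p) models Allocated(p).
fn native_prot(status: Option<Protection>, dirty: bool) -> Protection {
	match status {
		// Clean writable pages are mapped read-only (or RX) so the first
		// write faults and the handler can set the dirty bit.
		Some(Protection::RW) if !dirty => Protection::R,
		Some(Protection::RWX) if !dirty => Protection::RX,
		Some(p) => p,
		None => Protection::None,
	}
}

fn main() {
	// A clean RW page is presented to the OS as read-only...
	assert_eq!(native_prot(Some(Protection::RW), false), Protection::R);
	// ...and once dirty it regains its full requested protection.
	assert_eq!(native_prot(Some(Protection::RW), true), Protection::RW);
}
```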
@@ -101,6 +139,9 @@ pub struct MemoryBlock {
 	lock_index: u32,
 	handle: pal::Handle,
 	lock_count: u32,
+	mutex_guard: Option<MutexGuard<'static, Option<MemoryBlockRef>>>,
+
+	debug_id: u32,
 }
 
 pub struct MemoryBlockGuard<'a> {
@@ -140,12 +181,10 @@ impl MemoryBlock {
 		let handle = pal::open(addr.size).unwrap();
 		let lock_index = (addr.start >> 32) as u32;
 		// add the lock_index stuff now, so we won't have to check for it later on activate / drop
-		{
-			let map = &mut LOCK_LIST.lock().unwrap();
-			map.entry(lock_index).or_insert(ReentrantMutex::new(None));
-		}
+		lock_list::maybe_add(lock_index);
 
-		Box::new(MemoryBlock {
+		let debug_id = NEXT_DEBUG_ID.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
+		let res = Box::new(MemoryBlock {
 			pages,
 			addr,
 			sealed: false,
@@ -153,7 +192,19 @@ impl MemoryBlock {
 			lock_index,
 			handle,
 			lock_count: 0,
-		})
+			mutex_guard: None,
+
+			debug_id,
+		});
+		// res.trace("new");
+		res
+	}
+
+	pub fn trace(&self, name: &str) {
+		let ptr = unsafe { std::mem::transmute::<&Self, usize>(self) };
+		let tid = unsafe { std::mem::transmute::<std::thread::ThreadId, u64>(std::thread::current().id()) };
+		eprintln!("{}#{} {} [{}]@[{}] thr{}",
+			name, self.debug_id, ptr, self.lock_count, self.lock_index, tid)
 	}
 
 	pub fn enter(&mut self) -> MemoryBlockGuard {
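The new `debug_id` is drawn from `NEXT_DEBUG_ID` with a relaxed `fetch_add`, which is all uniqueness needs: `fetch_add` atomically returns the value before the increment, and the id does not synchronize any other memory. A standalone sketch of the same counter shape:

```rust
use std::sync::atomic::{AtomicU32, Ordering};

static NEXT_DEBUG_ID: AtomicU32 = AtomicU32::new(0);

fn next_id() -> u32 {
	// Returns the pre-increment value, so concurrent callers can never
	// observe the same id; Relaxed ordering suffices for a bare counter.
	NEXT_DEBUG_ID.fetch_add(1, Ordering::Relaxed)
}

fn main() {
	assert_eq!(next_id(), 0);
	assert_eq!(next_id(), 1);
}
```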
@@ -165,65 +216,102 @@ impl MemoryBlock {
 
 	/// lock self, and potentially swap this block into memory
 	pub fn activate(&mut self) {
+		// self.trace("activate");
 		unsafe {
-			let map = &mut LOCK_LIST.lock().unwrap();
-			let lock = map.get_mut(&self.lock_index).unwrap();
-			std::mem::forget(lock.lock());
-			let other_opt = lock.get_mut();
-			match *other_opt {
-				Some(MemoryBlockRef(other)) => {
-					if other != self {
-						assert!(!(*other).active());
-						(*other).swapout();
+			if !self.active() {
+				let area = lock_list::get(self.lock_index);
+				let mut guard = area.lock().unwrap();
+
+				let other_opt = guard.deref_mut();
+				match *other_opt {
+					Some(MemoryBlockRef(other)) => {
+						if other != self {
+							assert!(!(*other).active());
+							(*other).swapout();
+							self.swapin();
+							*other_opt = Some(MemoryBlockRef(self));
+						}
+					},
+					None => {
 						self.swapin();
 						*other_opt = Some(MemoryBlockRef(self));
 					}
-				},
-				None => {
-					self.swapin();
-					*other_opt = Some(MemoryBlockRef(self));
-				}
 				}
+
+				self.mutex_guard = Some(guard);
 			}
 			self.lock_count += 1;
 		}
 	}
 	/// unlock self, and potentially swap this block out of memory
 	pub fn deactivate(&mut self) {
+		// self.trace("deactivate");
 		unsafe {
 			assert!(self.active());
-			let map = &mut LOCK_LIST.lock().unwrap();
-			let lock = map.get_mut(&self.lock_index).unwrap();
 			self.lock_count -= 1;
-			#[cfg(debug_assertions)]
-			{
-				let other_opt = lock.get_mut();
-				let other = other_opt.as_ref().unwrap().0;
-				assert_eq!(&*other, self);
-				// in debug mode, forcibly evict to catch dangling pointers
-				if !self.active() {
-					self.swapout();
-					*other_opt = None;
+			if !self.active() {
+				let mut guard = std::mem::replace(&mut self.mutex_guard, None).unwrap();
+				#[cfg(debug_assertions)]
+				{
+					// in debug mode, forcibly evict to catch dangling pointers
+					let other_opt = guard.deref_mut();
+					match *other_opt {
+						Some(MemoryBlockRef(other)) => {
+							if other != self {
+								panic!();
+							}
+							self.swapout();
+							*other_opt = None;
+						},
+						None => {
+							panic!()
+						}
+					}
 				}
 			}
-			lock.force_unlock();
 		}
 	}
 
 	unsafe fn swapin(&mut self) {
+		// self.trace("swapin");
 		assert!(pal::map(&self.handle, self.addr));
 		tripguard::register(self);
 		MemoryBlock::refresh_protections(self.addr.start, self.pages.as_slice());
 	}
 	unsafe fn swapout(&mut self) {
+		// self.trace("swapout");
 		self.get_stack_dirty();
 		assert!(pal::unmap(self.addr));
 		tripguard::unregister(self);
 	}
 
-	pub fn active (&self) -> bool {
+	pub fn active(&self) -> bool {
 		self.lock_count > 0
 	}
+}
+
+impl Drop for MemoryBlock {
+	fn drop(&mut self) {
+		// self.trace("drop");
+		assert!(!self.active());
+		let area = lock_list::get(self.lock_index);
+		let mut guard = area.lock().unwrap();
+		let other_opt = guard.deref_mut();
+		match *other_opt {
+			Some(MemoryBlockRef(other)) => {
+				if other == self {
+					unsafe { self.swapout(); }
+					*other_opt = None;
+				}
+			},
+			None => ()
+		}
+		let h = std::mem::replace(&mut self.handle, pal::bad());
+		unsafe { pal::close(h); }
+	}
+}
+
+impl MemoryBlock {
 	fn validate_range(&mut self, addr: AddressRange) -> Result<&mut [Page], i32> {
 		if addr.start < self.addr.start
 			|| addr.end() > self.addr.end()
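Storing the `MutexGuard<'static, _>` in the new `mutex_guard` field is what replaces the old `std::mem::forget(lock.lock())` trick: the per-area lock is taken on the first `activate` and released simply by dropping the guard on the matching last `deactivate`. A reduced sketch of the pattern, with a hypothetical `Block` type and a single global lock standing in for one `lock_list` entry:

```rust
use std::sync::{Mutex, MutexGuard};

// Stand-in for one 4GB-area lock from the real lock list.
static AREA: Mutex<()> = Mutex::new(());

struct Block {
	lock_count: u32,
	// Holding the guard in a field keeps the area locked between calls,
	// instead of leaking a lock acquisition with std::mem::forget.
	guard: Option<MutexGuard<'static, ()>>,
}

impl Block {
	fn activate(&mut self) {
		if self.lock_count == 0 {
			self.guard = Some(AREA.lock().unwrap());
		}
		self.lock_count += 1;
	}
	fn deactivate(&mut self) {
		self.lock_count -= 1;
		if self.lock_count == 0 {
			self.guard = None; // dropping the guard unlocks the area
		}
	}
}

fn main() {
	let mut b = Block { lock_count: 0, guard: None };
	b.activate();
	b.activate(); // nested activation is handled by the counter alone
	b.deactivate();
	b.deactivate();
	assert!(b.guard.is_none());
}
```

Note that unlike the removed `parking_lot::ReentrantMutex`, a plain `Mutex` is not reentrant across threads; the counter makes nesting safe only within the block's own activate/deactivate pairing.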
@@ -233,8 +321,8 @@ impl MemoryBlock {
 			Err(EINVAL)
 		} else {
 			let pstart = (addr.start - self.addr.start) >> PAGESHIFT;
-			let pend = (addr.size) >> PAGESHIFT;
-			Ok(&mut self.pages[pstart..pend])
+			let psize = (addr.size) >> PAGESHIFT;
+			Ok(&mut self.pages[pstart..pstart + psize])
 		}
 	}
 
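The `validate_range` change fixes an indexing bug: the old `pend` was a page count, not an end index, so any request that did not start at the block base sliced with a backwards range. A worked example using the numbers from the new `test_offset` test further down:

```rust
fn main() {
	const PAGESHIFT: usize = 12;
	let block_start = 0x36f00000000usize;
	let (start, size) = (0x36f00003000usize, 0x1000usize);
	let pstart = (start - block_start) >> PAGESHIFT; // 3
	let psize = size >> PAGESHIFT;                   // 1
	// old: pages[pstart..psize]         -> pages[3..1], an inverted range that panics
	// new: pages[pstart..pstart + psize] -> pages[3..4], exactly the mapped page
	assert_eq!((pstart, pstart + psize), (3, 4));
}
```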
@@ -245,21 +333,11 @@ impl MemoryBlock {
 		};
 		let chunks = pages.iter()
 			.map(|p| {
-				let prot = match p.status {
-					#[cfg(windows)]
-					PageAllocation::Allocated(Protection::RWStack) if p.dirty => Protection::RW,
-					PageAllocation::Allocated(Protection::RW) if !p.dirty => Protection::R,
-					PageAllocation::Allocated(Protection::RWX) if !p.dirty => Protection::RX,
-					#[cfg(unix)]
-					PageAllocation::Allocated(Protection::RWStack) => if p.dirty { Protection::RW } else { Protection::R },
-					PageAllocation::Allocated(x) => x,
-					PageAllocation::Free => Protection::None,
-				};
-				let pstart = start;
+				let cstart = start;
 				start += PAGESIZE;
 				Chunk {
-					addr: AddressRange { start: pstart, size: PAGESIZE },
-					prot,
+					addr: AddressRange { start: cstart, size: PAGESIZE },
+					prot: p.native_prot(),
 				}
 			})
 			.coalesce(|x, y| if x.prot == y.prot {
@@ -381,7 +459,6 @@ impl MemoryBlock {
 		}
 	}
 }
-
 impl IStateable for MemoryBlock {
 	fn save_sate(&mut self, stream: Box<dyn Write>) -> Result<(), io::Error> {
 		assert!(self.sealed);
@@ -395,31 +472,6 @@ impl IStateable for MemoryBlock {
 	}
 }
 
-impl Drop for MemoryBlock {
-	fn drop(&mut self) {
-		assert!(!self.active());
-		let map = &mut LOCK_LIST.lock().unwrap();
-		let lock = map.get_mut(&self.lock_index).unwrap();
-		let other_opt = lock.get_mut();
-		match *other_opt {
-			Some(MemoryBlockRef(other)) => {
-				if other == self {
-					unsafe {
-						self.swapout();
-					}
-					*other_opt = None;
-				}
-			},
-			None => ()
-		}
-		let mut h = pal::bad();
-		std::mem::swap(&mut h, &mut self.handle);
-		unsafe {
-			pal::close(h);
-		}
-	}
-}
-
 impl PartialEq for MemoryBlock {
 	fn eq(&self, other: &MemoryBlock) -> bool {
 		self as *const MemoryBlock == other as *const MemoryBlock
@@ -427,13 +479,16 @@ impl PartialEq for MemoryBlock {
 }
 impl Eq for MemoryBlock {}
 
-struct MemoryBlockRef(*mut MemoryBlock);
+#[derive(Debug)]
+pub struct MemoryBlockRef(*mut MemoryBlock);
 unsafe impl Send for MemoryBlockRef {}
 
 #[cfg(test)]
 mod tests {
+	use std::mem::transmute;
 	use super::*;
 
+	/// new / drop, activate / deactivate
 	#[test]
 	fn test_create() {
 		drop(MemoryBlock::new(AddressRange { start: 0x36300000000, size: 0x50000 }));
@@ -458,6 +513,7 @@ mod tests {
 		}
 	}
 
+	/// simple test of dirt detection
 	#[test]
 	fn test_dirty() -> SyscallResult {
 		unsafe {
@@ -471,4 +527,82 @@ mod tests {
 			Ok(())
 		}
 	}
+
+	/// dirt detection away from the start of a block
+	#[test]
+	fn test_offset() -> SyscallResult {
+		unsafe {
+			let addr = AddressRange { start: 0x36f00000000, size: 0x20000 };
+			let mut b = MemoryBlock::new(addr);
+			let mut g = b.enter();
+			g.mmap_fixed(AddressRange { start: 0x36f00003000, size: 0x1000 }, Protection::RW)?;
+			let ptr = g.addr.slice_mut();
+			ptr[0x3663] = 12;
+			assert!(g.pages[3].dirty);
+			Ok(())
+		}
+	}
+
+	/// dirt detection in RWStack area when $rsp points there
+	#[test]
+	fn test_stk_norm() -> SyscallResult {
+		unsafe {
+			let addr = AddressRange { start: 0x36200000000, size: 0x10000 };
+			let mut b = MemoryBlock::new(addr);
+			let mut g = b.enter();
+			g.mmap_fixed(addr, Protection::RWStack)?;
+			let ptr = g.addr.slice_mut();
+			ptr[0xeeee] = 0xee;
+			ptr[0x44] = 0x44;
+			assert!(g.pages[0].dirty);
+			assert!(g.pages[14].dirty);
+			assert_eq!(ptr[0x8000], 0);
+
+			// This is an unfair test, but it's just documenting the current limitations of the system.
+			// Ideally, page 8 would be clean because we read from it but did not write to it.
+			// Due to limitations of RWStack tracking on windows, it is dirty.
+			#[cfg(windows)]
+			assert!(g.pages[8].dirty);
+			#[cfg(unix)]
+			assert!(!g.pages[8].dirty);
+
+			Ok(())
+		}
+	}
+
+	/// dirt detection in RWStack area when $rsp points there
+	#[test]
+	fn test_stack() -> SyscallResult {
+		use std::convert::TryInto;
+		unsafe {
+			let addr = AddressRange { start: 0x36f00000000, size: 0x10000 };
+			let mut b = MemoryBlock::new(addr);
+			let mut g = b.enter();
+			g.mmap_fixed(addr, Protection::RW)?;
+			let ptr = g.addr.slice_mut();
+			let mut i = 0;
+
+			ptr[i] = 0x48 ; i += 1; ptr[i] = 0x89 ; i += 1; ptr[i] = 0xe0 ; i += 1; // mov rax,rsp
+			ptr[i] = 0x48 ; i += 1; ptr[i] = 0x89 ; i += 1; ptr[i] = 0xfc ; i += 1; // mov rsp,rdi
+			ptr[i] = 0x50 ; i += 1; // push rax
+			ptr[i] = 0x48 ; i += 1; ptr[i] = 0x89 ; i += 1; ptr[i] = 0xc4 ; i += 1; // mov rsp,rax
+			ptr[i] = 0xb0 ; i += 1; ptr[i] = 0x2a ; i += 1; // mov al,0x2a
+			ptr[i] = 0xc3 ; // ret
+
+			g.mprotect(AddressRange { start: 0x36f00000000, size: 0x1000 }, Protection::RX)?;
+			g.mprotect(AddressRange { start: 0x36f00008000, size: 0x8000 }, Protection::RWStack)?;
+			let tmp_rsp = addr.end();
+			let res = transmute::<usize, extern "sysv64" fn(rsp: usize) -> u8>(addr.start)(tmp_rsp);
+			assert_eq!(res, 42);
+			assert!(g.pages[0].dirty);
+			assert!(!g.pages[1].dirty);
+			assert!(!g.pages[14].dirty);
+			assert!(g.pages[15].dirty);
+
+			let real_rsp = isize::from_le_bytes(ptr[addr.size - 8..].try_into().unwrap());
+			let current_rsp = &real_rsp as *const isize as isize;
+			assert!((real_rsp - current_rsp).abs() < 0x10000);
+			Ok(())
+		}
+	}
 }

@@ -29,8 +29,15 @@ pub unsafe fn register(block: *mut MemoryBlock) {
 
 pub unsafe fn unregister(block: *mut MemoryBlock) {
 	let mut data = GLOBAL_DATA.lock().unwrap();
-	let pos = data.active_blocks.iter().position(|x| x.0 == block).unwrap();
-	data.active_blocks.remove(pos);
+	let pos = data.active_blocks.iter().position(|x| x.0 == block);
+	match pos {
+		Some(index) => {
+			data.active_blocks.remove(index);
+		},
+		None => {
+			panic!("Tried to unregister MemoryBlock which was not registered")
+		}
+	}
 }
 
 enum TripResult {
@@ -53,11 +60,7 @@ unsafe fn trip(addr: usize) -> TripResult {
 	}
 	page.maybe_snapshot(page_start_addr);
 	page.dirty = true;
-	let new_prot = match &page.status {
-		PageAllocation::Allocated(p) => p,
-		PageAllocation::Free => panic!(),
-	};
-	assert!(pal::protect(AddressRange { start: page_start_addr, size: PAGESIZE }, *new_prot));
+	assert!(pal::protect(AddressRange { start: page_start_addr, size: PAGESIZE }, page.native_prot()));
 	TripResult::Handled
 }
 
@@ -70,9 +73,10 @@ fn initialize() {
 unsafe extern "system" fn handler(p_info: *mut EXCEPTION_POINTERS) -> i32 {
 	let p_record = &mut *(*p_info).ExceptionRecord;
 	let flags = p_record.ExceptionInformation[0];
-	if p_record.ExceptionCode != STATUS_ACCESS_VIOLATION // only trigger on access violations...
-		|| (flags & 1) == 0 { // ...due to a write attempts
-		return EXCEPTION_CONTINUE_SEARCH
+	match p_record.ExceptionCode {
+		STATUS_ACCESS_VIOLATION if (flags & 1) != 0 => (), // write exception
+		STATUS_GUARD_PAGE_VIOLATION => (), // guard exception
+		_ => return EXCEPTION_CONTINUE_SEARCH
 	}
 	let fault_address = p_record.ExceptionInformation[1] as usize;
 	match trip(fault_address) {
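On Windows, RWStack pages are tracked with guard pages, so the handler now has to accept `STATUS_GUARD_PAGE_VIOLATION` in addition to write access violations. A sketch of just the dispatch predicate, with the NTSTATUS constants written out by hand (the real code takes them from its Windows bindings, and the handler signature is omitted here):

```rust
// Standard NTSTATUS values; ExceptionInformation[0] bit 0 marks a write access.
const STATUS_ACCESS_VIOLATION: u32 = 0xC0000005;
const STATUS_GUARD_PAGE_VIOLATION: u32 = 0x80000001;

fn should_handle(code: u32, flags: usize) -> bool {
	match code {
		// access violations are interesting only when caused by a write
		STATUS_ACCESS_VIOLATION if (flags & 1) != 0 => true,
		// guard-page hits are how RWStack pages report their first touch
		STATUS_GUARD_PAGE_VIOLATION => true,
		_ => false,
	}
}

fn main() {
	assert!(should_handle(STATUS_ACCESS_VIOLATION, 1));
	assert!(!should_handle(STATUS_ACCESS_VIOLATION, 0)); // read fault: pass on
	assert!(should_handle(STATUS_GUARD_PAGE_VIOLATION, 0));
}
```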