memoryblock experiments continue

nattthebear 2020-06-23 08:37:42 -04:00
parent 85d140c4b5
commit 6ff2d801e6
2 changed files with 232 additions and 94 deletions


@@ -2,20 +2,40 @@ mod pageblock;
mod pal;
mod tripguard;
use std::sync::MutexGuard;
use std::ops::{DerefMut, Deref};
use parking_lot::ReentrantMutex;
use std::collections::HashMap;
use std::sync::Mutex;
use pageblock::PageBlock;
use crate::*;
use getset::Getters;
use lazy_static::lazy_static;
use crate::syscall_defs::*;
use itertools::Itertools;
use std::io;
use std::sync::atomic::AtomicU32;
lazy_static! {
static ref LOCK_LIST: Mutex<HashMap<u32, ReentrantMutex<Option<MemoryBlockRef>>>> = Mutex::new(HashMap::new());
/// Tracks one lock for each 4GB memory area
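/// Blocks whose start addresses share the same upper 32 bits map to the same lock,
/// so at most one of them can be swapped into that 4GB area at a time.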
mod lock_list {
use lazy_static::lazy_static;
use std::collections::HashMap;
use std::sync::Mutex;
use super::MemoryBlockRef;
lazy_static! {
static ref LOCK_LIST: Mutex<HashMap<u32, Box<Mutex<Option<MemoryBlockRef>>>>> = Mutex::new(HashMap::new());
}
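/// Extend a borrow of a map value to 'static. This is sound only because entries are
/// boxed and never removed from LOCK_LIST, so the Mutex's address is stable for the
/// remainder of the process.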
unsafe fn extend<T>(o: &T) -> &'static T {
std::mem::transmute::<&T, &'static T>(o)
}
pub fn maybe_add(lock_index: u32) {
let map = &mut LOCK_LIST.lock().unwrap();
map.entry(lock_index).or_insert_with(|| Box::new(Mutex::new(None)));
}
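/// Get the lock for a 4GB area; panics if maybe_add was never called for this lock_index.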
pub fn get(lock_index: u32) -> &'static Mutex<Option<MemoryBlockRef>> {
let map = &mut LOCK_LIST.lock().unwrap();
unsafe {
extend(map.get(&lock_index).unwrap())
}
}
}
fn align_down(p: usize) -> usize {
@@ -46,7 +66,10 @@ impl PageAllocation {
pub fn writable(&self) -> bool {
use PageAllocation::*;
match self {
Allocated(Protection::RW) | Allocated(Protection::RWX) => true,
Allocated(a) => match a {
Protection::RW | Protection::RWX | Protection::RWStack => true,
_ => false
}
_ => false,
}
}
@@ -63,7 +86,7 @@ enum Snapshot {
#[derive(Debug)]
struct Page {
pub status: PageAllocation,
/// if true, the page has changed from its ground state
/// if true, the page has changed from its original state
pub dirty: bool,
pub snapshot: Snapshot,
}
@@ -86,8 +109,23 @@ impl Page {
self.snapshot = Snapshot::Data(snapshot);
}
}
/// Compute the appropriate native protection value given this page's current status
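/// Clean RW / RWX pages are mapped R / RX so the first write faults into the trip handler,
/// which marks the page dirty and then recomputes this protection (now writable).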
pub fn native_prot(&self) -> Protection {
match self.status {
#[cfg(windows)]
PageAllocation::Allocated(Protection::RWStack) if self.dirty => Protection::RW,
PageAllocation::Allocated(Protection::RW) if !self.dirty => Protection::R,
PageAllocation::Allocated(Protection::RWX) if !self.dirty => Protection::RX,
#[cfg(unix)]
PageAllocation::Allocated(Protection::RWStack) => if self.dirty { Protection::RW } else { Protection::R },
PageAllocation::Allocated(x) => x,
PageAllocation::Free => Protection::None,
}
}
}
static NEXT_DEBUG_ID: AtomicU32 = AtomicU32::new(0);
#[derive(Getters)]
#[derive(Debug)]
pub struct MemoryBlock {
@@ -101,6 +139,9 @@ pub struct MemoryBlock {
lock_index: u32,
handle: pal::Handle,
lock_count: u32,
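/// guard on this block's 4GB area, held while the block is active (see activate / deactivate)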
mutex_guard: Option<MutexGuard<'static, Option<MemoryBlockRef>>>,
debug_id: u32,
}
pub struct MemoryBlockGuard<'a> {
@@ -140,12 +181,10 @@ impl MemoryBlock {
let handle = pal::open(addr.size).unwrap();
let lock_index = (addr.start >> 32) as u32;
// add the lock_index stuff now, so we won't have to check for it later on activate / drop
{
let map = &mut LOCK_LIST.lock().unwrap();
map.entry(lock_index).or_insert(ReentrantMutex::new(None));
}
lock_list::maybe_add(lock_index);
Box::new(MemoryBlock {
let debug_id = NEXT_DEBUG_ID.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
let res = Box::new(MemoryBlock {
pages,
addr,
sealed: false,
@@ -153,7 +192,19 @@ impl MemoryBlock {
lock_index,
handle,
lock_count: 0,
})
mutex_guard: None,
debug_id,
});
// res.trace("new");
res
}
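/// Debug logging helper: prints this block's debug_id, address, lock state, and the calling thread.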
pub fn trace(&self, name: &str) {
let ptr = unsafe { std::mem::transmute::<&Self, usize>(self) };
let tid = unsafe { std::mem::transmute::<std::thread::ThreadId, u64>(std::thread::current().id()) };
eprintln!("{}#{} {} [{}]@[{}] thr{}",
name, self.debug_id, ptr, self.lock_count, self.lock_index, tid)
}
pub fn enter(&mut self) -> MemoryBlockGuard {
@@ -165,65 +216,102 @@ impl MemoryBlock {
/// lock self, and potentially swap this block into memory
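/// On the first activation the area guard is stored in `self.mutex_guard`; nested activations
/// only increment `lock_count`.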
pub fn activate(&mut self) {
// self.trace("activate");
unsafe {
let map = &mut LOCK_LIST.lock().unwrap();
let lock = map.get_mut(&self.lock_index).unwrap();
std::mem::forget(lock.lock());
let other_opt = lock.get_mut();
match *other_opt {
Some(MemoryBlockRef(other)) => {
if other != self {
assert!(!(*other).active());
(*other).swapout();
if !self.active() {
let area = lock_list::get(self.lock_index);
let mut guard = area.lock().unwrap();
let other_opt = guard.deref_mut();
match *other_opt {
Some(MemoryBlockRef(other)) => {
if other != self {
assert!(!(*other).active());
(*other).swapout();
self.swapin();
*other_opt = Some(MemoryBlockRef(self));
}
},
None => {
self.swapin();
*other_opt = Some(MemoryBlockRef(self));
}
},
None => {
self.swapin();
*other_opt = Some(MemoryBlockRef(self));
}
self.mutex_guard = Some(guard);
}
self.lock_count += 1;
}
}
/// unlock self, and potentially swap this block out of memory
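/// On the last deactivation the stored guard is dropped, releasing the area lock; in debug
/// builds the block is also swapped out first so dangling pointers into it fault.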
pub fn deactivate(&mut self) {
// self.trace("deactivate");
unsafe {
assert!(self.active());
let map = &mut LOCK_LIST.lock().unwrap();
let lock = map.get_mut(&self.lock_index).unwrap();
self.lock_count -= 1;
#[cfg(debug_assertions)]
{
let other_opt = lock.get_mut();
let other = other_opt.as_ref().unwrap().0;
assert_eq!(&*other, self);
// in debug mode, forcibly evict to catch dangling pointers
if !self.active() {
self.swapout();
*other_opt = None;
if !self.active() {
let mut guard = std::mem::replace(&mut self.mutex_guard, None).unwrap();
#[cfg(debug_assertions)]
{
// in debug mode, forcibly evict to catch dangling pointers
let other_opt = guard.deref_mut();
match *other_opt {
Some(MemoryBlockRef(other)) => {
if other != self {
panic!();
}
self.swapout();
*other_opt = None;
},
None => {
panic!()
}
}
}
}
lock.force_unlock();
}
}
unsafe fn swapin(&mut self) {
// self.trace("swapin");
assert!(pal::map(&self.handle, self.addr));
tripguard::register(self);
MemoryBlock::refresh_protections(self.addr.start, self.pages.as_slice());
}
unsafe fn swapout(&mut self) {
// self.trace("swapout");
self.get_stack_dirty();
assert!(pal::unmap(self.addr));
tripguard::unregister(self);
}
pub fn active (&self) -> bool {
pub fn active(&self) -> bool {
self.lock_count > 0
}
}
impl Drop for MemoryBlock {
fn drop(&mut self) {
// self.trace("drop");
assert!(!self.active());
let area = lock_list::get(self.lock_index);
let mut guard = area.lock().unwrap();
let other_opt = guard.deref_mut();
match *other_opt {
Some(MemoryBlockRef(other)) => {
if other == self {
unsafe { self.swapout(); }
*other_opt = None;
}
},
None => ()
}
let h = std::mem::replace(&mut self.handle, pal::bad());
unsafe { pal::close(h); }
}
}
impl MemoryBlock {
fn validate_range(&mut self, addr: AddressRange) -> Result<&mut [Page], i32> {
if addr.start < self.addr.start
|| addr.end() > self.addr.end()
@@ -233,8 +321,8 @@ impl MemoryBlock {
Err(EINVAL)
} else {
let pstart = (addr.start - self.addr.start) >> PAGESHIFT;
let pend = (addr.size) >> PAGESHIFT;
Ok(&mut self.pages[pstart..pend])
let psize = (addr.size) >> PAGESHIFT;
Ok(&mut self.pages[pstart..pstart + psize])
}
}
@@ -245,21 +333,11 @@ impl MemoryBlock {
};
let chunks = pages.iter()
.map(|p| {
let prot = match p.status {
#[cfg(windows)]
PageAllocation::Allocated(Protection::RWStack) if p.dirty => Protection::RW,
PageAllocation::Allocated(Protection::RW) if !p.dirty => Protection::R,
PageAllocation::Allocated(Protection::RWX) if !p.dirty => Protection::RX,
#[cfg(unix)]
PageAllocation::Allocated(Protection::RWStack) => if p.dirty { Protection::RW } else { Protection::R },
PageAllocation::Allocated(x) => x,
PageAllocation::Free => Protection::None,
};
let pstart = start;
let cstart = start;
start += PAGESIZE;
Chunk {
addr: AddressRange { start: pstart, size: PAGESIZE },
prot,
addr: AddressRange { start: cstart, size: PAGESIZE },
prot: p.native_prot(),
}
})
.coalesce(|x, y| if x.prot == y.prot {
@@ -381,7 +459,6 @@ impl MemoryBlock {
}
}
}
impl IStateable for MemoryBlock {
fn save_sate(&mut self, stream: Box<dyn Write>) -> Result<(), io::Error> {
assert!(self.sealed);
@@ -395,31 +472,6 @@ impl IStateable for MemoryBlock {
}
}
impl Drop for MemoryBlock {
fn drop(&mut self) {
assert!(!self.active());
let map = &mut LOCK_LIST.lock().unwrap();
let lock = map.get_mut(&self.lock_index).unwrap();
let other_opt = lock.get_mut();
match *other_opt {
Some(MemoryBlockRef(other)) => {
if other == self {
unsafe {
self.swapout();
}
*other_opt = None;
}
},
None => ()
}
let mut h = pal::bad();
std::mem::swap(&mut h, &mut self.handle);
unsafe {
pal::close(h);
}
}
}
impl PartialEq for MemoryBlock {
fn eq(&self, other: &MemoryBlock) -> bool {
self as *const MemoryBlock == other as *const MemoryBlock
@@ -427,13 +479,16 @@ impl PartialEq for MemoryBlock {
}
impl Eq for MemoryBlock {}
struct MemoryBlockRef(*mut MemoryBlock);
#[derive(Debug)]
pub struct MemoryBlockRef(*mut MemoryBlock);
unsafe impl Send for MemoryBlockRef {}
#[cfg(test)]
mod tests {
use std::mem::transmute;
use super::*;
/// new / drop, activate / deactivate
#[test]
fn test_create() {
drop(MemoryBlock::new(AddressRange { start: 0x36300000000, size: 0x50000 }));
@@ -458,6 +513,7 @@ mod tests {
}
}
/// simple test of dirt detection
#[test]
fn test_dirty() -> SyscallResult {
unsafe {
@@ -471,4 +527,82 @@ mod tests {
Ok(())
}
}
/// dirt detection away from the start of a block
#[test]
fn test_offset() -> SyscallResult {
unsafe {
let addr = AddressRange { start: 0x36f00000000, size: 0x20000 };
let mut b = MemoryBlock::new(addr);
let mut g = b.enter();
g.mmap_fixed(AddressRange { start: 0x36f00003000, size: 0x1000 }, Protection::RW)?;
let ptr = g.addr.slice_mut();
ptr[0x3663] = 12;
assert!(g.pages[3].dirty);
Ok(())
}
}
/// dirt detection in RWStack area when $rsp points there
#[test]
fn test_stk_norm() -> SyscallResult {
unsafe {
let addr = AddressRange { start: 0x36200000000, size: 0x10000 };
let mut b = MemoryBlock::new(addr);
let mut g = b.enter();
g.mmap_fixed(addr, Protection::RWStack)?;
let ptr = g.addr.slice_mut();
ptr[0xeeee] = 0xee;
ptr[0x44] = 0x44;
assert!(g.pages[0].dirty);
assert!(g.pages[14].dirty);
assert_eq!(ptr[0x8000], 0);
// This is an unfair test, but it's just documenting the current limitations of the system.
// Ideally, page 8 would be clean because we read from it but did not write to it.
// Due to limitations of RWStack tracking on windows, it is dirty.
#[cfg(windows)]
assert!(g.pages[8].dirty);
#[cfg(unix)]
assert!(!g.pages[8].dirty);
Ok(())
}
}
/// dirt detection in RWStack area when $rsp points there
#[test]
fn test_stack() -> SyscallResult {
use std::convert::TryInto;
unsafe {
let addr = AddressRange { start: 0x36f00000000, size: 0x10000 };
let mut b = MemoryBlock::new(addr);
let mut g = b.enter();
g.mmap_fixed(addr, Protection::RW)?;
let ptr = g.addr.slice_mut();
let mut i = 0;
ptr[i] = 0x48 ; i += 1; ptr[i] = 0x89 ; i += 1; ptr[i] = 0xe0 ; i += 1; // mov rax,rsp
ptr[i] = 0x48 ; i += 1; ptr[i] = 0x89 ; i += 1; ptr[i] = 0xfc ; i += 1; // mov rsp,rdi
ptr[i] = 0x50 ; i += 1; // push rax
ptr[i] = 0x48 ; i += 1; ptr[i] = 0x89 ; i += 1; ptr[i] = 0xc4 ; i += 1; // mov rsp,rax
ptr[i] = 0xb0 ; i += 1; ptr[i] = 0x2a ; i += 1; // mov al,0x2a
ptr[i] = 0xc3 ; // ret
g.mprotect(AddressRange { start: 0x36f00000000, size: 0x1000 }, Protection::RX)?;
g.mprotect(AddressRange { start: 0x36f00008000, size: 0x8000 }, Protection::RWStack)?;
let tmp_rsp = addr.end();
let res = transmute::<usize, extern "sysv64" fn(rsp: usize) -> u8>(addr.start)(tmp_rsp);
assert_eq!(res, 42);
assert!(g.pages[0].dirty);
assert!(!g.pages[1].dirty);
assert!(!g.pages[14].dirty);
assert!(g.pages[15].dirty);
let real_rsp = isize::from_le_bytes(ptr[addr.size - 8..].try_into().unwrap());
let current_rsp = &real_rsp as *const isize as isize;
assert!((real_rsp - current_rsp).abs() < 0x10000);
Ok(())
}
}
}


@@ -29,8 +29,15 @@ pub unsafe fn register(block: *mut MemoryBlock) {
pub unsafe fn unregister(block: *mut MemoryBlock) {
let mut data = GLOBAL_DATA.lock().unwrap();
let pos = data.active_blocks.iter().position(|x| x.0 == block).unwrap();
data.active_blocks.remove(pos);
let pos = data.active_blocks.iter().position(|x| x.0 == block);
match pos {
Some(index) => {
data.active_blocks.remove(index);
},
None => {
panic!("Tried to unregister MemoryBlock which was not registered")
}
}
}
enum TripResult {
@@ -53,11 +60,7 @@ unsafe fn trip(addr: usize) -> TripResult {
}
page.maybe_snapshot(page_start_addr);
page.dirty = true;
let new_prot = match &page.status {
PageAllocation::Allocated(p) => p,
PageAllocation::Free => panic!(),
};
assert!(pal::protect(AddressRange { start: page_start_addr, size: PAGESIZE }, *new_prot));
assert!(pal::protect(AddressRange { start: page_start_addr, size: PAGESIZE }, page.native_prot()));
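// the page is dirty now, so native_prot() yields its writable protection and the faulting
// write can proceed when execution resumes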
TripResult::Handled
}
@@ -70,9 +73,10 @@ fn initialize() {
unsafe extern "system" fn handler(p_info: *mut EXCEPTION_POINTERS) -> i32 {
let p_record = &mut *(*p_info).ExceptionRecord;
let flags = p_record.ExceptionInformation[0];
if p_record.ExceptionCode != STATUS_ACCESS_VIOLATION // only trigger on access violations...
|| (flags & 1) == 0 { // ...due to a write attempt
return EXCEPTION_CONTINUE_SEARCH
match p_record.ExceptionCode {
STATUS_ACCESS_VIOLATION if (flags & 1) != 0 => (), // write exception
STATUS_GUARD_PAGE_VIOLATION => (), // guard exception
_ => return EXCEPTION_CONTINUE_SEARCH
}
let fault_address = p_record.ExceptionInformation[1] as usize;
match trip(fault_address) {