more krusty krab

nattthebear 2020-06-22 18:15:11 -04:00
parent e3dd8870c6
commit 92525576cc
9 changed files with 526 additions and 119 deletions

View File

@@ -2,6 +2,6 @@
"editor.insertSpaces": false,
"editor.tabSize": 4,
"search.exclude": {
"waterbox": true
"waterbox/**": true
}
}

View File

@@ -0,0 +1,5 @@
{
"search.exclude": {
"target/**": true
}
}

View File

@@ -21,6 +21,12 @@ dependencies = [
"bitflags",
]
[[package]]
name = "either"
version = "1.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bb1f6b1ce1c140482ea30ddd3335fc0024ac7ee112895426e0a629a6c20adfe3"
[[package]]
name = "getset"
version = "0.1.1"
@@ -33,6 +39,15 @@ dependencies = [
"syn",
]
[[package]]
name = "itertools"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "284f18f85651fe11e8a991b2adb42cb078325c996ed026d994719efcfca1d54b"
dependencies = [
"either",
]
[[package]]
name = "lazy_static"
version = "1.4.0"
@@ -190,6 +205,7 @@ version = "0.1.0"
dependencies = [
"bitflags",
"getset",
"itertools",
"lazy_static",
"libc",
"page_size",

View File

@@ -13,6 +13,7 @@ page_size = "0.4.2"
lazy_static = "1.4.0"
getset = "0.1.1"
parking_lot = "0.10.2"
itertools = "0.9.0"
[target.'cfg(windows)'.dependencies]
winapi = { version = "0.3.8", features = ["memoryapi", "handleapi", "errhandlingapi", "winnt"] }

View File

@@ -1,16 +1,42 @@
#![crate_type = "cdylib"]
use std::io::{Read, Write};
// TODO: Turn this off once we've built the exported public API
#![allow(dead_code)]
use std::io::{Read, Write, Error};
const PAGESIZE: usize = 0x1000;
const PAGEMASK: usize = 0xfff;
const PAGESHIFT: i32 = 12;
mod memory_block;
mod syscall_defs;
pub trait IStateable {
fn save_sate(&mut self, stream: Box<dyn Write>);
fn load_state(&mut self, stream: Box<dyn Read>);
fn save_sate(&mut self, stream: Box<dyn Write>) -> Result<(), Error>;
fn load_state(&mut self, stream: Box<dyn Read>) -> Result<(), Error>;
}
#[derive(Debug, Clone, Copy)]
pub struct AddressRange {
pub start: usize,
pub size: usize,
}
impl AddressRange {
pub fn end(&self) -> usize {
self.start + self.size
}
pub fn contains(&self, addr: usize) -> bool {
addr >= self.start && addr < self.end()
}
/// Unsafe: Pointers are unchecked and lifetime is not connected to the AddressRange
pub unsafe fn slice(&self) -> &'static [u8] {
std::slice::from_raw_parts(self.start as *const u8, self.size)
}
/// Unsafe: Pointers are unchecked and lifetime is not connected to the AddressRange
pub unsafe fn slice_mut(&self) -> &'static mut [u8] {
std::slice::from_raw_parts_mut(self.start as *mut u8, self.size)
}
}
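A minimal usage sketch of these helpers; the range values are arbitrary illustrations, not taken from the diff:

// Illustrative only: AddressRange is a plain value type describing a page-aligned region.
fn address_range_example() {
    let r = AddressRange { start: 0x36f00000000, size: 0x4000 };
    assert_eq!(r.end(), 0x36f00004000);
    assert!(r.contains(0x36f00003fff));
    assert!(!r.contains(r.end()));
    // slice() / slice_mut() are unsafe: they assume the range is currently mapped and
    // return 'static slices whose lifetime is not tied to any mapping.
}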
#[cfg(test)]

View File

@@ -7,35 +7,25 @@ use parking_lot::ReentrantMutex;
use std::collections::HashMap;
use std::sync::Mutex;
use pageblock::PageBlock;
use bitflags::bitflags;
use crate::*;
use getset::Getters;
use lazy_static::lazy_static;
use crate::syscall_defs::*;
use itertools::Itertools;
use std::io;
lazy_static! {
static ref LOCK_LIST: Mutex<HashMap<u32, ReentrantMutex<Option<MemoryBlockRef>>>> = Mutex::new(HashMap::new());
}
fn alignDown(p: usize) -> usize {
fn align_down(p: usize) -> usize {
p & !PAGEMASK
}
fn alignUp(p: usize) -> usize {
fn align_up(p: usize) -> usize {
((p - 1) | PAGEMASK) + 1
}
bitflags! {
struct PageFlags: u32 {
const R = 1;
const W = 2;
const X = 4;
/// This page is mapped in the waterbox right now
const ALLOCATED = 8;
/// The contents of this page have changed since the dirty flag was set
const DIRTY = 16;
/// rsp might point here. On some OSes, use an alternate method of dirt detection
const STACK = 32;
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Protection {
None,
R,
@@ -45,6 +35,23 @@ pub enum Protection {
RWStack
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum PageAllocation {
/// not in use by the guest
Free,
/// in use by the guest system, with a particular allocation status
Allocated(Protection),
}
impl PageAllocation {
pub fn writable(&self) -> bool {
use PageAllocation::*;
match self {
Allocated(Protection::RW) | Allocated(Protection::RWX) => true,
_ => false,
}
}
}
#[derive(Debug)]
enum Snapshot {
None,
@@ -55,21 +62,39 @@ enum Snapshot {
/// Information about a single page of memory
#[derive(Debug)]
struct Page {
pub flags: PageFlags,
pub status: PageAllocation,
/// if true, the page has changed from its ground state
pub dirty: bool,
pub snapshot: Snapshot,
}
impl Page {
pub fn new() -> Page {
Page {
status: PageAllocation::Free,
dirty: false,
snapshot: Snapshot::ZeroFilled,
}
}
/// Take a snapshot if one is not yet stored
/// unsafe: caller must ensure pages are mapped and addr is correct
pub unsafe fn maybe_snapshot(&mut self, addr: usize) {
if match self.snapshot { Snapshot::None => true, _ => false } {
let mut snapshot = PageBlock::new();
let src = std::slice::from_raw_parts(addr as *const u8, PAGESIZE);
let dst = snapshot.slice_mut();
dst.copy_from_slice(src);
self.snapshot = Snapshot::Data(snapshot);
}
}
}
#[derive(Getters)]
#[derive(Debug)]
struct MemoryBlock {
pub struct MemoryBlock {
#[get]
pages: Vec<Page>,
#[get]
start: usize,
#[get]
length: usize,
#[get]
end: usize,
addr: AddressRange,
#[get]
sealed: bool,
@@ -78,7 +103,7 @@ struct MemoryBlock {
lock_count: u32,
}
struct MemoryBlockGuard<'a> {
pub struct MemoryBlockGuard<'a> {
block: &'a mut MemoryBlock,
}
impl<'a> Drop for MemoryBlockGuard<'a> {
@@ -99,42 +124,36 @@ impl<'a> DerefMut for MemoryBlockGuard<'a> {
}
impl MemoryBlock {
pub fn new(start: usize, length: usize) -> MemoryBlock {
if start != alignDown(start) || length != alignDown(length) {
pub fn new(addr: AddressRange) -> Box<MemoryBlock> {
if addr.start != align_down(addr.start) || addr.size != align_down(addr.size) {
panic!("Addresses and sizes must be aligned!");
}
let end = start + length;
if start >> 32 != (end - 1) >> 32 {
if addr.start >> 32 != (addr.end() - 1) >> 32 {
panic!("MemoryBlock must fit into a single 4G region!");
}
let npage = length >> PAGESHIFT;
let npage = addr.size >> PAGESHIFT;
let mut pages = Vec::new();
pages.reserve_exact(npage);
for _ in 0..npage {
pages.push(Page {
flags: PageFlags::empty(),
snapshot: Snapshot::None,
});
pages.push(Page::new());
}
let handle = pal::open(length).unwrap();
let lock_index = (start >> 32) as u32;
let handle = pal::open(addr.size).unwrap();
let lock_index = (addr.start >> 32) as u32;
// add the lock_index stuff now, so we won't have to check for it later on activate / drop
{
let map = &mut LOCK_LIST.lock().unwrap();
map.entry(lock_index).or_insert(ReentrantMutex::new(None));
}
MemoryBlock {
Box::new(MemoryBlock {
pages,
start,
length,
end,
addr,
sealed: false,
lock_index,
handle,
lock_count: 0,
}
})
}
pub fn enter(&mut self) -> MemoryBlockGuard {
@@ -191,30 +210,188 @@ impl MemoryBlock {
}
unsafe fn swapin(&mut self) {
assert!(pal::map(&self.handle, self.start, self.length));
assert!(pal::map(&self.handle, self.addr));
tripguard::register(self);
MemoryBlock::refresh_protections(self.addr.start, self.pages.as_slice());
}
unsafe fn swapout(&mut self) {
assert!(pal::unmap(self.start, self.length));
self.get_stack_dirty();
assert!(pal::unmap(self.addr));
tripguard::unregister(self);
}
pub fn active(&self) -> bool {
self.lock_count > 0
}
pub fn protect(&mut self, start: usize, length: usize, prot: Protection) {
fn validate_range(&mut self, addr: AddressRange) -> Result<&mut [Page], i32> {
if addr.start < self.addr.start
|| addr.end() > self.addr.end()
|| addr.size == 0
|| addr.start != align_down(addr.start)
|| addr.size != align_down(addr.size) {
Err(EINVAL)
} else {
let pstart = (addr.start - self.addr.start) >> PAGESHIFT;
let pend = pstart + (addr.size >> PAGESHIFT);
Ok(&mut self.pages[pstart..pend])
}
}
pub fn seal(&mut self) {
fn refresh_protections(mut start: usize, pages: &[Page]) {
struct Chunk {
addr: AddressRange,
prot: Protection,
};
let chunks = pages.iter()
.map(|p| {
let prot = match p.status {
#[cfg(windows)]
PageAllocation::Allocated(Protection::RWStack) if p.dirty => Protection::RW,
PageAllocation::Allocated(Protection::RW) if !p.dirty => Protection::R,
PageAllocation::Allocated(Protection::RWX) if !p.dirty => Protection::RX,
#[cfg(unix)]
PageAllocation::Allocated(Protection::RWStack) => if p.dirty { Protection::RW } else { Protection::R },
PageAllocation::Allocated(x) => x,
PageAllocation::Free => Protection::None,
};
let pstart = start;
start += PAGESIZE;
Chunk {
addr: AddressRange { start: pstart, size: PAGESIZE },
prot,
}
})
.coalesce(|x, y| if x.prot == y.prot {
Ok(Chunk {
addr: AddressRange { start: x.addr.start, size: x.addr.size + y.addr.size },
prot: x.prot,
})
} else {
Err((x, y))
});
for c in chunks {
unsafe {
assert!(pal::protect(c.addr, c.prot));
}
}
}
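refresh_protections leans on itertools' coalesce to fold runs of identically-protected pages into a single pal::protect call each. A standalone sketch of that Ok/Err merging contract on plain tuples, assuming itertools 0.9 as added in Cargo.toml above:

use itertools::Itertools;

// Ok(merged) fuses the pair and keeps folding; Err((a, b)) emits `a` and restarts from `b`.
fn coalesce_demo() {
    let runs: Vec<(char, usize)> = ['R', 'R', 'W', 'W', 'W', 'R'].iter()
        .map(|&p| (p, 1usize))
        .coalesce(|a, b| if a.0 == b.0 { Ok((a.0, a.1 + b.1)) } else { Err((a, b)) })
        .collect();
    assert_eq!(runs, vec![('R', 2usize), ('W', 3), ('R', 1)]);
}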
fn set_protections(start: usize, pages: &mut [Page], status: PageAllocation) {
for p in pages.iter_mut() {
p.status = status;
}
MemoryBlock::refresh_protections(start, pages);
#[cfg(windows)]
if status == PageAllocation::Allocated(Protection::RWStack) {
// have to precapture snapshots here
let mut addr = start;
for p in pages {
unsafe {
p.maybe_snapshot(addr);
}
addr += PAGESIZE;
}
}
}
/// Updates dirty tracking for RWStack areas that may have been tripped. Must be called before those areas change allocation type, or are swapped out.
/// no-op on Linux
fn get_stack_dirty(&mut self) {
#[cfg(windows)]
unsafe {
let mut start = self.addr.start;
let mut pindex = 0;
while start < self.addr.end() {
if !self.pages[pindex].dirty && self.pages[pindex].status == PageAllocation::Allocated(Protection::RWStack) {
let mut res = pal::get_stack_dirty(start).unwrap();
while res.size > 0 && start < self.addr.end() {
if res.dirty && self.pages[pindex].status == PageAllocation::Allocated(Protection::RWStack) {
self.pages[pindex].dirty = true;
}
res.size -= PAGESIZE;
start += PAGESIZE;
pindex += 1;
}
} else {
start += PAGESIZE;
pindex += 1;
}
}
}
}
/// implements a subset of mmap(2)
pub fn mmap_fixed(&mut self, addr: AddressRange, prot: Protection) -> SyscallResult {
self.get_stack_dirty(); // not needed here technically?
let pages = self.validate_range(addr)?;
if pages.iter().any(|p| p.status != PageAllocation::Free) {
// assume MAP_FIXED_NOREPLACE at all times
return Err(EEXIST)
}
MemoryBlock::set_protections(addr.start, pages, PageAllocation::Allocated(prot));
Ok(())
}
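A hypothetical usage sketch of the MAP_FIXED_NOREPLACE behaviour assumed above: a second request over already-allocated pages is refused with EEXIST. The block address and function name are illustrative only:

fn mmap_fixed_sketch() -> SyscallResult {
    let addr = AddressRange { start: 0x36d00000000, size: 0x4000 };
    let mut b = MemoryBlock::new(addr);
    let mut g = b.enter();
    g.mmap_fixed(addr, Protection::RW)?;                         // pages were Free: Ok
    assert_eq!(g.mmap_fixed(addr, Protection::RW), Err(EEXIST)); // overlapping request is refused
    Ok(())
}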
/// implements a subset of mprotect(2)
pub fn mprotect(&mut self, addr: AddressRange, prot: Protection) -> SyscallResult {
self.get_stack_dirty();
let pages = self.validate_range(addr)?;
if pages.iter().any(|p| p.status == PageAllocation::Free) {
return Err(ENOMEM)
}
MemoryBlock::set_protections(addr.start, pages, PageAllocation::Allocated(prot));
Ok(())
}
/// implements a subset of munmap(2)
pub fn munmap(&mut self, addr: AddressRange) -> SyscallResult {
self.get_stack_dirty();
let pages = self.validate_range(addr)?;
if pages.iter().any(|p| p.status == PageAllocation::Free) {
return Err(EINVAL)
}
// we do not save the current state of unmapped pages, and if they are later remapped,
// the expectation is that they will start out as zero filled. accordingly, the most
// sensible way to do this is to zero them now
unsafe {
pal::protect(addr, Protection::RW);
std::ptr::write_bytes(addr.start as *mut u8, 0, addr.size);
// simple state size optimization: we can undirty pages in this case depending on the initial state
for p in pages.iter_mut() {
p.dirty = match p.snapshot {
Snapshot::ZeroFilled => false,
_ => true
};
}
}
MemoryBlock::set_protections(addr.start, pages, PageAllocation::Free);
Ok(())
}
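A hypothetical sketch of the zero-fill guarantee the comment above describes, mirroring the test_dirty pattern below; the address and function name are illustrative only:

fn munmap_zero_fill_sketch() -> SyscallResult {
    let addr = AddressRange { start: 0x36c00000000, size: 0x2000 };
    let mut b = MemoryBlock::new(addr);
    let mut g = b.enter();
    g.mmap_fixed(addr, Protection::RW)?;
    unsafe { addr.slice_mut()[0] = 0xff; } // dirties the first page via the trip guard
    g.munmap(addr)?;                       // pages are zeroed and freed here
    g.mmap_fixed(addr, Protection::RW)?;
    unsafe { assert_eq!(addr.slice()[0], 0); }
    Ok(())
}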
pub fn seal(&mut self) {
assert!(!self.sealed);
for p in self.pages.iter_mut() {
if p.dirty {
p.dirty = false;
} else {
p.snapshot = Snapshot::ZeroFilled;
}
}
}
}
impl IStateable for MemoryBlock {
fn save_sate(&mut self, stream: Box<dyn Write>) {
fn save_sate(&mut self, stream: Box<dyn Write>) -> Result<(), io::Error> {
assert!(self.sealed);
self.get_stack_dirty();
Ok(())
}
fn load_state(&mut self, stream: Box<dyn Read>) {
fn load_state(&mut self, stream: Box<dyn Read>) -> Result<(), io::Error> {
assert!(self.sealed);
self.get_stack_dirty();
Ok(())
}
}
@@ -258,17 +435,17 @@ mod tests {
use super::*;
#[test]
fn test_basic() {
drop(MemoryBlock::new(0x36300000000, 0x50000));
drop(MemoryBlock::new(0x36b00000000, 0x2000));
fn test_create() {
drop(MemoryBlock::new(AddressRange { start: 0x36300000000, size: 0x50000 }));
drop(MemoryBlock::new(AddressRange { start: 0x36b00000000, size: 0x2000 }));
{
let mut b = MemoryBlock::new(0x36100000000, 0x65000);
let mut b = MemoryBlock::new(AddressRange { start: 0x36100000000, size: 0x65000 });
b.activate();
b.deactivate();
b.enter();
}
{
let mut b = MemoryBlock::new(0x36e00000000, 0x5000);
let mut b = MemoryBlock::new(AddressRange { start: 0x36e00000000, size: 0x5000 });
b.activate();
b.activate();
let mut guard = b.enter();
@@ -280,4 +457,18 @@ mod tests {
b.enter();
}
}
#[test]
fn test_dirty() -> SyscallResult {
unsafe {
let addr = AddressRange { start: 0x36f00000000, size: 0x10000 };
let mut b = MemoryBlock::new(addr);
let mut g = b.enter();
g.mmap_fixed(addr, Protection::RW)?;
let ptr = g.addr.slice_mut();
ptr[0x2003] = 5;
assert!(g.pages[2].dirty);
Ok(())
}
}
}

View File

@@ -1,5 +1,6 @@
// Platform abstraction layer over mmap/etc. Doesn't do much checking, not meant for general consumption
use super::Protection;
use crate::*;
#[derive(Debug)]
pub struct Handle(usize);
@@ -8,6 +9,7 @@ pub struct Handle(usize);
pub use win::*;
#[cfg(windows)]
mod win {
use std::mem::{size_of, zeroed};
use winapi::um::memoryapi::*;
use winapi::um::winnt::*;
use winapi::um::handleapi::*;
@@ -49,17 +51,17 @@ mod win {
return Handle(INVALID_HANDLE_VALUE as usize);
}
pub fn map(handle: &Handle, start: usize, size: usize) -> bool {
pub fn map(handle: &Handle, addr: AddressRange) -> bool {
unsafe {
let res = MapViewOfFileEx(
handle.0 as *mut c_void,
FILE_MAP_ALL_ACCESS | FILE_MAP_EXECUTE,
0,
0,
size,
start as *mut c_void
addr.size,
addr.start as *mut c_void
);
if res == start as *mut c_void {
if res == addr.start as *mut c_void {
true
} else {
error();
@@ -68,11 +70,11 @@ mod win {
}
}
pub unsafe fn unmap(start: usize, _size: usize) -> bool {
UnmapViewOfFile(start as *mut c_void) != 0
pub unsafe fn unmap(addr: AddressRange) -> bool {
UnmapViewOfFile(addr.start as *mut c_void) != 0
}
pub unsafe fn protect(start: usize, size: usize, prot: Protection) -> bool {
pub unsafe fn protect(addr: AddressRange, prot: Protection) -> bool {
let p = match prot {
Protection::None => PAGE_NOACCESS,
Protection::R => PAGE_READONLY,
@@ -82,7 +84,28 @@ mod win {
Protection::RWStack => PAGE_READWRITE | PAGE_GUARD,
};
let mut old_protect: u32 = 0;
VirtualProtect(start as *mut c_void, size, p, &mut old_protect) != 0
VirtualProtect(addr.start as *mut c_void, addr.size, p, &mut old_protect) != 0
}
pub struct StackTripResult {
pub size: usize,
pub dirty: bool,
}
/// Returns whether the memory was dirtied. The returned size is effectively arbitrary; it may be larger
/// or smaller than you expect. DANGER: If called on memory that is not currently in RWStack mode,
/// it will generally report dirty even though that's not what you want.
pub unsafe fn get_stack_dirty(start: usize) -> Option<StackTripResult> {
let mut mbi = Box::new(zeroed::<MEMORY_BASIC_INFORMATION>());
let mbi_size = size_of::<MEMORY_BASIC_INFORMATION>();
if VirtualQuery(start as *const c_void, &mut *mbi, mbi_size) != mbi_size {
error();
None
} else {
Some(StackTripResult {
size: mbi.RegionSize,
dirty: mbi.Protect & PAGE_GUARD == 0,
})
}
}
}
@@ -125,16 +148,16 @@ mod nix {
return Handle(-1i32 as usize);
}
pub fn map(handle: &Handle, start: usize, size: usize) -> bool {
pub fn map(handle: &Handle, addr: AddressRange) -> bool {
unsafe {
let res = mmap(start as *mut c_void,
size,
let res = mmap(addr.start as *mut c_void,
addr.size,
PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_SHARED | MAP_FIXED,
handle.0 as i32,
0
);
if res == start as *mut c_void {
if res == addr.start as *mut c_void {
true
} else {
error();
@@ -143,11 +166,11 @@ mod nix {
}
}
pub unsafe fn unmap(start: usize, size: usize) -> bool {
munmap(start as *mut c_void, size) == 0
pub unsafe fn unmap(addr: AddressRange) -> bool {
munmap(addr.start as *mut c_void, addr.size) == 0
}
pub unsafe fn protect(start: usize, size: usize, prot: Protection) -> bool {
pub unsafe fn protect(addr: AddressRange, prot: Protection) -> bool {
let p = match prot {
Protection::None => PROT_NONE,
Protection::R => PROT_READ,
@@ -156,7 +179,7 @@ mod nix {
Protection::RWX => PROT_READ | PROT_WRITE | PROT_EXEC,
Protection::RWStack => panic!("RWStack should not be passed to pal layer"),
};
mprotect(start as *mut c_void, size, p) == 0
mprotect(addr.start as *mut c_void, addr.size, p) == 0
}
}
@@ -172,31 +195,32 @@ mod tests {
unsafe {
let size = 0x20000usize;
let start = 0x36a00000000usize;
let addr = AddressRange { start, size };
let handle = open(size).unwrap();
assert!(map(&handle, start, size));
assert!(protect(start, size, Protection::RW));
assert!(map(&handle, addr));
assert!(protect(addr, Protection::RW));
*((start + 0x14795) as *mut u8) = 42;
assert!(unmap(start, size));
assert!(unmap(addr));
assert!(map(&handle, start, size));
assert!(protect(start, size, Protection::R));
assert!(map(&handle, addr));
assert!(protect(addr, Protection::R));
assert_eq!(*((start + 0x14795) as *const u8), 42);
assert!(unmap(start + 0x14000, 0x2000));
assert!(unmap(addr));
assert!(map(&handle, start, size));
assert!(protect(start, size, Protection::RW));
assert!(map(&handle, addr));
assert!(protect(addr, Protection::RW));
*(start as *mut u8) = 0xc3; // RET
assert!(protect(start, size, Protection::RX));
assert!(protect(addr, Protection::RX));
transmute::<usize, extern fn() -> ()>(start)();
assert!(protect(start, size, Protection::RWX));
assert!(protect(addr, Protection::RWX));
*(start as *mut u8) = 0x90; // NOP
*((start + 1) as *mut u8) = 0xb0; // MOV AL
*((start + 2) as *mut u8) = 0x7b; // 123
*((start + 3) as *mut u8) = 0xc3; // RET
let i = transmute::<usize, extern fn() -> u8>(start)();
assert_eq!(i, 123);
assert!(unmap(start, size));
assert!(unmap(addr));
assert!(close(handle));
}

View File

@@ -7,7 +7,7 @@ use super::*;
use lazy_static::lazy_static;
lazy_static! {
static ref global_data: Mutex<GlobalData> = Mutex::new(GlobalData {
static ref GLOBAL_DATA: Mutex<GlobalData> = Mutex::new(GlobalData {
initialized: false,
active_blocks: Vec::new(),
});
@@ -18,8 +18,8 @@ struct GlobalData {
active_blocks: Vec<MemoryBlockRef>,
}
unsafe fn register(block: *mut MemoryBlock) {
let mut data = global_data.lock().unwrap();
pub unsafe fn register(block: *mut MemoryBlock) {
let mut data = GLOBAL_DATA.lock().unwrap();
if !data.initialized {
initialize();
data.initialized = true;
@@ -27,8 +27,8 @@ unsafe fn register(block: *mut MemoryBlock) {
data.active_blocks.push(MemoryBlockRef(block));
}
unsafe fn unregister(block: *mut MemoryBlock) {
let mut data = global_data.lock().unwrap();
pub unsafe fn unregister(block: *mut MemoryBlock) {
let mut data = GLOBAL_DATA.lock().unwrap();
let pos = data.active_blocks.iter().position(|x| x.0 == block).unwrap();
data.active_blocks.remove(pos);
}
@@ -39,29 +39,25 @@ enum TripResult {
}
unsafe fn trip(addr: usize) -> TripResult {
let data = global_data.lock().unwrap();
let data = GLOBAL_DATA.lock().unwrap();
let memory_block = match data.active_blocks
.iter()
.find(|x| addr >= (*x.0).start && addr < (*x.0).end) {
.find(|x| (*x.0).addr.contains(addr)) {
Some(x) => &mut *x.0,
None => return TripResult::NotHandled,
};
let page_start_addr = addr & !PAGEMASK;
let page = &mut memory_block.pages[(addr - memory_block.start) >> PAGESHIFT];
if !page.flags.contains(PageFlags::W) {
let page = &mut memory_block.pages[(addr - memory_block.addr.start) >> PAGESHIFT];
if !page.status.writable() {
return TripResult::NotHandled
}
if memory_block.sealed && match page.snapshot { Snapshot::None => true, _ => false } {
// take snapshot now
let mut snapshot = PageBlock::new();
let src = std::slice::from_raw_parts(page_start_addr as *const u8, PAGESIZE);
let dst = snapshot.slice_mut();
dst.copy_from_slice(src);
page.snapshot = Snapshot::Data(snapshot);
}
page.flags.insert(PageFlags::DIRTY);
let new_prot = if page.flags.contains(PageFlags::X) { Protection::RWX } else { Protection::RW };
assert!(pal::protect(page_start_addr, PAGESIZE, new_prot));
page.maybe_snapshot(page_start_addr);
page.dirty = true;
let new_prot = match &page.status {
PageAllocation::Allocated(p) => p,
PageAllocation::Free => panic!(),
};
assert!(pal::protect(AddressRange { start: page_start_addr, size: PAGESIZE }, *new_prot));
TripResult::Handled
}
@@ -75,7 +71,7 @@ fn initialize() {
let p_record = &mut *(*p_info).ExceptionRecord;
let flags = p_record.ExceptionInformation[0];
if p_record.ExceptionCode != STATUS_ACCESS_VIOLATION // only trigger on access violations...
|| (flags & 1) != 0 { // ...due to a write attempt
|| (flags & 1) == 0 { // ...due to a write attempt
return EXCEPTION_CONTINUE_SEARCH
}
let fault_address = p_record.ExceptionInformation[1] as usize;
@@ -91,50 +87,50 @@ fn initialize() {
}
#[cfg(unix)]
type sa_handler = unsafe extern fn(i32) -> ();
type SaHandler = unsafe extern fn(i32) -> ();
#[cfg(unix)]
type sa_sigaction = unsafe extern fn(i32, *const siginfo_t, *const ucontext_t) -> ();
type SaSigaction = unsafe extern fn(i32, *const siginfo_t, *const ucontext_t) -> ();
#[cfg(unix)]
use libc::*;
#[cfg(unix)]
static mut altstack: [u8; SIGSTKSZ] = [0; SIGSTKSZ];
static mut ALTSTACK: [u8; SIGSTKSZ] = [0; SIGSTKSZ];
#[cfg(unix)]
static mut sa_old: Option<Box<sigaction>> = None;
static mut SA_OLD: Option<Box<sigaction>> = None;
#[cfg(unix)]
fn initialize() {
use std::mem::{transmute, zeroed};
unsafe extern fn handler(sig: i32, info: *const siginfo_t, ucontext: *const ucontext_t) {
let faultAddress = (*info).si_addr() as usize;
let fault_address = (*info).si_addr() as usize;
let write = (*ucontext).uc_mcontext.gregs[REG_ERR as usize] & 2 != 0;
let rethrow = !write || match trip(faultAddress) {
let rethrow = !write || match trip(fault_address) {
TripResult::NotHandled => true,
_ => false
};
if rethrow {
if sa_old.as_ref().unwrap().sa_flags & SA_SIGINFO != 0 {
transmute::<usize, sa_sigaction>(sa_old.as_ref().unwrap().sa_sigaction)(sig, info, ucontext);
if SA_OLD.as_ref().unwrap().sa_flags & SA_SIGINFO != 0 {
transmute::<usize, SaSigaction>(SA_OLD.as_ref().unwrap().sa_sigaction)(sig, info, ucontext);
} else {
transmute::<usize, sa_handler>(sa_old.as_ref().unwrap().sa_sigaction)(sig);
transmute::<usize, SaHandler>(SA_OLD.as_ref().unwrap().sa_sigaction)(sig);
}
abort();
}
}
unsafe {
sa_old = Some(Box::new(zeroed::<sigaction>()));
SA_OLD = Some(Box::new(zeroed::<sigaction>()));
let ss = stack_t {
ss_flags: 0,
ss_sp: &mut altstack[0] as *mut u8 as *mut c_void,
ss_sp: &mut ALTSTACK[0] as *mut u8 as *mut c_void,
ss_size: SIGSTKSZ
};
assert!(sigaltstack(&ss, null_mut()) == 0, "sigaltstack failed");
let mut sa = sigaction {
sa_mask: zeroed::<sigset_t>(),
sa_sigaction: transmute::<sa_sigaction, usize>(handler),
sa_sigaction: transmute::<SaSigaction, usize>(handler),
sa_flags: SA_ONSTACK | SA_SIGINFO,
sa_restorer: None,
};
sigfillset(&mut sa.sa_mask);
assert!(sigaction(SIGSEGV, &sa, &mut **sa_old.as_mut().unwrap() as *mut sigaction) == 0, "sigaction failed");
assert!(sigaction(SIGSEGV, &sa, &mut **SA_OLD.as_mut().unwrap() as *mut sigaction) == 0, "sigaction failed");
}
}

View File

@@ -0,0 +1,148 @@
// linux syscall related things, for use in the waterbox
// There are various crates that contain these, but they're #[cfg]'ed to the HOST system.
// We want exactly the ones that the waterbox guest MUSL uses, exactly the way they're defined there
/// the result of a syscall in Rust-friendly form; OK or errno
pub type SyscallResult = Result<(), i32>;
/// map a syscall result as the kernel would return it
pub fn map_syscall_result(result: SyscallResult) -> isize {
match result {
Ok(()) => 0,
Err(i) => -i as isize,
}
}
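A tiny illustration of the convention being encoded here: success maps to 0 and failure to the negated errno, exactly as the raw kernel ABI returns it:

fn syscall_result_demo() {
    assert_eq!(map_syscall_result(Ok(())), 0);
    assert_eq!(map_syscall_result(Err(EINVAL)), -22); // EINVAL == 22
}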
pub const EPERM: i32 = 1;
pub const ENOENT: i32 = 2;
pub const ESRCH: i32 = 3;
pub const EINTR: i32 = 4;
pub const EIO: i32 = 5;
pub const ENXIO: i32 = 6;
pub const E2BIG: i32 = 7;
pub const ENOEXEC: i32 = 8;
pub const EBADF: i32 = 9;
pub const ECHILD: i32 = 10;
pub const EAGAIN: i32 = 11;
pub const ENOMEM: i32 = 12;
pub const EACCES: i32 = 13;
pub const EFAULT: i32 = 14;
pub const ENOTBLK: i32 = 15;
pub const EBUSY: i32 = 16;
pub const EEXIST: i32 = 17;
pub const EXDEV: i32 = 18;
pub const ENODEV: i32 = 19;
pub const ENOTDIR: i32 = 20;
pub const EISDIR: i32 = 21;
pub const EINVAL: i32 = 22;
pub const ENFILE: i32 = 23;
pub const EMFILE: i32 = 24;
pub const ENOTTY: i32 = 25;
pub const ETXTBSY: i32 = 26;
pub const EFBIG: i32 = 27;
pub const ENOSPC: i32 = 28;
pub const ESPIPE: i32 = 29;
pub const EROFS: i32 = 30;
pub const EMLINK: i32 = 31;
pub const EPIPE: i32 = 32;
pub const EDOM: i32 = 33;
pub const ERANGE: i32 = 34;
pub const EDEADLK: i32 = 35;
pub const ENAMETOOLONG: i32 = 36;
pub const ENOLCK: i32 = 37;
pub const ENOSYS: i32 = 38;
pub const ENOTEMPTY: i32 = 39;
pub const ELOOP: i32 = 40;
pub const EWOULDBLOCK: i32 = EAGAIN;
pub const ENOMSG: i32 = 42;
pub const EIDRM: i32 = 43;
pub const ECHRNG: i32 = 44;
pub const EL2NSYNC: i32 = 45;
pub const EL3HLT: i32 = 46;
pub const EL3RST: i32 = 47;
pub const ELNRNG: i32 = 48;
pub const EUNATCH: i32 = 49;
pub const ENOCSI: i32 = 50;
pub const EL2HLT: i32 = 51;
pub const EBADE: i32 = 52;
pub const EBADR: i32 = 53;
pub const EXFULL: i32 = 54;
pub const ENOANO: i32 = 55;
pub const EBADRQC: i32 = 56;
pub const EBADSLT: i32 = 57;
pub const EDEADLOCK: i32 = EDEADLK;
pub const EBFONT: i32 = 59;
pub const ENOSTR: i32 = 60;
pub const ENODATA: i32 = 61;
pub const ETIME: i32 = 62;
pub const ENOSR: i32 = 63;
pub const ENONET: i32 = 64;
pub const ENOPKG: i32 = 65;
pub const EREMOTE: i32 = 66;
pub const ENOLINK: i32 = 67;
pub const EADV: i32 = 68;
pub const ESRMNT: i32 = 69;
pub const ECOMM: i32 = 70;
pub const EPROTO: i32 = 71;
pub const EMULTIHOP: i32 = 72;
pub const EDOTDOT: i32 = 73;
pub const EBADMSG: i32 = 74;
pub const EOVERFLOW: i32 = 75;
pub const ENOTUNIQ: i32 = 76;
pub const EBADFD: i32 = 77;
pub const EREMCHG: i32 = 78;
pub const ELIBACC: i32 = 79;
pub const ELIBBAD: i32 = 80;
pub const ELIBSCN: i32 = 81;
pub const ELIBMAX: i32 = 82;
pub const ELIBEXEC: i32 = 83;
pub const EILSEQ: i32 = 84;
pub const ERESTART: i32 = 85;
pub const ESTRPIPE: i32 = 86;
pub const EUSERS: i32 = 87;
pub const ENOTSOCK: i32 = 88;
pub const EDESTADDRREQ: i32 = 89;
pub const EMSGSIZE: i32 = 90;
pub const EPROTOTYPE: i32 = 91;
pub const ENOPROTOOPT: i32 = 92;
pub const EPROTONOSUPPORT: i32 = 93;
pub const ESOCKTNOSUPPORT: i32 = 94;
pub const EOPNOTSUPP: i32 = 95;
pub const ENOTSUP: i32 = EOPNOTSUPP;
pub const EPFNOSUPPORT: i32 = 96;
pub const EAFNOSUPPORT: i32 = 97;
pub const EADDRINUSE: i32 = 98;
pub const EADDRNOTAVAIL: i32 = 99;
pub const ENETDOWN: i32 = 100;
pub const ENETUNREACH: i32 = 101;
pub const ENETRESET: i32 = 102;
pub const ECONNABORTED: i32 = 103;
pub const ECONNRESET: i32 = 104;
pub const ENOBUFS: i32 = 105;
pub const EISCONN: i32 = 106;
pub const ENOTCONN: i32 = 107;
pub const ESHUTDOWN: i32 = 108;
pub const ETOOMANYREFS: i32 = 109;
pub const ETIMEDOUT: i32 = 110;
pub const ECONNREFUSED: i32 = 111;
pub const EHOSTDOWN: i32 = 112;
pub const EHOSTUNREACH: i32 = 113;
pub const EALREADY: i32 = 114;
pub const EINPROGRESS: i32 = 115;
pub const ESTALE: i32 = 116;
pub const EUCLEAN: i32 = 117;
pub const ENOTNAM: i32 = 118;
pub const ENAVAIL: i32 = 119;
pub const EISNAM: i32 = 120;
pub const EREMOTEIO: i32 = 121;
pub const EDQUOT: i32 = 122;
pub const ENOMEDIUM: i32 = 123;
pub const EMEDIUMTYPE: i32 = 124;
pub const ECANCELED: i32 = 125;
pub const ENOKEY: i32 = 126;
pub const EKEYEXPIRED: i32 = 127;
pub const EKEYREVOKED: i32 = 128;
pub const EKEYREJECTED: i32 = 129;
pub const EOWNERDEAD: i32 = 130;
pub const ENOTRECOVERABLE: i32 = 131;
pub const ERFKILL: i32 = 132;
pub const EHWPOISON: i32 = 133;