From a0776876c137d5de3c9b4801589cab6156dc44ef Mon Sep 17 00:00:00 2001
From: Sergey Pepyakin
Date: Wed, 3 Jul 2019 13:24:52 +0200
Subject: [PATCH] Guard with feature.

---
 Cargo.toml                                    |   6 ++
 src/memory/{mmap.rs => mmap_bytebuf.rs}       |   0
 src/memory/mod.rs                             |  11 +-
 src/memory/rust_alloc.rs                      | 104 ------------------
 src/memory/{vec_backed.rs => vec_bytebuf.rs}  |   0
 5 files changed, 15 insertions(+), 106 deletions(-)
 rename src/memory/{mmap.rs => mmap_bytebuf.rs} (100%)
 delete mode 100644 src/memory/rust_alloc.rs
 rename src/memory/{vec_backed.rs => vec_bytebuf.rs} (100%)

diff --git a/Cargo.toml b/Cargo.toml
index a28469c..6705a79 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -38,6 +38,12 @@ core = [
 	"wasmi-validation/core",
 	"libm"
 ]
+# Enforce using the linear memory implementation based on `Vec` instead of
+# mmap on unix systems.
+#
+# Useful for tests and if you need to minimize unsafe usage at the cost of performance on some
+# workloads.
+vec_memory = []
 
 [workspace]
 members = ["validation"]
diff --git a/src/memory/mmap.rs b/src/memory/mmap_bytebuf.rs
similarity index 100%
rename from src/memory/mmap.rs
rename to src/memory/mmap_bytebuf.rs
diff --git a/src/memory/mod.rs b/src/memory/mod.rs
index ddbb9c3..21ffd8e 100644
--- a/src/memory/mod.rs
+++ b/src/memory/mod.rs
@@ -69,8 +69,15 @@ impl fmt::Debug for MemoryInstance {
     }
 }
 
-mod mmap;
-use self::mmap::ByteBuf;
+#[cfg(all(unix, not(feature = "vec_memory")))]
+#[path = "mmap_bytebuf.rs"]
+mod bytebuf;
+
+#[cfg(any(not(unix), feature = "vec_memory"))]
+#[path = "vec_bytebuf.rs"]
+mod bytebuf;
+
+use self::bytebuf::ByteBuf;
 
 // mod rust_alloc as byte_buf;
 // use self::rust_alloc::ByteBuf;
diff --git a/src/memory/rust_alloc.rs b/src/memory/rust_alloc.rs
deleted file mode 100644
index bde6df8..0000000
--- a/src/memory/rust_alloc.rs
+++ /dev/null
@@ -1,104 +0,0 @@
-//! An implementation of a `ByteBuf` based on Rust's `GlobalAlloc`.
-//!
-//! The performance of this is really depends on the underlying allocator implementation,
-//! specifically on `alloc_zeroed`. On macOS, for example, it calls to `bzero` which
-//! can ruin the performance for some workloads.
-
-use std::alloc::{System, Layout, GlobalAlloc};
-use std::{slice, ptr};
-
-pub struct ByteBuf {
-    // If the `len` is 0, this would store a dangling pointer but not `null`.
-    ptr: *mut u8,
-    len: usize,
-}
-
-impl ByteBuf {
-    pub fn new(len: usize) -> Self {
-        let ptr = if len == 0 {
-            // Craft a pointer which is not null, but
-            ptr::NonNull::dangling().as_ptr()
-        } else {
-            let ptr = unsafe {
-                // TODO: proof
-                System.alloc_zeroed(Self::layout(len))
-            };
-
-            // TODO: proof
-            assert!(!ptr.is_null());
-
-            ptr
-        };
-
-        Self {
-            ptr,
-            len,
-        }
-    }
-
-    pub fn realloc(&mut self, new_len: usize) {
-        let new_ptr = if self.len == 0 {
-            // special case, when the memory wasn't allocated before.
-            // Alignment of byte is 1.
-            // TODO: proof
-            let ptr = unsafe {
-                // TODO: proof
-                System.alloc_zeroed(Self::layout(new_len))
-            };
-
-            // TODO: proof
-            assert!(!ptr.is_null());
-
-            ptr
-        } else {
-            // TODO: proof
-            let cur_layout = Self::layout(self.len);
-            let new_ptr = unsafe {
-                System.realloc(self.ptr, cur_layout, new_len)
-            };
-            assert!(!new_ptr.is_null());
-
-            unsafe {
-                let new_area = new_ptr.offset(self.len as isize);
-                ptr::write_bytes(new_area, 0, new_len - self.len);
-            }
-
-            new_ptr
-        };
-
-        self.ptr = new_ptr;
-        self.len = new_len;
-    }
-
-    pub fn len(&self) -> usize {
-        self.len
-    }
-
-    pub fn as_slice(&self) -> &[u8] {
-        unsafe {
-            //
-            slice::from_raw_parts(self.ptr, self.len)
-        }
-    }
-
-    pub fn as_slice_mut(&mut self) -> &mut [u8] {
-        unsafe {
-            // TODO: zero sized.
-            slice::from_raw_parts_mut(self.ptr, self.len)
-        }
-    }
-
-    fn layout(len: usize) -> Layout {
-        Layout::from_size_align(len, 1).expect("")
-    }
-}
-
-impl Drop for ByteBuf {
-    fn drop(&mut self) {
-        if self.len != 0 {
-            unsafe {
-                System.dealloc(self.ptr, Self::layout(self.len))
-            }
-        }
-    }
-}
diff --git a/src/memory/vec_backed.rs b/src/memory/vec_bytebuf.rs
similarity index 100%
rename from src/memory/vec_backed.rs
rename to src/memory/vec_bytebuf.rs
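Note: the `#[path]` switch in src/memory/mod.rs works because both
`mmap_bytebuf.rs` and `vec_bytebuf.rs` define a `ByteBuf` with the same
surface, so exactly one `mod bytebuf;` survives cfg-evaluation and the rest
of the crate never learns which backend was compiled in. `vec_bytebuf.rs`
itself is not shown by this patch; going by the methods on the deleted
`rust_alloc.rs` implementation (`new`, `realloc`, `len`, `as_slice`,
`as_slice_mut`), a `Vec`-backed `ByteBuf` would look roughly like the sketch
below (illustrative only, not the actual file contents):

    // Sketch of a Vec-backed ByteBuf with the same surface as the deleted
    // rust_alloc.rs implementation. All-safe code: Vec takes care of
    // allocation, zero-initialization, and deallocation.
    pub struct ByteBuf {
        buf: Vec<u8>,
    }

    impl ByteBuf {
        pub fn new(len: usize) -> Self {
            // vec! zero-fills, matching alloc_zeroed semantics.
            Self { buf: vec![0u8; len] }
        }

        pub fn realloc(&mut self, new_len: usize) {
            // Wasm linear memory only grows, and fresh bytes must read as
            // zero; Vec::resize provides both.
            self.buf.resize(new_len, 0u8);
        }

        pub fn len(&self) -> usize {
            self.buf.len()
        }

        pub fn as_slice(&self) -> &[u8] {
            &self.buf
        }

        pub fn as_slice_mut(&mut self) -> &mut [u8] {
            &mut self.buf
        }
    }

On unix this backend can be forced, e.g. for tests or to minimize unsafe
usage, by building with `cargo build --features vec_memory`; on all other
targets it is already the only implementation.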