Compare commits


2 Commits

Author  SHA1        Message                 Date
arkpar  dedcda99dd  Style                   2019-07-04 18:05:41 +02:00
arkpar  2d2d1f49c3  Customizable allocator  2019-07-04 18:03:18 +02:00
33 changed files with 388 additions and 735 deletions

View File

@ -1,2 +0,0 @@
[target.armv7-unknown-linux-gnueabihf]
linker = "arm-linux-gnueabihf-gcc"

.gitignore vendored (1 line changed)
View File

@ -3,4 +3,3 @@
**/*.rs.bk
Cargo.lock
spec/target
.idea

View File

@ -1,24 +1,25 @@
dist: xenial
dist: trusty
sudo: required
language:
- rust
- cpp
matrix:
fast_finish: true
include:
- rust: nightly
- rust: stable
- rust: stable
env: TARGET=armv7-unknown-linux-gnueabihf
rust:
- nightly
- stable
addons:
apt:
sources:
- ubuntu-toolchain-r-test
packages:
- gcc-8
- g++-8
- cmake
env:
- CC=/usr/bin/gcc-8 CXX=/usr/bin/g++-8
install:
- if [ "$TRAVIS_RUST_VERSION" == "nightly" ]; then rustup target add wasm32-unknown-unknown; fi
- if [ -n "$TARGET" ]; then rustup target add "$TARGET" && sudo apt-get install --yes qemu-user-static; fi
- if [ "$TARGET" == "armv7-unknown-linux-gnueabihf" ]; then sudo apt-get install --yes crossbuild-essential-armhf && export QEMU_LD_PREFIX=/usr/arm-linux-gnueabihf; fi
- rustup component add rustfmt
- sudo apt-get install --yes cmake
script:
- cargo fmt --all -- --check
# Make sure nightly targets are not broken.
@ -26,11 +27,8 @@ script:
- if [ "$TRAVIS_RUST_VERSION" == "nightly" ]; then cargo check --benches --manifest-path=benches/Cargo.toml; fi
# Make sure `no_std` version checks.
- if [ "$TRAVIS_RUST_VERSION" == "nightly" ]; then cargo +nightly check --no-default-features --features core; fi
# Check that `vec_memory` feature works.
- cargo check --features vec_memory
- travis_wait 60 ./test.sh
- ./test.sh
- ./doc.sh
after_success: |
# Build documentation and deploy it to github pages.
[ $TRAVIS_BRANCH = master ] &&
@ -39,18 +37,7 @@ after_success: |
sudo pip install ghp-import &&
ghp-import -n target/doc &&
git push -fq https://${GH_TOKEN}@github.com/${TRAVIS_REPO_SLUG}.git gh-pages
cache:
# Don't use `cache: cargo` since it adds the `target` directory and that can be huge.
# Saving and loading this directory dwarfs actual compilation and test times. More importantly,
# Travis times out the build since the job doesn't produce any output for more
# than 10 minutes.
#
# So we just cache ~/.cargo directory
directories:
- /home/travis/.cargo
cache: cargo
before_cache:
# Travis can't cache files that are not readable by "others"
- chmod -R a+r $HOME/.cargo
# According to the Travis CI docs for building Rust projects, this is done by:
- rm -rf /home/travis/.cargo/registry

View File

@ -1,6 +1,6 @@
[package]
name = "wasmi"
version = "0.5.1"
version = "0.4.5"
authors = ["Nikolay Volf <nikvolf@gmail.com>", "Svyatoslav Nikolsky <svyatonik@yandex.ru>", "Sergey Pepyakin <s.pepyakin@gmail.com>"]
license = "MIT/Apache-2.0"
readme = "README.md"
@ -11,17 +11,16 @@ keywords = ["wasm", "webassembly", "bytecode", "interpreter"]
exclude = [ "/res/*", "/tests/*", "/fuzz/*", "/benches/*" ]
[dependencies]
wasmi-validation = { version = "0.2", path = "validation", default-features = false }
parity-wasm = { version = "0.40.1", default-features = false }
wasmi-validation = { version = "0.1", path = "validation", default-features = false }
parity-wasm = { version = "0.31", default-features = false }
hashbrown = { version = "0.1.8", optional = true }
memory_units = "0.3.0"
libm = { version = "0.1.2", optional = true }
num-rational = { version = "0.2.2", default-features = false }
num-traits = { version = "0.2.8", default-features = false }
[dev-dependencies]
assert_matches = "1.1"
rand = "0.4.2"
wabt = "0.9"
wabt = "0.6"
[features]
default = ["std"]
@ -29,23 +28,14 @@ default = ["std"]
std = [
"parity-wasm/std",
"wasmi-validation/std",
"num-rational/std",
"num-rational/bigint-std",
"num-traits/std"
]
# Enable for no_std support
# hashbrown only works on no_std
core = [
# `core` doesn't support vec_memory
"vec_memory",
"wasmi-validation/core",
"hashbrown/nightly",
"libm"
]
# Enforce using the linear memory implementation based on `Vec` instead of
# mmap on unix systems.
#
# Useful for tests and if you need to minimize unsafe usage at the cost of performance on some
# workloads.
vec_memory = []
[workspace]
members = ["validation"]

View File

@ -5,7 +5,7 @@
`wasmi` - a Wasm interpreter.
`wasmi` was conceived as a component of [parity-ethereum](https://github.com/paritytech/parity-ethereum) (ethereum-like contracts in wasm) and [substrate](https://github.com/paritytech/substrate). These projects are related to blockchain and require a high degree of correctness, even if that might be over conservative. This specifically means that we are not trying to be involved in any implementation of any of work-in-progress Wasm proposals. We are also trying to be as close as possible to the spec, which means we are trying to avoid features that is not directly supported by the spec. This means that it is flexible on the one hand and on the other hand there shouldn't be a problem migrating to another spec compliant execution engine.
`wasmi` was conceived as a component of [parity-ethereum](https://github.com/paritytech/parity-ethereum) (ethereum-like contracts in wasm) and [substrate](https://github.com/paritytech/substrate). These projects are related to blockchain and require a high degree of correctness, even if that might be over conservative. This specifically means that we are not trying to be involved in any implementation of any of work-in-progress Wasm proposals. We are also trying to be as close as possible to the spec, which means we are trying to avoid features that is not directly supported by the spec. This means that it is flexible on the one hand and on the other hand there shouldn't be a problem migrating to another spec compilant execution engine.
With all that said, `wasmi` should be a good option for initial prototyping.
@ -26,8 +26,8 @@ This crate supports `no_std` environments.
Enable the `core` feature and disable default features:
```toml
[dependencies]
wasmi = {
version = "*",
parity-wasm = {
version = "0.31",
default-features = false,
features = "core"
}

View File

@ -6,7 +6,7 @@ authors = ["Sergey Pepyakin <s.pepyakin@gmail.com>"]
[dependencies]
wasmi = { path = ".." }
assert_matches = "1.2"
wabt = "0.9"
wabt = "0.6"
[profile.bench]
debug = true

View File

@ -13,7 +13,7 @@ use wasmi::{ImportsBuilder, Module, ModuleInstance, NopExternals, RuntimeValue};
use test::Bencher;
// Load a module from a file.
fn load_from_file(filename: &str) -> Result<Module, Box<dyn error::Error>> {
fn load_from_file(filename: &str) -> Result<Module, Box<error::Error>> {
use std::io::prelude::*;
let mut file = File::open(filename)?;
let mut buf = Vec::new();

View File

@ -33,7 +33,7 @@ pub extern "C" fn prepare_tiny_keccak() -> *const TinyKeccakTestData {
}
#[no_mangle]
pub extern "C" fn bench_tiny_keccak(test_data: *mut TinyKeccakTestData) {
pub extern "C" fn bench_tiny_keccak(test_data: *const TinyKeccakTestData) {
unsafe {
let mut keccak = Keccak::new_keccak256();
keccak.update((*test_data).data);

View File

@ -10,7 +10,7 @@ cargo-fuzz = true
[dependencies]
wasmi = { path = ".." }
wabt = "0.9"
wabt = "0.6.0"
wasmparser = "0.14.1"
tempdir = "0.3.6"

View File

@ -7,4 +7,4 @@ authors = ["Sergey Pepyakin <s.pepyakin@gmail.com>"]
honggfuzz = "=0.5.9" # Strict equal since hfuzz requires dep and cmd versions to match.
wasmi = { path = ".." }
tempdir = "0.3.6"
wabt = "0.9"
wabt = "0.6.0"

View File

@ -1,14 +1,12 @@
use alloc::{
borrow::Cow,
rc::{Rc, Weak},
vec::Vec,
};
#[allow(unused_imports)]
use alloc::prelude::v1::*;
use alloc::rc::{Rc, Weak};
use core::fmt;
use host::Externals;
use isa;
use module::ModuleInstance;
use parity_wasm::elements::Local;
use runner::{check_function_args, Interpreter, InterpreterState, StackRecycler};
use runner::{check_function_args, Interpreter, InterpreterState};
use types::ValueType;
use value::RuntimeValue;
use {Signature, Trap};
@ -142,7 +140,7 @@ impl FuncInstance {
check_function_args(func.signature(), &args)?;
match *func.as_internal() {
FuncInstanceInternal::Internal { .. } => {
let mut interpreter = Interpreter::new(func, args, None)?;
let mut interpreter = Interpreter::new(func, args)?;
interpreter.start_execution(externals)
}
FuncInstanceInternal::Host {
@ -152,34 +150,6 @@ impl FuncInstance {
}
}
/// Invoke this function using recycled stacks.
///
/// # Errors
///
/// Same as [`invoke`].
///
/// [`invoke`]: #method.invoke
pub fn invoke_with_stack<E: Externals>(
func: &FuncRef,
args: &[RuntimeValue],
externals: &mut E,
stack_recycler: &mut StackRecycler,
) -> Result<Option<RuntimeValue>, Trap> {
check_function_args(func.signature(), &args)?;
match *func.as_internal() {
FuncInstanceInternal::Internal { .. } => {
let mut interpreter = Interpreter::new(func, args, Some(stack_recycler))?;
let return_value = interpreter.start_execution(externals);
stack_recycler.recycle(interpreter);
return_value
}
FuncInstanceInternal::Host {
ref host_func_index,
..
} => externals.invoke_index(*host_func_index, args.into()),
}
}
/// Invoke the function, get a resumable handle. This handle can then be used to [`start_execution`]. If a
/// Host trap happens, caller can use [`resume_execution`] to feed the expected return value back in, and then
/// continue the execution.
@ -196,13 +166,12 @@ impl FuncInstance {
/// [`resume_execution`]: struct.FuncInvocation.html#method.resume_execution
pub fn invoke_resumable<'args>(
func: &FuncRef,
args: impl Into<Cow<'args, [RuntimeValue]>>,
args: &'args [RuntimeValue],
) -> Result<FuncInvocation<'args>, Trap> {
let args = args.into();
check_function_args(func.signature(), &args)?;
match *func.as_internal() {
FuncInstanceInternal::Internal { .. } => {
let interpreter = Interpreter::new(func, &*args, None)?;
let interpreter = Interpreter::new(func, args)?;
Ok(FuncInvocation {
kind: FuncInvocationKind::Internal(interpreter),
})
@ -259,7 +228,7 @@ pub struct FuncInvocation<'args> {
enum FuncInvocationKind<'args> {
Internal(Interpreter),
Host {
args: Cow<'args, [RuntimeValue]>,
args: &'args [RuntimeValue],
host_func_index: usize,
finished: bool,
},
@ -306,7 +275,7 @@ impl<'args> FuncInvocation<'args> {
return Err(ResumableError::AlreadyStarted);
}
*finished = true;
Ok(externals.invoke_index(*host_func_index, args.as_ref().into())?)
Ok(externals.invoke_index(*host_func_index, args.clone().into())?)
}
}
}
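
For orientation, this is roughly how the resumable API above is driven from the embedder's side. A minimal sketch, not taken from the repository; it assumes a `FuncRef` to some export and an `Externals` implementation, and that `is_resumable`/`resume_execution` keep their current signatures:

```rust
use wasmi::{Externals, FuncInstance, FuncRef, ResumableError, RuntimeValue};

// Hypothetical driver: `func` points at an exported function and `env` is
// the embedder's Externals implementation.
fn drive<E: Externals>(func: &FuncRef, env: &mut E) {
    let mut invocation =
        FuncInstance::invoke_resumable(func, &[RuntimeValue::I32(1)][..]).unwrap();
    match invocation.start_execution(env) {
        // A host function trapped: feed the value it was expected to return
        // back in and continue from where execution stopped.
        Err(ResumableError::Trap(_)) if invocation.is_resumable() => {
            let _ = invocation
                .resume_execution(Some(RuntimeValue::I32(2)), env)
                .unwrap();
        }
        other => drop(other), // ran to completion, or failed for real
    }
}
```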

View File

@ -114,11 +114,11 @@ pub trait HostError: 'static + ::core::fmt::Display + ::core::fmt::Debug + Send
}
}
impl dyn HostError {
impl HostError {
/// Attempt to downcast this `HostError` to a concrete type by reference.
pub fn downcast_ref<T: HostError>(&self) -> Option<&T> {
if self.__private_get_type_id__() == TypeId::of::<T>() {
unsafe { Some(&*(self as *const dyn HostError as *const T)) }
unsafe { Some(&*(self as *const HostError as *const T)) }
} else {
None
}
@ -128,7 +128,7 @@ impl dyn HostError {
/// reference.
pub fn downcast_mut<T: HostError>(&mut self) -> Option<&mut T> {
if self.__private_get_type_id__() == TypeId::of::<T>() {
unsafe { Some(&mut *(self as *mut dyn HostError as *mut T)) }
unsafe { Some(&mut *(self as *mut HostError as *mut T)) }
} else {
None
}
@ -257,5 +257,5 @@ mod tests {
}
// Tests that `HostError` trait is object safe.
fn _host_error_is_object_safe(_: &dyn HostError) {}
fn _host_error_is_object_safe(_: &HostError) {}
}
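
As a usage note: the downcast pair above is what lets an embedder smuggle its own error type through a trap and recover it afterwards. A hedged sketch — the `MyError` type is invented for illustration:

```rust
use std::fmt;
use wasmi::{Error, HostError};

#[derive(Debug)]
struct MyError(u32);

impl fmt::Display for MyError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "my error: {}", self.0)
    }
}

// A marker impl is all that is needed; the downcast helpers come for free.
impl HostError for MyError {}

fn code_of(err: &Error) -> Option<u32> {
    // `as_host_error` surfaces the boxed trait object; `downcast_ref`
    // recovers the concrete type via the private TypeId check above.
    err.as_host_error()
        .and_then(|host| host.downcast_ref::<MyError>())
        .map(|my| my.0)
}
```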

View File

@ -1,4 +1,10 @@
use alloc::{collections::BTreeMap, string::String};
#[allow(unused_imports)]
use alloc::prelude::v1::*;
#[cfg(not(feature = "std"))]
use hashbrown::HashMap;
#[cfg(feature = "std")]
use std::collections::HashMap;
use func::FuncRef;
use global::GlobalRef;
@ -100,7 +106,7 @@ pub trait ImportResolver {
/// [`ImportResolver`]: trait.ImportResolver.html
/// [`ModuleImportResolver`]: trait.ModuleImportResolver.html
pub struct ImportsBuilder<'a> {
modules: BTreeMap<String, &'a dyn ModuleImportResolver>,
modules: HashMap<String, &'a ModuleImportResolver>,
}
impl<'a> Default for ImportsBuilder<'a> {
@ -113,7 +119,7 @@ impl<'a> ImportsBuilder<'a> {
/// Create an empty `ImportsBuilder`.
pub fn new() -> ImportsBuilder<'a> {
ImportsBuilder {
modules: BTreeMap::new(),
modules: HashMap::new(),
}
}
@ -121,7 +127,7 @@ impl<'a> ImportsBuilder<'a> {
pub fn with_resolver<N: Into<String>>(
mut self,
name: N,
resolver: &'a dyn ModuleImportResolver,
resolver: &'a ModuleImportResolver,
) -> Self {
self.modules.insert(name.into(), resolver);
self
@ -130,15 +136,11 @@ impl<'a> ImportsBuilder<'a> {
/// Register a resolver by a name.
///
/// Mutable borrowed version.
pub fn push_resolver<N: Into<String>>(
&mut self,
name: N,
resolver: &'a dyn ModuleImportResolver,
) {
pub fn push_resolver<N: Into<String>>(&mut self, name: N, resolver: &'a ModuleImportResolver) {
self.modules.insert(name.into(), resolver);
}
fn resolver(&self, name: &str) -> Option<&dyn ModuleImportResolver> {
fn resolver(&self, name: &str) -> Option<&ModuleImportResolver> {
self.modules.get(name).cloned()
}
}
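
Whichever map backs it, the builder is driven the same way. A minimal sketch of wiring a resolver in (names are hypothetical):

```rust
use wasmi::{ImportsBuilder, Module, ModuleImportResolver, ModuleInstance, ModuleRef};

// `env_resolver` stands in for the embedder's ModuleImportResolver that
// serves imports from the "env" namespace.
fn instantiate(module: &Module, env_resolver: &dyn ModuleImportResolver) -> ModuleRef {
    let imports = ImportsBuilder::new().with_resolver("env", env_resolver);
    ModuleInstance::new(module, &imports)
        .expect("instantiation failed")
        .assert_no_start() // panics if the module declares a start function
}
```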

View File

@ -67,7 +67,8 @@
//! - Reserved immediates are ignored for `call_indirect`, `current_memory`, `grow_memory`.
//!
use alloc::vec::Vec;
#[allow(unused_imports)]
use alloc::prelude::v1::*;
/// Should we keep a value before "discarding" a stack frame?
///

View File

@ -96,6 +96,8 @@
#![warn(missing_docs)]
#![cfg_attr(not(feature = "std"), no_std)]
//// alloc is required in no_std
#![cfg_attr(not(feature = "std"), feature(alloc, alloc_prelude))]
#[cfg(not(feature = "std"))]
#[macro_use]
@ -112,16 +114,15 @@ extern crate assert_matches;
#[cfg(test)]
extern crate wabt;
#[cfg(not(feature = "std"))]
extern crate hashbrown;
extern crate memory_units as memory_units_crate;
extern crate parity_wasm;
extern crate wasmi_validation as validation;
use alloc::{
boxed::Box,
string::{String, ToString},
vec::Vec,
};
#[allow(unused_imports)]
use alloc::prelude::v1::*;
use core::fmt;
#[cfg(feature = "std")]
use std::error;
@ -129,9 +130,6 @@ use std::error;
#[cfg(not(feature = "std"))]
extern crate libm;
extern crate num_rational;
extern crate num_traits;
/// Error type which can be thrown by wasm code or by host environment.
///
/// Under some conditions, wasm execution may produce a `Trap`, which immediately aborts execution.
@ -240,7 +238,7 @@ pub enum TrapKind {
/// Typically returned from an implementation of [`Externals`].
///
/// [`Externals`]: trait.Externals.html
Host(Box<dyn host::HostError>),
Host(Box<host::HostError>),
}
impl TrapKind {
@ -274,7 +272,7 @@ pub enum Error {
/// Trap.
Trap(Trap),
/// Custom embedder error.
Host(Box<dyn host::HostError>),
Host(Box<host::HostError>),
}
impl Error {
@ -286,7 +284,7 @@ impl Error {
/// [`Host`]: enum.Error.html#variant.Host
/// [`Trap`]: enum.Error.html#variant.Trap
/// [`TrapKind::Host`]: enum.TrapKind.html#variant.Host
pub fn as_host_error(&self) -> Option<&dyn host::HostError> {
pub fn as_host_error(&self) -> Option<&host::HostError> {
match *self {
Error::Host(ref host_err) => Some(&**host_err),
Error::Trap(ref trap) => match *trap.kind() {
@ -406,7 +404,6 @@ pub use self::host::{Externals, HostError, NopExternals, RuntimeArgs};
pub use self::imports::{ImportResolver, ImportsBuilder, ModuleImportResolver};
pub use self::memory::{MemoryInstance, MemoryRef, LINEAR_MEMORY_PAGE_SIZE};
pub use self::module::{ExternVal, ModuleInstance, ModuleRef, NotStartedModuleRef};
pub use self::runner::{StackRecycler, DEFAULT_CALL_STACK_LIMIT, DEFAULT_VALUE_STACK_LIMIT};
pub use self::table::{TableInstance, TableRef};
pub use self::types::{GlobalDescriptor, MemoryDescriptor, Signature, TableDescriptor, ValueType};
pub use self::value::{Error as ValueError, FromRuntimeValue, LittleEndianConvert, RuntimeValue};

View File

@ -1,8 +1,10 @@
use alloc::{rc::Rc, string::ToString, vec::Vec};
#[allow(unused_imports)]
use alloc::prelude::v1::*;
use alloc::rc::Rc;
use core::{
cell::{Cell, RefCell},
cmp, fmt,
ops::Range,
cmp, fmt, slice,
ops::{Range, Deref, DerefMut},
u32,
};
use memory_units::{Bytes, Pages, RoundUpTo};
@ -10,16 +12,6 @@ use parity_wasm::elements::ResizableLimits;
use value::LittleEndianConvert;
use Error;
#[cfg(all(unix, not(feature = "vec_memory")))]
#[path = "mmap_bytebuf.rs"]
mod bytebuf;
#[cfg(any(not(unix), feature = "vec_memory"))]
#[path = "vec_bytebuf.rs"]
mod bytebuf;
use self::bytebuf::ByteBuf;
/// Size of a page of [linear memory][`MemoryInstance`] - 64KiB.
///
/// The size of a memory is always an integer multiple of a page size.
@ -43,6 +35,22 @@ impl ::core::ops::Deref for MemoryRef {
}
}
pub trait Allocator: Deref<Target=[u8]> + DerefMut<Target=[u8]> {
fn resize(&mut self, usize, value: u8);
}
impl Allocator for Vec<u8> {
fn resize(&mut self, size: usize, value: u8) {
Vec::resize(self, size, value)
}
}
impl Allocator for &'static mut [u8] {
fn resize(&mut self, _size: usize, _value: u8) {
// no op
}
}
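
The new `Allocator` abstraction is the heart of this change: linear memory grows through `resize`, while reads and writes go through the `Deref`/`DerefMut` slices. Besides the two impls shipped here, an embedder could plug in its own; a sketch of a bounded, preallocated variant (invented for illustration, not part of the diff):

```rust
use core::ops::{Deref, DerefMut};

// A fixed-capacity allocator: storage is allocated once and `resize` only
// ever exposes a longer prefix, refusing to grow past capacity (much like
// the `&'static mut [u8]` impl above, which ignores resize requests).
struct FixedBuf {
    buf: Box<[u8]>, // backing storage, allocated up front
    len: usize,     // currently exposed prefix
}

impl Deref for FixedBuf {
    type Target = [u8];
    fn deref(&self) -> &[u8] {
        &self.buf[..self.len]
    }
}

impl DerefMut for FixedBuf {
    fn deref_mut(&mut self) -> &mut [u8] {
        &mut self.buf[..self.len]
    }
}

impl Allocator for FixedBuf {
    fn resize(&mut self, size: usize, value: u8) {
        let size = size.min(self.buf.len()); // clamp to capacity
        for b in &mut self.buf[self.len..size.max(self.len)] {
            *b = value; // fill newly exposed bytes, as Vec::resize would
        }
        self.len = size;
    }
}
```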
/// Runtime representation of a linear memory (or `memory` for short).
///
/// A memory is a contiguous, mutable array of raw bytes. Wasm code can load and store values
@ -60,10 +68,13 @@ pub struct MemoryInstance {
/// Memory limits.
limits: ResizableLimits,
/// Linear memory buffer with lazy allocation.
buffer: RefCell<ByteBuf>,
buffer: RefCell<Box<dyn Allocator>>,
initial: Pages,
current_size: Cell<usize>,
maximum: Option<Pages>,
lowest_used: Cell<u32>,
buffer_ptr: Cell<*mut u8>,
buffer_size: Cell<usize>,
}
impl fmt::Debug for MemoryInstance {
@ -133,24 +144,62 @@ impl MemoryInstance {
validation::validate_memory(initial_u32, maximum_u32).map_err(Error::Memory)?;
}
let memory = MemoryInstance::new(initial, maximum)?;
let allocator = Box::new(Vec::with_capacity(4096));
let memory = MemoryInstance::new(initial, maximum, allocator);
Ok(MemoryRef(Rc::new(memory)))
}
/// Create a memory instance using specified raw memory. The memory address must
/// be aligned to a page size. The size must be a multiple of page size.
///
/// # Errors
///
/// Returns `Err` if:
///
/// - `buffer` is not aligned to page size.
/// - `size` is not a multiple of page size.
pub fn with_memory(buffer: *mut u8, size: usize) -> Result<MemoryRef, Error> {
if (buffer as usize) % LINEAR_MEMORY_PAGE_SIZE.0 != 0 {
return Err(Error::Memory(format!(
"Buffer address must be aligned to page size",
)))
}
if size % LINEAR_MEMORY_PAGE_SIZE.0 != 0 {
return Err(Error::Memory(format!(
"Size {} must be multiple of page size",
size,
)))
}
let pages: Pages = Bytes(size).round_up_to();
if pages > Pages(validation::LINEAR_MEMORY_MAX_PAGES as usize) {
return Err(Error::Memory(format!(
"Memory size must be at most {} pages",
validation::LINEAR_MEMORY_MAX_PAGES
)));
}
let allocator = unsafe { Box::new(slice::from_raw_parts_mut(buffer, size)) };
let memory = MemoryInstance::new(pages, Some(pages), allocator);
Ok(MemoryRef(Rc::new(memory)))
}
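
A hedged usage sketch for the constructor above. Obtaining a genuinely page-aligned buffer is the embedder's problem — a plain `Vec` generally is not aligned — so the pointer here is assumed to come from something like `mmap` or a suitably aligned static:

```rust
use wasmi::{MemoryInstance, MemoryRef, LINEAR_MEMORY_PAGE_SIZE};

// `buffer` must be page-aligned and `size` a multiple of the page size,
// otherwise `with_memory` returns Err, per the checks above.
fn wrap_external(buffer: *mut u8) -> Result<MemoryRef, wasmi::Error> {
    let size = LINEAR_MEMORY_PAGE_SIZE.0; // exactly one 64 KiB page
    MemoryInstance::with_memory(buffer, size)
}
```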
/// Create new linear memory instance.
fn new(initial: Pages, maximum: Option<Pages>) -> Result<Self, Error> {
fn new(initial: Pages, maximum: Option<Pages>, mut allocator: Box<Allocator>) -> Self {
let limits = ResizableLimits::new(initial.0 as u32, maximum.map(|p| p.0 as u32));
let initial_size: Bytes = initial.into();
Ok(MemoryInstance {
let ptr = allocator.as_mut_ptr();
MemoryInstance {
limits: limits,
buffer: RefCell::new(
ByteBuf::new(initial_size.0).map_err(|err| Error::Memory(err.to_string()))?,
),
buffer: RefCell::new(allocator),
initial: initial,
current_size: Cell::new(initial_size.0),
maximum: maximum,
})
lowest_used: Cell::new(u32::max_value()),
buffer_ptr: Cell::new(ptr),
buffer_size: Cell::new(0),
}
}
/// Return linear memory limits.
@ -171,6 +220,16 @@ impl MemoryInstance {
self.maximum
}
/// Returns lowest offset ever written or `u32::max_value()` if none.
pub fn lowest_used(&self) -> u32 {
self.lowest_used.get()
}
/// Resets tracked lowest offset.
pub fn reset_lowest_used(&self, addr: u32) {
self.lowest_used.set(addr)
}
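
Presumably the point of this tracking is to let an embedder cheaply wipe or persist only the region a call actually touched. A sketch of that pattern under that assumption (`scrub_dirty` is invented):

```rust
use wasmi::MemoryRef;

// Zero everything from the lowest offset written during the last run up to
// the used size, then rearm the tracker for the next invocation.
fn scrub_dirty(mem: &MemoryRef) -> Result<(), wasmi::Error> {
    let low = mem.lowest_used();
    if low != u32::max_value() {
        let used = mem.used_size().0; // one past the highest written byte
        mem.zero(low as usize, used - low as usize)?;
        mem.reset_lowest_used(u32::max_value());
    }
    Ok(())
}
```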
/// Returns current linear memory size.
///
/// Maximum memory size cannot exceed `65536` pages or 4GiB.
@ -191,18 +250,21 @@ impl MemoryInstance {
/// );
/// ```
pub fn current_size(&self) -> Pages {
Bytes(self.buffer.borrow().len()).round_up_to()
Bytes(self.current_size.get()).round_up_to()
}
/// Returns current used memory size in bytes.
/// This is one more than the highest memory address that had been written to.
pub fn used_size(&self) -> Bytes {
Bytes(self.buffer_size.get())
}
/// Get value from memory at given offset.
pub fn get_value<T: LittleEndianConvert>(&self, offset: u32) -> Result<T, Error> {
let mut buffer = self.buffer.borrow_mut();
let region =
self.checked_region(&mut buffer, offset as usize, ::core::mem::size_of::<T>())?;
Ok(
T::from_little_endian(&buffer.as_slice_mut()[region.range()])
.expect("Slice size is checked"),
)
self.checked_region(offset as usize, ::core::mem::size_of::<T>())?;
let mem = unsafe { slice::from_raw_parts_mut(self.buffer_ptr.get(), self.buffer_size.get()) };
Ok(T::from_little_endian(&mem[region.range()]).expect("Slice size is checked"))
}
/// Copy data from memory at given offset.
@ -212,10 +274,9 @@ impl MemoryInstance {
///
/// [`get_into`]: #method.get_into
pub fn get(&self, offset: u32, size: usize) -> Result<Vec<u8>, Error> {
let mut buffer = self.buffer.borrow_mut();
let region = self.checked_region(&mut buffer, offset as usize, size)?;
Ok(buffer.as_slice_mut()[region.range()].to_vec())
let region = self.checked_region(offset as usize, size)?;
let mem = unsafe { slice::from_raw_parts_mut(self.buffer_ptr.get(), self.buffer_size.get()) };
Ok(mem[region.range()].to_vec())
}
/// Copy data from given offset in the memory into `target` slice.
@ -224,33 +285,37 @@ impl MemoryInstance {
///
/// Returns `Err` if the specified region is out of bounds.
pub fn get_into(&self, offset: u32, target: &mut [u8]) -> Result<(), Error> {
let mut buffer = self.buffer.borrow_mut();
let region = self.checked_region(&mut buffer, offset as usize, target.len())?;
target.copy_from_slice(&buffer.as_slice_mut()[region.range()]);
let region = self.checked_region(offset as usize, target.len())?;
let mem = unsafe { slice::from_raw_parts_mut(self.buffer_ptr.get(), self.buffer_size.get()) };
target.copy_from_slice(&mem[region.range()]);
Ok(())
}
/// Copy data in the memory at given offset.
pub fn set(&self, offset: u32, value: &[u8]) -> Result<(), Error> {
let mut buffer = self.buffer.borrow_mut();
let range = self
.checked_region(&mut buffer, offset as usize, value.len())?
.checked_region(offset as usize, value.len())?
.range();
buffer.as_slice_mut()[range].copy_from_slice(value);
if offset < self.lowest_used.get() {
self.lowest_used.set(offset);
}
let mem = unsafe { slice::from_raw_parts_mut(self.buffer_ptr.get(), self.buffer_size.get()) };
mem[range].copy_from_slice(value);
Ok(())
}
/// Copy value in the memory at given offset.
pub fn set_value<T: LittleEndianConvert>(&self, offset: u32, value: T) -> Result<(), Error> {
let mut buffer = self.buffer.borrow_mut();
let range = self
.checked_region(&mut buffer, offset as usize, ::core::mem::size_of::<T>())?
.checked_region(offset as usize, ::core::mem::size_of::<T>())?
.range();
value.into_little_endian(&mut buffer.as_slice_mut()[range]);
if offset < self.lowest_used.get() {
self.lowest_used.set(offset);
}
let mem = unsafe { slice::from_raw_parts_mut(self.buffer_ptr.get(), self.buffer_size.get()) };
value.into_little_endian(&mut mem[range]);
Ok(())
}
@ -284,19 +349,12 @@ impl MemoryInstance {
}
let new_buffer_length: Bytes = new_size.into();
self.buffer
.borrow_mut()
.realloc(new_buffer_length.0)
.map_err(|err| Error::Memory(err.to_string()))?;
self.current_size.set(new_buffer_length.0);
Ok(size_before_grow)
}
fn checked_region(
&self,
buffer: &mut ByteBuf,
offset: usize,
size: usize,
) -> Result<CheckedRegion, Error> {
@ -307,12 +365,19 @@ impl MemoryInstance {
))
})?;
if end > buffer.len() {
if end <= self.current_size.get() && self.buffer_size.get() < end {
let mut allocator = self.buffer.borrow_mut();
allocator.resize(end, 0);
self.buffer_ptr.set(allocator.as_mut_ptr());
self.buffer_size.set(allocator.len());
}
if end > self.buffer_size.get() {
return Err(Error::Memory(format!(
"trying to access region [{}..{}] in memory [0..{}]",
offset,
end,
buffer.len()
self.buffer_size.get(),
)));
}
@ -324,7 +389,6 @@ impl MemoryInstance {
fn checked_region_pair(
&self,
buffer: &mut ByteBuf,
offset1: usize,
size1: usize,
offset2: usize,
@ -344,21 +408,29 @@ impl MemoryInstance {
))
})?;
if end1 > buffer.len() {
let max = cmp::max(end1, end2);
if max <= self.current_size.get() && self.buffer_size.get() < max {
let mut allocator = self.buffer.borrow_mut();
allocator.resize(max, 0);
self.buffer_ptr.set(allocator.as_mut_ptr());
self.buffer_size.set(allocator.len());
}
if end1 > self.buffer_size.get() {
return Err(Error::Memory(format!(
"trying to access region [{}..{}] in memory [0..{}]",
offset1,
end1,
buffer.len()
self.buffer_size.get(),
)));
}
if end2 > buffer.len() {
if end2 > self.buffer_size.get() {
return Err(Error::Memory(format!(
"trying to access region [{}..{}] in memory [0..{}]",
offset2,
end2,
buffer.len()
self.buffer_size.get(),
)));
}
@ -382,15 +454,17 @@ impl MemoryInstance {
///
/// Returns `Err` if either of specified regions is out of bounds.
pub fn copy(&self, src_offset: usize, dst_offset: usize, len: usize) -> Result<(), Error> {
let mut buffer = self.buffer.borrow_mut();
let (read_region, write_region) =
self.checked_region_pair(&mut buffer, src_offset, len, dst_offset, len)?;
self.checked_region_pair(src_offset, len, dst_offset, len)?;
if dst_offset < self.lowest_used.get() as usize {
self.lowest_used.set(dst_offset as u32);
}
unsafe {
::core::ptr::copy(
buffer.as_slice()[read_region.range()].as_ptr(),
buffer.as_slice_mut()[write_region.range()].as_mut_ptr(),
self.buffer_ptr.get().offset(read_region.offset as isize),
self.buffer_ptr.get().offset(write_region.offset as isize),
len,
)
}
@ -415,10 +489,9 @@ impl MemoryInstance {
dst_offset: usize,
len: usize,
) -> Result<(), Error> {
let mut buffer = self.buffer.borrow_mut();
let (read_region, write_region) =
self.checked_region_pair(&mut buffer, src_offset, len, dst_offset, len)?;
self.checked_region_pair(src_offset, len, dst_offset, len)?;
if read_region.intersects(&write_region) {
return Err(Error::Memory(format!(
@ -426,10 +499,14 @@ impl MemoryInstance {
)));
}
if dst_offset < self.lowest_used.get() as usize {
self.lowest_used.set(dst_offset as u32);
}
unsafe {
::core::ptr::copy_nonoverlapping(
buffer.as_slice()[read_region.range()].as_ptr(),
buffer.as_slice_mut()[write_region.range()].as_mut_ptr(),
self.buffer_ptr.get().offset(read_region.offset as isize),
self.buffer_ptr.get().offset(write_region.offset as isize),
len,
)
}
@ -453,19 +530,24 @@ impl MemoryInstance {
return src.copy(src_offset, dst_offset, len);
}
// Because memory references point to different memory instances, it is safe to `borrow_mut`
// both buffers at once (modulo `with_direct_access_mut`).
let mut src_buffer = src.buffer.borrow_mut();
let mut dst_buffer = dst.buffer.borrow_mut();
let src_range = src
.checked_region(&mut src_buffer, src_offset, len)?
.checked_region(src_offset, len)?
.range();
let dst_range = dst
.checked_region(&mut dst_buffer, dst_offset, len)?
.checked_region(dst_offset, len)?
.range();
dst_buffer.as_slice_mut()[dst_range].copy_from_slice(&src_buffer.as_slice()[src_range]);
if dst_offset < dst.lowest_used.get() as usize {
dst.lowest_used.set(dst_offset as u32);
}
unsafe {
::core::ptr::copy_nonoverlapping(
src.buffer_ptr.get().offset(src_range.start as isize),
dst.buffer_ptr.get().offset(dst_range.start as isize),
len,
)
}
Ok(())
}
@ -478,12 +560,18 @@ impl MemoryInstance {
///
/// Returns `Err` if the specified region is out of bounds.
pub fn clear(&self, offset: usize, new_val: u8, len: usize) -> Result<(), Error> {
let mut buffer = self.buffer.borrow_mut();
let range = self.checked_region(offset, len)?.range();
let range = self.checked_region(&mut buffer, offset, len)?.range();
if offset < self.lowest_used.get() as usize {
self.lowest_used.set(offset as u32);
}
for val in &mut buffer.as_slice_mut()[range] {
*val = new_val
unsafe {
::core::ptr::write_bytes(
self.buffer_ptr.get().offset(range.start as isize),
new_val,
len,
);
}
Ok(())
}
@ -497,28 +585,18 @@ impl MemoryInstance {
self.clear(offset, 0, len)
}
/// Set every byte in the entire linear memory to 0, preserving its size.
///
/// Might be useful for some optimization shenanigans.
pub fn erase(&self) -> Result<(), Error> {
self.buffer
.borrow_mut()
.erase()
.map_err(|err| Error::Memory(err.to_string()))
}
/// Provides direct access to the underlying memory buffer.
///
/// # Panics
///
/// Any call that requires write access to memory (such as [`set`], [`clear`], etc) made within
/// the closure will panic.
/// the closure will panic. Note that the buffer size may be arbitrary. Proceed with caution.
///
/// [`set`]: #method.get
/// [`clear`]: #method.set
pub fn with_direct_access<R, F: FnOnce(&[u8]) -> R>(&self, f: F) -> R {
let buf = self.buffer.borrow();
f(buf.as_slice())
f(&*buf)
}
/// Provides direct mutable access to the underlying memory buffer.
@ -526,13 +604,15 @@ impl MemoryInstance {
/// # Panics
///
/// Any calls that requires either read or write access to memory (such as [`get`], [`set`], [`copy`], etc) made
/// within the closure will panic. Proceed with caution.
/// within the closure will panic. Note that the buffer size may be arbitrary.
/// The closure may however resize it. Proceed with caution.
///
/// [`get`]: #method.get
/// [`set`]: #method.set
pub fn with_direct_access_mut<R, F: FnOnce(&mut [u8]) -> R>(&self, f: F) -> R {
/// [`copy`]: #method.copy
pub fn with_direct_access_mut<R, F: FnOnce(&mut Allocator) -> R>(&self, f: F) -> R {
let mut buf = self.buffer.borrow_mut();
f(buf.as_slice_mut())
f(&mut **buf)
}
}
@ -546,7 +626,22 @@ mod tests {
#[test]
fn alloc() {
let mut fixtures = vec![
#[cfg(target_pointer_width = "64")]
let fixtures = &[
(0, None, true),
(0, Some(0), true),
(1, None, true),
(1, Some(1), true),
(0, Some(1), true),
(1, Some(0), false),
(0, Some(65536), true),
(65536, Some(65536), true),
(65536, Some(0), false),
(65536, None, true),
];
#[cfg(target_pointer_width = "32")]
let fixtures = &[
(0, None, true),
(0, Some(0), true),
(1, None, true),
@ -555,13 +650,6 @@ mod tests {
(1, Some(0), false),
];
#[cfg(target_pointer_width = "64")]
fixtures.extend(&[
(65536, Some(65536), true),
(65536, Some(0), false),
(65536, None, true),
]);
for (index, &(initial, maybe_max, expected_ok)) in fixtures.iter().enumerate() {
let initial: Pages = Pages(initial);
let maximum: Option<Pages> = maybe_max.map(|m| Pages(m));
@ -582,7 +670,7 @@ mod tests {
}
fn create_memory(initial_content: &[u8]) -> MemoryInstance {
let mem = MemoryInstance::new(Pages(1), Some(Pages(1))).unwrap();
let mem = MemoryInstance::new(Pages(1), Some(Pages(1)), Box::new(Vec::new()));
mem.set(0, initial_content)
.expect("Successful initialize the memory");
mem
@ -695,7 +783,7 @@ mod tests {
#[test]
fn get_into() {
let mem = MemoryInstance::new(Pages(1), None).unwrap();
let mem = MemoryInstance::new(Pages(1), None, Box::new(Vec::new()));
mem.set(6, &[13, 17, 129])
.expect("memory set should not fail");
@ -711,19 +799,11 @@ mod tests {
let mem = MemoryInstance::alloc(Pages(1), None).unwrap();
mem.set(100, &[0]).expect("memory set should not fail");
mem.with_direct_access_mut(|buf| {
assert_eq!(
buf.len(),
65536,
"the buffer length is expected to be 1 page long"
);
assert_eq!(buf.len(), 101);
buf[..10].copy_from_slice(&[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
});
mem.with_direct_access(|buf| {
assert_eq!(
buf.len(),
65536,
"the buffer length is expected to be 1 page long"
);
assert_eq!(buf.len(), 101);
assert_eq!(&buf[..10], &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
});
}

View File

@ -1,189 +0,0 @@
//! An implementation of a `ByteBuf` based on virtual memory.
//!
//! This implementation uses `mmap` on POSIX systems (and should use `VirtualAlloc` on windows).
//! There are possibilities to improve the performance for the reallocating case by reserving
//! memory up to maximum. This might be a problem for systems that don't have a lot of virtual
//! memory (i.e. 32-bit platforms).
use std::ptr::{self, NonNull};
use std::slice;
struct Mmap {
/// The pointer that points to the start of the mapping.
///
/// This value doesn't change after creation.
ptr: NonNull<u8>,
/// The length of this mapping.
///
/// Cannot be more than `isize::max_value()`. This value doesn't change after creation.
len: usize,
}
impl Mmap {
/// Create a new mmap mapping
///
/// Returns `Err` if:
/// - `len` should not exceed `isize::max_value()`
/// - `len` should be greater than 0.
/// - `mmap` returns an error (almost certainly means out of memory).
fn new(len: usize) -> Result<Self, &'static str> {
if len > isize::max_value() as usize {
return Err("`len` should not exceed `isize::max_value()`");
}
if len == 0 {
return Err("`len` should be greater than 0");
}
let ptr_or_err = unsafe {
// Safety Proof:
// No specific safety proofs are required for this call, since the call
// by itself can't invoke any safety problems (however, misusing its result can).
libc::mmap(
// `addr` - let the system to choose the address at which to create the mapping.
ptr::null_mut(),
// the length of the mapping in bytes.
len,
// `prot` - protection flags: READ WRITE !EXECUTE
libc::PROT_READ | libc::PROT_WRITE,
// `flags`
// `MAP_ANON` - mapping is not backed by any file and initial contents are
// initialized to zero.
// `MAP_PRIVATE` - the mapping is private to this process.
libc::MAP_ANON | libc::MAP_PRIVATE,
// `fildes` - a file descriptor. Pass -1 as this is required for some platforms
// when the `MAP_ANON` is passed.
-1,
// `offset` - offset from the file.
0,
)
};
match ptr_or_err {
// With the current parameters, the error can only be returned in case of insufficient
// memory.
libc::MAP_FAILED => Err("mmap returned an error"),
_ => {
let ptr = NonNull::new(ptr_or_err as *mut u8).ok_or("mmap returned 0")?;
Ok(Self { ptr, len })
}
}
}
fn as_slice(&self) -> &[u8] {
unsafe {
// Safety Proof:
// - Aliasing guarantees of `self.ptr` are not violated since `self` is the only owner.
// - This pointer was allocated for `self.len` bytes and thus is a valid slice.
// - `self.len` doesn't change throughout the lifetime of `self`.
// - The value is returned valid for the duration of lifetime of `self`.
// `self` cannot be destroyed while the returned slice is alive.
// - `self.ptr` is of `NonNull` type and thus `.as_ptr()` can never return NULL.
// - `self.len` cannot be larger than `isize::max_value()`.
slice::from_raw_parts(self.ptr.as_ptr(), self.len)
}
}
fn as_slice_mut(&mut self) -> &mut [u8] {
unsafe {
// Safety Proof:
// - See the proof for `Self::as_slice`
// - Additionally, it is not possible to obtain two mutable references for `self.ptr`
slice::from_raw_parts_mut(self.ptr.as_ptr(), self.len)
}
}
}
impl Drop for Mmap {
fn drop(&mut self) {
let ret_val = unsafe {
// Safety proof:
// - `self.ptr` was allocated by a call to `mmap`.
// - `self.len` was saved at the same time and it doesn't change throughout the lifetime
// of `self`.
libc::munmap(self.ptr.as_ptr() as *mut libc::c_void, self.len)
};
// There is no reason for `munmap` to fail to deallocate a private anonymous mapping
// allocated by `mmap`.
// However, for the cases when it actually fails prefer to fail, in order to not leak
// and exhaust the virtual memory.
assert_eq!(ret_val, 0, "munmap failed");
}
}
pub struct ByteBuf {
mmap: Option<Mmap>,
}
impl ByteBuf {
pub fn new(len: usize) -> Result<Self, &'static str> {
let mmap = if len == 0 {
None
} else {
Some(Mmap::new(len)?)
};
Ok(Self { mmap })
}
pub fn realloc(&mut self, new_len: usize) -> Result<(), &'static str> {
let new_mmap = if new_len == 0 {
None
} else {
let mut new_mmap = Mmap::new(new_len)?;
if let Some(cur_mmap) = self.mmap.take() {
let src = cur_mmap.as_slice();
let dst = new_mmap.as_slice_mut();
let amount = src.len().min(dst.len());
dst[..amount].copy_from_slice(&src[..amount]);
}
Some(new_mmap)
};
self.mmap = new_mmap;
Ok(())
}
pub fn len(&self) -> usize {
self.mmap.as_ref().map(|m| m.len).unwrap_or(0)
}
pub fn as_slice(&self) -> &[u8] {
self.mmap.as_ref().map(|m| m.as_slice()).unwrap_or(&[])
}
pub fn as_slice_mut(&mut self) -> &mut [u8] {
self.mmap
.as_mut()
.map(|m| m.as_slice_mut())
.unwrap_or(&mut [])
}
pub fn erase(&mut self) -> Result<(), &'static str> {
let len = self.len();
if len > 0 {
// The order is important.
//
// 1. First we clear, and thus drop, the current mmap if any.
// 2. And then we create a new one.
//
// Otherwise we double the peak memory consumption.
self.mmap = None;
self.mmap = Some(Mmap::new(len)?);
}
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::ByteBuf;
const PAGE_SIZE: usize = 4096;
// This is not required since wasm memories can only grow but nice to have.
#[test]
fn byte_buf_shrink() {
let mut byte_buf = ByteBuf::new(PAGE_SIZE * 3).unwrap();
byte_buf.realloc(PAGE_SIZE * 2).unwrap();
}
}

View File

@ -1,39 +0,0 @@
//! An implementation of `ByteBuf` based on a plain `Vec`.
use alloc::vec::Vec;
pub struct ByteBuf {
buf: Vec<u8>,
}
impl ByteBuf {
pub fn new(len: usize) -> Result<Self, &'static str> {
let mut buf = Vec::new();
buf.resize(len, 0u8);
Ok(Self { buf })
}
pub fn realloc(&mut self, new_len: usize) -> Result<(), &'static str> {
self.buf.resize(new_len, 0u8);
Ok(())
}
pub fn len(&self) -> usize {
self.buf.len()
}
pub fn as_slice(&self) -> &[u8] {
self.buf.as_ref()
}
pub fn as_slice_mut(&mut self) -> &mut [u8] {
self.buf.as_mut()
}
pub fn erase(&mut self) -> Result<(), &'static str> {
for v in &mut self.buf {
*v = 0;
}
Ok(())
}
}

View File

@ -1,14 +1,14 @@
use alloc::{
borrow::ToOwned,
rc::Rc,
string::{String, ToString},
vec::Vec,
};
#[allow(unused_imports)]
use alloc::prelude::v1::*;
use alloc::rc::Rc;
use core::cell::RefCell;
use core::fmt;
use Trap;
use alloc::collections::BTreeMap;
#[cfg(not(feature = "std"))]
use hashbrown::HashMap;
#[cfg(feature = "std")]
use std::collections::HashMap;
use core::cell::Ref;
use func::{FuncBody, FuncInstance, FuncRef};
@ -18,7 +18,6 @@ use imports::ImportResolver;
use memory::MemoryRef;
use memory_units::Pages;
use parity_wasm::elements::{External, InitExpr, Instruction, Internal, ResizableLimits, Type};
use runner::StackRecycler;
use table::TableRef;
use types::{GlobalDescriptor, MemoryDescriptor, TableDescriptor};
use validation::{DEFAULT_MEMORY_INDEX, DEFAULT_TABLE_INDEX};
@ -162,7 +161,7 @@ pub struct ModuleInstance {
funcs: RefCell<Vec<FuncRef>>,
memories: RefCell<Vec<MemoryRef>>,
globals: RefCell<Vec<GlobalRef>>,
exports: RefCell<BTreeMap<String, ExternVal>>,
exports: RefCell<HashMap<String, ExternVal>>,
}
impl ModuleInstance {
@ -173,7 +172,7 @@ impl ModuleInstance {
tables: RefCell::new(Vec::new()),
memories: RefCell::new(Vec::new()),
globals: RefCell::new(Vec::new()),
exports: RefCell::new(BTreeMap::new()),
exports: RefCell::new(HashMap::new()),
}
}
@ -421,11 +420,7 @@ impl ModuleInstance {
.map(|es| es.entries())
.unwrap_or(&[])
{
let offset = element_segment
.offset()
.as_ref()
.expect("passive segments are rejected due to validation");
let offset_val = match eval_init_expr(offset, &module_ref) {
let offset_val = match eval_init_expr(element_segment.offset(), &module_ref) {
RuntimeValue::I32(v) => v as u32,
_ => panic!("Due to validation elem segment offset should evaluate to i32"),
};
@ -454,11 +449,7 @@ impl ModuleInstance {
}
for data_segment in module.data_section().map(|ds| ds.entries()).unwrap_or(&[]) {
let offset = data_segment
.offset()
.as_ref()
.expect("passive segments are rejected due to validation");
let offset_val = match eval_init_expr(offset, &module_ref) {
let offset_val = match eval_init_expr(data_segment.offset(), &module_ref) {
RuntimeValue::I32(v) => v as u32,
_ => panic!("Due to validation data segment offset should evaluate to i32"),
};
@ -634,43 +625,21 @@ impl ModuleInstance {
args: &[RuntimeValue],
externals: &mut E,
) -> Result<Option<RuntimeValue>, Error> {
let func_instance = self.func_by_name(func_name)?;
FuncInstance::invoke(&func_instance, args, externals).map_err(|t| Error::Trap(t))
}
/// Invoke exported function by a name using recycled stacks.
///
/// # Errors
///
/// Same as [`invoke_export`].
///
/// [`invoke_export`]: #method.invoke_export
pub fn invoke_export_with_stack<E: Externals>(
&self,
func_name: &str,
args: &[RuntimeValue],
externals: &mut E,
stack_recycler: &mut StackRecycler,
) -> Result<Option<RuntimeValue>, Error> {
let func_instance = self.func_by_name(func_name)?;
FuncInstance::invoke_with_stack(&func_instance, args, externals, stack_recycler)
.map_err(|t| Error::Trap(t))
}
fn func_by_name(&self, func_name: &str) -> Result<FuncRef, Error> {
let extern_val = self
.export_by_name(func_name)
.ok_or_else(|| Error::Function(format!("Module doesn't have export {}", func_name)))?;
match extern_val {
ExternVal::Func(func_instance) => Ok(func_instance),
unexpected => Err(Error::Function(format!(
let func_instance = match extern_val {
ExternVal::Func(func_instance) => func_instance,
unexpected => {
return Err(Error::Function(format!(
"Export {} is not a function, but {:?}",
func_name, unexpected
))),
)));
}
};
FuncInstance::invoke(&func_instance, args, externals).map_err(|t| Error::Trap(t))
}
/// Find export by a name.
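
For reference, the call path above backs the usual embedder-facing entry point; a minimal sketch with a hypothetical `add` export:

```rust
use wasmi::{ModuleRef, NopExternals, RuntimeValue};

fn call_add(instance: &ModuleRef) -> Option<RuntimeValue> {
    instance
        .invoke_export(
            "add",
            &[RuntimeValue::I32(1), RuntimeValue::I32(2)],
            &mut NopExternals, // no host imports needed for this call
        )
        .expect("`add` should be an exported function")
}
```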

View File

@ -1,4 +1,5 @@
use alloc::{string::String, vec::Vec};
#[allow(unused_imports)]
use alloc::prelude::v1::*;
use parity_wasm::elements::{BlockType, FuncBody, Instruction};
@ -251,14 +252,13 @@ impl Compiler {
);
self.sink.emit_br_nez(target);
}
BrTable(ref br_table_data) => {
BrTable(ref table, default) => {
// At this point, the condition value is at the top of the stack.
// But at the point of actual jump the condition will already be
// popped off.
let value_stack_height = context.value_stack.len().saturating_sub(1);
let targets = br_table_data
.table
let targets = table
.iter()
.map(|depth| {
require_target(
@ -270,7 +270,7 @@ impl Compiler {
})
.collect::<Result<Vec<_>, _>>();
let default_target = require_target(
br_table_data.default,
default,
value_stack_height,
&context.frame_stack,
&self.label_stack,

View File

@ -1,4 +1,5 @@
use alloc::vec::Vec;
#[allow(unused_imports)]
use alloc::prelude::v1::*;
use crate::{
isa,

View File

@ -1,4 +1,5 @@
use alloc::{boxed::Box, vec::Vec};
#[allow(unused_imports)]
use alloc::prelude::v1::*;
use core::fmt;
use core::ops;
use core::{u32, usize};
@ -17,10 +18,10 @@ use value::{
};
use {Signature, Trap, TrapKind, ValueType};
/// Maximum number of bytes on the value stack.
pub const DEFAULT_VALUE_STACK_LIMIT: usize = 1024 * 1024;
/// Maximum number of entries in value stack.
pub const DEFAULT_VALUE_STACK_LIMIT: usize = (1024 * 1024) / ::core::mem::size_of::<RuntimeValue>();
/// Maximum number of levels on the call stack.
// TODO: Make these parameters changeable.
pub const DEFAULT_CALL_STACK_LIMIT: usize = 64 * 1024;
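
The two definitions of the value-stack limit measure different units: one caps the stack at 1 MiB of memory, the other at the equivalent number of slots. Assuming the internal slot representation stays a plain `u64` wrapper, both come out to the same depth:

```rust
// 1 MiB of value stack expressed in 8-byte slots:
const STACK_BYTES: usize = 1024 * 1024;
const STACK_ENTRIES: usize = STACK_BYTES / ::core::mem::size_of::<u64>(); // = 131072
```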
/// This is a wrapper around u64 to allow us to treat runtime values as a tag-free `u64`
@ -165,18 +166,14 @@ enum RunResult {
/// Function interpreter.
pub struct Interpreter {
value_stack: ValueStack,
call_stack: CallStack,
call_stack: Vec<FunctionContext>,
return_type: Option<ValueType>,
state: InterpreterState,
}
impl Interpreter {
pub fn new(
func: &FuncRef,
args: &[RuntimeValue],
mut stack_recycler: Option<&mut StackRecycler>,
) -> Result<Interpreter, Trap> {
let mut value_stack = StackRecycler::recreate_value_stack(&mut stack_recycler);
pub fn new(func: &FuncRef, args: &[RuntimeValue]) -> Result<Interpreter, Trap> {
let mut value_stack = ValueStack::with_limit(DEFAULT_VALUE_STACK_LIMIT);
for &arg in args {
let arg = arg.into();
value_stack.push(arg).map_err(
@ -186,7 +183,7 @@ impl Interpreter {
)?;
}
let mut call_stack = StackRecycler::recreate_call_stack(&mut stack_recycler);
let mut call_stack = Vec::new();
let initial_frame = FunctionContext::new(func.clone());
call_stack.push(initial_frame);
@ -281,14 +278,14 @@ impl Interpreter {
match function_return {
RunResult::Return => {
if self.call_stack.is_empty() {
if self.call_stack.last().is_none() {
// This was the last frame in the call stack. This means we
// are done executing.
return Ok(());
}
}
RunResult::NestedCall(nested_func) => {
if self.call_stack.is_full() {
if self.call_stack.len() + 1 >= DEFAULT_CALL_STACK_LIMIT {
return Err(TrapKind::StackOverflow.into());
}
@ -1289,8 +1286,14 @@ impl FunctionContext {
debug_assert!(!self.is_initialized);
let num_locals = locals.iter().map(|l| l.count() as usize).sum();
let locals = vec![Default::default(); num_locals];
value_stack.extend(num_locals)?;
// TODO: Replace with extend.
for local in locals {
value_stack
.push(local)
.map_err(|_| TrapKind::StackOverflow)?;
}
self.is_initialized = true;
Ok(())
@ -1360,6 +1363,16 @@ struct ValueStack {
}
impl ValueStack {
fn with_limit(limit: usize) -> ValueStack {
let mut buf = Vec::new();
buf.resize(limit, RuntimeValueInternal(0));
ValueStack {
buf: buf.into_boxed_slice(),
sp: 0,
}
}
#[inline]
fn drop_keep(&mut self, drop_keep: isa::DropKeep) {
if drop_keep.keep == isa::Keep::Single {
@ -1436,126 +1449,8 @@ impl ValueStack {
Ok(())
}
fn extend(&mut self, len: usize) -> Result<(), TrapKind> {
let cells = self
.buf
.get_mut(self.sp..self.sp + len)
.ok_or_else(|| TrapKind::StackOverflow)?;
for cell in cells {
*cell = Default::default();
}
self.sp += len;
Ok(())
}
#[inline]
fn len(&self) -> usize {
self.sp
}
}
struct CallStack {
buf: Vec<FunctionContext>,
limit: usize,
}
impl CallStack {
fn push(&mut self, ctx: FunctionContext) {
self.buf.push(ctx);
}
fn pop(&mut self) -> Option<FunctionContext> {
self.buf.pop()
}
fn is_empty(&self) -> bool {
self.buf.is_empty()
}
fn is_full(&self) -> bool {
self.buf.len() + 1 >= self.limit
}
}
/// Used to recycle stacks instead of allocating them repeatedly.
pub struct StackRecycler {
value_stack_buf: Option<Box<[RuntimeValueInternal]>>,
value_stack_limit: usize,
call_stack_buf: Option<Vec<FunctionContext>>,
call_stack_limit: usize,
}
impl StackRecycler {
/// Limit stacks created by this recycler to
/// - `value_stack_limit` bytes for values and
/// - `call_stack_limit` levels for calls.
pub fn with_limits(value_stack_limit: usize, call_stack_limit: usize) -> Self {
Self {
value_stack_buf: None,
value_stack_limit,
call_stack_buf: None,
call_stack_limit,
}
}
/// Clears any values left on the stack to avoid
/// leaking them to future export invocations.
///
/// This is a secondary defense to prevent modules from
/// exploiting faulty stack handling in the interpreter.
///
/// Do note that there are additional channels that
/// can leak information into an untrusted module.
pub fn clear(&mut self) {
if let Some(buf) = &mut self.value_stack_buf {
for cell in buf.iter_mut() {
*cell = RuntimeValueInternal(0);
}
}
}
fn recreate_value_stack(this: &mut Option<&mut Self>) -> ValueStack {
let limit = this
.as_ref()
.map_or(DEFAULT_VALUE_STACK_LIMIT, |this| this.value_stack_limit)
/ ::core::mem::size_of::<RuntimeValueInternal>();
let buf = this
.as_mut()
.and_then(|this| this.value_stack_buf.take())
.unwrap_or_else(|| {
let mut buf = Vec::new();
buf.reserve_exact(limit);
buf.resize(limit, RuntimeValueInternal(0));
buf.into_boxed_slice()
});
ValueStack { buf, sp: 0 }
}
fn recreate_call_stack(this: &mut Option<&mut Self>) -> CallStack {
let limit = this
.as_ref()
.map_or(DEFAULT_CALL_STACK_LIMIT, |this| this.call_stack_limit);
let buf = this
.as_mut()
.and_then(|this| this.call_stack_buf.take())
.unwrap_or_default();
CallStack { buf, limit }
}
pub(crate) fn recycle(&mut self, mut interpreter: Interpreter) {
interpreter.call_stack.buf.clear();
self.value_stack_buf = Some(interpreter.value_stack.buf);
self.call_stack_buf = Some(interpreter.call_stack.buf);
}
}
impl Default for StackRecycler {
fn default() -> Self {
Self::with_limits(DEFAULT_VALUE_STACK_LIMIT, DEFAULT_CALL_STACK_LIMIT)
}
}

View File

@ -1,4 +1,6 @@
use alloc::{rc::Rc, vec::Vec};
#[allow(unused_imports)]
use alloc::prelude::v1::*;
use alloc::rc::Rc;
use core::cell::RefCell;
use core::fmt;
use core::u32;

View File

@ -285,7 +285,7 @@ fn resume_call_host_func() {
let export = instance.export_by_name("test").unwrap();
let func_instance = export.as_func().unwrap();
let mut invocation = FuncInstance::invoke_resumable(&func_instance, &[][..]).unwrap();
let mut invocation = FuncInstance::invoke_resumable(&func_instance, &[]).unwrap();
let result = invocation.start_execution(&mut env);
match result {
Err(ResumableError::Trap(_)) => {}
@ -330,7 +330,7 @@ fn resume_call_host_func_type_mismatch() {
let export = instance.export_by_name("test").unwrap();
let func_instance = export.as_func().unwrap();
let mut invocation = FuncInstance::invoke_resumable(&func_instance, &[][..]).unwrap();
let mut invocation = FuncInstance::invoke_resumable(&func_instance, &[]).unwrap();
let result = invocation.start_execution(&mut env);
match result {
Err(ResumableError::Trap(_)) => {}

View File

@ -366,17 +366,8 @@ impl WrapInto<F32> for F64 {
}
macro_rules! impl_try_truncate_into {
(@primitive $from: ident, $into: ident, $to_primitive:path) => {
($from: ident, $into: ident) => {
impl TryTruncateInto<$into, TrapKind> for $from {
#[cfg(feature = "std")]
fn try_truncate_into(self) -> Result<$into, TrapKind> {
// Casting from a float to an integer will round the float towards zero
num_rational::BigRational::from_float(self)
.map(|val| val.to_integer())
.and_then(|val| $to_primitive(&val))
.ok_or(TrapKind::InvalidConversionToInt)
}
#[cfg(not(feature = "std"))]
fn try_truncate_into(self) -> Result<$into, TrapKind> {
// Casting from a float to an integer will round the float towards zero
// NOTE: currently this will cause Undefined Behavior if the rounded value cannot be represented by the
@ -395,7 +386,7 @@ macro_rules! impl_try_truncate_into {
}
}
};
(@wrapped $from:ident, $intermediate:ident, $into:ident) => {
($from:ident, $intermediate:ident, $into:ident) => {
impl TryTruncateInto<$into, TrapKind> for $from {
fn try_truncate_into(self) -> Result<$into, TrapKind> {
$intermediate::from(self).try_truncate_into()
@ -404,22 +395,22 @@ macro_rules! impl_try_truncate_into {
};
}
impl_try_truncate_into!(@primitive f32, i32, num_traits::cast::ToPrimitive::to_i32);
impl_try_truncate_into!(@primitive f32, i64, num_traits::cast::ToPrimitive::to_i64);
impl_try_truncate_into!(@primitive f64, i32, num_traits::cast::ToPrimitive::to_i32);
impl_try_truncate_into!(@primitive f64, i64, num_traits::cast::ToPrimitive::to_i64);
impl_try_truncate_into!(@primitive f32, u32, num_traits::cast::ToPrimitive::to_u32);
impl_try_truncate_into!(@primitive f32, u64, num_traits::cast::ToPrimitive::to_u64);
impl_try_truncate_into!(@primitive f64, u32, num_traits::cast::ToPrimitive::to_u32);
impl_try_truncate_into!(@primitive f64, u64, num_traits::cast::ToPrimitive::to_u64);
impl_try_truncate_into!(@wrapped F32, f32, i32);
impl_try_truncate_into!(@wrapped F32, f32, i64);
impl_try_truncate_into!(@wrapped F64, f64, i32);
impl_try_truncate_into!(@wrapped F64, f64, i64);
impl_try_truncate_into!(@wrapped F32, f32, u32);
impl_try_truncate_into!(@wrapped F32, f32, u64);
impl_try_truncate_into!(@wrapped F64, f64, u32);
impl_try_truncate_into!(@wrapped F64, f64, u64);
impl_try_truncate_into!(f32, i32);
impl_try_truncate_into!(f32, i64);
impl_try_truncate_into!(f64, i32);
impl_try_truncate_into!(f64, i64);
impl_try_truncate_into!(f32, u32);
impl_try_truncate_into!(f32, u64);
impl_try_truncate_into!(f64, u32);
impl_try_truncate_into!(f64, u64);
impl_try_truncate_into!(F32, f32, i32);
impl_try_truncate_into!(F32, f32, i64);
impl_try_truncate_into!(F64, f64, i32);
impl_try_truncate_into!(F64, f64, i64);
impl_try_truncate_into!(F32, f32, u32);
impl_try_truncate_into!(F32, f32, u64);
impl_try_truncate_into!(F64, f64, u32);
impl_try_truncate_into!(F64, f64, u64);
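
Both macro arms implement the same Wasm rule: truncate toward zero, then trap if the result is not representable. A standalone sketch of that rule for one float/int pair, independent of the BigRational/libm plumbing used here:

```rust
// Checked f64 -> i32 truncation; the macro generates the equivalent for
// every float/int pair.
fn try_truncate_f64_to_i32(v: f64) -> Option<i32> {
    if v.is_nan() {
        return None;
    }
    let t = v.trunc(); // round toward zero, as Wasm truncation requires
    // Representable-range check; the exclusive upper bound sidesteps the
    // rounding pitfalls of comparing against `i32::max_value() as f64`.
    if t >= -2147483648.0 && t < 2147483648.0 {
        Some(t as i32)
    } else {
        None // Wasm traps with InvalidConversionToInt here
    }
}
```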
macro_rules! impl_extend_into {
($from:ident, $into:ident) => {
@ -837,6 +828,15 @@ impl_integer!(u32);
impl_integer!(i64);
impl_integer!(u64);
// Use std float functions in std environment.
// And libm's implementation in no_std
#[cfg(feature = "std")]
macro_rules! call_math {
($op:ident, $e:expr, $fXX:ident, $FXXExt:ident) => {
$fXX::$op($e)
};
}
#[cfg(not(feature = "std"))]
macro_rules! call_math {
($op:ident, $e:expr, $fXX:ident, $FXXExt:ident) => {
::libm::$FXXExt::$op($e)

test.sh (12 lines changed)
View File

@ -2,18 +2,8 @@
set -eux
EXTRA_ARGS=""
if [ -n "${TARGET-}" ]; then
# Tests built in debug mode are prohibitively
# slow when run under emulation so that
# e.g. Travis CI will hit timeouts.
EXTRA_ARGS="--release --target=${TARGET}"
export RUSTFLAGS="--cfg debug_assertions"
fi
cd $(dirname $0)
time cargo test --all ${EXTRA_ARGS}
time cargo test --all
cd -

View File

@ -18,7 +18,6 @@ fn spec_to_runtime_value(val: Value<u32, u64>) -> RuntimeValue {
Value::I64(v) => RuntimeValue::I64(v),
Value::F32(v) => RuntimeValue::F32(v.into()),
Value::F64(v) => RuntimeValue::F64(v.into()),
Value::V128(_) => panic!("v128 is not supported"),
}
}

View File

@ -1,6 +1,6 @@
[package]
name = "wasmi-validation"
version = "0.2.0"
version = "0.1.0"
authors = ["Parity Technologies <admin@parity.io>"]
edition = "2018"
license = "MIT/Apache-2.0"
@ -8,7 +8,8 @@ repository = "https://github.com/paritytech/wasmi"
description = "Wasm code validator"
[dependencies]
parity-wasm = { version = "0.40.1", default-features = false }
parity-wasm = { version = "0.31", default-features = false }
hashbrown = { version = "0.1.8", optional = true }
[dev-dependencies]
assert_matches = "1.1"
@ -16,4 +17,6 @@ assert_matches = "1.1"
[features]
default = ["std"]
std = ["parity-wasm/std"]
core = []
core = [
"hashbrown/nightly"
]

View File

@ -1,5 +1,6 @@
use crate::Error;
use alloc::vec::Vec;
#[allow(unused_imports)]
use alloc::prelude::v1::*;
use parity_wasm::elements::{
BlockType, FunctionType, GlobalType, MemoryType, TableType, ValueType,
};

View File

@ -1,3 +1,6 @@
#[allow(unused_imports)]
use alloc::prelude::v1::*;
use crate::{
context::ModuleContext, stack::StackWithLimit, util::Locals, Error, FuncValidator,
DEFAULT_MEMORY_INDEX, DEFAULT_TABLE_INDEX,
@ -266,8 +269,8 @@ impl<'a> FunctionValidationContext<'a> {
BrIf(depth) => {
self.validate_br_if(depth)?;
}
BrTable(ref br_table_data) => {
self.validate_br_table(&*br_table_data.table, br_table_data.default)?;
BrTable(ref table, default) => {
self.validate_br_table(table, default)?;
make_top_frame_polymorphic(&mut self.value_stack, &mut self.frame_stack);
}
Return => {

View File

@ -2,6 +2,8 @@
// #![warn(missing_docs)]
#![cfg_attr(not(feature = "std"), no_std)]
//// alloc is required in no_std
#![cfg_attr(not(feature = "std"), feature(alloc, alloc_prelude))]
#[cfg(not(feature = "std"))]
#[macro_use]
@ -19,15 +21,21 @@ pub const DEFAULT_TABLE_INDEX: u32 = 0;
/// Maximal number of pages that a wasm instance supports.
pub const LINEAR_MEMORY_MAX_PAGES: u32 = 65536;
use alloc::{string::String, vec::Vec};
#[allow(unused_imports)]
use alloc::prelude::v1::*;
use core::fmt;
#[cfg(feature = "std")]
use std::error;
#[cfg(not(feature = "std"))]
use hashbrown::HashSet;
#[cfg(feature = "std")]
use std::collections::HashSet;
use self::context::ModuleContextBuilder;
use parity_wasm::elements::{
BlockType, ExportEntry, External, FuncBody, GlobalEntry, GlobalType, InitExpr, Instruction,
Internal, MemoryType, Module, ResizableLimits, TableType, Type, ValueType,
BlockType, External, FuncBody, GlobalEntry, GlobalType, InitExpr, Instruction, Internal,
MemoryType, Module, ResizableLimits, TableType, Type, ValueType,
};
pub mod context;
@ -242,21 +250,13 @@ pub fn validate_module<V: Validator>(module: &Module) -> Result<V::Output, Error
// validate export section
if let Some(export_section) = module.export_section() {
let mut export_names = export_section
.entries()
.iter()
.map(ExportEntry::field)
.collect::<Vec<_>>();
export_names.sort_unstable();
for (fst, snd) in export_names.iter().zip(export_names.iter().skip(1)) {
if fst == snd {
return Err(Error(format!("duplicate export {}", fst)));
}
}
let mut export_names = HashSet::with_capacity(export_section.entries().len());
for export in export_section.entries() {
// HashSet::insert returns false if item already in set.
let duplicate = export_names.insert(export.field()) == false;
if duplicate {
return Err(Error(format!("duplicate export {}", export.field())));
}
match *export.internal() {
Internal::Function(function_index) => {
context.require_function(function_index)?;
@ -319,11 +319,7 @@ pub fn validate_module<V: Validator>(module: &Module) -> Result<V::Output, Error
if let Some(data_section) = module.data_section() {
for data_segment in data_section.entries() {
context.require_memory(data_segment.index())?;
let offset = data_segment
.offset()
.as_ref()
.ok_or_else(|| Error("passive memory segments are not supported".into()))?;
let init_ty = expr_const_type(&offset, context.globals())?;
let init_ty = expr_const_type(data_segment.offset(), context.globals())?;
if init_ty != ValueType::I32 {
return Err(Error("segment offset should return I32".into()));
}
@ -334,11 +330,8 @@ pub fn validate_module<V: Validator>(module: &Module) -> Result<V::Output, Error
if let Some(element_section) = module.elements_section() {
for element_segment in element_section.entries() {
context.require_table(element_segment.index())?;
let offset = element_segment
.offset()
.as_ref()
.ok_or_else(|| Error("passive element segments are not supported".into()))?;
let init_ty = expr_const_type(&offset, context.globals())?;
let init_ty = expr_const_type(element_segment.offset(), context.globals())?;
if init_ty != ValueType::I32 {
return Err(Error("segment offset should return I32".into()));
}
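
The duplicate-export check a few hunks up hinges on `HashSet::insert` returning `false` for an already-present value, trading against the sort-then-scan-neighbours approach on the other side of the diff. The trick in isolation:

```rust
use std::collections::HashSet;

// `insert` reports whether the value was newly added; the first `false`
// marks the first duplicate.
fn first_duplicate<'a>(names: &[&'a str]) -> Option<&'a str> {
    let mut seen = HashSet::with_capacity(names.len());
    names.iter().find(|name| !seen.insert(**name)).cloned()
}

fn main() {
    assert_eq!(first_duplicate(&["a", "b", "a"]), Some("a"));
}
```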

View File

@ -1,4 +1,5 @@
use alloc::{string::String, vec::Vec};
#[allow(unused_imports)]
use alloc::prelude::v1::*;
use core::fmt;
#[cfg(feature = "std")]

View File

@ -1,5 +1,6 @@
use crate::Error;
use alloc::string::String;
#[allow(unused_imports)]
use alloc::prelude::v1::*;
use parity_wasm::elements::{Local, ValueType};
#[cfg(test)]