Compare commits

...

20 Commits

Author SHA1 Message Date
Cadey Ratio 0b6dd64219 fix no_std building 2019-10-24 10:29:04 -04:00
adam-rhebo d2ea44e37c Avoid temporary allocations during function context initialization (#217)
* Avoid temporary allocation when pushing locals during function invocation.

* Extend value stack for all locals at once.
2019-10-22 17:23:25 +02:00
Sergei Pepyakin f19e1c27fc Fix tiny_keccak (#215) 2019-09-28 19:05:17 +02:00
Sergei Pepyakin 59ab1c8d78 Don't use `cache: cargo` in Travis CI's config (#213) 2019-09-26 14:13:29 +02:00
Sergei Pepyakin e6bdaf76f6 Bump wabt up to 0.9. (#212) 2019-09-26 13:18:57 +02:00
Pierre Krieger 390f4b2c4a Use a Cow for the resumable parameters (#210)
* Use a Cow for the resumable parameters

* Try fixing tests
2019-09-09 12:34:49 +02:00
Sergei Pepyakin 08c09adbf2 Bump wasmi-validation (#209) 2019-09-05 23:49:30 +02:00
Sergei Pepyakin 990e6698cb Bump wasmi (#208) 2019-09-05 23:26:48 +02:00
DemiMarie-parity 7b1e5820c3 Update parity-wasm (#207) 2019-09-05 22:59:10 +02:00
thiolliere 9d998c7289 Update README.md (#205) 2019-08-27 22:20:17 +02:00
Sergei Pepyakin b1ea069c4a Update parity-wasm (#198) 2019-07-17 14:24:36 +03:00
Sergei Pepyakin b67af25899 Apply cargo-fix on wasmi (#191)
* cargo-fix wasmi

* fmt

* Remove allow_failures

* Add dyn in benches

* Fix nightly
2019-07-10 17:45:06 +03:00
NikVolf 57cc6c6a3d bump to 0.5 2019-07-09 18:45:31 +03:00
Sergey Pepyakin 1bf3cbe5d0 Bump version to 0.4.6. 2019-07-09 15:55:24 +02:00
Sergei Pepyakin 1a6e5b30de Use mmap for allocation (#190) 2019-07-08 13:53:04 +02:00
adam-rhebo f29f301e6e Make clearing value stack between export invocations optional (#188)
This avoids the main overhead of repeated export invocations by making
it optional to clear the value stack after each interpreter run.

This is especially useful if different exports of the same module are
invoked repeatedly, so that no unintended information leaks are possible.
2019-06-21 11:03:58 +02:00
adam-rhebo 7fe6ef4e35 Add ARMv7 as CI test target (#186)
* Add ARMv7 as CI test target

* Avoid UB in conversions from floating point

When truncating floating point values to integer values, we need to
avoid undefined behavior if the argument does not fit into the target
type, which is currently impossible using casts of primitive types.

Hence, this reimplements those conversions using arbitrary precision
integers and rationals from the num crate.
2019-06-18 15:06:16 +02:00
adam-rhebo 8dac328ea7 Remove hashbrown and use BTree{Map,Set} from the alloc crate (#187)
* Remove hashbrown and use BTree{Map,Set} from the alloc crate

wasmi-validation must handle untrusted input and hence we switch from
Hash{Set,Map} (whether std's or hashbrown's) to BTree{Set,Map} to avoid
algorithmic complexity attacks while retaining no_std support.

Closes #183

* Improve memory locality of checking for duplicate exports

Using a sorted slice gives us the same O(N log N) worst case execution
time as using a BTreeMap, but using a single allocation as with HashMap,
so that we should see better memory locality and hence better constant
factors when checking for duplicate exports.
2019-06-12 11:30:10 +02:00
adam-rhebo 284c907b29 Recycle value stacks to avoid allocation costs (#184) 2019-06-12 10:51:04 +02:00
Björn Wagner 2520bfc5a8 fix typo 2019-06-07 14:56:19 +08:00
33 changed files with 701 additions and 302 deletions

.cargo/config (new file)

@ -0,0 +1,2 @@
[target.armv7-unknown-linux-gnueabihf]
linker = "arm-linux-gnueabihf-gcc"

.gitignore

@ -3,3 +3,4 @@
**/*.rs.bk
Cargo.lock
spec/target
.idea

.travis.yml

@ -1,25 +1,24 @@
dist: trusty
sudo: required
dist: xenial
language:
- rust
- cpp
rust:
- nightly
- stable
addons:
apt:
sources:
- ubuntu-toolchain-r-test
packages:
- gcc-8
- g++-8
- cmake
env:
- CC=/usr/bin/gcc-8 CXX=/usr/bin/g++-8
matrix:
fast_finish: true
include:
- rust: nightly
- rust: stable
- rust: stable
env: TARGET=armv7-unknown-linux-gnueabihf
install:
- if [ "$TRAVIS_RUST_VERSION" == "nightly" ]; then rustup target add wasm32-unknown-unknown; fi
- if [ -n "$TARGET" ]; then rustup target add "$TARGET" && sudo apt-get install --yes qemu-user-static; fi
- if [ "$TARGET" == "armv7-unknown-linux-gnueabihf" ]; then sudo apt-get install --yes crossbuild-essential-armhf && export QEMU_LD_PREFIX=/usr/arm-linux-gnueabihf; fi
- rustup component add rustfmt
- sudo apt-get install --yes cmake
script:
- cargo fmt --all -- --check
# Make sure nightly targets are not broken.
@ -27,8 +26,11 @@ script:
- if [ "$TRAVIS_RUST_VERSION" == "nightly" ]; then cargo check --benches --manifest-path=benches/Cargo.toml; fi
# Make sure `no_std` version checks.
- if [ "$TRAVIS_RUST_VERSION" == "nightly" ]; then cargo +nightly check --no-default-features --features core; fi
- ./test.sh
# Check that `vec_memory` feature works.
- cargo check --features vec_memory
- travis_wait 60 ./test.sh
- ./doc.sh
after_success: |
# Build documentation and deploy it to github pages.
[ $TRAVIS_BRANCH = master ] &&
@ -37,7 +39,18 @@ after_success: |
sudo pip install ghp-import &&
ghp-import -n target/doc &&
git push -fq https://${GH_TOKEN}@github.com/${TRAVIS_REPO_SLUG}.git gh-pages
cache: cargo
cache:
# Don't use `cache: cargo` since it adds the `target` directory and that can be huge.
# Saving and loading this directory dwarfs actual compilation and test times. More
# importantly, Travis times out the build since the job doesn't produce any output for more
# than 10 minutes.
#
# So we just cache the ~/.cargo directory
directories:
- /home/travis/.cargo
before_cache:
# Travis can't cache files that are not readable by "others"
- chmod -R a+r $HOME/.cargo
# According to the Travis CI docs for building Rust projects, this is done by:
- rm -rf /home/travis/.cargo/registry

Cargo.toml

@ -1,6 +1,6 @@
[package]
name = "wasmi"
version = "0.4.5"
version = "0.5.1"
authors = ["Nikolay Volf <nikvolf@gmail.com>", "Svyatoslav Nikolsky <svyatonik@yandex.ru>", "Sergey Pepyakin <s.pepyakin@gmail.com>"]
license = "MIT/Apache-2.0"
readme = "README.md"
@ -11,16 +11,17 @@ keywords = ["wasm", "webassembly", "bytecode", "interpreter"]
exclude = [ "/res/*", "/tests/*", "/fuzz/*", "/benches/*" ]
[dependencies]
wasmi-validation = { version = "0.1", path = "validation", default-features = false }
parity-wasm = { version = "0.31", default-features = false }
hashbrown = { version = "0.1.8", optional = true }
wasmi-validation = { version = "0.2", path = "validation", default-features = false }
parity-wasm = { version = "0.40.1", default-features = false }
memory_units = "0.3.0"
libm = { version = "0.1.2", optional = true }
num-rational = { version = "0.2.2", default-features = false }
num-traits = { version = "0.2.8", default-features = false }
[dev-dependencies]
assert_matches = "1.1"
rand = "0.4.2"
wabt = "0.6"
wabt = "0.9"
[features]
default = ["std"]
@ -28,14 +29,23 @@ default = ["std"]
std = [
"parity-wasm/std",
"wasmi-validation/std",
"num-rational/std",
"num-rational/bigint-std",
"num-traits/std"
]
# Enable for no_std support
# hashbrown only works on no_std
core = [
# `core` doesn't support the mmap-based memory, so force `vec_memory`
"vec_memory",
"wasmi-validation/core",
"hashbrown/nightly",
"libm"
]
# Enforce using the linear memory implementation based on `Vec` instead of
# mmap on unix systems.
#
# Useful for tests and if you need to minimize unsafe usage at the cost of performance on some
# workloads.
vec_memory = []
[workspace]
members = ["validation"]

README.md

@ -5,7 +5,7 @@
`wasmi` - a Wasm interpreter.
`wasmi` was conceived as a component of [parity-ethereum](https://github.com/paritytech/parity-ethereum) (ethereum-like contracts in wasm) and [substrate](https://github.com/paritytech/substrate). These projects are related to blockchain and require a high degree of correctness, even if that might be over conservative. This specifically means that we are not trying to be involved in any implementation of any of work-in-progress Wasm proposals. We are also trying to be as close as possible to the spec, which means we are trying to avoid features that is not directly supported by the spec. This means that it is flexible on the one hand and on the other hand there shouldn't be a problem migrating to another spec compilant execution engine.
`wasmi` was conceived as a component of [parity-ethereum](https://github.com/paritytech/parity-ethereum) (ethereum-like contracts in wasm) and [substrate](https://github.com/paritytech/substrate). These projects are related to blockchain and require a high degree of correctness, even if that might be over conservative. This specifically means that we are not trying to be involved in any implementation of any of work-in-progress Wasm proposals. We are also trying to be as close as possible to the spec, which means we are trying to avoid features that is not directly supported by the spec. This means that it is flexible on the one hand and on the other hand there shouldn't be a problem migrating to another spec compliant execution engine.
With all that said, `wasmi` should be a good option for initial prototyping.
@ -26,8 +26,8 @@ This crate supports `no_std` environments.
Enable the `core` feature and disable default features:
```toml
[dependencies]
parity-wasm = {
version = "0.31",
wasmi = {
version = "*",
default-features = false,
features = ["core"]
}

benches/Cargo.toml

@ -6,7 +6,7 @@ authors = ["Sergey Pepyakin <s.pepyakin@gmail.com>"]
[dependencies]
wasmi = { path = ".." }
assert_matches = "1.2"
wabt = "0.6"
wabt = "0.9"
[profile.bench]
debug = true

benches/src/lib.rs

@ -13,7 +13,7 @@ use wasmi::{ImportsBuilder, Module, ModuleInstance, NopExternals, RuntimeValue};
use test::Bencher;
// Load a module from a file.
fn load_from_file(filename: &str) -> Result<Module, Box<error::Error>> {
fn load_from_file(filename: &str) -> Result<Module, Box<dyn error::Error>> {
use std::io::prelude::*;
let mut file = File::open(filename)?;
let mut buf = Vec::new();

benches/wasm-kernel/src/lib.rs

@ -33,7 +33,7 @@ pub extern "C" fn prepare_tiny_keccak() -> *const TinyKeccakTestData {
}
#[no_mangle]
pub extern "C" fn bench_tiny_keccak(test_data: *const TinyKeccakTestData) {
pub extern "C" fn bench_tiny_keccak(test_data: *mut TinyKeccakTestData) {
unsafe {
let mut keccak = Keccak::new_keccak256();
keccak.update((*test_data).data);

fuzz/Cargo.toml

@ -10,7 +10,7 @@ cargo-fuzz = true
[dependencies]
wasmi = { path = ".." }
wabt = "0.6.0"
wabt = "0.9"
wasmparser = "0.14.1"
tempdir = "0.3.6"

hfuzz/Cargo.toml

@ -7,4 +7,4 @@ authors = ["Sergey Pepyakin <s.pepyakin@gmail.com>"]
honggfuzz = "=0.5.9" # Strict equal since hfuzz requires dep and cmd versions to match.
wasmi = { path = ".." }
tempdir = "0.3.6"
wabt = "0.6.0"
wabt = "0.9"

src/func.rs

@ -1,12 +1,14 @@
#[allow(unused_imports)]
use alloc::prelude::v1::*;
use alloc::rc::{Rc, Weak};
use alloc::{
borrow::Cow,
rc::{Rc, Weak},
vec::Vec,
};
use core::fmt;
use host::Externals;
use isa;
use module::ModuleInstance;
use parity_wasm::elements::Local;
use runner::{check_function_args, Interpreter, InterpreterState};
use runner::{check_function_args, Interpreter, InterpreterState, StackRecycler};
use types::ValueType;
use value::RuntimeValue;
use {Signature, Trap};
@ -140,7 +142,7 @@ impl FuncInstance {
check_function_args(func.signature(), &args)?;
match *func.as_internal() {
FuncInstanceInternal::Internal { .. } => {
let mut interpreter = Interpreter::new(func, args)?;
let mut interpreter = Interpreter::new(func, args, None)?;
interpreter.start_execution(externals)
}
FuncInstanceInternal::Host {
@ -150,6 +152,34 @@ impl FuncInstance {
}
}
/// Invoke this function using recycled stacks.
///
/// # Errors
///
/// Same as [`invoke`].
///
/// [`invoke`]: #method.invoke
pub fn invoke_with_stack<E: Externals>(
func: &FuncRef,
args: &[RuntimeValue],
externals: &mut E,
stack_recycler: &mut StackRecycler,
) -> Result<Option<RuntimeValue>, Trap> {
check_function_args(func.signature(), &args)?;
match *func.as_internal() {
FuncInstanceInternal::Internal { .. } => {
let mut interpreter = Interpreter::new(func, args, Some(stack_recycler))?;
let return_value = interpreter.start_execution(externals);
stack_recycler.recycle(interpreter);
return_value
}
FuncInstanceInternal::Host {
ref host_func_index,
..
} => externals.invoke_index(*host_func_index, args.into()),
}
}
/// Invoke the function, get a resumable handle. This handle can then be used to [`start_execution`]. If a
/// Host trap happens, caller can use [`resume_execution`] to feed the expected return value back in, and then
/// continue the execution.
@ -166,12 +196,13 @@ impl FuncInstance {
/// [`resume_execution`]: struct.FuncInvocation.html#method.resume_execution
pub fn invoke_resumable<'args>(
func: &FuncRef,
args: &'args [RuntimeValue],
args: impl Into<Cow<'args, [RuntimeValue]>>,
) -> Result<FuncInvocation<'args>, Trap> {
let args = args.into();
check_function_args(func.signature(), &args)?;
match *func.as_internal() {
FuncInstanceInternal::Internal { .. } => {
let interpreter = Interpreter::new(func, args)?;
let interpreter = Interpreter::new(func, &*args, None)?;
Ok(FuncInvocation {
kind: FuncInvocationKind::Internal(interpreter),
})
@ -228,7 +259,7 @@ pub struct FuncInvocation<'args> {
enum FuncInvocationKind<'args> {
Internal(Interpreter),
Host {
args: &'args [RuntimeValue],
args: Cow<'args, [RuntimeValue]>,
host_func_index: usize,
finished: bool,
},
@ -275,7 +306,7 @@ impl<'args> FuncInvocation<'args> {
return Err(ResumableError::AlreadyStarted);
}
*finished = true;
Ok(externals.invoke_index(*host_func_index, args.clone().into())?)
Ok(externals.invoke_index(*host_func_index, args.as_ref().into())?)
}
}
}
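With the change above, `invoke_resumable` accepts `impl Into<Cow<'args, [RuntimeValue]>>`, so callers can pass either a borrowed slice or an owned `Vec` without an extra copy. A minimal sketch of both call styles (the `start` helper and argument values are illustrative; `func` is assumed to reference a function taking a single `i32`):

```rust
use wasmi::{FuncInstance, FuncRef, RuntimeValue, Trap};

fn start(func: &FuncRef) -> Result<(), Trap> {
    // Borrowed arguments: the `Cow` stays `Borrowed`, so no allocation happens.
    let args = [RuntimeValue::I32(1)];
    let _invocation = FuncInstance::invoke_resumable(func, &args[..])?;

    // Owned arguments: the `Cow` takes ownership, which is handy when the
    // argument list is built dynamically.
    let _invocation = FuncInstance::invoke_resumable(func, vec![RuntimeValue::I32(1)])?;
    Ok(())
}
```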

src/host.rs

@ -114,11 +114,11 @@ pub trait HostError: 'static + ::core::fmt::Display + ::core::fmt::Debug + Send
}
}
impl HostError {
impl dyn HostError {
/// Attempt to downcast this `HostError` to a concrete type by reference.
pub fn downcast_ref<T: HostError>(&self) -> Option<&T> {
if self.__private_get_type_id__() == TypeId::of::<T>() {
unsafe { Some(&*(self as *const HostError as *const T)) }
unsafe { Some(&*(self as *const dyn HostError as *const T)) }
} else {
None
}
@ -128,7 +128,7 @@ impl HostError {
/// reference.
pub fn downcast_mut<T: HostError>(&mut self) -> Option<&mut T> {
if self.__private_get_type_id__() == TypeId::of::<T>() {
unsafe { Some(&mut *(self as *mut HostError as *mut T)) }
unsafe { Some(&mut *(self as *mut dyn HostError as *mut T)) }
} else {
None
}
@ -257,5 +257,5 @@ mod tests {
}
// Tests that `HostError` trait is object safe.
fn _host_error_is_object_safe(_: &HostError) {}
fn _host_error_is_object_safe(_: &dyn HostError) {}
}
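With the inherent methods now declared on `dyn HostError`, downcasting reads the same as before, just in 2018-edition spelling. A small sketch of the usual embedder pattern (`MyError` and `code_of` are illustrative, not part of the crate):

```rust
use std::fmt;
use wasmi::HostError;

#[derive(Debug)]
struct MyError(u32);

impl fmt::Display for MyError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "my error: {}", self.0)
    }
}

// `HostError` has default methods only, so an empty impl suffices.
impl HostError for MyError {}

// Recover the concrete error type from a trait object.
fn code_of(err: &dyn HostError) -> Option<u32> {
    err.downcast_ref::<MyError>().map(|e| e.0)
}
```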

src/imports.rs

@ -1,10 +1,4 @@
#[allow(unused_imports)]
use alloc::prelude::v1::*;
#[cfg(not(feature = "std"))]
use hashbrown::HashMap;
#[cfg(feature = "std")]
use std::collections::HashMap;
use alloc::{collections::BTreeMap, string::String};
use func::FuncRef;
use global::GlobalRef;
@ -106,7 +100,7 @@ pub trait ImportResolver {
/// [`ImportResolver`]: trait.ImportResolver.html
/// [`ModuleImportResolver`]: trait.ModuleImportResolver.html
pub struct ImportsBuilder<'a> {
modules: HashMap<String, &'a ModuleImportResolver>,
modules: BTreeMap<String, &'a dyn ModuleImportResolver>,
}
impl<'a> Default for ImportsBuilder<'a> {
@ -119,7 +113,7 @@ impl<'a> ImportsBuilder<'a> {
/// Create an empty `ImportsBuilder`.
pub fn new() -> ImportsBuilder<'a> {
ImportsBuilder {
modules: HashMap::new(),
modules: BTreeMap::new(),
}
}
@ -127,7 +121,7 @@ impl<'a> ImportsBuilder<'a> {
pub fn with_resolver<N: Into<String>>(
mut self,
name: N,
resolver: &'a ModuleImportResolver,
resolver: &'a dyn ModuleImportResolver,
) -> Self {
self.modules.insert(name.into(), resolver);
self
@ -136,11 +130,15 @@ impl<'a> ImportsBuilder<'a> {
/// Register a resolver by a name.
///
/// Mutable borrowed version.
pub fn push_resolver<N: Into<String>>(&mut self, name: N, resolver: &'a ModuleImportResolver) {
pub fn push_resolver<N: Into<String>>(
&mut self,
name: N,
resolver: &'a dyn ModuleImportResolver,
) {
self.modules.insert(name.into(), resolver);
}
fn resolver(&self, name: &str) -> Option<&ModuleImportResolver> {
fn resolver(&self, name: &str) -> Option<&dyn ModuleImportResolver> {
self.modules.get(name).cloned()
}
}

src/isa.rs

@ -67,8 +67,7 @@
//! - Reserved immediates are ignored for `call_indirect`, `current_memory`, `grow_memory`.
//!
#[allow(unused_imports)]
use alloc::prelude::v1::*;
use alloc::vec::Vec;
/// Should we keep a value before "discarding" a stack frame?
///

src/lib.rs

@ -96,8 +96,6 @@
#![warn(missing_docs)]
#![cfg_attr(not(feature = "std"), no_std)]
//// alloc is required in no_std
#![cfg_attr(not(feature = "std"), feature(alloc, alloc_prelude))]
#[cfg(not(feature = "std"))]
#[macro_use]
@ -114,15 +112,16 @@ extern crate assert_matches;
#[cfg(test)]
extern crate wabt;
#[cfg(not(feature = "std"))]
extern crate hashbrown;
extern crate memory_units as memory_units_crate;
extern crate parity_wasm;
extern crate wasmi_validation as validation;
#[allow(unused_imports)]
use alloc::prelude::v1::*;
use alloc::{
boxed::Box,
string::{String, ToString},
vec::Vec,
};
use core::fmt;
#[cfg(feature = "std")]
use std::error;
@ -130,6 +129,9 @@ use std::error;
#[cfg(not(feature = "std"))]
extern crate libm;
extern crate num_rational;
extern crate num_traits;
/// Error type which can be thrown by wasm code or by host environment.
///
/// Under some conditions, wasm execution may produce a `Trap`, which immediately aborts execution.
@ -238,7 +240,7 @@ pub enum TrapKind {
/// Typically returned from an implementation of [`Externals`].
///
/// [`Externals`]: trait.Externals.html
Host(Box<host::HostError>),
Host(Box<dyn host::HostError>),
}
impl TrapKind {
@ -272,7 +274,7 @@ pub enum Error {
/// Trap.
Trap(Trap),
/// Custom embedder error.
Host(Box<host::HostError>),
Host(Box<dyn host::HostError>),
}
impl Error {
@ -284,7 +286,7 @@ impl Error {
/// [`Host`]: enum.Error.html#variant.Host
/// [`Trap`]: enum.Error.html#variant.Trap
/// [`TrapKind::Host`]: enum.TrapKind.html#variant.Host
pub fn as_host_error(&self) -> Option<&host::HostError> {
pub fn as_host_error(&self) -> Option<&dyn host::HostError> {
match *self {
Error::Host(ref host_err) => Some(&**host_err),
Error::Trap(ref trap) => match *trap.kind() {
@ -404,6 +406,7 @@ pub use self::host::{Externals, HostError, NopExternals, RuntimeArgs};
pub use self::imports::{ImportResolver, ImportsBuilder, ModuleImportResolver};
pub use self::memory::{MemoryInstance, MemoryRef, LINEAR_MEMORY_PAGE_SIZE};
pub use self::module::{ExternVal, ModuleInstance, ModuleRef, NotStartedModuleRef};
pub use self::runner::{StackRecycler, DEFAULT_CALL_STACK_LIMIT, DEFAULT_VALUE_STACK_LIMIT};
pub use self::table::{TableInstance, TableRef};
pub use self::types::{GlobalDescriptor, MemoryDescriptor, Signature, TableDescriptor, ValueType};
pub use self::value::{Error as ValueError, FromRuntimeValue, LittleEndianConvert, RuntimeValue};

src/memory/mmap_bytebuf.rs (new file)

@ -0,0 +1,189 @@
//! An implementation of a `ByteBuf` based on virtual memory.
//!
//! This implementation uses `mmap` on POSIX systems (and should use `VirtualAlloc` on Windows).
//! Performance of the reallocating case could be improved by reserving memory up to the
//! maximum, but that might be a problem for systems that don't have a lot of virtual
//! memory (i.e. 32-bit platforms).
use std::ptr::{self, NonNull};
use std::slice;
struct Mmap {
/// The pointer that points to the start of the mapping.
///
/// This value doesn't change after creation.
ptr: NonNull<u8>,
/// The length of this mapping.
///
/// Cannot be more than `isize::max_value()`. This value doesn't change after creation.
len: usize,
}
impl Mmap {
/// Create a new mmap mapping.
///
/// Returns `Err` if:
/// - `len` exceeds `isize::max_value()`,
/// - `len` is 0,
/// - `mmap` returns an error (which almost certainly means out of memory).
fn new(len: usize) -> Result<Self, &'static str> {
if len > isize::max_value() as usize {
return Err("`len` should not exceed `isize::max_value()`");
}
if len == 0 {
return Err("`len` should be greater than 0");
}
let ptr_or_err = unsafe {
// Safety Proof:
// No specific safety preconditions are required for this call, since the call
// by itself can't cause any safety problems (however, misusing its result can).
libc::mmap(
// `addr` - let the system choose the address at which to create the mapping.
ptr::null_mut(),
// the length of the mapping in bytes.
len,
// `prot` - protection flags: READ WRITE !EXECUTE
libc::PROT_READ | libc::PROT_WRITE,
// `flags`
// `MAP_ANON` - mapping is not backed by any file and initial contents are
// initialized to zero.
// `MAP_PRIVATE` - the mapping is private to this process.
libc::MAP_ANON | libc::MAP_PRIVATE,
// `fildes` - a file descriptor. Pass -1 as this is required for some platforms
// when the `MAP_ANON` is passed.
-1,
// `offset` - offset from the file.
0,
)
};
match ptr_or_err {
// With the current parameters, the error can only be returned in case of insufficient
// memory.
libc::MAP_FAILED => Err("mmap returned an error"),
_ => {
let ptr = NonNull::new(ptr_or_err as *mut u8).ok_or("mmap returned 0")?;
Ok(Self { ptr, len })
}
}
}
fn as_slice(&self) -> &[u8] {
unsafe {
// Safety Proof:
// - Aliasing guarantees of `self.ptr` are not violated since `self` is the only owner.
// - This pointer was allocated for `self.len` bytes and thus is a valid slice.
// - `self.len` doesn't change throughout the lifetime of `self`.
// - The value is returned valid for the duration of lifetime of `self`.
// `self` cannot be destroyed while the returned slice is alive.
// - `self.ptr` is of `NonNull` type and thus `.as_ptr()` can never return NULL.
// - `self.len` cannot be larger than `isize::max_value()`.
slice::from_raw_parts(self.ptr.as_ptr(), self.len)
}
}
fn as_slice_mut(&mut self) -> &mut [u8] {
unsafe {
// Safety Proof:
// - See the proof for `Self::as_slice`
// - Additionally, it is not possible to obtain two mutable references for `self.ptr`
slice::from_raw_parts_mut(self.ptr.as_ptr(), self.len)
}
}
}
impl Drop for Mmap {
fn drop(&mut self) {
let ret_val = unsafe {
// Safety proof:
// - `self.ptr` was allocated by a call to `mmap`.
// - `self.len` was saved at the same time and it doesn't change throughout the lifetime
// of `self`.
libc::munmap(self.ptr.as_ptr() as *mut libc::c_void, self.len)
};
// There is no reason for `munmap` to fail to deallocate a private anonymous mapping
// allocated by `mmap`.
// However, if it does fail, prefer to panic in order not to leak
// and exhaust the virtual memory.
assert_eq!(ret_val, 0, "munmap failed");
}
}
pub struct ByteBuf {
mmap: Option<Mmap>,
}
impl ByteBuf {
pub fn new(len: usize) -> Result<Self, &'static str> {
let mmap = if len == 0 {
None
} else {
Some(Mmap::new(len)?)
};
Ok(Self { mmap })
}
pub fn realloc(&mut self, new_len: usize) -> Result<(), &'static str> {
let new_mmap = if new_len == 0 {
None
} else {
let mut new_mmap = Mmap::new(new_len)?;
if let Some(cur_mmap) = self.mmap.take() {
let src = cur_mmap.as_slice();
let dst = new_mmap.as_slice_mut();
let amount = src.len().min(dst.len());
dst[..amount].copy_from_slice(&src[..amount]);
}
Some(new_mmap)
};
self.mmap = new_mmap;
Ok(())
}
pub fn len(&self) -> usize {
self.mmap.as_ref().map(|m| m.len).unwrap_or(0)
}
pub fn as_slice(&self) -> &[u8] {
self.mmap.as_ref().map(|m| m.as_slice()).unwrap_or(&[])
}
pub fn as_slice_mut(&mut self) -> &mut [u8] {
self.mmap
.as_mut()
.map(|m| m.as_slice_mut())
.unwrap_or(&mut [])
}
pub fn erase(&mut self) -> Result<(), &'static str> {
let len = self.len();
if len > 0 {
// The order is important.
//
// 1. First we clear, and thus drop, the current mmap if any.
// 2. And then we create a new one.
//
// Otherwise we double the peak memory consumption.
self.mmap = None;
self.mmap = Some(Mmap::new(len)?);
}
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::ByteBuf;
const PAGE_SIZE: usize = 4096;
// This is not required since wasm memories can only grow, but it is nice to have.
#[test]
fn byte_buf_shrink() {
let mut byte_buf = ByteBuf::new(PAGE_SIZE * 3).unwrap();
byte_buf.realloc(PAGE_SIZE * 2).unwrap();
}
}
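`ByteBuf` is crate-internal, so a usage sketch only makes sense as another test in the same `tests` module. A hypothetical companion to `byte_buf_shrink`, spelling out the contract the interpreter relies on (reallocation preserves contents; fresh and erased memory reads as zero):

```rust
#[test]
fn byte_buf_grow_preserves_contents_and_zeroes() {
    let mut byte_buf = ByteBuf::new(PAGE_SIZE).unwrap();
    byte_buf.as_slice_mut()[0] = 42;

    // Growing maps a fresh zero-filled region and copies the old bytes over.
    byte_buf.realloc(PAGE_SIZE * 2).unwrap();
    assert_eq!(byte_buf.as_slice()[0], 42);
    assert_eq!(byte_buf.as_slice()[PAGE_SIZE], 0);
    assert_eq!(byte_buf.len(), PAGE_SIZE * 2);

    // `erase` recreates the mapping, zeroing every byte and keeping the length.
    byte_buf.erase().unwrap();
    assert_eq!(byte_buf.as_slice()[0], 0);
}
```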

src/memory/mod.rs

@ -1,6 +1,4 @@
#[allow(unused_imports)]
use alloc::prelude::v1::*;
use alloc::rc::Rc;
use alloc::{rc::Rc, string::ToString, vec::Vec};
use core::{
cell::{Cell, RefCell},
cmp, fmt,
@ -12,6 +10,16 @@ use parity_wasm::elements::ResizableLimits;
use value::LittleEndianConvert;
use Error;
#[cfg(all(unix, not(feature = "vec_memory")))]
#[path = "mmap_bytebuf.rs"]
mod bytebuf;
#[cfg(any(not(unix), feature = "vec_memory"))]
#[path = "vec_bytebuf.rs"]
mod bytebuf;
use self::bytebuf::ByteBuf;
/// Size of a page of [linear memory][`MemoryInstance`] - 64KiB.
///
/// The size of a memory is always an integer multiple of a page size.
@ -52,11 +60,10 @@ pub struct MemoryInstance {
/// Memory limits.
limits: ResizableLimits,
/// Linear memory buffer with lazy allocation.
buffer: RefCell<Vec<u8>>,
buffer: RefCell<ByteBuf>,
initial: Pages,
current_size: Cell<usize>,
maximum: Option<Pages>,
lowest_used: Cell<u32>,
}
impl fmt::Debug for MemoryInstance {
@ -126,23 +133,24 @@ impl MemoryInstance {
validation::validate_memory(initial_u32, maximum_u32).map_err(Error::Memory)?;
}
let memory = MemoryInstance::new(initial, maximum);
let memory = MemoryInstance::new(initial, maximum)?;
Ok(MemoryRef(Rc::new(memory)))
}
/// Create new linear memory instance.
fn new(initial: Pages, maximum: Option<Pages>) -> Self {
fn new(initial: Pages, maximum: Option<Pages>) -> Result<Self, Error> {
let limits = ResizableLimits::new(initial.0 as u32, maximum.map(|p| p.0 as u32));
let initial_size: Bytes = initial.into();
MemoryInstance {
Ok(MemoryInstance {
limits: limits,
buffer: RefCell::new(Vec::with_capacity(4096)),
buffer: RefCell::new(
ByteBuf::new(initial_size.0).map_err(|err| Error::Memory(err.to_string()))?,
),
initial: initial,
current_size: Cell::new(initial_size.0),
maximum: maximum,
lowest_used: Cell::new(u32::max_value()),
}
})
}
/// Return linear memory limits.
@ -163,16 +171,6 @@ impl MemoryInstance {
self.maximum
}
/// Returns lowest offset ever written or `u32::max_value()` if none.
pub fn lowest_used(&self) -> u32 {
self.lowest_used.get()
}
/// Resets tracked lowest offset.
pub fn reset_lowest_used(&self, addr: u32) {
self.lowest_used.set(addr)
}
/// Returns current linear memory size.
///
/// Maximum memory size cannot exceed `65536` pages or 4GiB.
@ -193,13 +191,7 @@ impl MemoryInstance {
/// );
/// ```
pub fn current_size(&self) -> Pages {
Bytes(self.current_size.get()).round_up_to()
}
/// Returns current used memory size in bytes.
/// This is one more than the highest memory address that had been written to.
pub fn used_size(&self) -> Bytes {
Bytes(self.buffer.borrow().len())
Bytes(self.buffer.borrow().len()).round_up_to()
}
/// Get value from memory at given offset.
@ -207,7 +199,10 @@ impl MemoryInstance {
let mut buffer = self.buffer.borrow_mut();
let region =
self.checked_region(&mut buffer, offset as usize, ::core::mem::size_of::<T>())?;
Ok(T::from_little_endian(&buffer[region.range()]).expect("Slice size is checked"))
Ok(
T::from_little_endian(&buffer.as_slice_mut()[region.range()])
.expect("Slice size is checked"),
)
}
/// Copy data from memory at given offset.
@ -220,7 +215,7 @@ impl MemoryInstance {
let mut buffer = self.buffer.borrow_mut();
let region = self.checked_region(&mut buffer, offset as usize, size)?;
Ok(buffer[region.range()].to_vec())
Ok(buffer.as_slice_mut()[region.range()].to_vec())
}
/// Copy data from given offset in the memory into `target` slice.
@ -232,7 +227,7 @@ impl MemoryInstance {
let mut buffer = self.buffer.borrow_mut();
let region = self.checked_region(&mut buffer, offset as usize, target.len())?;
target.copy_from_slice(&buffer[region.range()]);
target.copy_from_slice(&buffer.as_slice_mut()[region.range()]);
Ok(())
}
@ -244,10 +239,7 @@ impl MemoryInstance {
.checked_region(&mut buffer, offset as usize, value.len())?
.range();
if offset < self.lowest_used.get() {
self.lowest_used.set(offset);
}
buffer[range].copy_from_slice(value);
buffer.as_slice_mut()[range].copy_from_slice(value);
Ok(())
}
@ -258,10 +250,7 @@ impl MemoryInstance {
let range = self
.checked_region(&mut buffer, offset as usize, ::core::mem::size_of::<T>())?
.range();
if offset < self.lowest_used.get() {
self.lowest_used.set(offset);
}
value.into_little_endian(&mut buffer[range]);
value.into_little_endian(&mut buffer.as_slice_mut()[range]);
Ok(())
}
@ -295,19 +284,22 @@ impl MemoryInstance {
}
let new_buffer_length: Bytes = new_size.into();
self.buffer
.borrow_mut()
.realloc(new_buffer_length.0)
.map_err(|err| Error::Memory(err.to_string()))?;
self.current_size.set(new_buffer_length.0);
Ok(size_before_grow)
}
fn checked_region<B>(
fn checked_region(
&self,
buffer: &mut B,
buffer: &mut ByteBuf,
offset: usize,
size: usize,
) -> Result<CheckedRegion, Error>
where
B: ::core::ops::DerefMut<Target = Vec<u8>>,
{
) -> Result<CheckedRegion, Error> {
let end = offset.checked_add(size).ok_or_else(|| {
Error::Memory(format!(
"trying to access memory block of size {} from offset {}",
@ -315,10 +307,6 @@ impl MemoryInstance {
))
})?;
if end <= self.current_size.get() && buffer.len() < end {
buffer.resize(end, 0);
}
if end > buffer.len() {
return Err(Error::Memory(format!(
"trying to access region [{}..{}] in memory [0..{}]",
@ -334,17 +322,14 @@ impl MemoryInstance {
})
}
fn checked_region_pair<B>(
fn checked_region_pair(
&self,
buffer: &mut B,
buffer: &mut ByteBuf,
offset1: usize,
size1: usize,
offset2: usize,
size2: usize,
) -> Result<(CheckedRegion, CheckedRegion), Error>
where
B: ::core::ops::DerefMut<Target = Vec<u8>>,
{
) -> Result<(CheckedRegion, CheckedRegion), Error> {
let end1 = offset1.checked_add(size1).ok_or_else(|| {
Error::Memory(format!(
"trying to access memory block of size {} from offset {}",
@ -359,11 +344,6 @@ impl MemoryInstance {
))
})?;
let max = cmp::max(end1, end2);
if max <= self.current_size.get() && buffer.len() < max {
buffer.resize(max, 0);
}
if end1 > buffer.len() {
return Err(Error::Memory(format!(
"trying to access region [{}..{}] in memory [0..{}]",
@ -407,14 +387,10 @@ impl MemoryInstance {
let (read_region, write_region) =
self.checked_region_pair(&mut buffer, src_offset, len, dst_offset, len)?;
if dst_offset < self.lowest_used.get() as usize {
self.lowest_used.set(dst_offset as u32);
}
unsafe {
::core::ptr::copy(
buffer[read_region.range()].as_ptr(),
buffer[write_region.range()].as_mut_ptr(),
buffer.as_slice()[read_region.range()].as_ptr(),
buffer.as_slice_mut()[write_region.range()].as_mut_ptr(),
len,
)
}
@ -450,14 +426,10 @@ impl MemoryInstance {
)));
}
if dst_offset < self.lowest_used.get() as usize {
self.lowest_used.set(dst_offset as u32);
}
unsafe {
::core::ptr::copy_nonoverlapping(
buffer[read_region.range()].as_ptr(),
buffer[write_region.range()].as_mut_ptr(),
buffer.as_slice()[read_region.range()].as_ptr(),
buffer.as_slice_mut()[write_region.range()].as_mut_ptr(),
len,
)
}
@ -493,11 +465,7 @@ impl MemoryInstance {
.checked_region(&mut dst_buffer, dst_offset, len)?
.range();
if dst_offset < dst.lowest_used.get() as usize {
dst.lowest_used.set(dst_offset as u32);
}
dst_buffer[dst_range].copy_from_slice(&src_buffer[src_range]);
dst_buffer.as_slice_mut()[dst_range].copy_from_slice(&src_buffer.as_slice()[src_range]);
Ok(())
}
@ -514,11 +482,7 @@ impl MemoryInstance {
let range = self.checked_region(&mut buffer, offset, len)?.range();
if offset < self.lowest_used.get() as usize {
self.lowest_used.set(offset as u32);
}
for val in &mut buffer[range] {
for val in &mut buffer.as_slice_mut()[range] {
*val = new_val
}
Ok(())
@ -533,18 +497,28 @@ impl MemoryInstance {
self.clear(offset, 0, len)
}
/// Set every byte in the entire linear memory to 0, preserving its size.
///
/// Might be useful for some optimization shenanigans.
pub fn erase(&self) -> Result<(), Error> {
self.buffer
.borrow_mut()
.erase()
.map_err(|err| Error::Memory(err.to_string()))
}
/// Provides direct access to the underlying memory buffer.
///
/// # Panics
///
/// Any call that requires write access to memory (such as [`set`], [`clear`], etc) made within
/// the closure will panic. Note that the buffer size may be arbitraty. Proceed with caution.
/// the closure will panic.
///
/// [`set`]: #method.set
/// [`clear`]: #method.clear
pub fn with_direct_access<R, F: FnOnce(&[u8]) -> R>(&self, f: F) -> R {
let buf = self.buffer.borrow();
f(&*buf)
f(buf.as_slice())
}
/// Provides direct mutable access to the underlying memory buffer.
@ -552,15 +526,13 @@ impl MemoryInstance {
/// # Panics
///
/// Any calls that requires either read or write access to memory (such as [`get`], [`set`], [`copy`], etc) made
/// within the closure will panic. Note that the buffer size may be arbitraty.
/// The closure may however resize it. Proceed with caution.
/// within the closure will panic. Proceed with caution.
///
/// [`get`]: #method.get
/// [`set`]: #method.set
/// [`copy`]: #method.copy
pub fn with_direct_access_mut<R, F: FnOnce(&mut Vec<u8>) -> R>(&self, f: F) -> R {
pub fn with_direct_access_mut<R, F: FnOnce(&mut [u8]) -> R>(&self, f: F) -> R {
let mut buf = self.buffer.borrow_mut();
f(&mut buf)
f(buf.as_slice_mut())
}
}
@ -574,29 +546,21 @@ mod tests {
#[test]
fn alloc() {
#[cfg(target_pointer_width = "64")]
let fixtures = &[
let mut fixtures = vec![
(0, None, true),
(0, Some(0), true),
(1, None, true),
(1, Some(1), true),
(0, Some(1), true),
(1, Some(0), false),
(0, Some(65536), true),
];
#[cfg(target_pointer_width = "64")]
fixtures.extend(&[
(65536, Some(65536), true),
(65536, Some(0), false),
(65536, None, true),
];
#[cfg(target_pointer_width = "32")]
let fixtures = &[
(0, None, true),
(0, Some(0), true),
(1, None, true),
(1, Some(1), true),
(0, Some(1), true),
(1, Some(0), false),
];
]);
for (index, &(initial, maybe_max, expected_ok)) in fixtures.iter().enumerate() {
let initial: Pages = Pages(initial);
@ -618,7 +582,7 @@ mod tests {
}
fn create_memory(initial_content: &[u8]) -> MemoryInstance {
let mem = MemoryInstance::new(Pages(1), Some(Pages(1)));
let mem = MemoryInstance::new(Pages(1), Some(Pages(1))).unwrap();
mem.set(0, initial_content)
.expect("Successful initialize the memory");
mem
@ -731,7 +695,7 @@ mod tests {
#[test]
fn get_into() {
let mem = MemoryInstance::new(Pages(1), None);
let mem = MemoryInstance::new(Pages(1), None).unwrap();
mem.set(6, &[13, 17, 129])
.expect("memory set should not fail");
@ -747,11 +711,19 @@ mod tests {
let mem = MemoryInstance::alloc(Pages(1), None).unwrap();
mem.set(100, &[0]).expect("memory set should not fail");
mem.with_direct_access_mut(|buf| {
assert_eq!(buf.len(), 101);
assert_eq!(
buf.len(),
65536,
"the buffer length is expected to be 1 page long"
);
buf[..10].copy_from_slice(&[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
});
mem.with_direct_access(|buf| {
assert_eq!(buf.len(), 101);
assert_eq!(
buf.len(),
65536,
"the buffer length is expected to be 1 page long"
);
assert_eq!(&buf[..10], &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
});
}
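Since `with_direct_access_mut` now hands out a `&mut [u8]` covering the whole eagerly allocated linear memory, callers can no longer resize the buffer from inside the closure. A minimal sketch of the new shape of the API (the `fill_prefix` helper is illustrative):

```rust
use wasmi::{memory_units::Pages, MemoryInstance};

fn fill_prefix() -> Result<(), wasmi::Error> {
    let mem = MemoryInstance::alloc(Pages(1), None)?;
    mem.with_direct_access_mut(|buf| {
        // One wasm page = 64 KiB, now allocated eagerly up front.
        assert_eq!(buf.len(), 65536);
        buf[..4].copy_from_slice(&[1, 2, 3, 4]);
    });
    mem.with_direct_access(|buf| assert_eq!(&buf[..4], &[1, 2, 3, 4]));
    Ok(())
}
```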

src/memory/vec_bytebuf.rs (new file)

@ -0,0 +1,39 @@
//! An implementation of `ByteBuf` based on a plain `Vec`.
use alloc::vec::Vec;
pub struct ByteBuf {
buf: Vec<u8>,
}
impl ByteBuf {
pub fn new(len: usize) -> Result<Self, &'static str> {
let mut buf = Vec::new();
buf.resize(len, 0u8);
Ok(Self { buf })
}
pub fn realloc(&mut self, new_len: usize) -> Result<(), &'static str> {
self.buf.resize(new_len, 0u8);
Ok(())
}
pub fn len(&self) -> usize {
self.buf.len()
}
pub fn as_slice(&self) -> &[u8] {
self.buf.as_ref()
}
pub fn as_slice_mut(&mut self) -> &mut [u8] {
self.buf.as_mut()
}
pub fn erase(&mut self) -> Result<(), &'static str> {
for v in &mut self.buf {
*v = 0;
}
Ok(())
}
}

src/module.rs

@ -1,14 +1,14 @@
#[allow(unused_imports)]
use alloc::prelude::v1::*;
use alloc::rc::Rc;
use alloc::{
borrow::ToOwned,
rc::Rc,
string::{String, ToString},
vec::Vec,
};
use core::cell::RefCell;
use core::fmt;
use Trap;
#[cfg(not(feature = "std"))]
use hashbrown::HashMap;
#[cfg(feature = "std")]
use std::collections::HashMap;
use alloc::collections::BTreeMap;
use core::cell::Ref;
use func::{FuncBody, FuncInstance, FuncRef};
@ -18,6 +18,7 @@ use imports::ImportResolver;
use memory::MemoryRef;
use memory_units::Pages;
use parity_wasm::elements::{External, InitExpr, Instruction, Internal, ResizableLimits, Type};
use runner::StackRecycler;
use table::TableRef;
use types::{GlobalDescriptor, MemoryDescriptor, TableDescriptor};
use validation::{DEFAULT_MEMORY_INDEX, DEFAULT_TABLE_INDEX};
@ -161,7 +162,7 @@ pub struct ModuleInstance {
funcs: RefCell<Vec<FuncRef>>,
memories: RefCell<Vec<MemoryRef>>,
globals: RefCell<Vec<GlobalRef>>,
exports: RefCell<HashMap<String, ExternVal>>,
exports: RefCell<BTreeMap<String, ExternVal>>,
}
impl ModuleInstance {
@ -172,7 +173,7 @@ impl ModuleInstance {
tables: RefCell::new(Vec::new()),
memories: RefCell::new(Vec::new()),
globals: RefCell::new(Vec::new()),
exports: RefCell::new(HashMap::new()),
exports: RefCell::new(BTreeMap::new()),
}
}
@ -420,7 +421,11 @@ impl ModuleInstance {
.map(|es| es.entries())
.unwrap_or(&[])
{
let offset_val = match eval_init_expr(element_segment.offset(), &module_ref) {
let offset = element_segment
.offset()
.as_ref()
.expect("passive segments are rejected due to validation");
let offset_val = match eval_init_expr(offset, &module_ref) {
RuntimeValue::I32(v) => v as u32,
_ => panic!("Due to validation elem segment offset should evaluate to i32"),
};
@ -449,7 +454,11 @@ impl ModuleInstance {
}
for data_segment in module.data_section().map(|ds| ds.entries()).unwrap_or(&[]) {
let offset_val = match eval_init_expr(data_segment.offset(), &module_ref) {
let offset = data_segment
.offset()
.as_ref()
.expect("passive segments are rejected due to validation");
let offset_val = match eval_init_expr(offset, &module_ref) {
RuntimeValue::I32(v) => v as u32,
_ => panic!("Due to validation data segment offset should evaluate to i32"),
};
@ -625,21 +634,43 @@ impl ModuleInstance {
args: &[RuntimeValue],
externals: &mut E,
) -> Result<Option<RuntimeValue>, Error> {
let func_instance = self.func_by_name(func_name)?;
FuncInstance::invoke(&func_instance, args, externals).map_err(|t| Error::Trap(t))
}
/// Invoke exported function by a name using recycled stacks.
///
/// # Errors
///
/// Same as [`invoke_export`].
///
/// [`invoke_export`]: #method.invoke_export
pub fn invoke_export_with_stack<E: Externals>(
&self,
func_name: &str,
args: &[RuntimeValue],
externals: &mut E,
stack_recycler: &mut StackRecycler,
) -> Result<Option<RuntimeValue>, Error> {
let func_instance = self.func_by_name(func_name)?;
FuncInstance::invoke_with_stack(&func_instance, args, externals, stack_recycler)
.map_err(|t| Error::Trap(t))
}
fn func_by_name(&self, func_name: &str) -> Result<FuncRef, Error> {
let extern_val = self
.export_by_name(func_name)
.ok_or_else(|| Error::Function(format!("Module doesn't have export {}", func_name)))?;
let func_instance = match extern_val {
ExternVal::Func(func_instance) => func_instance,
unexpected => {
return Err(Error::Function(format!(
"Export {} is not a function, but {:?}",
func_name, unexpected
)));
}
};
FuncInstance::invoke(&func_instance, args, externals).map_err(|t| Error::Trap(t))
match extern_val {
ExternVal::Func(func_instance) => Ok(func_instance),
unexpected => Err(Error::Function(format!(
"Export {} is not a function, but {:?}",
func_name, unexpected
))),
}
}
/// Find export by a name.

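The new `invoke_export_with_stack` surfaces stack recycling at the module level: a single `StackRecycler` can serve many invocations, paying for the stack buffers only once. A sketch under the assumption that the instance exports a `step` function taking no arguments:

```rust
use wasmi::{Error, ModuleRef, NopExternals, StackRecycler};

fn run_many(instance: &ModuleRef) -> Result<(), Error> {
    let mut recycler = StackRecycler::default();
    for _ in 0..1_000 {
        // Reuses the value and call stack buffers from the previous
        // iteration instead of allocating fresh ones every time.
        instance.invoke_export_with_stack("step", &[], &mut NopExternals, &mut recycler)?;
        // Optional hardening: zero the value stack so nothing from this
        // run can leak into the next one.
        recycler.clear();
    }
    Ok(())
}
```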
src/prepare/compile.rs

@ -1,5 +1,4 @@
#[allow(unused_imports)]
use alloc::prelude::v1::*;
use alloc::{string::String, vec::Vec};
use parity_wasm::elements::{BlockType, FuncBody, Instruction};
@ -252,13 +251,14 @@ impl Compiler {
);
self.sink.emit_br_nez(target);
}
BrTable(ref table, default) => {
BrTable(ref br_table_data) => {
// At this point, the condition value is at the top of the stack.
// But at the point of actual jump the condition will already be
// popped off.
let value_stack_height = context.value_stack.len().saturating_sub(1);
let targets = table
let targets = br_table_data
.table
.iter()
.map(|depth| {
require_target(
@ -270,7 +270,7 @@ impl Compiler {
})
.collect::<Result<Vec<_>, _>>();
let default_target = require_target(
default,
br_table_data.default,
value_stack_height,
&context.frame_stack,
&self.label_stack,

src/prepare/mod.rs

@ -1,5 +1,4 @@
#[allow(unused_imports)]
use alloc::prelude::v1::*;
use alloc::vec::Vec;
use crate::{
isa,

src/runner.rs

@ -1,5 +1,4 @@
#[allow(unused_imports)]
use alloc::prelude::v1::*;
use alloc::{boxed::Box, vec::Vec};
use core::fmt;
use core::ops;
use core::{u32, usize};
@ -18,10 +17,10 @@ use value::{
};
use {Signature, Trap, TrapKind, ValueType};
/// Maximum number of entries in value stack.
pub const DEFAULT_VALUE_STACK_LIMIT: usize = (1024 * 1024) / ::core::mem::size_of::<RuntimeValue>();
/// Maximum number of bytes on the value stack.
pub const DEFAULT_VALUE_STACK_LIMIT: usize = 1024 * 1024;
// TODO: Make these parameters changeable.
/// Maximum number of levels on the call stack.
pub const DEFAULT_CALL_STACK_LIMIT: usize = 64 * 1024;
/// This is a wrapper around u64 to allow us to treat runtime values as a tag-free `u64`
@ -166,14 +165,18 @@ enum RunResult {
/// Function interpreter.
pub struct Interpreter {
value_stack: ValueStack,
call_stack: Vec<FunctionContext>,
call_stack: CallStack,
return_type: Option<ValueType>,
state: InterpreterState,
}
impl Interpreter {
pub fn new(func: &FuncRef, args: &[RuntimeValue]) -> Result<Interpreter, Trap> {
let mut value_stack = ValueStack::with_limit(DEFAULT_VALUE_STACK_LIMIT);
pub fn new(
func: &FuncRef,
args: &[RuntimeValue],
mut stack_recycler: Option<&mut StackRecycler>,
) -> Result<Interpreter, Trap> {
let mut value_stack = StackRecycler::recreate_value_stack(&mut stack_recycler);
for &arg in args {
let arg = arg.into();
value_stack.push(arg).map_err(
@ -183,7 +186,7 @@ impl Interpreter {
)?;
}
let mut call_stack = Vec::new();
let mut call_stack = StackRecycler::recreate_call_stack(&mut stack_recycler);
let initial_frame = FunctionContext::new(func.clone());
call_stack.push(initial_frame);
@ -278,14 +281,14 @@ impl Interpreter {
match function_return {
RunResult::Return => {
if self.call_stack.last().is_none() {
if self.call_stack.is_empty() {
// This was the last frame in the call stack. This means we
// are done executing.
return Ok(());
}
}
RunResult::NestedCall(nested_func) => {
if self.call_stack.len() + 1 >= DEFAULT_CALL_STACK_LIMIT {
if self.call_stack.is_full() {
return Err(TrapKind::StackOverflow.into());
}
@ -1286,14 +1289,8 @@ impl FunctionContext {
debug_assert!(!self.is_initialized);
let num_locals = locals.iter().map(|l| l.count() as usize).sum();
let locals = vec![Default::default(); num_locals];
// TODO: Replace with extend.
for local in locals {
value_stack
.push(local)
.map_err(|_| TrapKind::StackOverflow)?;
}
value_stack.extend(num_locals)?;
self.is_initialized = true;
Ok(())
@ -1363,16 +1360,6 @@ struct ValueStack {
}
impl ValueStack {
fn with_limit(limit: usize) -> ValueStack {
let mut buf = Vec::new();
buf.resize(limit, RuntimeValueInternal(0));
ValueStack {
buf: buf.into_boxed_slice(),
sp: 0,
}
}
#[inline]
fn drop_keep(&mut self, drop_keep: isa::DropKeep) {
if drop_keep.keep == isa::Keep::Single {
@ -1449,8 +1436,126 @@ impl ValueStack {
Ok(())
}
fn extend(&mut self, len: usize) -> Result<(), TrapKind> {
let cells = self
.buf
.get_mut(self.sp..self.sp + len)
.ok_or_else(|| TrapKind::StackOverflow)?;
for cell in cells {
*cell = Default::default();
}
self.sp += len;
Ok(())
}
#[inline]
fn len(&self) -> usize {
self.sp
}
}
struct CallStack {
buf: Vec<FunctionContext>,
limit: usize,
}
impl CallStack {
fn push(&mut self, ctx: FunctionContext) {
self.buf.push(ctx);
}
fn pop(&mut self) -> Option<FunctionContext> {
self.buf.pop()
}
fn is_empty(&self) -> bool {
self.buf.is_empty()
}
fn is_full(&self) -> bool {
self.buf.len() + 1 >= self.limit
}
}
/// Used to recycle stacks instead of allocating them repeatedly.
pub struct StackRecycler {
value_stack_buf: Option<Box<[RuntimeValueInternal]>>,
value_stack_limit: usize,
call_stack_buf: Option<Vec<FunctionContext>>,
call_stack_limit: usize,
}
impl StackRecycler {
/// Limit stacks created by this recycler to
/// - `value_stack_limit` bytes for values and
/// - `call_stack_limit` levels for calls.
pub fn with_limits(value_stack_limit: usize, call_stack_limit: usize) -> Self {
Self {
value_stack_buf: None,
value_stack_limit,
call_stack_buf: None,
call_stack_limit,
}
}
/// Clears any values left on the stack to avoid
/// leaking them to future export invocations.
///
/// This is a secondary defense to prevent modules from
/// exploiting faulty stack handling in the interpreter.
///
/// Do note that there are additional channels that
/// can leak information into an untrusted module.
pub fn clear(&mut self) {
if let Some(buf) = &mut self.value_stack_buf {
for cell in buf.iter_mut() {
*cell = RuntimeValueInternal(0);
}
}
}
fn recreate_value_stack(this: &mut Option<&mut Self>) -> ValueStack {
let limit = this
.as_ref()
.map_or(DEFAULT_VALUE_STACK_LIMIT, |this| this.value_stack_limit)
/ ::core::mem::size_of::<RuntimeValueInternal>();
let buf = this
.as_mut()
.and_then(|this| this.value_stack_buf.take())
.unwrap_or_else(|| {
let mut buf = Vec::new();
buf.reserve_exact(limit);
buf.resize(limit, RuntimeValueInternal(0));
buf.into_boxed_slice()
});
ValueStack { buf, sp: 0 }
}
fn recreate_call_stack(this: &mut Option<&mut Self>) -> CallStack {
let limit = this
.as_ref()
.map_or(DEFAULT_CALL_STACK_LIMIT, |this| this.call_stack_limit);
let buf = this
.as_mut()
.and_then(|this| this.call_stack_buf.take())
.unwrap_or_default();
CallStack { buf, limit }
}
pub(crate) fn recycle(&mut self, mut interpreter: Interpreter) {
interpreter.call_stack.buf.clear();
self.value_stack_buf = Some(interpreter.value_stack.buf);
self.call_stack_buf = Some(interpreter.call_stack.buf);
}
}
impl Default for StackRecycler {
fn default() -> Self {
Self::with_limits(DEFAULT_VALUE_STACK_LIMIT, DEFAULT_CALL_STACK_LIMIT)
}
}
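Note the unit change here: `DEFAULT_VALUE_STACK_LIMIT` now counts bytes rather than entries, and the division by `size_of::<RuntimeValueInternal>()` happens inside `recreate_value_stack`. A sketch of configuring a recycler with tighter limits (the numbers are illustrative):

```rust
use wasmi::StackRecycler;

fn small_stacks() -> StackRecycler {
    // 64 KiB of value stack: with 8-byte value cells that is 8192 slots.
    // 1024 levels of call stack, far below DEFAULT_CALL_STACK_LIMIT.
    StackRecycler::with_limits(64 * 1024, 1024)
}
```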

src/table.rs

@ -1,6 +1,4 @@
#[allow(unused_imports)]
use alloc::prelude::v1::*;
use alloc::rc::Rc;
use alloc::{rc::Rc, vec::Vec};
use core::cell::RefCell;
use core::fmt;
use core::u32;

src/tests/host.rs

@ -285,7 +285,7 @@ fn resume_call_host_func() {
let export = instance.export_by_name("test").unwrap();
let func_instance = export.as_func().unwrap();
let mut invocation = FuncInstance::invoke_resumable(&func_instance, &[]).unwrap();
let mut invocation = FuncInstance::invoke_resumable(&func_instance, &[][..]).unwrap();
let result = invocation.start_execution(&mut env);
match result {
Err(ResumableError::Trap(_)) => {}
@ -330,7 +330,7 @@ fn resume_call_host_func_type_mismatch() {
let export = instance.export_by_name("test").unwrap();
let func_instance = export.as_func().unwrap();
let mut invocation = FuncInstance::invoke_resumable(&func_instance, &[]).unwrap();
let mut invocation = FuncInstance::invoke_resumable(&func_instance, &[][..]).unwrap();
let result = invocation.start_execution(&mut env);
match result {
Err(ResumableError::Trap(_)) => {}

src/value.rs

@ -366,8 +366,17 @@ impl WrapInto<F32> for F64 {
}
macro_rules! impl_try_truncate_into {
($from: ident, $into: ident) => {
(@primitive $from: ident, $into: ident, $to_primitive:path) => {
impl TryTruncateInto<$into, TrapKind> for $from {
#[cfg(feature = "std")]
fn try_truncate_into(self) -> Result<$into, TrapKind> {
// Casting from a float to an integer will round the float towards zero
num_rational::BigRational::from_float(self)
.map(|val| val.to_integer())
.and_then(|val| $to_primitive(&val))
.ok_or(TrapKind::InvalidConversionToInt)
}
#[cfg(not(feature = "std"))]
fn try_truncate_into(self) -> Result<$into, TrapKind> {
// Casting from a float to an integer will round the float towards zero
// NOTE: currently this will cause Undefined Behavior if the rounded value cannot be represented by the
@ -386,7 +395,7 @@ macro_rules! impl_try_truncate_into {
}
}
};
($from:ident, $intermediate:ident, $into:ident) => {
(@wrapped $from:ident, $intermediate:ident, $into:ident) => {
impl TryTruncateInto<$into, TrapKind> for $from {
fn try_truncate_into(self) -> Result<$into, TrapKind> {
$intermediate::from(self).try_truncate_into()
@ -395,22 +404,22 @@ macro_rules! impl_try_truncate_into {
};
}
impl_try_truncate_into!(f32, i32);
impl_try_truncate_into!(f32, i64);
impl_try_truncate_into!(f64, i32);
impl_try_truncate_into!(f64, i64);
impl_try_truncate_into!(f32, u32);
impl_try_truncate_into!(f32, u64);
impl_try_truncate_into!(f64, u32);
impl_try_truncate_into!(f64, u64);
impl_try_truncate_into!(F32, f32, i32);
impl_try_truncate_into!(F32, f32, i64);
impl_try_truncate_into!(F64, f64, i32);
impl_try_truncate_into!(F64, f64, i64);
impl_try_truncate_into!(F32, f32, u32);
impl_try_truncate_into!(F32, f32, u64);
impl_try_truncate_into!(F64, f64, u32);
impl_try_truncate_into!(F64, f64, u64);
impl_try_truncate_into!(@primitive f32, i32, num_traits::cast::ToPrimitive::to_i32);
impl_try_truncate_into!(@primitive f32, i64, num_traits::cast::ToPrimitive::to_i64);
impl_try_truncate_into!(@primitive f64, i32, num_traits::cast::ToPrimitive::to_i32);
impl_try_truncate_into!(@primitive f64, i64, num_traits::cast::ToPrimitive::to_i64);
impl_try_truncate_into!(@primitive f32, u32, num_traits::cast::ToPrimitive::to_u32);
impl_try_truncate_into!(@primitive f32, u64, num_traits::cast::ToPrimitive::to_u64);
impl_try_truncate_into!(@primitive f64, u32, num_traits::cast::ToPrimitive::to_u32);
impl_try_truncate_into!(@primitive f64, u64, num_traits::cast::ToPrimitive::to_u64);
impl_try_truncate_into!(@wrapped F32, f32, i32);
impl_try_truncate_into!(@wrapped F32, f32, i64);
impl_try_truncate_into!(@wrapped F64, f64, i32);
impl_try_truncate_into!(@wrapped F64, f64, i64);
impl_try_truncate_into!(@wrapped F32, f32, u32);
impl_try_truncate_into!(@wrapped F32, f32, u64);
impl_try_truncate_into!(@wrapped F64, f64, u32);
impl_try_truncate_into!(@wrapped F64, f64, u64);
macro_rules! impl_extend_into {
($from:ident, $into:ident) => {
@ -828,15 +837,6 @@ impl_integer!(u32);
impl_integer!(i64);
impl_integer!(u64);
// Use std float functions in std environment.
// And libm's implementation in no_std
#[cfg(feature = "std")]
macro_rules! call_math {
($op:ident, $e:expr, $fXX:ident, $FXXExt:ident) => {
$fXX::$op($e)
};
}
#[cfg(not(feature = "std"))]
macro_rules! call_math {
($op:ident, $e:expr, $fXX:ident, $FXXExt:ident) => {
::libm::$FXXExt::$op($e)

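The `std` path above sidesteps the old undefined behavior by routing truncation through arbitrary-precision rationals: NaN and out-of-range inputs surface as `None` rather than hitting an unchecked `as` cast. The same technique in isolation (assuming `num-rational` 0.2 with bigint support and `num-traits` 0.2, matching the Cargo.toml changes above):

```rust
use num_rational::BigRational;
use num_traits::cast::ToPrimitive;

/// Truncate an `f64` toward zero into an `i32`, rejecting NaN and
/// out-of-range values instead of invoking undefined behavior.
fn try_truncate_f64_to_i32(x: f64) -> Option<i32> {
    BigRational::from_float(x) // `None` for NaN and infinities
        .map(|val| val.to_integer()) // rounds toward zero
        .and_then(|val| val.to_i32()) // `None` if it does not fit in an i32
}

fn main() {
    assert_eq!(try_truncate_f64_to_i32(3.9), Some(3));
    assert_eq!(try_truncate_f64_to_i32(-3.9), Some(-3));
    assert_eq!(try_truncate_f64_to_i32(1e300), None);
    assert_eq!(try_truncate_f64_to_i32(std::f64::NAN), None);
}
```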
test.sh

@ -2,8 +2,18 @@
set -eux
EXTRA_ARGS=""
if [ -n "${TARGET-}" ]; then
# Tests built in debug mode are prohibitively
# slow when run under emulation, so that
# e.g. Travis CI will hit timeouts.
EXTRA_ARGS="--release --target=${TARGET}"
export RUSTFLAGS="--cfg debug_assertions"
fi
cd $(dirname $0)
time cargo test --all
time cargo test --all ${EXTRA_ARGS}
cd -

spec/src/run.rs

@ -18,6 +18,7 @@ fn spec_to_runtime_value(val: Value<u32, u64>) -> RuntimeValue {
Value::I64(v) => RuntimeValue::I64(v),
Value::F32(v) => RuntimeValue::F32(v.into()),
Value::F64(v) => RuntimeValue::F64(v.into()),
Value::V128(_) => panic!("v128 is not supported"),
}
}

validation/Cargo.toml

@ -1,6 +1,6 @@
[package]
name = "wasmi-validation"
version = "0.1.0"
version = "0.2.0"
authors = ["Parity Technologies <admin@parity.io>"]
edition = "2018"
license = "MIT/Apache-2.0"
@ -8,8 +8,7 @@ repository = "https://github.com/paritytech/wasmi"
description = "Wasm code validator"
[dependencies]
parity-wasm = { version = "0.31", default-features = false }
hashbrown = { version = "0.1.8", optional = true }
parity-wasm = { version = "0.40.1", default-features = false }
[dev-dependencies]
assert_matches = "1.1"
@ -17,6 +16,4 @@ assert_matches = "1.1"
[features]
default = ["std"]
std = ["parity-wasm/std"]
core = [
"hashbrown/nightly"
]
core = []

validation/src/context.rs

@ -1,6 +1,5 @@
use crate::Error;
#[allow(unused_imports)]
use alloc::prelude::v1::*;
use alloc::vec::Vec;
use parity_wasm::elements::{
BlockType, FunctionType, GlobalType, MemoryType, TableType, ValueType,
};

validation/src/func.rs

@ -1,6 +1,3 @@
#[allow(unused_imports)]
use alloc::prelude::v1::*;
use crate::{
context::ModuleContext, stack::StackWithLimit, util::Locals, Error, FuncValidator,
DEFAULT_MEMORY_INDEX, DEFAULT_TABLE_INDEX,
@ -269,8 +266,8 @@ impl<'a> FunctionValidationContext<'a> {
BrIf(depth) => {
self.validate_br_if(depth)?;
}
BrTable(ref table, default) => {
self.validate_br_table(table, default)?;
BrTable(ref br_table_data) => {
self.validate_br_table(&*br_table_data.table, br_table_data.default)?;
make_top_frame_polymorphic(&mut self.value_stack, &mut self.frame_stack);
}
Return => {

validation/src/lib.rs

@ -2,8 +2,6 @@
// #![warn(missing_docs)]
#![cfg_attr(not(feature = "std"), no_std)]
//// alloc is required in no_std
#![cfg_attr(not(feature = "std"), feature(alloc, alloc_prelude))]
#[cfg(not(feature = "std"))]
#[macro_use]
@ -21,21 +19,15 @@ pub const DEFAULT_TABLE_INDEX: u32 = 0;
/// Maximal number of pages that a wasm instance supports.
pub const LINEAR_MEMORY_MAX_PAGES: u32 = 65536;
#[allow(unused_imports)]
use alloc::prelude::v1::*;
use alloc::{string::String, vec::Vec};
use core::fmt;
#[cfg(feature = "std")]
use std::error;
#[cfg(not(feature = "std"))]
use hashbrown::HashSet;
#[cfg(feature = "std")]
use std::collections::HashSet;
use self::context::ModuleContextBuilder;
use parity_wasm::elements::{
BlockType, External, FuncBody, GlobalEntry, GlobalType, InitExpr, Instruction, Internal,
MemoryType, Module, ResizableLimits, TableType, Type, ValueType,
BlockType, ExportEntry, External, FuncBody, GlobalEntry, GlobalType, InitExpr, Instruction,
Internal, MemoryType, Module, ResizableLimits, TableType, Type, ValueType,
};
pub mod context;
@ -250,13 +242,21 @@ pub fn validate_module<V: Validator>(module: &Module) -> Result<V::Output, Error
// validate export section
if let Some(export_section) = module.export_section() {
let mut export_names = HashSet::with_capacity(export_section.entries().len());
for export in export_section.entries() {
// HashSet::insert returns false if item already in set.
let duplicate = export_names.insert(export.field()) == false;
if duplicate {
return Err(Error(format!("duplicate export {}", export.field())));
let mut export_names = export_section
.entries()
.iter()
.map(ExportEntry::field)
.collect::<Vec<_>>();
export_names.sort_unstable();
for (fst, snd) in export_names.iter().zip(export_names.iter().skip(1)) {
if fst == snd {
return Err(Error(format!("duplicate export {}", fst)));
}
}
for export in export_section.entries() {
match *export.internal() {
Internal::Function(function_index) => {
context.require_function(function_index)?;
@ -319,7 +319,11 @@ pub fn validate_module<V: Validator>(module: &Module) -> Result<V::Output, Error
if let Some(data_section) = module.data_section() {
for data_segment in data_section.entries() {
context.require_memory(data_segment.index())?;
let init_ty = expr_const_type(data_segment.offset(), context.globals())?;
let offset = data_segment
.offset()
.as_ref()
.ok_or_else(|| Error("passive memory segments are not supported".into()))?;
let init_ty = expr_const_type(&offset, context.globals())?;
if init_ty != ValueType::I32 {
return Err(Error("segment offset should return I32".into()));
}
@ -330,8 +334,11 @@ pub fn validate_module<V: Validator>(module: &Module) -> Result<V::Output, Error
if let Some(element_section) = module.elements_section() {
for element_segment in element_section.entries() {
context.require_table(element_segment.index())?;
let init_ty = expr_const_type(element_segment.offset(), context.globals())?;
let offset = element_segment
.offset()
.as_ref()
.ok_or_else(|| Error("passive element segments are not supported".into()))?;
let init_ty = expr_const_type(&offset, context.globals())?;
if init_ty != ValueType::I32 {
return Err(Error("segment offset should return I32".into()));
}
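The sort-and-scan check above keeps the O(N log N) worst case of the `BTreeMap` approach while touching only one contiguous allocation. The same pattern in isolation, as a generic helper that is not part of the crate:

```rust
/// Return the first duplicated name, if any: sort a copy of the slice,
/// then compare adjacent elements.
fn find_duplicate<'a>(names: &[&'a str]) -> Option<&'a str> {
    let mut sorted = names.to_vec();
    sorted.sort_unstable();
    sorted
        .windows(2)
        .find(|pair| pair[0] == pair[1])
        .map(|pair| pair[0])
}

fn main() {
    assert_eq!(find_duplicate(&["main", "memory", "table"]), None);
    assert_eq!(find_duplicate(&["main", "memory", "main"]), Some("main"));
}
```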

validation/src/stack.rs

@ -1,5 +1,4 @@
#[allow(unused_imports)]
use alloc::prelude::v1::*;
use alloc::{string::String, vec::Vec};
use core::fmt;
#[cfg(feature = "std")]

validation/src/util.rs

@ -1,6 +1,5 @@
use crate::Error;
#[allow(unused_imports)]
use alloc::prelude::v1::*;
use alloc::string::String;
use parity_wasm::elements::{Local, ValueType};
#[cfg(test)]