Add explanation of `RuntimeValueInternal`, move it so I don't need to do `pub(crate)`

This commit is contained in:
Jef 2018-10-29 11:37:55 +01:00
parent 68e447ce28
commit 24d69b5bd6
2 changed files with 355 additions and 257 deletions

View File

@ -1,24 +1,22 @@
#[allow(unused_imports)] #[allow(unused_imports)]
use alloc::prelude::*; use alloc::prelude::*;
use common::{DEFAULT_MEMORY_INDEX, DEFAULT_TABLE_INDEX};
use core::fmt;
use core::ops; use core::ops;
use core::{u32, usize}; use core::{u32, usize};
use core::fmt; use func::{FuncInstance, FuncInstanceInternal, FuncRef};
use core::iter::repeat;
use parity_wasm::elements::Local;
use {Trap, TrapKind, Signature};
use module::ModuleRef;
use memory::MemoryRef;
use func::{FuncRef, FuncInstance, FuncInstanceInternal};
use value::{
ArithmeticOps, ExtendInto, Float, FromRuntimeValueInternal, Integer, LittleEndianConvert,
RuntimeValue, RuntimeValueInternal, TransmuteInto, TryTruncateInto, WrapInto,
};
use host::Externals; use host::Externals;
use common::{DEFAULT_MEMORY_INDEX, DEFAULT_TABLE_INDEX};
use types::ValueType;
use memory_units::Pages;
use nan_preserving_float::{F32, F64};
use isa; use isa;
use memory::MemoryRef;
use memory_units::Pages;
use module::ModuleRef;
use nan_preserving_float::{F32, F64};
use parity_wasm::elements::Local;
use value::{
ArithmeticOps, ExtendInto, Float, Integer, LittleEndianConvert, RuntimeValue, TransmuteInto,
TryTruncateInto, WrapInto,
};
use {Signature, Trap, TrapKind, ValueType};
/// Maximum number of entries in value stack. /// Maximum number of entries in value stack.
pub const DEFAULT_VALUE_STACK_LIMIT: usize = (1024 * 1024) / ::core::mem::size_of::<RuntimeValue>(); pub const DEFAULT_VALUE_STACK_LIMIT: usize = (1024 * 1024) / ::core::mem::size_of::<RuntimeValue>();
@ -26,6 +24,104 @@ pub const DEFAULT_VALUE_STACK_LIMIT: usize = (1024 * 1024) / ::core::mem::size_o
// TODO: Make these parameters changeable. // TODO: Make these parameters changeable.
pub const DEFAULT_CALL_STACK_LIMIT: usize = 64 * 1024; pub const DEFAULT_CALL_STACK_LIMIT: usize = 64 * 1024;
/// This is a wrapper around u64 to allow us to treat runtime values as a tag-free `u64`
/// (where if the runtime value is <64 bits the upper bits are 0). This is safe, since
/// all of the possible runtime values are valid to create from 64 defined bits, so if
/// types don't line up we get a logic error (which will ideally be caught by the wasm
/// spec tests) and not undefined behaviour.
///
/// At the boundary between the interpreter and the outside world we convert to the public
/// `RuntimeValue` type, which can then be matched on. We can create a `RuntimeValue` from
/// a `RuntimeValueInternal` only when the type is statically known, which it always is
/// at these boundaries.
#[derive(Copy, Clone, Debug, PartialEq, Default)]
#[repr(transparent)]
struct RuntimeValueInternal(pub u64);
impl RuntimeValueInternal {
pub fn with_type(self, ty: ValueType) -> RuntimeValue {
match ty {
ValueType::I32 => RuntimeValue::I32(<_>::from_runtime_value_internal(self)),
ValueType::I64 => RuntimeValue::I64(<_>::from_runtime_value_internal(self)),
ValueType::F32 => RuntimeValue::F32(<_>::from_runtime_value_internal(self)),
ValueType::F64 => RuntimeValue::F64(<_>::from_runtime_value_internal(self)),
}
}
}
/// Inverse of the `From<T> for RuntimeValueInternal` impls: reinterprets the
/// raw 64-bit payload as a concrete value type. The caller is responsible for
/// statically knowing which type the bits actually hold.
trait FromRuntimeValueInternal: Sized {
    fn from_runtime_value_internal(val: RuntimeValueInternal) -> Self;
}
/// Generates the integer <-> `RuntimeValueInternal` conversions: both
/// directions are plain `as` casts to/from the `u64` payload.
macro_rules! impl_from_runtime_value_internal {
    ($($int_ty:ty),*) => {
        $(
            impl FromRuntimeValueInternal for $int_ty {
                fn from_runtime_value_internal(
                    RuntimeValueInternal(raw): RuntimeValueInternal,
                ) -> Self {
                    raw as _
                }
            }

            impl From<$int_ty> for RuntimeValueInternal {
                fn from(other: $int_ty) -> Self {
                    RuntimeValueInternal(other as _)
                }
            }
        )*
    };
}
/// Generates the float <-> `RuntimeValueInternal` conversions: floats are
/// stored by their raw bit pattern (`to_bits`/`from_bits`), so the exact
/// bits — including any NaN payload — survive the round trip.
macro_rules! impl_from_runtime_value_internal_float {
    ($($float_ty:ty),*) => {
        $(
            impl FromRuntimeValueInternal for $float_ty {
                fn from_runtime_value_internal(
                    RuntimeValueInternal(raw): RuntimeValueInternal,
                ) -> Self {
                    <$float_ty>::from_bits(raw as _)
                }
            }

            impl From<$float_ty> for RuntimeValueInternal {
                fn from(other: $float_ty) -> Self {
                    RuntimeValueInternal(other.to_bits() as _)
                }
            }
        )*
    };
}
// Integer primitives round-trip through the `u64` payload with plain casts.
impl_from_runtime_value_internal!(i8, u8, i16, u16, i32, u32, i64, u64);
// Floats (including the NaN-preserving wrappers) round-trip via raw bits.
impl_from_runtime_value_internal_float!(f32, f64, F32, F64);
impl From<bool> for RuntimeValueInternal {
fn from(other: bool) -> Self {
(if other { 1 } else { 0 }).into()
}
}
impl FromRuntimeValueInternal for bool {
fn from_runtime_value_internal(RuntimeValueInternal(val): RuntimeValueInternal) -> Self {
val != 0
}
}
impl From<RuntimeValue> for RuntimeValueInternal {
fn from(other: RuntimeValue) -> Self {
match other {
RuntimeValue::I32(val) => val.into(),
RuntimeValue::I64(val) => val.into(),
RuntimeValue::F32(val) => val.into(),
RuntimeValue::F64(val) => val.into(),
}
}
}
/// Interpreter action to execute after executing instruction. /// Interpreter action to execute after executing instruction.
pub enum InstructionOutcome { pub enum InstructionOutcome {
/// Continue with next instruction. /// Continue with next instruction.
@ -105,7 +201,10 @@ impl Interpreter {
&self.state &self.state
} }
pub fn start_execution<'a, E: Externals + 'a>(&mut self, externals: &'a mut E) -> Result<Option<RuntimeValue>, Trap> { pub fn start_execution<'a, E: Externals + 'a>(
&mut self,
externals: &'a mut E,
) -> Result<Option<RuntimeValue>, Trap> {
// Ensure that the VM has not been executed. This is checked in `FuncInvocation::start_execution`. // Ensure that the VM has not been executed. This is checked in `FuncInvocation::start_execution`.
assert!(self.state == InterpreterState::Initialized); assert!(self.state == InterpreterState::Initialized);
@ -122,7 +221,11 @@ impl Interpreter {
Ok(opt_return_value) Ok(opt_return_value)
} }
pub fn resume_execution<'a, E: Externals + 'a>(&mut self, return_val: Option<RuntimeValue>, externals: &'a mut E) -> Result<Option<RuntimeValue>, Trap> { pub fn resume_execution<'a, E: Externals + 'a>(
&mut self,
return_val: Option<RuntimeValue>,
externals: &'a mut E,
) -> Result<Option<RuntimeValue>, Trap> {
use core::mem::swap; use core::mem::swap;
// Ensure that the VM is resumable. This is checked in `FuncInvocation::resume_execution`. // Ensure that the VM is resumable. This is checked in `FuncInvocation::resume_execution`.
@ -149,11 +252,14 @@ impl Interpreter {
Ok(opt_return_value) Ok(opt_return_value)
} }
fn run_interpreter_loop<'a, E: Externals + 'a>(&mut self, externals: &'a mut E) -> Result<(), Trap> { fn run_interpreter_loop<'a, E: Externals + 'a>(
&mut self,
externals: &'a mut E,
) -> Result<(), Trap> {
loop { loop {
let mut function_context = self.call_stack let mut function_context = self.call_stack.pop().expect(
.pop() "on loop entry - not empty; on loop continue - checking for emptiness; qed",
.expect("on loop entry - not empty; on loop continue - checking for emptiness; qed"); );
let function_ref = function_context.function.clone(); let function_ref = function_context.function.clone();
let function_body = function_ref let function_body = function_ref
.body() .body()
@ -166,11 +272,9 @@ impl Interpreter {
function_context.initialize(&function_body.locals, &mut self.value_stack)?; function_context.initialize(&function_body.locals, &mut self.value_stack)?;
} }
let function_return = let function_return = self
self.do_run_function( .do_run_function(&mut function_context, &function_body.code)
&mut function_context, .map_err(Trap::new)?;
&function_body.code,
).map_err(Trap::new)?;
match function_return { match function_return {
RunResult::Return => { RunResult::Return => {
@ -179,7 +283,7 @@ impl Interpreter {
// are done executing. // are done executing.
return Ok(()); return Ok(());
} }
}, }
RunResult::NestedCall(nested_func) => { RunResult::NestedCall(nested_func) => {
if self.call_stack.len() + 1 >= DEFAULT_CALL_STACK_LIMIT { if self.call_stack.len() + 1 >= DEFAULT_CALL_STACK_LIMIT {
return Err(TrapKind::StackOverflow.into()); return Err(TrapKind::StackOverflow.into());
@ -190,20 +294,23 @@ impl Interpreter {
let nested_context = FunctionContext::new(nested_func.clone()); let nested_context = FunctionContext::new(nested_func.clone());
self.call_stack.push(function_context); self.call_stack.push(function_context);
self.call_stack.push(nested_context); self.call_stack.push(nested_context);
}, }
FuncInstanceInternal::Host { ref signature, .. } => { FuncInstanceInternal::Host { ref signature, .. } => {
let args = prepare_function_args(signature, &mut self.value_stack); let args = prepare_function_args(signature, &mut self.value_stack);
// We push the function context first. If the VM is not resumable, it does no harm. If it is, we then save the context here. // We push the function context first. If the VM is not resumable, it does no harm. If it is, we then save the context here.
self.call_stack.push(function_context); self.call_stack.push(function_context);
let return_val = match FuncInstance::invoke(&nested_func, &args, externals) { let return_val =
match FuncInstance::invoke(&nested_func, &args, externals) {
Ok(val) => val, Ok(val) => val,
Err(trap) => { Err(trap) => {
if trap.kind().is_host() { if trap.kind().is_host() {
self.state = InterpreterState::Resumable(nested_func.signature().return_type()); self.state = InterpreterState::Resumable(
nested_func.signature().return_type(),
);
} }
return Err(trap); return Err(trap);
}, }
}; };
// Check if `return_val` matches the signature. // Check if `return_val` matches the signature.
@ -220,32 +327,34 @@ impl Interpreter {
} }
} }
} }
}, }
} }
} }
} }
fn do_run_function(&mut self, function_context: &mut FunctionContext, instructions: &isa::Instructions) fn do_run_function(
-> Result<RunResult, TrapKind> &mut self,
{ function_context: &mut FunctionContext,
instructions: &isa::Instructions,
) -> Result<RunResult, TrapKind> {
let mut iter = instructions.iterate_from(function_context.position); let mut iter = instructions.iterate_from(function_context.position);
loop { loop {
let instruction = iter.next().expect("instruction"); let instruction = iter.next().expect("instruction");
match self.run_instruction(function_context, instruction)? { match self.run_instruction(function_context, instruction)? {
InstructionOutcome::RunNextInstruction => {}, InstructionOutcome::RunNextInstruction => {}
InstructionOutcome::Branch(target) => { InstructionOutcome::Branch(target) => {
iter = instructions.iterate_from(target.dst_pc); iter = instructions.iterate_from(target.dst_pc);
self.value_stack.drop_keep(target.drop_keep); self.value_stack.drop_keep(target.drop_keep);
}, }
InstructionOutcome::ExecuteCall(func_ref) => { InstructionOutcome::ExecuteCall(func_ref) => {
function_context.position = iter.position(); function_context.position = iter.position();
return Ok(RunResult::NestedCall(func_ref)); return Ok(RunResult::NestedCall(func_ref));
}, }
InstructionOutcome::Return(drop_keep) => { InstructionOutcome::Return(drop_keep) => {
self.value_stack.drop_keep(drop_keep); self.value_stack.drop_keep(drop_keep);
break; break;
}, }
} }
} }
@ -253,7 +362,11 @@ impl Interpreter {
} }
#[inline(always)] #[inline(always)]
fn run_instruction(&mut self, context: &mut FunctionContext, instruction: &isa::Instruction) -> Result<InstructionOutcome, TrapKind> { fn run_instruction(
&mut self,
context: &mut FunctionContext,
instruction: &isa::Instruction,
) -> Result<InstructionOutcome, TrapKind> {
match instruction { match instruction {
&isa::Instruction::Unreachable => self.run_unreachable(context), &isa::Instruction::Unreachable => self.run_unreachable(context),
@ -279,26 +392,52 @@ impl Interpreter {
&isa::Instruction::I64Load(offset) => self.run_load::<i64>(context, offset), &isa::Instruction::I64Load(offset) => self.run_load::<i64>(context, offset),
&isa::Instruction::F32Load(offset) => self.run_load::<F32>(context, offset), &isa::Instruction::F32Load(offset) => self.run_load::<F32>(context, offset),
&isa::Instruction::F64Load(offset) => self.run_load::<F64>(context, offset), &isa::Instruction::F64Load(offset) => self.run_load::<F64>(context, offset),
&isa::Instruction::I32Load8S(offset) => self.run_load_extend::<i8, i32>(context, offset), &isa::Instruction::I32Load8S(offset) => {
&isa::Instruction::I32Load8U(offset) => self.run_load_extend::<u8, i32>(context, offset), self.run_load_extend::<i8, i32>(context, offset)
&isa::Instruction::I32Load16S(offset) => self.run_load_extend::<i16, i32>(context, offset), }
&isa::Instruction::I32Load16U(offset) => self.run_load_extend::<u16, i32>(context, offset), &isa::Instruction::I32Load8U(offset) => {
&isa::Instruction::I64Load8S(offset) => self.run_load_extend::<i8, i64>(context, offset), self.run_load_extend::<u8, i32>(context, offset)
&isa::Instruction::I64Load8U(offset) => self.run_load_extend::<u8, i64>(context, offset), }
&isa::Instruction::I64Load16S(offset) => self.run_load_extend::<i16, i64>(context, offset), &isa::Instruction::I32Load16S(offset) => {
&isa::Instruction::I64Load16U(offset) => self.run_load_extend::<u16, i64>(context, offset), self.run_load_extend::<i16, i32>(context, offset)
&isa::Instruction::I64Load32S(offset) => self.run_load_extend::<i32, i64>(context, offset), }
&isa::Instruction::I64Load32U(offset) => self.run_load_extend::<u32, i64>(context, offset), &isa::Instruction::I32Load16U(offset) => {
self.run_load_extend::<u16, i32>(context, offset)
}
&isa::Instruction::I64Load8S(offset) => {
self.run_load_extend::<i8, i64>(context, offset)
}
&isa::Instruction::I64Load8U(offset) => {
self.run_load_extend::<u8, i64>(context, offset)
}
&isa::Instruction::I64Load16S(offset) => {
self.run_load_extend::<i16, i64>(context, offset)
}
&isa::Instruction::I64Load16U(offset) => {
self.run_load_extend::<u16, i64>(context, offset)
}
&isa::Instruction::I64Load32S(offset) => {
self.run_load_extend::<i32, i64>(context, offset)
}
&isa::Instruction::I64Load32U(offset) => {
self.run_load_extend::<u32, i64>(context, offset)
}
&isa::Instruction::I32Store(offset) => self.run_store::<i32>(context, offset), &isa::Instruction::I32Store(offset) => self.run_store::<i32>(context, offset),
&isa::Instruction::I64Store(offset) => self.run_store::<i64>(context, offset), &isa::Instruction::I64Store(offset) => self.run_store::<i64>(context, offset),
&isa::Instruction::F32Store(offset) => self.run_store::<F32>(context, offset), &isa::Instruction::F32Store(offset) => self.run_store::<F32>(context, offset),
&isa::Instruction::F64Store(offset) => self.run_store::<F64>(context, offset), &isa::Instruction::F64Store(offset) => self.run_store::<F64>(context, offset),
&isa::Instruction::I32Store8(offset) => self.run_store_wrap::<i32, i8>(context, offset), &isa::Instruction::I32Store8(offset) => self.run_store_wrap::<i32, i8>(context, offset),
&isa::Instruction::I32Store16(offset) => self.run_store_wrap::<i32, i16>(context, offset), &isa::Instruction::I32Store16(offset) => {
self.run_store_wrap::<i32, i16>(context, offset)
}
&isa::Instruction::I64Store8(offset) => self.run_store_wrap::<i64, i8>(context, offset), &isa::Instruction::I64Store8(offset) => self.run_store_wrap::<i64, i8>(context, offset),
&isa::Instruction::I64Store16(offset) => self.run_store_wrap::<i64, i16>(context, offset), &isa::Instruction::I64Store16(offset) => {
&isa::Instruction::I64Store32(offset) => self.run_store_wrap::<i64, i32>(context, offset), self.run_store_wrap::<i64, i16>(context, offset)
}
&isa::Instruction::I64Store32(offset) => {
self.run_store_wrap::<i64, i32>(context, offset)
}
&isa::Instruction::CurrentMemory => self.run_current_memory(context), &isa::Instruction::CurrentMemory => self.run_current_memory(context),
&isa::Instruction::GrowMemory => self.run_grow_memory(context), &isa::Instruction::GrowMemory => self.run_grow_memory(context),
@ -443,11 +582,18 @@ impl Interpreter {
} }
} }
fn run_unreachable(&mut self, _context: &mut FunctionContext) -> Result<InstructionOutcome, TrapKind> { fn run_unreachable(
&mut self,
_context: &mut FunctionContext,
) -> Result<InstructionOutcome, TrapKind> {
Err(TrapKind::Unreachable) Err(TrapKind::Unreachable)
} }
fn run_br(&mut self, _context: &mut FunctionContext, target: isa::Target) -> Result<InstructionOutcome, TrapKind> { fn run_br(
&mut self,
_context: &mut FunctionContext,
target: isa::Target,
) -> Result<InstructionOutcome, TrapKind> {
Ok(InstructionOutcome::Branch(target)) Ok(InstructionOutcome::Branch(target))
} }
@ -465,7 +611,6 @@ impl Interpreter {
if condition { if condition {
Ok(InstructionOutcome::RunNextInstruction) Ok(InstructionOutcome::RunNextInstruction)
} else { } else {
Ok(InstructionOutcome::Branch(target)) Ok(InstructionOutcome::Branch(target))
} }
} }
@ -510,7 +655,8 @@ impl Interpreter {
.module() .module()
.table_by_index(DEFAULT_TABLE_INDEX) .table_by_index(DEFAULT_TABLE_INDEX)
.expect("Due to validation table should exists"); .expect("Due to validation table should exists");
let func_ref = table.get(table_func_idx) let func_ref = table
.get(table_func_idx)
.map_err(|_| TrapKind::TableAccessOutOfBounds)? .map_err(|_| TrapKind::TableAccessOutOfBounds)?
.ok_or_else(|| TrapKind::ElemUninitialized)?; .ok_or_else(|| TrapKind::ElemUninitialized)?;
@ -535,9 +681,7 @@ impl Interpreter {
} }
fn run_select(&mut self) -> Result<InstructionOutcome, TrapKind> { fn run_select(&mut self) -> Result<InstructionOutcome, TrapKind> {
let (left, mid, right) = self let (left, mid, right) = self.value_stack.pop_triple();
.value_stack
.pop_triple();
let condition = <_>::from_runtime_value_internal(right); let condition = <_>::from_runtime_value_internal(right);
let val = if condition { left } else { mid }; let val = if condition { left } else { mid };
@ -552,18 +696,13 @@ impl Interpreter {
} }
fn run_set_local(&mut self, index: u32) -> Result<InstructionOutcome, TrapKind> { fn run_set_local(&mut self, index: u32) -> Result<InstructionOutcome, TrapKind> {
let val = self let val = self.value_stack.pop();
.value_stack
.pop();
*self.value_stack.pick_mut(index as usize) = val; *self.value_stack.pick_mut(index as usize) = val;
Ok(InstructionOutcome::RunNextInstruction) Ok(InstructionOutcome::RunNextInstruction)
} }
fn run_tee_local(&mut self, index: u32) -> Result<InstructionOutcome, TrapKind> { fn run_tee_local(&mut self, index: u32) -> Result<InstructionOutcome, TrapKind> {
let val = self let val = self.value_stack.top().clone();
.value_stack
.top()
.clone();
*self.value_stack.pick_mut(index as usize) = val; *self.value_stack.pick_mut(index as usize) = val;
Ok(InstructionOutcome::RunNextInstruction) Ok(InstructionOutcome::RunNextInstruction)
} }
@ -587,9 +726,7 @@ impl Interpreter {
context: &mut FunctionContext, context: &mut FunctionContext,
index: u32, index: u32,
) -> Result<InstructionOutcome, TrapKind> { ) -> Result<InstructionOutcome, TrapKind> {
let val = self let val = self.value_stack.pop();
.value_stack
.pop();
let global = context let global = context
.module() .module()
.global_by_index(index) .global_by_index(index)
@ -614,7 +751,8 @@ impl Interpreter {
let m = context let m = context
.memory() .memory()
.expect("Due to validation memory should exists"); .expect("Due to validation memory should exists");
let n: T = m.get_value(address) let n: T = m
.get_value(address)
.map_err(|_| TrapKind::MemoryAccessOutOfBounds)?; .map_err(|_| TrapKind::MemoryAccessOutOfBounds)?;
self.value_stack.push(n.into())?; self.value_stack.push(n.into())?;
Ok(InstructionOutcome::RunNextInstruction) Ok(InstructionOutcome::RunNextInstruction)
@ -635,11 +773,11 @@ impl Interpreter {
let m = context let m = context
.memory() .memory()
.expect("Due to validation memory should exists"); .expect("Due to validation memory should exists");
let v: T = m.get_value(address) let v: T = m
.get_value(address)
.map_err(|_| TrapKind::MemoryAccessOutOfBounds)?; .map_err(|_| TrapKind::MemoryAccessOutOfBounds)?;
let stack_value: U = v.extend_into(); let stack_value: U = v.extend_into();
self self.value_stack
.value_stack
.push(stack_value.into()) .push(stack_value.into())
.map_err(Into::into) .map_err(Into::into)
.map(|_| InstructionOutcome::RunNextInstruction) .map(|_| InstructionOutcome::RunNextInstruction)
@ -678,14 +816,8 @@ impl Interpreter {
{ {
let stack_value: T = <_>::from_runtime_value_internal(self.value_stack.pop()); let stack_value: T = <_>::from_runtime_value_internal(self.value_stack.pop());
let stack_value = stack_value.wrap_into(); let stack_value = stack_value.wrap_into();
let raw_address = self let raw_address = self.value_stack.pop_as::<u32>();
.value_stack let address = effective_address(offset, raw_address)?;
.pop_as::<u32>();
let address =
effective_address(
offset,
raw_address,
)?;
let m = context let m = context
.memory() .memory()
.expect("Due to validation memory should exists"); .expect("Due to validation memory should exists");
@ -694,7 +826,10 @@ impl Interpreter {
Ok(InstructionOutcome::RunNextInstruction) Ok(InstructionOutcome::RunNextInstruction)
} }
fn run_current_memory(&mut self, context: &mut FunctionContext) -> Result<InstructionOutcome, TrapKind> { fn run_current_memory(
&mut self,
context: &mut FunctionContext,
) -> Result<InstructionOutcome, TrapKind> {
let m = context let m = context
.memory() .memory()
.expect("Due to validation memory should exists"); .expect("Due to validation memory should exists");
@ -703,10 +838,11 @@ impl Interpreter {
Ok(InstructionOutcome::RunNextInstruction) Ok(InstructionOutcome::RunNextInstruction)
} }
fn run_grow_memory(&mut self, context: &mut FunctionContext) -> Result<InstructionOutcome, TrapKind> { fn run_grow_memory(
let pages: u32 = self &mut self,
.value_stack context: &mut FunctionContext,
.pop_as(); ) -> Result<InstructionOutcome, TrapKind> {
let pages: u32 = self.value_stack.pop_as();
let m = context let m = context
.memory() .memory()
.expect("Due to validation memory should exists"); .expect("Due to validation memory should exists");
@ -719,8 +855,7 @@ impl Interpreter {
} }
fn run_const(&mut self, val: RuntimeValue) -> Result<InstructionOutcome, TrapKind> { fn run_const(&mut self, val: RuntimeValue) -> Result<InstructionOutcome, TrapKind> {
self self.value_stack
.value_stack
.push(val.into()) .push(val.into())
.map_err(Into::into) .map_err(Into::into)
.map(|_| InstructionOutcome::RunNextInstruction) .map(|_| InstructionOutcome::RunNextInstruction)
@ -731,9 +866,7 @@ impl Interpreter {
T: FromRuntimeValueInternal, T: FromRuntimeValueInternal,
F: FnOnce(T, T) -> bool, F: FnOnce(T, T) -> bool,
{ {
let (left, right) = self let (left, right) = self.value_stack.pop_pair_as::<T>();
.value_stack
.pop_pair_as::<T>();
let v = if f(left, right) { let v = if f(left, right) {
RuntimeValueInternal(1) RuntimeValueInternal(1)
} else { } else {
@ -802,9 +935,7 @@ impl Interpreter {
T: FromRuntimeValueInternal, T: FromRuntimeValueInternal,
RuntimeValueInternal: From<U>, RuntimeValueInternal: From<U>,
{ {
let v = self let v = self.value_stack.pop_as::<T>();
.value_stack
.pop_as::<T>();
let v = f(v); let v = f(v);
self.value_stack.push(v.into())?; self.value_stack.push(v.into())?;
Ok(InstructionOutcome::RunNextInstruction) Ok(InstructionOutcome::RunNextInstruction)
@ -970,9 +1101,7 @@ impl Interpreter {
RuntimeValueInternal: From<T>, RuntimeValueInternal: From<T>,
T: Integer<T> + FromRuntimeValueInternal, T: Integer<T> + FromRuntimeValueInternal,
{ {
let (left, right) = self let (left, right) = self.value_stack.pop_pair_as::<T>();
.value_stack
.pop_pair_as::<T>();
let v = left.rotr(right); let v = left.rotr(right);
self.value_stack.push(v.into())?; self.value_stack.push(v.into())?;
Ok(InstructionOutcome::RunNextInstruction) Ok(InstructionOutcome::RunNextInstruction)
@ -1039,9 +1168,7 @@ impl Interpreter {
RuntimeValueInternal: From<T>, RuntimeValueInternal: From<T>,
T: Float<T> + FromRuntimeValueInternal, T: Float<T> + FromRuntimeValueInternal,
{ {
let (left, right) = self let (left, right) = self.value_stack.pop_pair_as::<T>();
.value_stack
.pop_pair_as::<T>();
let v = left.min(right); let v = left.min(right);
self.value_stack.push(v.into())?; self.value_stack.push(v.into())?;
Ok(InstructionOutcome::RunNextInstruction) Ok(InstructionOutcome::RunNextInstruction)
@ -1152,18 +1279,20 @@ impl FunctionContext {
self.is_initialized self.is_initialized
} }
pub fn initialize(&mut self, locals: &[Local], value_stack: &mut ValueStack) -> Result<(), TrapKind> { pub fn initialize(
&mut self,
locals: &[Local],
value_stack: &mut ValueStack,
) -> Result<(), TrapKind> {
debug_assert!(!self.is_initialized); debug_assert!(!self.is_initialized);
let num_locals = locals let num_locals = locals.iter().map(|l| l.count() as usize).sum();
.iter()
.map(|l| l.count() as usize)
.sum();
let locals = vec![Default::default(); num_locals]; let locals = vec![Default::default(); num_locals];
// TODO: Replace with extend. // TODO: Replace with extend.
for local in locals { for local in locals {
value_stack.push(local) value_stack
.push(local)
.map_err(|_| TrapKind::StackOverflow)?; .map_err(|_| TrapKind::StackOverflow)?;
} }
@ -1209,12 +1338,14 @@ fn prepare_function_args(
pub fn check_function_args(signature: &Signature, args: &[RuntimeValue]) -> Result<(), Trap> { pub fn check_function_args(signature: &Signature, args: &[RuntimeValue]) -> Result<(), Trap> {
if signature.params().len() != args.len() { if signature.params().len() != args.len() {
return Err( return Err(TrapKind::UnexpectedSignature.into());
TrapKind::UnexpectedSignature.into()
);
} }
if signature.params().iter().zip(args).any(|(expected_type, param_value)| { if signature
.params()
.iter()
.zip(args)
.any(|(expected_type, param_value)| {
let actual_type = param_value.value_type(); let actual_type = param_value.value_type();
&actual_type != expected_type &actual_type != expected_type
}) { }) {

View File

@ -1,6 +1,7 @@
use byteorder::{ByteOrder, LittleEndian}; use byteorder::{ByteOrder, LittleEndian};
use nan_preserving_float::{F32, F64};
use core::{f32, i32, i64, u32, u64}; use core::{f32, i32, i64, u32, u64};
use nan_preserving_float::{F32, F64};
use types::ValueType;
use TrapKind; use TrapKind;
#[derive(Debug)] #[derive(Debug)]
@ -27,93 +28,6 @@ pub enum RuntimeValue {
F64(F64), F64(F64),
} }
/// Tag-free `u64` wrapper used internally by the interpreter: a runtime value
/// stored without its type tag, with narrower values living in the low bits.
/// NOTE(review): the bits carry no type information — the caller must know
/// the static type to reinterpret them correctly.
#[derive(Copy, Clone, Debug, PartialEq, Default)]
pub(crate) struct RuntimeValueInternal(pub u64);

impl RuntimeValueInternal {
    /// Reattaches a type tag, converting the raw payload into the public
    /// `RuntimeValue` of type `ty`.
    pub fn with_type(self, ty: ValueType) -> RuntimeValue {
        match ty {
            // `<_>` infers each variant's payload type from the constructor.
            ValueType::I32 => RuntimeValue::I32(<_>::from_runtime_value_internal(self)),
            ValueType::I64 => RuntimeValue::I64(<_>::from_runtime_value_internal(self)),
            ValueType::F32 => RuntimeValue::F32(<_>::from_runtime_value_internal(self)),
            ValueType::F64 => RuntimeValue::F64(<_>::from_runtime_value_internal(self)),
        }
    }
}
/// Conversion out of `RuntimeValueInternal`: reinterprets the raw 64-bit
/// payload as `Self`. Counterpart of the `From<T> for RuntimeValueInternal`
/// impls generated by the macros below.
pub(crate) trait FromRuntimeValueInternal
where
    Self: Sized,
{
    fn from_runtime_value_internal(val: RuntimeValueInternal) -> Self;
}
// Generates the integer <-> RuntimeValueInternal conversions; both
// directions are plain `as` casts to/from the u64 payload.
macro_rules! impl_from_runtime_value_internal {
    ($($t:ty),*) => {
        $(
            impl FromRuntimeValueInternal for $t {
                fn from_runtime_value_internal(
                    RuntimeValueInternal(val): RuntimeValueInternal,
                ) -> Self {
                    val as _
                }
            }

            impl From<$t> for RuntimeValueInternal {
                fn from(other: $t) -> Self {
                    RuntimeValueInternal(other as _)
                }
            }
        )*
    };
}
// Generates the float <-> RuntimeValueInternal conversions; floats are
// stored by their raw bit pattern (`to_bits`/`from_bits`), so the exact
// bits survive the round trip.
macro_rules! impl_from_runtime_value_internal_float {
    ($($t:ty),*) => {
        $(
            impl FromRuntimeValueInternal for $t {
                fn from_runtime_value_internal(
                    RuntimeValueInternal(val): RuntimeValueInternal,
                ) -> Self {
                    <$t>::from_bits(val as _)
                }
            }

            impl From<$t> for RuntimeValueInternal {
                fn from(other: $t) -> Self {
                    RuntimeValueInternal(other.to_bits() as _)
                }
            }
        )*
    };
}
// Integer primitives round-trip through the u64 payload with plain casts.
impl_from_runtime_value_internal!(i8, u8, i16, u16, i32, u32, i64, u64);
// Floats (including the NaN-preserving wrappers) round-trip via raw bits.
impl_from_runtime_value_internal_float!(f32, f64, F32, F64);
// Booleans are stored as 0 (false) / 1 (true) in the payload.
impl From<bool> for RuntimeValueInternal {
    fn from(other: bool) -> Self {
        (if other { 1 } else { 0 }).into()
    }
}

impl FromRuntimeValueInternal for bool {
    // Any non-zero payload reads back as `true`.
    fn from_runtime_value_internal(RuntimeValueInternal(val): RuntimeValueInternal) -> Self {
        val != 0
    }
}
impl From<RuntimeValue> for RuntimeValueInternal {
    // Strips the type tag, keeping only the untyped 64-bit payload.
    fn from(other: RuntimeValue) -> Self {
        match other {
            RuntimeValue::I32(val) => val.into(),
            RuntimeValue::I64(val) => val.into(),
            RuntimeValue::F32(val) => val.into(),
            RuntimeValue::F64(val) => val.into(),
        }
    }
}
/// Trait for creating value from a [`RuntimeValue`]. /// Trait for creating value from a [`RuntimeValue`].
/// ///
/// Typically each implementation can create a value from the specific type. /// Typically each implementation can create a value from the specific type.
@ -123,7 +37,10 @@ impl From<RuntimeValue> for RuntimeValueInternal {
/// [`I32`]: enum.RuntimeValue.html#variant.I32 /// [`I32`]: enum.RuntimeValue.html#variant.I32
/// [`F64`]: enum.RuntimeValue.html#variant.F64 /// [`F64`]: enum.RuntimeValue.html#variant.F64
/// [`RuntimeValue`]: enum.RuntimeValue.html /// [`RuntimeValue`]: enum.RuntimeValue.html
pub trait FromRuntimeValue where Self: Sized { pub trait FromRuntimeValue
where
Self: Sized,
{
/// Create a value of type `Self` from a given [`RuntimeValue`]. /// Create a value of type `Self` from a given [`RuntimeValue`].
/// ///
/// Returns `None` if the [`RuntimeValue`] is of type different than /// Returns `None` if the [`RuntimeValue`] is of type different than
@ -158,9 +75,12 @@ pub trait TransmuteInto<T> {
} }
/// Convert from and to little endian. /// Convert from and to little endian.
pub trait LittleEndianConvert where Self: Sized { pub trait LittleEndianConvert
where
Self: Sized,
{
/// Convert to little endian buffer. /// Convert to little endian buffer.
fn into_little_endian(self, buffer: &mut[u8]); fn into_little_endian(self, buffer: &mut [u8]);
/// Convert from little endian buffer. /// Convert from little endian buffer.
fn from_little_endian(buffer: &[u8]) -> Result<Self, Error>; fn from_little_endian(buffer: &[u8]) -> Result<Self, Error>;
} }
@ -548,7 +468,7 @@ macro_rules! impl_transmute_into_self {
self self
} }
} }
} };
} }
impl_transmute_into_self!(i32); impl_transmute_into_self!(i32);
@ -565,7 +485,7 @@ macro_rules! impl_transmute_into_as {
self as $into self as $into
} }
} }
} };
} }
impl_transmute_into_as!(i8, u8); impl_transmute_into_as!(i8, u8);
@ -616,139 +536,160 @@ impl_transmute_into_npf!(F32, f32, i32, u32);
impl_transmute_into_npf!(F64, f64, i64, u64); impl_transmute_into_npf!(F64, f64, i64, u64);
impl TransmuteInto<i32> for f32 { impl TransmuteInto<i32> for f32 {
fn transmute_into(self) -> i32 { self.to_bits() as i32 } fn transmute_into(self) -> i32 {
self.to_bits() as i32
}
} }
impl TransmuteInto<i64> for f64 { impl TransmuteInto<i64> for f64 {
fn transmute_into(self) -> i64 { self.to_bits() as i64 } fn transmute_into(self) -> i64 {
self.to_bits() as i64
}
} }
impl TransmuteInto<f32> for i32 { impl TransmuteInto<f32> for i32 {
fn transmute_into(self) -> f32 { f32::from_bits(self as u32) } fn transmute_into(self) -> f32 {
f32::from_bits(self as u32)
}
} }
impl TransmuteInto<f64> for i64 { impl TransmuteInto<f64> for i64 {
fn transmute_into(self) -> f64 { f64::from_bits(self as u64) } fn transmute_into(self) -> f64 {
f64::from_bits(self as u64)
}
} }
impl TransmuteInto<i32> for u32 { impl TransmuteInto<i32> for u32 {
fn transmute_into(self) -> i32 { self as _ } fn transmute_into(self) -> i32 {
self as _
}
} }
impl TransmuteInto<i64> for u64 { impl TransmuteInto<i64> for u64 {
fn transmute_into(self) -> i64 { self as _ } fn transmute_into(self) -> i64 {
self as _
}
} }
impl LittleEndianConvert for i8 { impl LittleEndianConvert for i8 {
fn into_little_endian(self, buffer: &mut[u8]) { fn into_little_endian(self, buffer: &mut [u8]) {
buffer[0] = self as u8; buffer[0] = self as u8;
} }
fn from_little_endian(buffer: &[u8]) -> Result<Self, Error> { fn from_little_endian(buffer: &[u8]) -> Result<Self, Error> {
buffer.get(0) buffer
.get(0)
.map(|v| *v as i8) .map(|v| *v as i8)
.ok_or_else(|| Error::InvalidLittleEndianBuffer) .ok_or_else(|| Error::InvalidLittleEndianBuffer)
} }
} }
impl LittleEndianConvert for u8 { impl LittleEndianConvert for u8 {
fn into_little_endian(self, buffer: &mut[u8]) { fn into_little_endian(self, buffer: &mut [u8]) {
buffer[0] = self; buffer[0] = self;
} }
fn from_little_endian(buffer: &[u8]) -> Result<Self, Error> { fn from_little_endian(buffer: &[u8]) -> Result<Self, Error> {
buffer.get(0) buffer
.get(0)
.cloned() .cloned()
.ok_or_else(|| Error::InvalidLittleEndianBuffer) .ok_or_else(|| Error::InvalidLittleEndianBuffer)
} }
} }
impl LittleEndianConvert for i16 { impl LittleEndianConvert for i16 {
fn into_little_endian(self, buffer: &mut[u8]) { fn into_little_endian(self, buffer: &mut [u8]) {
LittleEndian::write_i16(buffer, self); LittleEndian::write_i16(buffer, self);
} }
fn from_little_endian(buffer: &[u8]) -> Result<Self, Error> { fn from_little_endian(buffer: &[u8]) -> Result<Self, Error> {
buffer.get(0..2) buffer
.get(0..2)
.map(LittleEndian::read_i16) .map(LittleEndian::read_i16)
.ok_or_else(|| Error::InvalidLittleEndianBuffer) .ok_or_else(|| Error::InvalidLittleEndianBuffer)
} }
} }
impl LittleEndianConvert for u16 { impl LittleEndianConvert for u16 {
fn into_little_endian(self, buffer: &mut[u8]) { fn into_little_endian(self, buffer: &mut [u8]) {
LittleEndian::write_u16(buffer, self); LittleEndian::write_u16(buffer, self);
} }
fn from_little_endian(buffer: &[u8]) -> Result<Self, Error> { fn from_little_endian(buffer: &[u8]) -> Result<Self, Error> {
buffer.get(0..2) buffer
.get(0..2)
.map(LittleEndian::read_u16) .map(LittleEndian::read_u16)
.ok_or_else(|| Error::InvalidLittleEndianBuffer) .ok_or_else(|| Error::InvalidLittleEndianBuffer)
} }
} }
impl LittleEndianConvert for i32 { impl LittleEndianConvert for i32 {
fn into_little_endian(self, buffer: &mut[u8]) { fn into_little_endian(self, buffer: &mut [u8]) {
LittleEndian::write_i32(buffer, self); LittleEndian::write_i32(buffer, self);
} }
fn from_little_endian(buffer: &[u8]) -> Result<Self, Error> { fn from_little_endian(buffer: &[u8]) -> Result<Self, Error> {
buffer.get(0..4) buffer
.get(0..4)
.map(LittleEndian::read_i32) .map(LittleEndian::read_i32)
.ok_or_else(|| Error::InvalidLittleEndianBuffer) .ok_or_else(|| Error::InvalidLittleEndianBuffer)
} }
} }
impl LittleEndianConvert for u32 { impl LittleEndianConvert for u32 {
fn into_little_endian(self, buffer: &mut[u8]) { fn into_little_endian(self, buffer: &mut [u8]) {
LittleEndian::write_u32(buffer, self); LittleEndian::write_u32(buffer, self);
} }
fn from_little_endian(buffer: &[u8]) -> Result<Self, Error> { fn from_little_endian(buffer: &[u8]) -> Result<Self, Error> {
buffer.get(0..4) buffer
.get(0..4)
.map(LittleEndian::read_u32) .map(LittleEndian::read_u32)
.ok_or_else(|| Error::InvalidLittleEndianBuffer) .ok_or_else(|| Error::InvalidLittleEndianBuffer)
} }
} }
impl LittleEndianConvert for i64 { impl LittleEndianConvert for i64 {
fn into_little_endian(self, buffer: &mut[u8]) { fn into_little_endian(self, buffer: &mut [u8]) {
LittleEndian::write_i64(buffer, self); LittleEndian::write_i64(buffer, self);
} }
fn from_little_endian(buffer: &[u8]) -> Result<Self, Error> { fn from_little_endian(buffer: &[u8]) -> Result<Self, Error> {
buffer.get(0..8) buffer
.get(0..8)
.map(LittleEndian::read_i64) .map(LittleEndian::read_i64)
.ok_or_else(|| Error::InvalidLittleEndianBuffer) .ok_or_else(|| Error::InvalidLittleEndianBuffer)
} }
} }
impl LittleEndianConvert for f32 { impl LittleEndianConvert for f32 {
fn into_little_endian(self, buffer: &mut[u8]) { fn into_little_endian(self, buffer: &mut [u8]) {
LittleEndian::write_f32(buffer, self); LittleEndian::write_f32(buffer, self);
} }
fn from_little_endian(buffer: &[u8]) -> Result<Self, Error> { fn from_little_endian(buffer: &[u8]) -> Result<Self, Error> {
buffer.get(0..4) buffer
.get(0..4)
.map(LittleEndian::read_f32) .map(LittleEndian::read_f32)
.ok_or_else(|| Error::InvalidLittleEndianBuffer) .ok_or_else(|| Error::InvalidLittleEndianBuffer)
} }
} }
impl LittleEndianConvert for f64 { impl LittleEndianConvert for f64 {
fn into_little_endian(self, buffer: &mut[u8]) { fn into_little_endian(self, buffer: &mut [u8]) {
LittleEndian::write_f64(buffer, self); LittleEndian::write_f64(buffer, self);
} }
fn from_little_endian(buffer: &[u8]) -> Result<Self, Error> { fn from_little_endian(buffer: &[u8]) -> Result<Self, Error> {
buffer.get(0..8) buffer
.get(0..8)
.map(LittleEndian::read_f64) .map(LittleEndian::read_f64)
.ok_or_else(|| Error::InvalidLittleEndianBuffer) .ok_or_else(|| Error::InvalidLittleEndianBuffer)
} }
} }
impl LittleEndianConvert for F32 { impl LittleEndianConvert for F32 {
fn into_little_endian(self, buffer: &mut[u8]) { fn into_little_endian(self, buffer: &mut [u8]) {
(self.to_bits() as i32).into_little_endian(buffer) (self.to_bits() as i32).into_little_endian(buffer)
} }
@ -758,7 +699,7 @@ impl LittleEndianConvert for F32 {
} }
impl LittleEndianConvert for F64 { impl LittleEndianConvert for F64 {
fn into_little_endian(self, buffer: &mut[u8]) { fn into_little_endian(self, buffer: &mut [u8]) {
(self.to_bits() as i64).into_little_endian(buffer) (self.to_bits() as i64).into_little_endian(buffer)
} }
@ -770,14 +711,19 @@ impl LittleEndianConvert for F64 {
macro_rules! impl_integer_arithmetic_ops { macro_rules! impl_integer_arithmetic_ops {
($type: ident) => { ($type: ident) => {
impl ArithmeticOps<$type> for $type { impl ArithmeticOps<$type> for $type {
fn add(self, other: $type) -> $type { self.wrapping_add(other) } fn add(self, other: $type) -> $type {
fn sub(self, other: $type) -> $type { self.wrapping_sub(other) } self.wrapping_add(other)
fn mul(self, other: $type) -> $type { self.wrapping_mul(other) } }
fn sub(self, other: $type) -> $type {
self.wrapping_sub(other)
}
fn mul(self, other: $type) -> $type {
self.wrapping_mul(other)
}
fn div(self, other: $type) -> Result<$type, TrapKind> { fn div(self, other: $type) -> Result<$type, TrapKind> {
if other == 0 { if other == 0 {
Err(TrapKind::DivisionByZero) Err(TrapKind::DivisionByZero)
} } else {
else {
let (result, overflow) = self.overflowing_div(other); let (result, overflow) = self.overflowing_div(other);
if overflow { if overflow {
Err(TrapKind::InvalidConversionToInt) Err(TrapKind::InvalidConversionToInt)
@ -787,7 +733,7 @@ macro_rules! impl_integer_arithmetic_ops {
} }
} }
} }
} };
} }
impl_integer_arithmetic_ops!(i32); impl_integer_arithmetic_ops!(i32);
@ -798,12 +744,20 @@ impl_integer_arithmetic_ops!(u64);
/// Implements `ArithmeticOps` for a floating-point type.
///
/// Unlike the integer version, float `div` can never trap: IEEE-754
/// division by zero yields an infinity or NaN, so the result is always `Ok`.
macro_rules! impl_float_arithmetic_ops {
    ($type: ident) => {
        impl ArithmeticOps<$type> for $type {
            fn add(self, rhs: $type) -> $type {
                self + rhs
            }
            fn sub(self, rhs: $type) -> $type {
                self - rhs
            }
            fn mul(self, rhs: $type) -> $type {
                self * rhs
            }
            fn div(self, rhs: $type) -> Result<$type, TrapKind> {
                Ok(self / rhs)
            }
        }
    };
}
impl_float_arithmetic_ops!(f32); impl_float_arithmetic_ops!(f32);
@ -814,17 +768,30 @@ impl_float_arithmetic_ops!(F64);
/// Implements the `Integer` trait for an integer type, mapping each wasm
/// integer instruction onto the corresponding Rust intrinsic.
///
/// `rem` traps with `TrapKind::DivisionByZero` on a zero divisor;
/// `wrapping_rem` is used so that `MIN % -1` yields 0 instead of
/// overflowing, matching wasm semantics.
macro_rules! impl_integer {
    ($type: ident) => {
        impl Integer<$type> for $type {
            fn leading_zeros(self) -> $type {
                self.leading_zeros() as $type
            }
            fn trailing_zeros(self) -> $type {
                self.trailing_zeros() as $type
            }
            fn count_ones(self) -> $type {
                self.count_ones() as $type
            }
            // Rotation amounts are taken as `u32`, per the std rotate API.
            fn rotl(self, rhs: $type) -> $type {
                self.rotate_left(rhs as u32)
            }
            fn rotr(self, rhs: $type) -> $type {
                self.rotate_right(rhs as u32)
            }
            fn rem(self, rhs: $type) -> Result<$type, TrapKind> {
                if rhs == 0 {
                    Err(TrapKind::DivisionByZero)
                } else {
                    Ok(self.wrapping_rem(rhs))
                }
            }
        }
    };
}
impl_integer!(i32); impl_integer!(i32);