diff --git a/substrate/core/executor/src/allocator.rs b/substrate/core/executor/src/allocator.rs index d326b96622..403cf92e20 100644 --- a/substrate/core/executor/src/allocator.rs +++ b/substrate/core/executor/src/allocator.rs @@ -39,8 +39,7 @@ //! We then check to see if the linked list is empty. If empty, we use //! the bump allocator to get the allocation with an extra 8 bytes //! preceding it. We initialise those preceding 8 bytes to identify the -//! list to which it belongs (e.g. `0x__ffffffffffffff` where `__` is the -//! linked list index). If it is not empty, we unlink the first item from +//! list to which it belongs. If it is not empty, we unlink the first item from //! the linked list and then reset the 8 preceding bytes so they now record //! the identity of the linked list. //! @@ -49,7 +48,8 @@ use crate::error::{Error, Result}; use log::trace; -use wasmi::{MemoryRef, memory_units::Bytes}; +use std::convert::{TryFrom, TryInto}; +use std::ops::Range; use wasm_interface::{Pointer, WordSize}; // The pointers need to be aligned to 8 bytes. This is because the @@ -72,8 +72,6 @@ const PREFIX_SIZE: u32 = 8; pub struct FreeingBumpHeapAllocator { bumper: u32, heads: [u32; N], - heap: MemoryRef, - max_heap_size: u32, ptr_offset: u32, total_size: u32, } @@ -89,26 +87,15 @@ impl FreeingBumpHeapAllocator { /// /// # Arguments /// - /// - `mem` - reference to the linear memory instance on which this allocator operates. /// - `heap_base` - the offset from the beginning of the linear memory where the heap starts. - pub fn new(mem: MemoryRef, heap_base: u32) -> Self { - let current_size: Bytes = mem.current_size().into(); - let current_size = current_size.0 as u32; - - let mut ptr_offset = heap_base; - let padding = ptr_offset % ALIGNMENT; - if padding != 0 { - ptr_offset += ALIGNMENT - padding; - } - - let heap_size = current_size - ptr_offset; + pub fn new(heap_base: u32) -> Self { + // ptr_offset is the next alignment boundary on or after heap_base. 
+ let ptr_offset = (heap_base + ALIGNMENT - 1) / ALIGNMENT * ALIGNMENT; FreeingBumpHeapAllocator { bumper: 0, heads: [0; N], - heap: mem, - max_heap_size: heap_size, - ptr_offset: ptr_offset, + ptr_offset, total_size: 0, } } @@ -118,14 +105,23 @@ impl FreeingBumpHeapAllocator { /// There is no minimum size, but whatever size is passed into /// this function is rounded to the next power of two. If the requested /// size is below 8 bytes it will be rounded up to 8 bytes. - pub fn allocate(&mut self, size: WordSize) -> Result> { + /// + /// # Arguments + /// + /// - `mem` - a slice representing the linear memory on which this allocator operates. + /// - `size` - size in bytes of the allocation request + pub fn allocate(&mut self, mem: &mut [u8], size: WordSize) -> Result> { + let mem_size = u32::try_from(mem.len()) + .expect("size of Wasm linear memory is <2^32"); + let max_heap_size = mem_size - self.ptr_offset; + if size > MAX_POSSIBLE_ALLOCATION { return Err(Error::RequestedAllocationTooLarge); } let size = size.max(MIN_POSSIBLE_ALLOCATION); let item_size = size.next_power_of_two(); - if item_size + PREFIX_SIZE + self.total_size > self.max_heap_size { + if item_size + PREFIX_SIZE + self.total_size > max_heap_size { return Err(Error::AllocatorOutOfSpace); } @@ -135,23 +131,21 @@ impl FreeingBumpHeapAllocator { let item = self.heads[list_index]; let ptr = item + PREFIX_SIZE; assert!( - ptr + item_size <= self.max_heap_size, + ptr + item_size <= max_heap_size, "Pointer is looked up in list of free entries, into which only valid values are inserted; qed" ); - let four_bytes = self.get_heap_4bytes(item)?; - self.heads[list_index] = Self::le_bytes_to_u32(four_bytes); + self.heads[list_index] = self.get_heap_u64(mem, item)? + .try_into() + .map_err(|_| error("read invalid free list pointer"))?; ptr } else { // Nothing to be freed. Bump. - self.bump(item_size)? + PREFIX_SIZE + self.bump(item_size, max_heap_size)? 
+ PREFIX_SIZE }; - // Reset prefix - (1..PREFIX_SIZE).try_for_each(|i| self.set_heap(ptr - i, 255))?; - - self.set_heap(ptr - PREFIX_SIZE, list_index as u8)?; + self.set_heap_u64(mem, ptr - PREFIX_SIZE, list_index as u64)?; self.total_size = self.total_size + item_size + PREFIX_SIZE; trace!(target: "wasm-heap", "Heap size is {} bytes after allocation", self.total_size); @@ -160,23 +154,26 @@ } /// Deallocates the space which was allocated for a pointer. - pub fn deallocate(&mut self, ptr: Pointer) -> Result<()> { + /// + /// # Arguments + /// + /// - `mem` - a slice representing the linear memory on which this allocator operates. + /// - `ptr` - pointer to the allocated chunk + pub fn deallocate(&mut self, mem: &mut [u8], ptr: Pointer) -> Result<()> { let ptr = u32::from(ptr) - self.ptr_offset; if ptr < PREFIX_SIZE { return Err(error("Invalid pointer for deallocation")); } - let list_index = usize::from(self.get_heap_byte(ptr - PREFIX_SIZE)?); - (1..PREFIX_SIZE).try_for_each(|i| - self.get_heap_byte(ptr - i).map(|byte| assert!(byte == 255)) - )?; - let tail = self.heads[list_index]; + let list_index: usize = self.get_heap_u64(mem, ptr - PREFIX_SIZE)? + .try_into() + .map_err(|_| error("read invalid list index"))?; + if list_index >= self.heads.len() { + return Err(error("read invalid list index")); + } + self.set_heap_u64(mem, ptr - PREFIX_SIZE, self.heads[list_index] as u64)?; self.heads[list_index] = ptr - PREFIX_SIZE; - let mut slice = self.get_heap_4bytes(ptr - PREFIX_SIZE)?; - Self::write_u32_into_le_bytes(tail, &mut slice); - self.set_heap_4bytes(ptr - PREFIX_SIZE, slice)?; - let item_size = Self::get_item_size_from_index(list_index); self.total_size = self.total_size.checked_sub(item_size as u32 + PREFIX_SIZE) .ok_or_else(|| error("Unable to subtract from total heap size without overflow"))?; @@ -190,8 +187,8 @@ impl FreeingBumpHeapAllocator { /// Returns the `bumper` from before the increase. 
/// Returns an `Error::AllocatorOutOfSpace` if the operation /// would exhaust the heap. - fn bump(&mut self, item_size: u32) -> Result { - if self.bumper + PREFIX_SIZE + item_size > self.max_heap_size { + fn bump(&mut self, item_size: u32, max_heap_size: u32) -> Result { + if self.bumper + PREFIX_SIZE + item_size > max_heap_size { return Err(Error::AllocatorOutOfSpace); } @@ -200,45 +197,48 @@ impl FreeingBumpHeapAllocator { Ok(res) } - fn le_bytes_to_u32(arr: [u8; 4]) -> u32 { - u32::from_le_bytes(arr) - } - - fn write_u32_into_le_bytes(bytes: u32, slice: &mut [u8]) { - slice[..4].copy_from_slice(&bytes.to_le_bytes()); - } - fn get_item_size_from_index(index: usize) -> usize { // we shift 1 by three places, since the first possible item size is 8 1 << 3 << index } - fn get_heap_4bytes(&mut self, ptr: u32) -> Result<[u8; 4]> { - let mut arr = [0u8; 4]; - self.heap.get_into(self.ptr_offset + ptr, &mut arr)?; - Ok(arr) + // Read a u64 from the heap in LE form. Used to read heap allocation prefixes. + fn get_heap_u64(&self, heap: &[u8], offset: u32) -> Result { + let range = self.heap_range(offset, 8, heap.len()) + .ok_or_else(|| error("read out of heap bounds"))?; + let bytes = heap[range].try_into() + .expect("[u8] slice of length 8 must be convertible to [u8; 8]"); + Ok(u64::from_le_bytes(bytes)) } - fn get_heap_byte(&mut self, ptr: u32) -> Result { - let mut arr = [0u8; 1]; - self.heap.get_into(self.ptr_offset + ptr, &mut arr)?; - Ok(arr[0]) + // Write a u64 to the heap in LE form. Used to write heap allocation prefixes. 
+ fn set_heap_u64(&self, heap: &mut [u8], offset: u32, val: u64) -> Result<()> { + let range = self.heap_range(offset, 8, heap.len()) + .ok_or_else(|| error("write out of heap bounds"))?; + let bytes = val.to_le_bytes(); + heap[range].copy_from_slice(&bytes[..]); + Ok(()) } - fn set_heap(&mut self, ptr: u32, value: u8) -> Result<()> { - self.heap.set(self.ptr_offset + ptr, &[value]).map_err(Into::into) - } - - fn set_heap_4bytes(&mut self, ptr: u32, value: [u8; 4]) -> Result<()> { - self.heap.set(self.ptr_offset + ptr, &value).map_err(Into::into) + fn heap_range(&self, offset: u32, length: u32, heap_len: usize) -> Option<Range<usize>> { + let start = offset + .checked_add(self.ptr_offset)? + as usize; + let end = offset + .checked_add(self.ptr_offset)? + .checked_add(length)? + as usize; + if end <= heap_len { + Some(start..end) + } else { + None + } } } #[cfg(test)] mod tests { use super::*; - use wasmi::MemoryInstance; - use wasmi::memory_units::*; const PAGE_SIZE: u32 = 65536; @@ -250,11 +250,11 @@ mod tests { #[test] fn should_allocate_properly() { // given - let mem = MemoryInstance::alloc(Pages(1), None).unwrap(); - let mut heap = FreeingBumpHeapAllocator::new(mem, 0); + let mut mem = [0u8; PAGE_SIZE as usize]; + let mut heap = FreeingBumpHeapAllocator::new(0); // when - let ptr = heap.allocate(1).unwrap(); + let ptr = heap.allocate(&mut mem[..], 1).unwrap(); // then // returned pointer must start right after `PREFIX_SIZE` @@ -264,11 +264,11 @@ mod tests { #[test] fn should_always_align_pointers_to_multiples_of_8() { // given - let mem = MemoryInstance::alloc(Pages(1), None).unwrap(); - let mut heap = FreeingBumpHeapAllocator::new(mem, 13); + let mut mem = [0u8; PAGE_SIZE as usize]; + let mut heap = FreeingBumpHeapAllocator::new(13); // when - let ptr = heap.allocate(1).unwrap(); + let ptr = heap.allocate(&mut mem[..], 1).unwrap(); // then // the pointer must start at the next multiple of 8 from 13 @@ -279,13 +279,13 @@ mod tests { #[test] fn 
should_increment_pointers_properly() { // given - let mem = MemoryInstance::alloc(Pages(1), None).unwrap(); - let mut heap = FreeingBumpHeapAllocator::new(mem, 0); + let mut mem = [0u8; PAGE_SIZE as usize]; + let mut heap = FreeingBumpHeapAllocator::new(0); // when - let ptr1 = heap.allocate(1).unwrap(); - let ptr2 = heap.allocate(9).unwrap(); - let ptr3 = heap.allocate(1).unwrap(); + let ptr1 = heap.allocate(&mut mem[..], 1).unwrap(); + let ptr2 = heap.allocate(&mut mem[..], 9).unwrap(); + let ptr3 = heap.allocate(&mut mem[..], 1).unwrap(); // then // a prefix of 8 bytes is prepended to each pointer @@ -302,18 +302,18 @@ mod tests { #[test] fn should_free_properly() { // given - let mem = MemoryInstance::alloc(Pages(1), None).unwrap(); - let mut heap = FreeingBumpHeapAllocator::new(mem, 0); - let ptr1 = heap.allocate(1).unwrap(); + let mut mem = [0u8; PAGE_SIZE as usize]; + let mut heap = FreeingBumpHeapAllocator::new(0); + let ptr1 = heap.allocate(&mut mem[..], 1).unwrap(); // the prefix of 8 bytes is prepended to the pointer assert_eq!(ptr1, to_pointer(PREFIX_SIZE)); - let ptr2 = heap.allocate(1).unwrap(); + let ptr2 = heap.allocate(&mut mem[..], 1).unwrap(); // the prefix of 8 bytes + the content of ptr 1 is prepended to the pointer assert_eq!(ptr2, to_pointer(24)); // when - heap.deallocate(ptr2).unwrap(); + heap.deallocate(&mut mem[..], ptr2).unwrap(); // then // then the heads table should contain a pointer to the @@ -324,23 +324,23 @@ mod tests { #[test] fn should_deallocate_and_reallocate_properly() { // given - let mem = MemoryInstance::alloc(Pages(1), None).unwrap(); + let mut mem = [0u8; PAGE_SIZE as usize]; let padded_offset = 16; - let mut heap = FreeingBumpHeapAllocator::new(mem, 13); + let mut heap = FreeingBumpHeapAllocator::new(13); - let ptr1 = heap.allocate(1).unwrap(); + let ptr1 = heap.allocate(&mut mem[..], 1).unwrap(); // the prefix of 8 bytes is prepended to the pointer assert_eq!(ptr1, to_pointer(padded_offset + PREFIX_SIZE)); - let ptr2 = 
heap.allocate(9).unwrap(); + let ptr2 = heap.allocate(&mut mem[..], 9).unwrap(); // the padded_offset + the previously allocated ptr (8 bytes prefix + // 8 bytes content) + the prefix of 8 bytes which is prepended to the // current pointer assert_eq!(ptr2, to_pointer(padded_offset + 16 + PREFIX_SIZE)); // when - heap.deallocate(ptr2).unwrap(); - let ptr3 = heap.allocate(9).unwrap(); + heap.deallocate(&mut mem[..], ptr2).unwrap(); + let ptr3 = heap.allocate(&mut mem[..], 9).unwrap(); // then // should have re-allocated @@ -351,22 +351,22 @@ mod tests { #[test] fn should_build_linked_list_of_free_areas_properly() { // given - let mem = MemoryInstance::alloc(Pages(1), None).unwrap(); - let mut heap = FreeingBumpHeapAllocator::new(mem, 0); + let mut mem = [0u8; PAGE_SIZE as usize]; + let mut heap = FreeingBumpHeapAllocator::new(0); - let ptr1 = heap.allocate(8).unwrap(); - let ptr2 = heap.allocate(8).unwrap(); - let ptr3 = heap.allocate(8).unwrap(); + let ptr1 = heap.allocate(&mut mem[..], 8).unwrap(); + let ptr2 = heap.allocate(&mut mem[..], 8).unwrap(); + let ptr3 = heap.allocate(&mut mem[..], 8).unwrap(); // when - heap.deallocate(ptr1).unwrap(); - heap.deallocate(ptr2).unwrap(); - heap.deallocate(ptr3).unwrap(); + heap.deallocate(&mut mem[..], ptr1).unwrap(); + heap.deallocate(&mut mem[..], ptr2).unwrap(); + heap.deallocate(&mut mem[..], ptr3).unwrap(); // then assert_eq!(heap.heads[0], u32::from(ptr3) - PREFIX_SIZE); - let ptr4 = heap.allocate(8).unwrap(); + let ptr4 = heap.allocate(&mut mem[..], 8).unwrap(); assert_eq!(ptr4, ptr3); assert_eq!(heap.heads[0], u32::from(ptr2) - PREFIX_SIZE); @@ -375,11 +375,11 @@ mod tests { #[test] fn should_not_allocate_if_too_large() { // given - let mem = MemoryInstance::alloc(Pages(1), Some(Pages(1))).unwrap(); - let mut heap = FreeingBumpHeapAllocator::new(mem, 13); + let mut mem = [0u8; PAGE_SIZE as usize]; + let mut heap = FreeingBumpHeapAllocator::new(13); // when - let ptr = heap.allocate(PAGE_SIZE - 13); + let ptr = 
heap.allocate(&mut mem[..], PAGE_SIZE - 13); // then match ptr.unwrap_err() { @@ -391,13 +391,13 @@ mod tests { #[test] fn should_not_allocate_if_full() { // given - let mem = MemoryInstance::alloc(Pages(1), Some(Pages(1))).unwrap(); - let mut heap = FreeingBumpHeapAllocator::new(mem, 0); - let ptr1 = heap.allocate((PAGE_SIZE / 2) - PREFIX_SIZE).unwrap(); + let mut mem = [0u8; PAGE_SIZE as usize]; + let mut heap = FreeingBumpHeapAllocator::new(0); + let ptr1 = heap.allocate(&mut mem[..], (PAGE_SIZE / 2) - PREFIX_SIZE).unwrap(); assert_eq!(ptr1, to_pointer(PREFIX_SIZE)); // when - let ptr2 = heap.allocate(PAGE_SIZE / 2); + let ptr2 = heap.allocate(&mut mem[..], PAGE_SIZE / 2); // then // there is no room for another half page incl. its 8 byte prefix @@ -410,12 +410,11 @@ mod tests { #[test] fn should_allocate_max_possible_allocation_size() { // given - let pages_needed = (MAX_POSSIBLE_ALLOCATION as usize / PAGE_SIZE as usize) + 1; - let mem = MemoryInstance::alloc(Pages(pages_needed), Some(Pages(pages_needed))).unwrap(); - let mut heap = FreeingBumpHeapAllocator::new(mem, 0); + let mut mem = vec![0u8; (MAX_POSSIBLE_ALLOCATION + PAGE_SIZE) as usize]; + let mut heap = FreeingBumpHeapAllocator::new(0); // when - let ptr = heap.allocate(MAX_POSSIBLE_ALLOCATION).unwrap(); + let ptr = heap.allocate(&mut mem[..], MAX_POSSIBLE_ALLOCATION).unwrap(); // then assert_eq!(ptr, to_pointer(PREFIX_SIZE)); @@ -424,11 +423,11 @@ mod tests { #[test] fn should_not_allocate_if_requested_size_too_large() { // given - let mem = MemoryInstance::alloc(Pages(1), None).unwrap(); - let mut heap = FreeingBumpHeapAllocator::new(mem, 0); + let mut mem = [0u8; PAGE_SIZE as usize]; + let mut heap = FreeingBumpHeapAllocator::new(0); // when - let ptr = heap.allocate(MAX_POSSIBLE_ALLOCATION + 1); + let ptr = heap.allocate(&mut mem[..], MAX_POSSIBLE_ALLOCATION + 1); // then match ptr.unwrap_err() { @@ -440,19 +439,18 @@ mod tests { #[test] fn should_return_error_when_bumper_greater_than_heap_size() { 
// given - let mem = MemoryInstance::alloc(Pages(1), None).unwrap(); - let mut heap = FreeingBumpHeapAllocator::new(mem, 0); - heap.max_heap_size = 64; + let mut mem = [0u8; 64]; + let mut heap = FreeingBumpHeapAllocator::new(0); - let ptr1 = heap.allocate(32).unwrap(); + let ptr1 = heap.allocate(&mut mem[..], 32).unwrap(); assert_eq!(ptr1, to_pointer(PREFIX_SIZE)); - heap.deallocate(ptr1).expect("failed freeing ptr1"); + heap.deallocate(&mut mem[..], ptr1).expect("failed freeing ptr1"); assert_eq!(heap.total_size, 0); assert_eq!(heap.bumper, 40); - let ptr2 = heap.allocate(16).unwrap(); + let ptr2 = heap.allocate(&mut mem[..], 16).unwrap(); assert_eq!(ptr2, to_pointer(48)); - heap.deallocate(ptr2).expect("failed freeing ptr2"); + heap.deallocate(&mut mem[..], ptr2).expect("failed freeing ptr2"); assert_eq!(heap.total_size, 0); assert_eq!(heap.bumper, 64); @@ -461,7 +459,7 @@ mod tests { // further allocation which would increment the bumper must fail. // we try to allocate 8 bytes here, which will increment the // bumper since no 8 byte item has been allocated+freed before. 
- let ptr = heap.allocate(8); + let ptr = heap.allocate(&mut mem[..], 8); // then match ptr.unwrap_err() { @@ -473,12 +471,12 @@ mod tests { #[test] fn should_include_prefixes_in_total_heap_size() { // given - let mem = MemoryInstance::alloc(Pages(1), None).unwrap(); - let mut heap = FreeingBumpHeapAllocator::new(mem, 1); + let mut mem = [0u8; PAGE_SIZE as usize]; + let mut heap = FreeingBumpHeapAllocator::new(1); // when // an item size of 16 must be used then - heap.allocate(9).unwrap(); + heap.allocate(&mut mem[..], 9).unwrap(); // then assert_eq!(heap.total_size, PREFIX_SIZE + 16); @@ -487,13 +485,13 @@ mod tests { #[test] fn should_calculate_total_heap_size_to_zero() { // given - let mem = MemoryInstance::alloc(Pages(1), None).unwrap(); - let mut heap = FreeingBumpHeapAllocator::new(mem, 13); + let mut mem = [0u8; PAGE_SIZE as usize]; + let mut heap = FreeingBumpHeapAllocator::new(13); // when - let ptr = heap.allocate(42).unwrap(); + let ptr = heap.allocate(&mut mem[..], 42).unwrap(); assert_eq!(ptr, to_pointer(16 + PREFIX_SIZE)); - heap.deallocate(ptr).unwrap(); + heap.deallocate(&mut mem[..], ptr).unwrap(); // then assert_eq!(heap.total_size, 0); @@ -502,13 +500,13 @@ mod tests { #[test] fn should_calculate_total_size_of_zero() { // given - let mem = MemoryInstance::alloc(Pages(1), None).unwrap(); - let mut heap = FreeingBumpHeapAllocator::new(mem, 19); + let mut mem = [0u8; PAGE_SIZE as usize]; + let mut heap = FreeingBumpHeapAllocator::new(19); // when for _ in 1..10 { - let ptr = heap.allocate(42).unwrap(); - heap.deallocate(ptr).unwrap(); + let ptr = heap.allocate(&mut mem[..], 42).unwrap(); + heap.deallocate(&mut mem[..], ptr).unwrap(); } // then @@ -516,27 +514,17 @@ mod tests { } #[test] - fn should_write_u32_correctly_into_le() { + fn should_read_and_write_u64_correctly() { // given - let mut heap = vec![0; 5]; + let mut mem = [0u8; PAGE_SIZE as usize]; + let heap = FreeingBumpHeapAllocator::new(16); // when - 
FreeingBumpHeapAllocator::write_u32_into_le_bytes(1, &mut heap[0..4]); + heap.set_heap_u64(&mut mem[..], 40, 4480113).unwrap(); // then - assert_eq!(heap, [1, 0, 0, 0, 0]); - } - - #[test] - fn should_write_u32_max_correctly_into_le() { - // given - let mut heap = vec![0; 5]; - - // when - FreeingBumpHeapAllocator::write_u32_into_le_bytes(u32::max_value(), &mut heap[0..4]); - - // then - assert_eq!(heap, [255, 255, 255, 255, 0]); + let value = heap.get_heap_u64(&mut mem[..], 40).unwrap(); + assert_eq!(value, 4480113); } #[test] diff --git a/substrate/core/executor/src/sandbox.rs b/substrate/core/executor/src/sandbox.rs index 2c38499ec8..f09c246679 100644 --- a/substrate/core/executor/src/sandbox.rs +++ b/substrate/core/executor/src/sandbox.rs @@ -23,7 +23,7 @@ use std::{collections::HashMap, rc::Rc}; use codec::{Decode, Encode}; use primitives::sandbox as sandbox_primitives; use wasmi::{ - Externals, FuncRef, ImportResolver, MemoryInstance, MemoryRef, Module, ModuleInstance, + Externals, ImportResolver, MemoryInstance, MemoryRef, Module, ModuleInstance, ModuleRef, RuntimeArgs, RuntimeValue, Trap, TrapKind, memory_units::Pages, }; use wasm_interface::{Pointer, WordSize}; @@ -33,7 +33,13 @@ use wasm_interface::{Pointer, WordSize}; /// This is a typically an index in the default table of the supervisor, however /// the exact meaning of this index is depends on the implementation of dispatch function. #[derive(Copy, Clone, Debug, PartialEq)] -struct SupervisorFuncIndex(usize); +pub struct SupervisorFuncIndex(usize); + +impl From for usize { + fn from(index: SupervisorFuncIndex) -> Self { + index.0 + } +} /// Index of a function within guest index space. 
/// @@ -73,7 +79,7 @@ impl ImportResolver for Imports { module_name: &str, field_name: &str, signature: &::wasmi::Signature, - ) -> std::result::Result { + ) -> std::result::Result { let key = ( module_name.as_bytes().to_owned(), field_name.as_bytes().to_owned(), @@ -138,11 +144,14 @@ impl ImportResolver for Imports { /// /// Note that this functions are only called in the `supervisor` context. pub trait SandboxCapabilities { + /// Represents a function reference into the supervisor environment. + type SupervisorFuncRef; + /// Returns a reference to an associated sandbox `Store`. - fn store(&self) -> &Store; + fn store(&self) -> &Store; /// Returns a mutable reference to an associated sandbox `Store`. - fn store_mut(&mut self) -> &mut Store; + fn store_mut(&mut self) -> &mut Store; /// Allocate space of the specified length in the supervisor memory. /// @@ -175,15 +184,37 @@ pub trait SandboxCapabilities { /// /// Returns `Err` if `ptr + len` is out of bounds. fn read_memory(&self, ptr: Pointer, len: WordSize) -> Result>; + + /// Invoke a function in the supervisor environment. + /// + /// This first invokes the dispatch_thunk function, passing in the function index of the + /// desired function to call and serialized arguments. The thunk calls the desired function + /// with the deserialized arguments, then serializes the result into memory and returns + /// reference. The pointer to and length of the result in linear memory is encoded into an i64, + /// with the upper 32 bits representing the pointer and the lower 32 bits representing the + /// length. + /// + /// # Errors + /// + /// Returns `Err` if the dispatch_thunk function has an incorrect signature or traps during + /// execution. 
+ fn invoke( + &mut self, + dispatch_thunk: &Self::SupervisorFuncRef, + invoke_args_ptr: Pointer, + invoke_args_len: WordSize, + state: u32, + func_idx: SupervisorFuncIndex, + ) -> Result; } /// Implementation of [`Externals`] that allows execution of guest module with /// [externals][`Externals`] that might refer functions defined by supervisor. /// /// [`Externals`]: ../../wasmi/trait.Externals.html -pub struct GuestExternals<'a, FE: SandboxCapabilities + Externals + 'a> { +pub struct GuestExternals<'a, FE: SandboxCapabilities + 'a> { supervisor_externals: &'a mut FE, - sandbox_instance: &'a SandboxInstance, + sandbox_instance: &'a SandboxInstance, state: u32, } @@ -205,7 +236,7 @@ fn deserialize_result(serialized_result: &[u8]) -> std::result::Result Externals for GuestExternals<'a, FE> { +impl<'a, FE: SandboxCapabilities + 'a> Externals for GuestExternals<'a, FE> { fn invoke_index( &mut self, index: usize, @@ -214,7 +245,6 @@ impl<'a, FE: SandboxCapabilities + Externals + 'a> Externals for GuestExternals< // Make `index` typesafe again. let index = GuestFuncIndex(index); - let dispatch_thunk = self.sandbox_instance.dispatch_thunk.clone(); let func_idx = self.sandbox_instance .guest_to_supervisor_mapping .func_by_guest_index(index) @@ -237,34 +267,26 @@ impl<'a, FE: SandboxCapabilities + Externals + 'a> Externals for GuestExternals< // Move serialized arguments inside the memory and invoke dispatch thunk and // then free allocated memory. 
- let invoke_args_ptr = self.supervisor_externals - .allocate(invoke_args_data.len() as u32)?; - self.supervisor_externals - .write_memory(invoke_args_ptr, &invoke_args_data)?; - let result = ::wasmi::FuncInstance::invoke( - &dispatch_thunk, - &[ - RuntimeValue::I32(u32::from(invoke_args_ptr) as i32), - RuntimeValue::I32(invoke_args_data.len() as i32), - RuntimeValue::I32(state as i32), - RuntimeValue::I32(func_idx.0 as i32), - ], - self.supervisor_externals, - ); + let invoke_args_len = invoke_args_data.len() as WordSize; + let invoke_args_ptr = self.supervisor_externals.allocate(invoke_args_len)?; + self.supervisor_externals.write_memory(invoke_args_ptr, &invoke_args_data)?; + let result = self.supervisor_externals.invoke( + &self.sandbox_instance.dispatch_thunk, + invoke_args_ptr, + invoke_args_len, + state, + func_idx, + )?; self.supervisor_externals.deallocate(invoke_args_ptr)?; // dispatch_thunk returns pointer to serialized arguments. - let (serialized_result_val_ptr, serialized_result_val_len) = match result { - // Unpack pointer and len of the serialized result data. - Ok(Some(RuntimeValue::I64(v))) => { - // Cast to u64 to use zero-extension. - let v = v as u64; - let ptr = (v as u64 >> 32) as u32; - let len = (v & 0xFFFFFFFF) as u32; - (Pointer::new(ptr), len) - } - Ok(_) => return Err(trap("Supervisor function returned unexpected result!")), - Err(_) => return Err(trap("Supervisor function trapped!")), + // Unpack pointer and len of the serialized result data. + let (serialized_result_val_ptr, serialized_result_val_len) = { + // Cast to u64 to use zero-extension. 
+ let v = result as u64; + let ptr = (v as u64 >> 32) as u32; + let len = (v & 0xFFFFFFFF) as u32; + (Pointer::new(ptr), len) }; let serialized_result_val = self.supervisor_externals @@ -272,21 +294,18 @@ impl<'a, FE: SandboxCapabilities + Externals + 'a> Externals for GuestExternals< self.supervisor_externals .deallocate(serialized_result_val_ptr)?; - // We do not have to check the signature here, because it's automatically - // checked by wasmi. - deserialize_result(&serialized_result_val) } } fn with_guest_externals( supervisor_externals: &mut FE, - sandbox_instance: &SandboxInstance, + sandbox_instance: &SandboxInstance, state: u32, f: F, ) -> R where - FE: SandboxCapabilities + Externals, + FE: SandboxCapabilities, F: FnOnce(&mut GuestExternals) -> R, { let mut guest_externals = GuestExternals { @@ -308,14 +327,16 @@ where /// it's required to provide supervisor externals: it will be used to execute /// code in the supervisor context. /// +/// This is generic over a supervisor function reference type. +/// /// [`invoke`]: #method.invoke -pub struct SandboxInstance { +pub struct SandboxInstance { instance: ModuleRef, - dispatch_thunk: FuncRef, + dispatch_thunk: FR, guest_to_supervisor_mapping: GuestToSupervisorFunctionMapping, } -impl SandboxInstance { +impl SandboxInstance { /// Invoke an exported function by a name. /// /// `supervisor_externals` is required to execute the implementations @@ -323,7 +344,7 @@ impl SandboxInstance { /// /// The `state` parameter can be used to provide custom data for /// these syscall implementations. - pub fn invoke( + pub fn invoke>( &self, export_name: &str, args: &[RuntimeValue], @@ -412,9 +433,9 @@ fn decode_environment_definition( /// - Module in `wasm` is invalid or couldn't be instantiated. 
/// /// [`EnvironmentDefinition`]: ../../sandbox/struct.EnvironmentDefinition.html -pub fn instantiate( +pub fn instantiate( supervisor_externals: &mut FE, - dispatch_thunk: FuncRef, + dispatch_thunk: FE::SupervisorFuncRef, wasm: &[u8], raw_env_def: &[u8], state: u32, @@ -453,15 +474,17 @@ pub fn instantiate( } /// This struct keeps track of all sandboxed components. -pub struct Store { +/// +/// This is generic over a supervisor function reference type. +pub struct Store { // Memories and instances are `Some` untill torndown. - instances: Vec>>, + instances: Vec>>>, memories: Vec>, } -impl Store { +impl Store { /// Create a new empty sandbox store. - pub fn new() -> Store { + pub fn new() -> Self { Store { instances: Vec::new(), memories: Vec::new(), @@ -497,7 +520,7 @@ impl Store { /// /// Returns `Err` If `instance_idx` isn't a valid index of an instance or /// instance is already torndown. - pub fn instance(&self, instance_idx: u32) -> Result> { + pub fn instance(&self, instance_idx: u32) -> Result>> { self.instances .get(instance_idx as usize) .cloned() @@ -553,7 +576,7 @@ impl Store { } } - fn register_sandbox_instance(&mut self, sandbox_instance: Rc) -> u32 { + fn register_sandbox_instance(&mut self, sandbox_instance: Rc>) -> u32 { let instance_idx = self.instances.len(); self.instances.push(Some(sandbox_instance)); instance_idx as u32 diff --git a/substrate/core/executor/src/wasm_executor.rs b/substrate/core/executor/src/wasm_executor.rs index cc7d5d9344..9f8d678d1a 100644 --- a/substrate/core/executor/src/wasm_executor.rs +++ b/substrate/core/executor/src/wasm_executor.rs @@ -54,7 +54,7 @@ macro_rules! 
debug_trace { } struct FunctionExecutor { - sandbox_store: sandbox::Store, + sandbox_store: sandbox::Store, heap: allocator::FreeingBumpHeapAllocator, memory: MemoryRef, table: Option, @@ -64,7 +64,7 @@ impl FunctionExecutor { fn new(m: MemoryRef, heap_base: u32, t: Option) -> Result { Ok(FunctionExecutor { sandbox_store: sandbox::Store::new(), - heap: allocator::FreeingBumpHeapAllocator::new(m.clone(), heap_base), + heap: allocator::FreeingBumpHeapAllocator::new(heap_base), memory: m, table: t, }) @@ -72,17 +72,25 @@ impl FunctionExecutor { } impl sandbox::SandboxCapabilities for FunctionExecutor { - fn store(&self) -> &sandbox::Store { + type SupervisorFuncRef = wasmi::FuncRef; + + fn store(&self) -> &sandbox::Store { &self.sandbox_store } - fn store_mut(&mut self) -> &mut sandbox::Store { + fn store_mut(&mut self) -> &mut sandbox::Store { &mut self.sandbox_store } fn allocate(&mut self, len: WordSize) -> Result> { - self.heap.allocate(len) + let heap = &mut self.heap; + self.memory.with_direct_access_mut(|mem| { + heap.allocate(mem, len) + }) } fn deallocate(&mut self, ptr: Pointer) -> Result<()> { - self.heap.deallocate(ptr) + let heap = &mut self.heap; + self.memory.with_direct_access_mut(|mem| { + heap.deallocate(mem, ptr) + }) } fn write_memory(&mut self, ptr: Pointer, data: &[u8]) -> Result<()> { self.memory.set(ptr.into(), data).map_err(Into::into) @@ -90,6 +98,32 @@ impl sandbox::SandboxCapabilities for FunctionExecutor { fn read_memory(&self, ptr: Pointer, len: WordSize) -> Result> { self.memory.get(ptr.into(), len as usize).map_err(Into::into) } + + fn invoke( + &mut self, + dispatch_thunk: &Self::SupervisorFuncRef, + invoke_args_ptr: Pointer, + invoke_args_len: WordSize, + state: u32, + func_idx: sandbox::SupervisorFuncIndex, + ) -> Result + { + let result = wasmi::FuncInstance::invoke( + dispatch_thunk, + &[ + RuntimeValue::I32(u32::from(invoke_args_ptr) as i32), + RuntimeValue::I32(invoke_args_len as i32), + RuntimeValue::I32(state as i32), + 
RuntimeValue::I32(usize::from(func_idx) as i32), + ], + self, + ); + match result { + Ok(Some(RuntimeValue::I64(val))) => Ok(val), + Ok(_) => return Err("Supervisor function returned unexpected result!".into()), + Err(err) => Err(Error::Trap(err)), + } + } } impl FunctionContext for FunctionExecutor { @@ -102,11 +136,17 @@ impl FunctionContext for FunctionExecutor { } fn allocate_memory(&mut self, size: WordSize) -> WResult> { - self.heap.allocate(size).map_err(|e| format!("{:?}", e)) + let heap = &mut self.heap; + self.memory.with_direct_access_mut(|mem| { + heap.allocate(mem, size).map_err(|e| format!("{:?}", e)) + }) } fn deallocate_memory(&mut self, ptr: Pointer) -> WResult<()> { - self.heap.deallocate(ptr).map_err(|e| format!("{:?}", e)) + let heap = &mut self.heap; + self.memory.with_direct_access_mut(|mem| { + heap.deallocate(mem, ptr).map_err(|e| format!("{:?}", e)) + }) } fn sandbox(&mut self) -> &mut dyn Sandbox { diff --git a/substrate/node/executor/src/lib.rs b/substrate/node/executor/src/lib.rs index bcff17683e..6c4fdc5897 100644 --- a/substrate/node/executor/src/lib.rs +++ b/substrate/node/executor/src/lib.rs @@ -38,7 +38,6 @@ mod tests { use super::Executor; use {balances, contracts, indices, system, timestamp}; use runtime_io; - use substrate_executor::WasmExecutor; use codec::{Encode, Decode, Joiner}; use runtime_support::{ Hashable, StorageValue, StorageMap, assert_eq_error_rate, traits::Currency, @@ -46,7 +45,7 @@ mod tests { use state_machine::TestExternalities as CoreTestExternalities; use primitives::{ Blake2Hasher, NeverNativeValue, NativeOrEncoded, map, - traits::{CodeExecutor, Externalities}, + traits::{CodeExecutor, Externalities}, storage::well_known_keys, }; use sr_primitives::{ traits::{Header as HeaderT, Hash as HashT, Convert}, ApplyResult, @@ -121,6 +120,10 @@ mod tests { substrate_executor::NativeExecutor::new(None) } + fn set_heap_pages>(ext: &mut E, heap_pages: u64) { + ext.place_storage(well_known_keys::HEAP_PAGES.to_vec(), 
Some(heap_pages.encode())); + } + #[test] fn panic_execution_with_foreign_code_gives_error() { let mut t = TestExternalities::::new_with_code(BLOATY_CODE, (map![ @@ -529,14 +532,26 @@ mod tests { let (block1, block2) = blocks(); - WasmExecutor::new().call(&mut t, 8, COMPACT_CODE, "Core_execute_block", &block1.0).unwrap(); + executor().call::<_, NeverNativeValue, fn() -> _>( + &mut t, + "Core_execute_block", + &block1.0, + false, + None, + ).0.unwrap(); runtime_io::with_externalities(&mut t, || { assert_eq!(Balances::total_balance(&alice()), 42 * DOLLARS - transfer_fee(&xt())); assert_eq!(Balances::total_balance(&bob()), 169 * DOLLARS); }); - WasmExecutor::new().call(&mut t, 8, COMPACT_CODE, "Core_execute_block", &block2.0).unwrap(); + executor().call::<_, NeverNativeValue, fn() -> _>( + &mut t, + "Core_execute_block", + &block2.0, + false, + None, + ).0.unwrap(); runtime_io::with_externalities(&mut t, || { assert_eq_error_rate!( @@ -692,7 +707,13 @@ mod tests { let mut t = new_test_ext(COMPACT_CODE, false); - WasmExecutor::new().call(&mut t, 8, COMPACT_CODE,"Core_execute_block", &b.0).unwrap(); + executor().call::<_, NeverNativeValue, fn() -> _>( + &mut t, + "Core_execute_block", + &b.0, + false, + None, + ).0.unwrap(); runtime_io::with_externalities(&mut t, || { // Verify that the contract constructor worked well and code of TRANSFER contract is actually deployed. 
@@ -710,13 +731,15 @@ mod tests { fn wasm_big_block_import_fails() { let mut t = new_test_ext(COMPACT_CODE, false); - let result = WasmExecutor::new().call( + set_heap_pages(&mut t, 4); + + let result = executor().call::<_, NeverNativeValue, fn() -> _>( &mut t, - 4, - COMPACT_CODE, "Core_execute_block", - &block_with_size(42, 0, 120_000).0 - ); + &block_with_size(42, 0, 120_000).0, + false, + None, + ).0; assert!(result.is_err()); // Err(Wasmi(Trap(Trap { kind: Host(AllocatorOutOfSpace) }))) } @@ -761,11 +784,21 @@ mod tests { >::hashed_key_for(0) => vec![0u8; 32] ], map![])); - let r = WasmExecutor::new() - .call(&mut t, 8, COMPACT_CODE, "Core_initialize_block", &vec![].and(&from_block_number(1u32))); + let r = executor().call::<_, NeverNativeValue, fn() -> _>( + &mut t, + "Core_initialize_block", + &vec![].and(&from_block_number(1u32)), + false, + None, + ).0; assert!(r.is_ok()); - let r = WasmExecutor::new() - .call(&mut t, 8, COMPACT_CODE, "BlockBuilder_apply_extrinsic", &vec![].and(&xt())).unwrap(); + let r = executor().call::<_, NeverNativeValue, fn() -> _>( + &mut t, + "BlockBuilder_apply_extrinsic", + &vec![].and(&xt()), + false, + None, + ).0.unwrap().into_encoded(); let r = ApplyResult::decode(&mut &r[..]).unwrap(); assert_eq!(r, Err(InvalidTransaction::Payment.into())); } @@ -783,11 +816,21 @@ mod tests { >::hashed_key_for(0) => vec![0u8; 32] ], map![])); - let r = WasmExecutor::new() - .call(&mut t, 8, COMPACT_CODE, "Core_initialize_block", &vec![].and(&from_block_number(1u32))); + let r = executor().call::<_, NeverNativeValue, fn() -> _>( + &mut t, + "Core_initialize_block", + &vec![].and(&from_block_number(1u32)), + false, + None, + ).0; assert!(r.is_ok()); - let r = WasmExecutor::new() - .call(&mut t, 8, COMPACT_CODE, "BlockBuilder_apply_extrinsic", &vec![].and(&xt())).unwrap(); + let r = executor().call::<_, NeverNativeValue, fn() -> _>( + &mut t, + "BlockBuilder_apply_extrinsic", + &vec![].and(&xt()), + false, + None, + ).0.unwrap().into_encoded(); 
ApplyResult::decode(&mut &r[..]) .unwrap() .expect("Extrinsic could be applied") @@ -822,7 +865,13 @@ mod tests { let block1 = changes_trie_block(); let mut t = new_test_ext(COMPACT_CODE, true); - WasmExecutor::new().call(&mut t, 8, COMPACT_CODE, "Core_execute_block", &block1.0).unwrap(); + executor().call::<_, NeverNativeValue, fn() -> _>( + &mut t, + "Core_execute_block", + &block1.0, + false, + None, + ).0.unwrap(); assert!(t.storage_changes_root(GENESIS_HASH.into()).unwrap().is_some()); }