diff --git a/rts/motoko-rts/src/persistence/stable_functions.rs b/rts/motoko-rts/src/persistence/stable_functions.rs index a266c273921..a1ced998bd5 100644 --- a/rts/motoko-rts/src/persistence/stable_functions.rs +++ b/rts/motoko-rts/src/persistence/stable_functions.rs @@ -94,21 +94,19 @@ //! flexible contexts or not even using imported classes or stable functions. Moreover, it allows //! programs to drop stable functions and classes, if they are no longer used for persistence. +pub mod gc; mod mark_stack; use core::{marker::PhantomData, mem::size_of, ptr::null_mut, str::from_utf8}; -use mark_stack::{MarkStack, StackEntry}; -use motoko_rts_macros::ic_mem_fn; +use gc::garbage_collect_functions; use crate::{ algorithms::SortedArray, barriers::{allocation_barrier, write_with_barrier}, - gc::remembered_set::RememberedSet, memory::{alloc_blob, Memory}, rts_trap_with, - types::{Blob, Bytes, Value, NULL_POINTER, TAG_BLOB_B, TAG_CLOSURE, TAG_OBJECT, TAG_SOME}, - visitor::enhanced::visit_pointer_fields, + types::{Blob, Bytes, Value, NULL_POINTER, TAG_BLOB_B, TAG_CLOSURE}, }; use super::{compatibility::MemoryCompatibilityTest, stable_function_state}; @@ -123,7 +121,7 @@ type FunctionId = isize; const NULL_FUNCTION_ID: FunctionId = FunctionId::MAX; -fn is_flexible_function_id(function_id: FunctionId) -> bool { +pub fn is_flexible_function_id(function_id: FunctionId) -> bool { function_id < 0 } @@ -143,6 +141,15 @@ fn to_flexible_function_id(wasm_table_index: WasmTableIndex) -> FunctionId { -(wasm_table_index as FunctionId) - 1 } +pub unsafe fn is_flexible_closure(value: Value) -> bool { + if value.tag() == TAG_CLOSURE { + let closure = value.as_closure(); + is_flexible_function_id((*closure).funid) + } else { + false + } +} + /// Part of the persistent metadata. Contains GC-managed references to blobs. #[repr(C)] pub struct StableFunctionState { @@ -174,8 +181,12 @@ impl StableFunctionState { write_with_barrier(mem, self.virtual_table_location(), initial_virtual_table); } + pub fn virtual_table(&self) -> Value { + self.virtual_table + } + /// The returned low-level pointer can only be used within the same IC message. - unsafe fn get_virtual_table(&mut self) -> *mut PersistentVirtualTable { + pub unsafe fn get_virtual_table(&mut self) -> *mut PersistentVirtualTable { assert_ne!(self.virtual_table, DEFAULT_VALUE); assert_ne!(self.virtual_table, NULL_POINTER); self.virtual_table.as_blob_mut() as *mut PersistentVirtualTable @@ -199,13 +210,17 @@ impl StableFunctionState { } #[repr(C)] -struct IndexedTable { +pub struct IndexedTable { header: Blob, _phantom: PhantomData, // not materialized, just to use generic type. // Series of `T` } impl IndexedTable { + pub unsafe fn from_blob(value: Value) -> *mut Self { + value.as_blob_mut() as *mut Self + } + unsafe fn length(self: *const Self) -> usize { let payload_length = (self as *const Blob).len(); debug_assert_eq!(payload_length.as_usize() % Self::get_entry_size(), 0); @@ -229,7 +244,7 @@ impl IndexedTable { } /// Indexed by function id. -type PersistentVirtualTable = IndexedTable; +pub type PersistentVirtualTable = IndexedTable; impl PersistentVirtualTable { unsafe fn new(mem: &mut M) -> Value { @@ -241,7 +256,7 @@ impl PersistentVirtualTable { #[repr(C)] #[derive(Clone)] -struct VirtualTableEntry { +pub struct VirtualTableEntry { function_name_hash: NameHash, closure_type_index: TypeIndex, // Referring to the persisted type table. 
     wasm_table_index: WasmTableIndex,
@@ -279,7 +294,7 @@ pub unsafe fn resolve_function_literal(wasm_table_index: WasmTableIndex) -> Func
 }
 
 #[repr(C)]
-struct StableFunctionEntry {
+pub struct StableFunctionEntry {
     function_name_hash: NameHash,
     wasm_table_index: WasmTableIndex,
     // Referring to the type table of the new prorgram version.
@@ -291,7 +306,7 @@ struct StableFunctionEntry {
 }
 
 /// Sorted by hash name.
-type StableFunctionMap = IndexedTable;
+pub type StableFunctionMap = IndexedTable;
 
 impl SortedArray for *mut StableFunctionMap {
     fn get_length(&self) -> usize {
@@ -322,35 +337,58 @@ pub unsafe fn register_stable_functions(
     type_test: Option<&MemoryCompatibilityTest>,
     old_actor: Option,
 ) {
-    let stable_functions = stable_functions_map.as_blob_mut() as *mut StableFunctionMap;
+    let stable_functions = StableFunctionMap::from_blob(stable_functions_map);
+    // Retrieve the persistent virtual table, or, if not present, initialize an empty one.
+    let virtual_table = prepare_virtual_table(mem);
+    // Garbage collect the stable functions in the old version on an upgrade.
+    garbage_collect_functions(mem, virtual_table, old_actor);
+    // Check and upgrade the alive stable functions, register new stable functions.
+    upgrade_stable_functions(mem, virtual_table, stable_functions, type_test);
+}
+
+/// Retrieve the persistent virtual table, or, if not present, initialize an empty one.
+unsafe fn prepare_virtual_table(mem: &mut M) -> *mut PersistentVirtualTable {
+    let state = stable_function_state();
+    if state.is_default() {
+        state.initialize_virtual_table(mem);
+    }
+    state.get_virtual_table()
+}
+
+/// Upgrade and extend the persistent virtual table and set the new function literal table.
+/// The stable function GC has already marked all alive stable functions in the virtual table.
+/// Check that the necessary stable functions exist in the new version and
+/// that their closure types are compatible.
+pub unsafe fn upgrade_stable_functions(
+    mem: &mut M,
+    virtual_table: *mut PersistentVirtualTable,
+    stable_functions: *mut StableFunctionMap,
+    type_test: Option<&MemoryCompatibilityTest>,
+) {
     // O(n*log(n)) runtime costs:
     // 1. Initialize all function ids in stable functions map to null sentinel.
     prepare_stable_function_map(stable_functions);
-    // 2. Retrieve the persistent virtual, or, if not present, initialize an empty one.
-    let virtual_table = prepare_virtual_table(mem);
-    // 3. Garbage collect the stable functions in the old version on an upgrade.
-    garbage_collect_functions(mem, virtual_table, old_actor);
-    // 4. Scan the persistent virtual table and match/update all entries against
-    // `stable_functions_map`. Check the compatibility of the closure types.
-    // Assign the function ids in stable function map.
+    // 2. Scan the persistent virtual table and match all marked entries with `stable_functions_map`.
+    // Check that all necessary stable functions exist in the new version and that their closure types are
+    // compatible. Assign the function ids in the stable function map.
    update_existing_functions(virtual_table, stable_functions, type_test);
-    // 5. Scan stable functions map and determine number of new stable functions that are yet
+    // 3. Scan stable functions map and determine number of new stable functions that are yet
     // not part of the persistent virtual table.
     let extension_size = count_new_functions(stable_functions);
-    // 6. Extend the persistent virtual table by the new stable functions.
+    // 4. Extend the persistent virtual table by the new stable functions.
// Assign the function ids in stable function map. let new_virtual_table = add_new_functions(mem, virtual_table, extension_size, stable_functions); - // 7. Create the function literal table by scanning the stable functions map and + // 5. Create the function literal table by scanning the stable functions map and // mapping Wasm table indices to their assigned function id. let new_literal_table = create_function_literal_table(mem, stable_functions); - // 8. Store the new persistent virtual table and function literal table. + // 6. Store the new persistent virtual table and function literal table. // Apply write barriers! let state = stable_function_state(); write_with_barrier(mem, state.virtual_table_location(), new_virtual_table); write_with_barrier(mem, state.literal_table_location(), new_literal_table); } -/// Step 1: Initialize all function ids in the stable function map to null. +/// Step 1. Initialize all function ids in the stable function map to null. unsafe fn prepare_stable_function_map(stable_functions: *mut StableFunctionMap) { for index in 0..stable_functions.length() { let entry = stable_functions.get(index); @@ -358,128 +396,9 @@ unsafe fn prepare_stable_function_map(stable_functions: *mut StableFunctionMap) } } -// Step 2. Retrieve the persistent virtual, or, if not present, initialize an empty one. -unsafe fn prepare_virtual_table(mem: &mut M) -> *mut PersistentVirtualTable { - let state = stable_function_state(); - if state.is_default() { - state.initialize_virtual_table(mem); - } - state.get_virtual_table() -} - -extern "C" { - fn moc_visit_stable_functions(object: Value, type_id: u64); -} - -struct FunctionGC { - mark_set: RememberedSet, - mark_stack: MarkStack, - virtual_table: *mut PersistentVirtualTable, -} - -// Currently fields in closure (captures) are not yet discovered in a type-directed way. -// This sentinel denotes that there is no static type known and the generic visitor is to be invoked. -// TODO: Optimization: Use expected closure types to select a compiler-generated specialized visitor. -const UNKNOWN_TYPE_ID: u64 = u64::MAX; - -impl FunctionGC { - unsafe fn new( - mem: &mut M, - virtual_table: *mut PersistentVirtualTable, - ) -> FunctionGC { - let mark_set = RememberedSet::new(mem); - let mark_stack = MarkStack::new(mem); - FunctionGC { - mark_set, - mark_stack, - virtual_table, - } - } - - unsafe fn run(&mut self, mem: &mut M) { - loop { - self.clear_mark_bits(); - match self.mark_stack.pop() { - None => return, - Some(StackEntry { object, type_id }) => { - debug_assert_ne!(object, NULL_POINTER); - if object.tag() == TAG_SOME { - // skip null boxes, not visited - } else if object.tag() == TAG_CLOSURE { - self.visit_stable_closure(mem, object); - } else if type_id == UNKNOWN_TYPE_ID { - self.generic_visit(mem, object); - } else { - // Specialized field visitor, as optimization. 
-                        moc_visit_stable_functions(object, type_id);
-                    }
-                }
-            }
-        }
-    }
-
-    unsafe fn generic_visit(&mut self, mem: &mut M, object: Value) {
-        visit_pointer_fields(
-            mem,
-            object.as_obj(),
-            object.tag(),
-            |mem, field| {
-                collect_stable_functions(mem, *field, UNKNOWN_TYPE_ID);
-            },
-            |_, slice_start, arr| {
-                assert!(slice_start == 0);
-                arr.len()
-            },
-        );
-    }
-
-    unsafe fn visit_stable_closure(&mut self, mem: &mut M, object: Value) {
-        let closure = object.as_closure();
-        let function_id = (*closure).funid;
-        assert!(!is_flexible_function_id(function_id));
-        self.generic_visit(mem, object);
-    }
-
-    unsafe fn clear_mark_bits(&mut self) {
-        for index in 0..self.virtual_table.length() {
-            let entry = self.virtual_table.get(index);
-            (*entry).marked = false;
-        }
-    }
-}
-
-static mut COLLECTOR_STATE: Option = None;
-
-// Step 3. Garbage collect the stable functions in the old version on an upgrade.
-unsafe fn garbage_collect_functions(
-    mem: &mut M,
-    virtual_table: *mut PersistentVirtualTable,
-    old_actor: Option,
-) {
-    if old_actor.is_none() {
-        return;
-    }
-    let old_actor = old_actor.unwrap();
-    assert_eq!(old_actor.tag(), TAG_OBJECT);
-    COLLECTOR_STATE = Some(FunctionGC::new(mem, virtual_table));
-    const ACTOR_TYPE_ID: u64 = 0;
-    collect_stable_functions(mem, old_actor, ACTOR_TYPE_ID);
-    COLLECTOR_STATE.as_mut().unwrap().run(mem);
-    COLLECTOR_STATE = None;
-}
-
-#[ic_mem_fn]
-unsafe fn collect_stable_functions(mem: &mut M, object: Value, type_id: u64) {
-    let state = COLLECTOR_STATE.as_mut().unwrap();
-    if object != NULL_POINTER && !state.mark_set.contains(object) {
-        state.mark_set.insert(mem, object);
-        state.mark_stack.push(mem, StackEntry { object, type_id });
-    }
-}
-
-// Step 4: Scan the persistent virtual table and match/update all entries against
-// `stable_functions_map`. Check the compatibility of the closure types.
-// Assign the function ids in stable function map.
+/// Step 2. Scan the persistent virtual table and match all marked entries with `stable_functions_map`.
+/// Check that all necessary stable functions exist in the new version and that their closure types are
+/// compatible. Assign the function ids in the stable function map.
 unsafe fn update_existing_functions(
     virtual_table: *mut PersistentVirtualTable,
     stable_functions: *mut StableFunctionMap,
@@ -522,7 +441,7 @@ unsafe fn update_existing_functions(
     }
 }
 
-// Step 5. Scan stable functions map and determine number of new stable functions that are not yet
+// Step 3. Scan stable functions map and determine number of new stable functions that are not yet
 // part of the persistent virtual table.
 unsafe fn count_new_functions(stable_functions: *mut StableFunctionMap) -> usize {
     let mut count = 0;
@@ -535,7 +454,7 @@ unsafe fn count_new_functions(stable_functions: *mut StableFunctionMap) -> usize
     count
 }
 
-// Step 6. Extend the persistent virtual table by the new stable functions.
+// Step 4. Extend the persistent virtual table by the new stable functions.
 // Assign the function ids in stable function map.
unsafe fn add_new_functions( mem: &mut M, @@ -548,7 +467,7 @@ unsafe fn add_new_functions( } let new_length = old_virtual_table.length() + new_function_count; let new_blob = extend_virtual_table(mem, old_virtual_table, new_length); - let new_virtual_table = new_blob.as_blob_mut() as *mut PersistentVirtualTable; + let new_virtual_table = PersistentVirtualTable::from_blob(new_blob); let mut function_id = old_virtual_table.length() as FunctionId; for index in 0..stable_functions.length() { let stable_function_entry = stable_functions.get(index); @@ -589,7 +508,7 @@ unsafe fn extend_virtual_table( Bytes(new_length * PersistentVirtualTable::get_entry_size()), ); allocation_barrier(new_blob); - let new_virtual_table = new_blob.as_blob_mut() as *mut PersistentVirtualTable; + let new_virtual_table = PersistentVirtualTable::from_blob(new_blob); for index in 0..old_virtual_table.length() { let old_entry = old_virtual_table.get(index); new_virtual_table.set(index, (*old_entry).clone()); @@ -598,7 +517,7 @@ unsafe fn extend_virtual_table( new_blob } -// Step 7. Create the function literal table by scanning the stable functions map and +// Step 5. Create the function literal table by scanning the stable functions map and // mapping Wasm table indices to their assigned function id. unsafe fn create_function_literal_table( mem: &mut M, @@ -622,7 +541,7 @@ unsafe fn create_empty_literal_table( let byte_length = Bytes(table_length * FunctionLiteralTable::get_entry_size()); let new_blob = alloc_blob(mem, TAG_BLOB_B, byte_length); allocation_barrier(new_blob); - let function_literal_table = new_blob.as_blob_mut() as *mut FunctionLiteralTable; + let function_literal_table = FunctionLiteralTable::from_blob(new_blob); for index in 0..function_literal_table.length() { function_literal_table.set(index, NULL_FUNCTION_ID); } diff --git a/rts/motoko-rts/src/persistence/stable_functions/gc.rs b/rts/motoko-rts/src/persistence/stable_functions/gc.rs new file mode 100644 index 00000000000..637c25dc404 --- /dev/null +++ b/rts/motoko-rts/src/persistence/stable_functions/gc.rs @@ -0,0 +1,121 @@ +use motoko_rts_macros::ic_mem_fn; + +use crate::{gc::remembered_set::RememberedSet, memory::Memory, persistence::stable_functions::is_flexible_function_id, types::{Value, NULL_POINTER, TAG_CLOSURE, TAG_OBJECT, TAG_SOME}, visitor::enhanced::visit_pointer_fields}; + +use super::{mark_stack::{MarkStack, StackEntry}, resolve_stable_function_id, FunctionId, PersistentVirtualTable}; + +// Currently fields in closure (captures) are not yet discovered in a type-directed way. +// This sentinel denotes that there is no static type known and the generic visitor is to be invoked. +// TODO: Optimization: Use expected closure types to select a compiler-generated specialized visitor. 
+const UNKNOWN_TYPE_ID: u64 = u64::MAX; + +extern "C" { + fn moc_visit_stable_functions(object: Value, type_id: u64); +} + +pub struct FunctionGC { + mark_set: RememberedSet, + mark_stack: MarkStack, + virtual_table: *mut PersistentVirtualTable, +} + +impl FunctionGC { + unsafe fn new( + mem: &mut M, + virtual_table: *mut PersistentVirtualTable, + ) -> FunctionGC { + let mark_set = RememberedSet::new(mem); + let mark_stack = MarkStack::new(mem); + FunctionGC { + mark_set, + mark_stack, + virtual_table, + } + } + + unsafe fn run(&mut self, mem: &mut M) { + self.clear_mark_bits(); + loop { + match self.mark_stack.pop() { + None => return, + Some(StackEntry { object, type_id }) => { + debug_assert_ne!(object, NULL_POINTER); + if object.tag() == TAG_SOME { + // skip null boxes, not visited + } else if object.tag() == TAG_CLOSURE { + self.visit_stable_closure(mem, object); + } else if type_id == UNKNOWN_TYPE_ID { + self.generic_visit(mem, object); + } else { + // Specialized field visitor, as optimization. + moc_visit_stable_functions(object, type_id); + } + } + } + } + } + + unsafe fn generic_visit(&mut self, mem: &mut M, object: Value) { + visit_pointer_fields( + mem, + object.as_obj(), + object.tag(), + |mem, field| { + collect_stable_functions(mem, *field, UNKNOWN_TYPE_ID); + }, + |_, slice_start, arr| { + assert!(slice_start == 0); + arr.len() + }, + ); + } + + unsafe fn visit_stable_closure(&mut self, mem: &mut M, object: Value) { + let closure = object.as_closure(); + let function_id = (*closure).funid; + assert!(!is_flexible_function_id(function_id)); + self.mark_function(function_id); + self.generic_visit(mem, object); + } + + unsafe fn mark_function(&mut self, function_id: FunctionId) { + let entry = self.virtual_table.get(resolve_stable_function_id(function_id)); + (*entry).marked = true; + } + + unsafe fn clear_mark_bits(&mut self) { + for index in 0..self.virtual_table.length() { + let entry = self.virtual_table.get(index); + (*entry).marked = false; + } + } +} + +static mut COLLECTOR_STATE: Option = None; + +// Garbage collect the stable functions in the old version on an upgrade. 
+pub unsafe fn garbage_collect_functions( + mem: &mut M, + virtual_table: *mut PersistentVirtualTable, + old_actor: Option, +) { + if old_actor.is_none() { + return; + } + let old_actor = old_actor.unwrap(); + assert_eq!(old_actor.tag(), TAG_OBJECT); + COLLECTOR_STATE = Some(FunctionGC::new(mem, virtual_table)); + const ACTOR_TYPE_ID: u64 = 0; + collect_stable_functions(mem, old_actor, ACTOR_TYPE_ID); + COLLECTOR_STATE.as_mut().unwrap().run(mem); + COLLECTOR_STATE = None; +} + +#[ic_mem_fn] +unsafe fn collect_stable_functions(mem: &mut M, object: Value, type_id: u64) { + let state = COLLECTOR_STATE.as_mut().unwrap(); + if object != NULL_POINTER && !state.mark_set.contains(object) { + state.mark_set.insert(mem, object); + state.mark_stack.push(mem, StackEntry { object, type_id }); + } +} diff --git a/rts/motoko-rts/src/stabilization/ic.rs b/rts/motoko-rts/src/stabilization/ic.rs index 45f473e754c..7382a006d11 100644 --- a/rts/motoko-rts/src/stabilization/ic.rs +++ b/rts/motoko-rts/src/stabilization/ic.rs @@ -7,8 +7,7 @@ use crate::{ gc::incremental::{is_gc_stopped, resume_gc, stop_gc}, memory::Memory, persistence::{ - compatibility::{MemoryCompatibilityTest, TypeDescriptor}, - set_upgrade_instructions, + compatibility::{MemoryCompatibilityTest, TypeDescriptor}, set_upgrade_instructions, stable_function_state, stable_functions::{gc::garbage_collect_functions, upgrade_stable_functions, PersistentVirtualTable, StableFunctionMap} }, rts_trap_with, stabilization::ic::metadata::StabilizationMetadata, @@ -24,6 +23,7 @@ use super::{deserialization::Deserialization, serialization::Serialization}; struct StabilizationState { old_candid_data: Value, old_type_offsets: Value, + old_virtual_table: Value, completed: bool, serialization: Serialization, instruction_meter: InstructionMeter, @@ -34,10 +34,12 @@ impl StabilizationState { serialization: Serialization, old_candid_data: Value, old_type_offsets: Value, + old_virtual_table: Value, ) -> StabilizationState { StabilizationState { old_candid_data, old_type_offsets, + old_virtual_table, completed: false, serialization, instruction_meter: InstructionMeter::new(), @@ -67,17 +69,23 @@ pub unsafe fn start_graph_stabilization( stable_actor: Value, old_candid_data: Value, old_type_offsets: Value, - stable_functions_map: Value, + _old_function_map: Value, ) { assert!(STABILIZATION_STATE.is_none()); assert!(is_gc_stopped()); + let function_state = stable_function_state(); + garbage_collect_functions(mem, function_state.get_virtual_table(), Some(stable_actor)); let stable_memory_pages = stable_mem::size(); // Backup the virtual size. let serialized_data_start = stable_memory_pages * PAGE_SIZE; let serialization = Serialization::start(mem, stable_actor, serialized_data_start); + // Mark the alive stable functions before stabilization such that destabilization can later check + // their existence in the new program version. 
+ let old_virtual_table = function_state.virtual_table(); STABILIZATION_STATE = Some(StabilizationState::new( serialization, old_candid_data, old_type_offsets, + old_virtual_table, )); } @@ -121,10 +129,12 @@ unsafe fn write_metadata() { let serialized_data_length = state.serialization.serialized_data_length(); let type_descriptor = TypeDescriptor::new(state.old_candid_data, state.old_type_offsets); + let persistent_virtual_table = state.old_virtual_table; let metadata = StabilizationMetadata { serialized_data_start, serialized_data_length, type_descriptor, + persistent_virtual_table, }; state.instruction_meter.stop(); metadata.store(&mut state.instruction_meter); @@ -156,7 +166,7 @@ pub unsafe fn start_graph_destabilization( mem: &mut M, new_candid_data: Value, new_type_offsets: Value, - stable_functions_map: Value, + new_function_map: Value, ) { assert!(DESTABILIZATION_STATE.is_none()); @@ -170,6 +180,11 @@ pub unsafe fn start_graph_destabilization( if !type_test.compatible_stable_actor() { rts_trap_with("Memory-incompatible program upgrade"); } + // Upgrade the stable functions and check their compatibility. + // The alive stable functions have been marked by the GC in `start_graph_stabilization`. + let virtual_table = PersistentVirtualTable::from_blob(metadata.persistent_virtual_table); + let stable_functions = StableFunctionMap::from_blob(new_function_map); + upgrade_stable_functions(mem, virtual_table, stable_functions, Some(&type_test)); // Restore the virtual size. moc_stable_mem_set_size(metadata.serialized_data_start / PAGE_SIZE); diff --git a/rts/motoko-rts/src/stabilization/ic/metadata.rs b/rts/motoko-rts/src/stabilization/ic/metadata.rs index 2ddcf99b5ad..1a8fd553868 100644 --- a/rts/motoko-rts/src/stabilization/ic/metadata.rs +++ b/rts/motoko-rts/src/stabilization/ic/metadata.rs @@ -14,6 +14,9 @@ //! Type offset table //! Byte length (u64) //! Data +//! Persistent virtual table (only available with stable functions) +//! Byte length (u64) +//! Data //! (possible zero padding) //! -- Last physical page (metadata): //! (zero padding to align at page end) @@ -64,6 +67,7 @@ pub struct StabilizationMetadata { pub serialized_data_start: u64, pub serialized_data_length: u64, pub type_descriptor: TypeDescriptor, + pub persistent_virtual_table: Value, // refers to `PersistentVirtualTable` } impl StabilizationMetadata { @@ -104,6 +108,10 @@ impl StabilizationMetadata { Self::write_blob(offset, descriptor.type_offsets()); } + fn save_stable_functions(offset: &mut u64, virtual_table: Value) { + Self::write_blob(offset, virtual_table); + } + fn read_length(offset: &mut u64) -> u64 { let length = read_u64(*offset); // Note: Do not use `types::size_of()` as it rounds to 64-bit words. @@ -134,6 +142,20 @@ impl StabilizationMetadata { TypeDescriptor::new(candid_data, type_offsets) } + fn load_peristent_virtual_table(mem: &mut M, offset: &mut u64) -> Value { + assert!(*offset <= Self::metadata_location()); + // Backwards compatibility: The persistent virtual table may be missing, + // in which case the metadata directly follows the offset, or there is zero padding. + if *offset < Self::metadata_location() { + // There is either an existing virtual table, or if it is missing, there is zero padding + // which is decoded as an empty blob. + Self::read_blob(mem, TAG_BLOB_B, offset) + } else { + // No space for persistent virtual table. 
+ unsafe { alloc_blob(mem, TAG_BLOB_B, Bytes(0)) } + } + } + fn metadata_location() -> u64 { let physical_pages = unsafe { ic0_stable64_size() }; assert!(physical_pages > 0); @@ -172,6 +194,7 @@ impl StabilizationMetadata { Self::align_page_start(&mut offset); let type_descriptor_address = offset; Self::save_type_descriptor(&mut offset, &self.type_descriptor); + Self::save_stable_functions(&mut offset, self.persistent_virtual_table); Self::align_page_start(&mut offset); let first_word_backup = read_u32(0); // Clear very first word that is backed up in the last page. @@ -202,10 +225,12 @@ impl StabilizationMetadata { write_u32(0, last_page_record.first_word_backup); let mut offset = last_page_record.type_descriptor_address; let type_descriptor = Self::load_type_descriptor(mem, &mut offset); + let persistent_virtual_table = Self::load_peristent_virtual_table(mem, &mut offset); let metadata = StabilizationMetadata { serialized_data_start: last_page_record.serialized_data_address, serialized_data_length: last_page_record.serialized_data_length, type_descriptor, + persistent_virtual_table, }; (metadata, last_page_record.statistics) } diff --git a/rts/motoko-rts/src/stabilization/layout.rs b/rts/motoko-rts/src/stabilization/layout.rs index 49a9e5d24b0..7d28209f861 100644 --- a/rts/motoko-rts/src/stabilization/layout.rs +++ b/rts/motoko-rts/src/stabilization/layout.rs @@ -23,6 +23,8 @@ //! offsets can be scaled down by a factor `8` during the destabilization //! such that they fit into 32-bit values during Cheney's graph-copy. +use stable_closure::StableClosure; + use crate::{ barriers::allocation_barrier, constants::WORD_SIZE, @@ -31,8 +33,8 @@ use crate::{ types::{ base_array_tag, size_of, Tag, Value, TAG_ARRAY_I, TAG_ARRAY_M, TAG_ARRAY_S, TAG_ARRAY_SLICE_MIN, TAG_ARRAY_T, TAG_BIGINT, TAG_BITS64_F, TAG_BITS64_S, TAG_BITS64_U, - TAG_BLOB_A, TAG_BLOB_B, TAG_BLOB_P, TAG_BLOB_T, TAG_CONCAT, TAG_MUTBOX, TAG_OBJECT, - TAG_REGION, TAG_SOME, TAG_VARIANT, TRUE_VALUE, + TAG_BLOB_A, TAG_BLOB_B, TAG_BLOB_P, TAG_BLOB_T, TAG_CLOSURE, TAG_CONCAT, TAG_MUTBOX, + TAG_OBJECT, TAG_REGION, TAG_SOME, TAG_VARIANT, TRUE_VALUE, }, }; @@ -55,6 +57,7 @@ mod stable_array; mod stable_bigint; mod stable_bits64; mod stable_blob; +mod stable_closure; mod stable_concat; mod stable_mutbox; mod stable_object; @@ -84,6 +87,8 @@ pub enum StableObjectKind { Concat = 16, BigInt = 17, Some = 18, + // Extension: + Closure = 19, } #[repr(C)] @@ -115,6 +120,7 @@ impl StableTag { const STABLE_TAG_CONCAT: u64 = StableObjectKind::Concat as u64; const STABLE_TAG_BIGINT: u64 = StableObjectKind::BigInt as u64; const STABLE_TAG_SOME: u64 = StableObjectKind::Some as u64; + const STABLE_TAG_CLOSURE: u64 = StableObjectKind::Closure as u64; match self.0 { STABLE_TAG_ARRAY_IMMUTABLE => StableObjectKind::ArrayImmutable, STABLE_TAG_ARRAY_MUTABLE => StableObjectKind::ArrayMutable, @@ -134,6 +140,7 @@ impl StableTag { STABLE_TAG_CONCAT => StableObjectKind::Concat, STABLE_TAG_BIGINT => StableObjectKind::BigInt, STABLE_TAG_SOME => StableObjectKind::Some, + STABLE_TAG_CLOSURE => StableObjectKind::Closure, _ => unsafe { rts_trap_with("Invalid tag") }, } } @@ -167,6 +174,7 @@ impl StableObjectKind { TAG_CONCAT => StableObjectKind::Concat, TAG_BIGINT => StableObjectKind::BigInt, TAG_SOME => StableObjectKind::Some, + TAG_CLOSURE => StableObjectKind::Closure, _ => unreachable!("invalid tag"), } } @@ -369,6 +377,7 @@ pub fn scan_serialized< StableObjectKind::Concat => StableConcat::scan_serialized(context, translate), StableObjectKind::BigInt => 
StableBigInt::scan_serialized(context, translate), StableObjectKind::Some => StableSome::scan_serialized(context, translate), + StableObjectKind::Closure => StableClosure::scan_serialized(context, translate), } } @@ -394,6 +403,7 @@ pub unsafe fn serialize(stable_memory: &mut StableMemoryStream, main_object: Val StableObjectKind::Concat => StableConcat::serialize(stable_memory, main_object), StableObjectKind::BigInt => StableBigInt::serialize(stable_memory, main_object), StableObjectKind::Some => StableSome::serialize(stable_memory, main_object), + StableObjectKind::Closure => StableClosure::serialize(stable_memory, main_object), } } @@ -443,5 +453,8 @@ pub unsafe fn deserialize( StableObjectKind::Some => { StableSome::deserialize(main_memory, stable_memory, stable_object, object_kind) } + StableObjectKind::Closure => { + StableClosure::deserialize(main_memory, stable_memory, stable_object, object_kind) + } } } diff --git a/rts/motoko-rts/src/stabilization/layout/stable_closure.rs b/rts/motoko-rts/src/stabilization/layout/stable_closure.rs new file mode 100644 index 00000000000..463b13d33b6 --- /dev/null +++ b/rts/motoko-rts/src/stabilization/layout/stable_closure.rs @@ -0,0 +1,107 @@ +use crate::{ + memory::Memory, + persistence::stable_functions::is_flexible_function_id, + stabilization::{ + deserialization::stable_memory_access::StableMemoryAccess, + layout::StableObjectKind, + serialization::{ + stable_memory_stream::{ScanStream, StableMemoryStream, WriteStream}, + SerializationContext, + }, + }, + types::{size_of, Closure, Value, Words, TAG_CLOSURE}, +}; + +use super::{Serializer, StableToSpace, StableValue, StaticScanner}; + +#[repr(C)] +pub struct StableClosure { + function_id: i64, // Stable function id. + size: u64, // Number of fields. + // Dynamically sized body with `size` fields, each of `StableValue` being a captured variable. 
+} + +impl StaticScanner for StableClosure {} + +impl Serializer for StableClosure { + unsafe fn serialize_static_part( + _stable_memory: &mut StableMemoryStream, + main_object: *mut Closure, + ) -> Self { + debug_assert!(!is_flexible_function_id((*main_object).funid)); + StableClosure { + function_id: (*main_object).funid as i64, + size: (*main_object).size as u64, + } + } + + unsafe fn serialize_dynamic_part( + stable_memory: &mut StableMemoryStream, + main_object: *mut Closure, + ) { + for index in 0..(*main_object).size { + let main_field = main_object.get(index); + let stable_field = StableValue::serialize(main_field); + stable_memory.write(&stable_field); + } + } + + fn scan_serialized_dynamic< + 'a, + M, + F: Fn(&mut SerializationContext<'a, M>, StableValue) -> StableValue, + >( + &self, + context: &mut SerializationContext<'a, M>, + translate: &F, + ) { + for _ in 0..self.size { + let old_value = context.serialization.to_space().read::(); + let new_value = translate(context, old_value); + context.serialization.to_space().update(&new_value); + } + } + + unsafe fn allocate_deserialized( + &self, + main_memory: &mut M, + object_kind: StableObjectKind, + ) -> Value { + debug_assert_eq!(object_kind, StableObjectKind::Closure); + let total_size = size_of::() + Words(self.size as usize); + main_memory.alloc_words(total_size) + } + + unsafe fn deserialize_static_part( + &self, + target_object: *mut Closure, + object_kind: StableObjectKind, + ) { + debug_assert_eq!(object_kind, StableObjectKind::Closure); + debug_assert!(!is_flexible_function_id(self.function_id as isize)); + (*target_object).header.tag = TAG_CLOSURE; + (*target_object) + .header + .init_forward(Value::from_ptr(target_object as usize)); + (*target_object).size = self.size as usize; + (*target_object).funid = self.function_id as isize; + } + + unsafe fn deserialize_dynamic_part( + &self, + _main_memory: &mut M, + stable_memory: &StableMemoryAccess, + stable_object: StableValue, + target_object: *mut Closure, + ) { + let stable_address = stable_object.payload_address(); + for index in 0..self.size { + let field_address = stable_address + + size_of::().to_bytes().as_usize() as u64 + + (index * size_of::().to_bytes().as_usize() as u64); + let field = stable_memory.read::(field_address); + let target_field_address = target_object.payload_addr().add(index as usize); + *target_field_address = field.deserialize(); + } + } +} diff --git a/rts/motoko-rts/src/stabilization/serialization.rs b/rts/motoko-rts/src/stabilization/serialization.rs index fcf23b75671..6fb3bf995b0 100644 --- a/rts/motoko-rts/src/stabilization/serialization.rs +++ b/rts/motoko-rts/src/stabilization/serialization.rs @@ -1,9 +1,7 @@ pub mod stable_memory_stream; use crate::{ - memory::Memory, - stabilization::layout::serialize, - types::{FwdPtr, Tag, Value, TAG_CLOSURE, TAG_FWD_PTR}, + memory::Memory, persistence::stable_functions::is_flexible_closure, stabilization::layout::serialize, types::{FwdPtr, Tag, Value, TAG_FWD_PTR} }; use self::stable_memory_stream::{ScanStream, StableMemoryStream}; @@ -101,7 +99,7 @@ impl Serialization { } fn has_non_stable_type(old_field: Value) -> bool { - unsafe { old_field.tag() == TAG_CLOSURE } + unsafe { is_flexible_closure(old_field) } } pub fn pending_array_scanning(&self) -> bool { @@ -164,8 +162,8 @@ impl GraphCopy for Serialization { let old_value = original.deserialize(); if old_value.is_non_null_ptr() { if Self::has_non_stable_type(old_value) { - // Due to structural subtyping or `Any`-subtyping, a non-stable object (such 
as a closure) may be
-                    // be dynamically reachable from a stable varibale. The value is not accessible in the new program version.
+                    // Due to structural subtyping or `Any`-subtyping, a non-stable object (such as a closure of a flexible function)
+                    // may be dynamically reachable from a stable variable. The value is not accessible in the new program version.
                     // Therefore, the content of these fields can serialized with a dummy value that is also ignored by the GC.
                     DUMMY_VALUE
                 } else {
diff --git a/rts/motoko-rts/src/types.rs b/rts/motoko-rts/src/types.rs
index e2f06be8e98..fd685dadff1 100644
--- a/rts/motoko-rts/src/types.rs
+++ b/rts/motoko-rts/src/types.rs
@@ -863,6 +863,11 @@ impl Closure {
     pub(crate) unsafe fn size(self: *mut Self) -> usize {
         (*self).size
     }
+
+    #[allow(unused)]
+    pub(crate) unsafe fn get(self: *mut Self, index: usize) -> Value {
+        *self.payload_addr().add(index)
+    }
 }
 
 #[repr(C)] // See the note at the beginning of this module
diff --git a/src/codegen/compile_enhanced.ml b/src/codegen/compile_enhanced.ml
index 1ac0c47e77a..f48edd743c9 100644
--- a/src/codegen/compile_enhanced.ml
+++ b/src/codegen/compile_enhanced.ml
@@ -13525,7 +13525,14 @@ and conclude_module env actor_type set_serialization_globals set_eop_globals sta
   let register_stable_type = EnhancedOrthogonalPersistence.register_stable_type env in
   let register_static_variables = GCRoots.register_static_variables env in
   E.call_import env "rts" ("initialize_incremental_gc") ^^
-  register_stable_type ^^ (* cannot use stable variables *)
+  (* The stable type is registered upfront as the stable function literals need to be defined before
+     the object pool can be set up. This is because the object pool contains stable closures.
+     On EOP, the new functions and types can be directly registered on program start.
+     On graph copy, the new stable type is first temporarily set in the cleared persistent memory.
+     Later, on `start_graph_destabilization`, the persistent memory compatibility is checked
+     and the stable functions are properly upgraded based on the previous program version.
+     The check happens atomically in the upgrade, even for incremental graph copy destabilization. *)
+  register_stable_type ^^ (* cannot use stable variables. 
*) register_static_variables ^^ (* already uses stable function literals *) match start_fi_o with | Some fi -> diff --git a/test/run-drun/ok/stabilize-stable-functions.drun-run.ok b/test/run-drun/ok/stabilize-stable-functions.drun-run.ok new file mode 100644 index 00000000000..ba8c477dd13 --- /dev/null +++ b/test/run-drun/ok/stabilize-stable-functions.drun-run.ok @@ -0,0 +1,10 @@ +ingress Completed: Reply: 0x4449444c016c01b3c4b1f204680100010a00000000000000000101 +debug.print: Initial function +debug.print: Result: initial 123 +ingress Completed: Reply: 0x4449444c0000 +ingress Completed: Reply: 0x4449444c0000 +debug.print: Initial function +debug.print: Result: initial 123 +ingress Completed: Reply: 0x4449444c0000 +ingress Completed: Reply: 0x4449444c0000 +ingress Completed: Reply: 0x4449444c0000 diff --git a/test/run-drun/reachable-stable-functions/version2.mo b/test/run-drun/reachable-stable-functions/version2.mo index 2a8f49e6d2f..50dacc10e62 100644 --- a/test/run-drun/reachable-stable-functions/version2.mo +++ b/test/run-drun/reachable-stable-functions/version2.mo @@ -18,7 +18,8 @@ actor { }; stable let stableObject = StableClass ()>(stableActorFunction1); - stableObject.set(stableActorFunction1); // No longer use stableActorFunction2 + stableObject.set(stableActorFunction2); // Keep stable function reference + stableObject.set(stableActorFunction1); // But then make stableActorFunction2 unreachable stable let stableFunction = stableActorFunction3; // Drop all `flexibleActorFunctionX` and `FlexibleClass`. diff --git a/test/run-drun/stabilize-stable-functions.mo b/test/run-drun/stabilize-stable-functions.mo new file mode 100644 index 00000000000..e40737c9933 --- /dev/null +++ b/test/run-drun/stabilize-stable-functions.mo @@ -0,0 +1,43 @@ + +//ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY +//MOC-FLAG --stabilization-instruction-limit=10000 +import Prim "mo:prim"; + +actor { + func initialPrint() { + Prim.debugPrint("Initial function"); + }; + + func initialMap(x : Nat) : Text { + "initial " # debug_show (x); + }; + + stable var print : stable () -> () = initialPrint; + stable var map : stable Nat -> Text = initialMap; + + func newPrint() { + Prim.debugPrint("New function"); + }; + + func newMap(x : Nat) : Text { + "new " # debug_show (x); + }; + + public func change() : async () { + print := newPrint; + map := newMap; + }; + + print(); + Prim.debugPrint("Result: " # map(123)); +}; + +//SKIP run +//SKIP run-low +//SKIP run-ir +//SKIP comp-ref + +//CALL ingress __motoko_stabilize_before_upgrade "DIDL\x00\x00" +//CALL upgrade "" +//CALL ingress __motoko_destabilize_after_upgrade "DIDL\x00\x00" +//CALL ingress change "DIDL\x00\x00" diff --git a/test/run-drun/upgrade-stable-functions.mo b/test/run-drun/upgrade-stable-functions.mo index 3bfe6a2ee05..99bbb846aa9 100644 --- a/test/run-drun/upgrade-stable-functions.mo +++ b/test/run-drun/upgrade-stable-functions.mo @@ -1,4 +1,4 @@ -//ENHANCED-PERSISTENCE-ONLY +//ENHANCED-ORTHOGONAL-PERSISTENCE-ONLY import Prim "mo:prim"; actor {
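For the serialization change above (`has_non_stable_type` now testing `is_flexible_closure` instead of every `TAG_CLOSURE`), the situation it guards against can be illustrated with a small Motoko sketch — hypothetical names, not part of this change, assuming enhanced orthogonal persistence:

actor {
  var counter = 0; // flexible (non-stable) actor state

  // Anonymous closure capturing flexible state: it cannot be a stable function.
  let count = func() : Nat { counter += 1; counter };

  // `Any` is a stable type, so `hidden` itself is persisted,
  // yet it dynamically refers to the flexible closure above.
  stable var hidden : Any = count;
};

During `__motoko_stabilize_before_upgrade`, `hidden` remains reachable from stable state, but the flexible closure it holds is serialized as the dummy value and is therefore not accessible in the new program version; only closures carrying a stable function id are written out as `StableClosure` records and re-checked against the new program version.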