diff --git a/crates/cranelift/src/compiler/component.rs b/crates/cranelift/src/compiler/component.rs index 4aaae9cbc423..86c80e335df1 100644 --- a/crates/cranelift/src/compiler/component.rs +++ b/crates/cranelift/src/compiler/component.rs @@ -1,6 +1,6 @@ //! Compilation support for the component model. -use crate::{TRAP_ALWAYS, TRAP_CANNOT_ENTER, TRAP_INTERNAL_ASSERT, compiler::Compiler}; +use crate::{TRAP_ALWAYS, TRAP_INTERNAL_ASSERT, compiler::Compiler}; use cranelift_codegen::ir::condcodes::IntCC; use cranelift_codegen::ir::{self, InstBuilder, MemFlags, Value}; use cranelift_codegen::isa::{CallConv, TargetIsa}; @@ -732,9 +732,17 @@ impl<'a> TrampolineCompiler<'a> { |_, _| {}, ); } - Trampoline::CheckBlocking => { + Trampoline::EnterSyncCall => { self.translate_libcall( - host::check_blocking, + host::enter_sync_call, + TrapSentinel::Falsy, + WasmArgs::InRegisters, + |_, _| {}, + ); + } + Trampoline::ExitSyncCall => { + self.translate_libcall( + host::exit_sync_call, TrapSentinel::Falsy, WasmArgs::InRegisters, |_, _| {}, @@ -1138,11 +1146,9 @@ impl<'a> TrampolineCompiler<'a> { // brif should_run_destructor, run_destructor_block, return_block // // run_destructor_block: - // ;; test may_enter, but only if the component instances + // ;; call `enter_sync_call`, but only if the component instances // ;; differ - // flags = load.i32 vmctx+$offset - // masked = band flags, $FLAG_MAY_ENTER - // trapz masked, CANNOT_ENTER_CODE + // ... // // ;; ============================================================ // ;; this is conditionally emitted based on whether the resource @@ -1156,6 +1162,9 @@ impl<'a> TrampolineCompiler<'a> { // call_indirect func_addr, callee_vmctx, vmctx, rep // ;; ============================================================ // + // ;; call `exit_sync_call` if the component instances differ + // ... + // // jump return_block // // return_block: @@ -1186,25 +1195,32 @@ impl<'a> TrampolineCompiler<'a> { self.builder.switch_to_block(run_destructor_block); // If this is a defined resource within the component itself then a - // check needs to be emitted for the `may_enter` flag. Note though + // check needs to be emitted for recursive reentrance. Note though // that this check can be elided if the resource table resides in // the same component instance that defined the resource as the // component is calling itself. - if let Some(def) = resource_def { + let emit_exit = if let Some(def) = resource_def { if self.types[resource].unwrap_concrete_instance() != def.instance { - let flags = self.builder.ins().load( - ir::types::I32, - trusted, + let host_args = vec![ vmctx, - i32::try_from(self.offsets.instance_flags(def.instance)).unwrap(), - ); - let masked = self - .builder - .ins() - .band_imm(flags, i64::from(FLAG_MAY_ENTER)); - self.builder.ins().trapz(masked, TRAP_CANNOT_ENTER); + self.builder + .ins() + .iconst(ir::types::I32, i64::from(instance.as_u32())), + self.builder.ins().iconst(ir::types::I32, i64::from(0)), + self.builder + .ins() + .iconst(ir::types::I32, i64::from(def.instance.as_u32())), + ]; + let call = self.call_libcall(vmctx, host::enter_sync_call, &host_args); + let result = self.builder.func.dfg.inst_results(call).get(0).copied(); + self.raise_if_host_trapped(result.unwrap()); + true + } else { + false } - } + } else { + false + }; // Conditionally emit destructor-execution code based on whether we // statically know that a destructor exists or not. 
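// [editor sketch] A minimal standalone model of the enter/exit pairing the
// trampoline above now emits around cross-instance destructor calls, replacing
// the old per-instance `may_enter` flag test. `TaskStack` and `InstanceId` are
// illustrative names, not Wasmtime API; the real `enter_sync_call` libcall
// also handles async callees and per-task bookkeeping.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct InstanceId(u32);

#[derive(Default)]
struct TaskStack {
    /// Component instances entered by the current task, innermost last.
    entered: Vec<InstanceId>,
}

impl TaskStack {
    /// Analogue of `enter_sync_call`: fail on recursive reentrance (the real
    /// code traps with `CannotEnterComponent`), otherwise push the callee.
    fn enter(&mut self, callee: InstanceId) -> Result<(), &'static str> {
        if self.entered.contains(&callee) {
            return Err("cannot enter component instance: recursive reentrance");
        }
        self.entered.push(callee);
        Ok(())
    }

    /// Analogue of `exit_sync_call`: pop the instance pushed by `enter`.
    fn exit(&mut self) {
        self.entered.pop().expect("enter/exit must be paired");
    }
}

fn main() {
    let (a, b) = (InstanceId(0), InstanceId(1));
    let mut stack = TaskStack::default();
    stack.enter(a).unwrap();
    // A destructor defined in another instance enters that instance for the
    // duration of the call, then exits; this mirrors `emit_exit` above.
    stack.enter(b).unwrap();
    stack.exit();
    // Reentering an instance already on the stack is rejected.
    assert!(stack.enter(a).is_err());
}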
@@ -1253,6 +1269,13 @@ impl<'a> TrampolineCompiler<'a> { &[callee_vmctx, caller_vmctx, rep], ); } + + if emit_exit { + let call = self.call_libcall(vmctx, host::exit_sync_call, &[vmctx]); + let result = self.builder.func.dfg.inst_results(call).get(0).copied(); + self.raise_if_host_trapped(result.unwrap()); + } + self.builder.ins().jump(return_block, &[]); self.builder.seal_block(run_destructor_block); diff --git a/crates/environ/src/component.rs b/crates/environ/src/component.rs index e68c357b11c7..d5795bdfa118 100644 --- a/crates/environ/src/component.rs +++ b/crates/environ/src/component.rs @@ -99,6 +99,9 @@ macro_rules! foreach_builtin_component_function { resource_enter_call(vmctx: vmctx); resource_exit_call(vmctx: vmctx) -> bool; + enter_sync_call(vmctx: vmctx, caller_instance: u32, callee_async: u32, callee_instance: u32) -> bool; + exit_sync_call(vmctx: vmctx) -> bool; + #[cfg(feature = "component-model-async")] backpressure_modify(vmctx: vmctx, caller_instance: u32, increment: u8) -> bool; #[cfg(feature = "component-model-async")] @@ -185,8 +188,6 @@ macro_rules! foreach_builtin_component_function { #[cfg(feature = "component-model-async")] error_context_transfer(vmctx: vmctx, src_idx: u32, src_table: u32, dst_table: u32) -> u64; #[cfg(feature = "component-model-async")] - check_blocking(vmctx: vmctx) -> bool; - #[cfg(feature = "component-model-async")] context_get(vmctx: vmctx, caller_instance: u32, slot: u32) -> u64; #[cfg(feature = "component-model-async")] context_set(vmctx: vmctx, caller_instance: u32, slot: u32, val: u32) -> bool; diff --git a/crates/environ/src/component/dfg.rs b/crates/environ/src/component/dfg.rs index 9bf35ec7e942..aa126d79a2c9 100644 --- a/crates/environ/src/component/dfg.rs +++ b/crates/environ/src/component/dfg.rs @@ -267,6 +267,7 @@ pub enum CoreDef { InstanceFlags(RuntimeComponentInstanceIndex), Trampoline(TrampolineIndex), UnsafeIntrinsic(ModuleInternedTypeIndex, UnsafeIntrinsic), + TaskMayBlock, /// This is a special variant not present in `info::CoreDef` which /// represents that this definition refers to a fused adapter function. This @@ -475,8 +476,9 @@ pub enum Trampoline { FutureTransfer, StreamTransfer, ErrorContextTransfer, - CheckBlocking, Trap, + EnterSyncCall, + ExitSyncCall, ContextGet { instance: RuntimeComponentInstanceIndex, slot: u32, @@ -905,6 +907,7 @@ impl LinearizeDfg<'_> { } info::CoreDef::UnsafeIntrinsic(*i) } + CoreDef::TaskMayBlock => info::CoreDef::TaskMayBlock, } } @@ -1156,8 +1159,9 @@ impl LinearizeDfg<'_> { Trampoline::FutureTransfer => info::Trampoline::FutureTransfer, Trampoline::StreamTransfer => info::Trampoline::StreamTransfer, Trampoline::ErrorContextTransfer => info::Trampoline::ErrorContextTransfer, - Trampoline::CheckBlocking => info::Trampoline::CheckBlocking, Trampoline::Trap => info::Trampoline::Trap, + Trampoline::EnterSyncCall => info::Trampoline::EnterSyncCall, + Trampoline::ExitSyncCall => info::Trampoline::ExitSyncCall, Trampoline::ContextGet { instance, slot } => info::Trampoline::ContextGet { instance: *instance, slot: *slot, diff --git a/crates/environ/src/component/info.rs b/crates/environ/src/component/info.rs index 6be69584fc36..75109d946572 100644 --- a/crates/environ/src/component/info.rs +++ b/crates/environ/src/component/info.rs @@ -389,6 +389,10 @@ pub enum CoreDef { Trampoline(TrampolineIndex), /// An intrinsic for compile-time builtins. 
UnsafeIntrinsic(UnsafeIntrinsic), + /// Reference to a wasm global which represents a runtime-managed boolean + /// indicating whether the currently-running task may perform a blocking + /// operation. + TaskMayBlock, } impl From> for CoreDef @@ -1105,9 +1109,13 @@ pub enum Trampoline { /// component does not invalidate the handle in the original component. ErrorContextTransfer, - /// An intrinsic used by FACT-generated modules to check whether an - /// async-typed function may be called via a sync lower. - CheckBlocking, + /// An intrinsic used by FACT-generated modules to check whether an instance + /// may be entered for a sync-to-sync call and push a task onto the stack if + /// so. + EnterSyncCall, + /// An intrinsic used by FACT-generated modules to pop the task previously + /// pushed by `EnterSyncCall`. + ExitSyncCall, /// An intrinsic used by FACT-generated modules to trap with a specified /// code. @@ -1242,8 +1250,9 @@ impl Trampoline { FutureTransfer => format!("future-transfer"), StreamTransfer => format!("stream-transfer"), ErrorContextTransfer => format!("error-context-transfer"), - CheckBlocking => format!("check-blocking"), Trap => format!("trap"), + EnterSyncCall => format!("enter-sync-call"), + ExitSyncCall => format!("exit-sync-call"), ContextGet { .. } => format!("context-get"), ContextSet { .. } => format!("context-set"), ThreadIndex => format!("thread-index"), diff --git a/crates/environ/src/component/translate.rs b/crates/environ/src/component/translate.rs index 841c90a87e0b..be3f4d421659 100644 --- a/crates/environ/src/component/translate.rs +++ b/crates/environ/src/component/translate.rs @@ -79,6 +79,14 @@ pub struct Translator<'a, 'data> { /// The top-level import name for Wasmtime's unsafe intrinsics, if any. unsafe_intrinsics_import: Option<&'a str>, + + /// Whether this component contains any module instantiations outside of + /// leaf components. + /// + /// We'll use this when generating fused adapters; if it's false, then we + /// know that no guest-to-guest call can reenter a runtime instance + /// recursively. + has_nonleaf_module_instantiations: bool, } /// Representation of the syntactic scope of a component meaning where it is @@ -171,6 +179,14 @@ struct Translation<'data> { /// component has finished, e.g. for the `inline` pass, but beforehand this /// is set to `None`. types: Option, + + /// Whether we've encountered a module instantiation while parsing this + /// component (disregarding instantiations in subcomponents). + saw_module_instantiation: bool, + + /// Whether we've encountered a component instantation while parsing this + /// component (disregarding instantiations in subcomponents). 
+ saw_component_instantiation: bool, } // NB: the type information contained in `LocalInitializer` should always point @@ -455,6 +471,7 @@ impl<'a, 'data> Translator<'a, 'data> { static_modules: Default::default(), scope_vec, unsafe_intrinsics_import: None, + has_nonleaf_module_instantiations: false, } } @@ -606,6 +623,7 @@ impl<'a, 'data> Translator<'a, 'data> { let known_func = match arg { CoreDef::InstanceFlags(_) => unreachable!("instance flags are not a function"), + CoreDef::TaskMayBlock => unreachable!("task_may_block is not a function"), // We could in theory inline these trampolines, so it could // potentially make sense to record that we know this @@ -694,6 +712,9 @@ impl<'a, 'data> Translator<'a, 'data> { assert!(self.result.types.is_none()); self.result.types = Some(self.validator.end(offset)?); + self.has_nonleaf_module_instantiations |= + self.result.saw_module_instantiation && self.result.saw_component_instantiation; + // Exit the current lexical scope. If there is no parent (no // frame currently on the stack) then translation is finished. // Otherwise that means that a nested component has been @@ -1251,6 +1272,7 @@ impl<'a, 'data> Translator<'a, 'data> { // largely just records the arguments given from wasmparser into a // `HashMap` for processing later during inlining. Payload::InstanceSection(s) => { + self.result.saw_module_instantiation = true; self.validator.instance_section(&s)?; for instance in s { let init = match instance? { @@ -1266,6 +1288,7 @@ impl<'a, 'data> Translator<'a, 'data> { } } Payload::ComponentInstanceSection(s) => { + self.result.saw_component_instantiation = true; let mut index = self.validator.types(0).unwrap().component_instance_count(); self.validator.component_instance_section(&s)?; for instance in s { diff --git a/crates/environ/src/component/translate/adapt.rs b/crates/environ/src/component/translate/adapt.rs index 75758d2558c9..a32faf3e1ec8 100644 --- a/crates/environ/src/component/translate/adapt.rs +++ b/crates/environ/src/component/translate/adapt.rs @@ -205,8 +205,11 @@ impl<'data> Translator<'_, 'data> { // the module using standard core wasm translation, and then fills out // the dfg metadata for each adapter. for (module_id, adapter_module) in state.adapter_modules.iter() { - let mut module = - fact::Module::new(self.types.types(), self.tunables.debug_adapter_modules); + let mut module = fact::Module::new( + self.types.types(), + self.tunables.debug_adapter_modules, + self.has_nonleaf_module_instantiations, + ); let mut names = Vec::with_capacity(adapter_module.adapters.len()); for adapter in adapter_module.adapters.iter() { let name = format!("adapter{}", adapter.as_u32()); @@ -345,8 +348,9 @@ fn fact_import_to_core_def( fact::Import::ErrorContextTransfer => { simple_intrinsic(dfg::Trampoline::ErrorContextTransfer) } - fact::Import::CheckBlocking => simple_intrinsic(dfg::Trampoline::CheckBlocking), fact::Import::Trap => simple_intrinsic(dfg::Trampoline::Trap), + fact::Import::EnterSyncCall => simple_intrinsic(dfg::Trampoline::EnterSyncCall), + fact::Import::ExitSyncCall => simple_intrinsic(dfg::Trampoline::ExitSyncCall), } } @@ -452,7 +456,8 @@ impl PartitionAdapterModules { // These items can't transitively depend on an adapter dfg::CoreDef::Trampoline(_) | dfg::CoreDef::InstanceFlags(_) - | dfg::CoreDef::UnsafeIntrinsic(..) => {} + | dfg::CoreDef::UnsafeIntrinsic(..) 
+ | dfg::CoreDef::TaskMayBlock => {} } } diff --git a/crates/environ/src/component/translate/inline.rs b/crates/environ/src/component/translate/inline.rs index badbfd8e8d0e..3c646de77f45 100644 --- a/crates/environ/src/component/translate/inline.rs +++ b/crates/environ/src/component/translate/inline.rs @@ -538,11 +538,9 @@ impl<'a> Inliner<'a> { // happening within the same component instance. // // In this situation if the `canon.lower`'d function is - // called then it immediately sets `may_enter` to `false`. - // When calling the callee, however, that's `canon.lift` - // which immediately traps if `may_enter` is `false`. That - // means that this pairing of functions creates a function - // that always traps. + // called then it recursively re-enters itself, which is + // defined to trap by the spec. That means that this pairing + // of functions creates a function that always traps. // // When closely reading the spec though the precise trap // that comes out can be somewhat variable. Technically the @@ -1594,7 +1592,7 @@ impl<'a> Inliner<'a> { } } - /// Translatees an `AdapterOptions` into a `CanonicalOptions` where + /// Translates an `AdapterOptions` into a `CanonicalOptions` where /// memories/functions are inserted into the global initializer list for /// use at runtime. This is only used for lowered host functions and lifted /// functions exported to the host. diff --git a/crates/environ/src/component/vmcomponent_offsets.rs b/crates/environ/src/component/vmcomponent_offsets.rs index bbbb0e79a159..ced14be6254d 100644 --- a/crates/environ/src/component/vmcomponent_offsets.rs +++ b/crates/environ/src/component/vmcomponent_offsets.rs @@ -28,13 +28,9 @@ pub const VMCOMPONENT_MAGIC: u32 = u32::from_le_bytes(*b"comp"); /// canonical ABI flag `may_leave` pub const FLAG_MAY_LEAVE: i32 = 1 << 0; -/// Flag for the `VMComponentContext::flags` field which corresponds to the -/// canonical ABI flag `may_enter` -pub const FLAG_MAY_ENTER: i32 = 1 << 1; - /// Flag for the `VMComponentContext::flags` field which is set whenever a /// function is called to indicate that `post_return` must be called next. -pub const FLAG_NEEDS_POST_RETURN: i32 = 1 << 2; +pub const FLAG_NEEDS_POST_RETURN: i32 = 1 << 1; /// Runtime offsets within a `VMComponentContext` for a specific component. #[derive(Debug, Clone, Copy)] @@ -70,6 +66,7 @@ pub struct VMComponentOffsets
<P>
{ builtins: u32, vm_store_context: u32, flags: u32, + task_may_block: u32, trampoline_func_refs: u32, intrinsic_func_refs: u32, lowerings: u32, @@ -129,6 +126,7 @@ impl VMComponentOffsets
<P>
{ builtins: 0, vm_store_context: 0, flags: 0, + task_may_block: 0, trampoline_func_refs: 0, intrinsic_func_refs: 0, lowerings: 0, @@ -172,6 +170,7 @@ impl VMComponentOffsets
<P>
{ size(vm_store_context) = ret.ptr.size(), align(16), size(flags) = cmul(ret.num_runtime_component_instances, ret.ptr.size_of_vmglobal_definition()), + size(task_may_block) = ret.ptr.size(), align(u32::from(ret.ptr.size())), size(trampoline_func_refs) = cmul(ret.num_trampolines, ret.ptr.size_of_vm_func_ref()), size(intrinsic_func_refs) = cmul(ret.num_unsafe_intrinsics, ret.ptr.size_of_vm_func_ref()), @@ -219,6 +218,11 @@ impl VMComponentOffsets
<P>
{ self.flags + index.as_u32() * u32::from(self.ptr.size_of_vmglobal_definition()) } + /// The offset of the `task_may_block` field. + pub fn task_may_block(&self) -> u32 { + self.task_may_block + } + /// The offset of the `vm_store_context` field. #[inline] pub fn vm_store_context(&self) -> u32 { diff --git a/crates/environ/src/fact.rs b/crates/environ/src/fact.rs index 74da3aba4abf..7ef7f901b5ed 100644 --- a/crates/environ/src/fact.rs +++ b/crates/environ/src/fact.rs @@ -52,6 +52,8 @@ pub static PREPARE_CALL_FIXED_PARAMS: &[ValType] = &[ /// Representation of an adapter module. pub struct Module<'a> { + might_recursively_reenter: bool, + /// Whether or not debug code is inserted into the adapters themselves. debug: bool, /// Type information from the creator of this `Module` @@ -88,7 +90,8 @@ pub struct Module<'a> { imported_stream_transfer: Option, imported_error_context_transfer: Option, - imported_check_blocking: Option, + imported_enter_sync_call: Option, + imported_exit_sync_call: Option, imported_trap: Option, @@ -102,6 +105,8 @@ pub struct Module<'a> { helper_worklist: Vec<(FunctionId, Helper)>, exports: Vec<(u32, String)>, + + task_may_block: Option, } struct AdapterData { @@ -239,8 +244,13 @@ enum HelperLocation { impl<'a> Module<'a> { /// Creates an empty module. - pub fn new(types: &'a ComponentTypesBuilder, debug: bool) -> Module<'a> { + pub fn new( + types: &'a ComponentTypesBuilder, + debug: bool, + might_recursively_reenter: bool, + ) -> Module<'a> { Module { + might_recursively_reenter, debug, types, core_types: Default::default(), @@ -262,9 +272,11 @@ impl<'a> Module<'a> { imported_future_transfer: None, imported_stream_transfer: None, imported_error_context_transfer: None, - imported_check_blocking: None, imported_trap: None, + imported_enter_sync_call: None, + imported_exit_sync_call: None, exports: Vec::new(), + task_may_block: None, } } @@ -460,6 +472,25 @@ impl<'a> Module<'a> { idx } + fn import_task_may_block(&mut self) -> GlobalIndex { + if let Some(task_may_block) = self.task_may_block { + task_may_block + } else { + let task_may_block = self.import_global( + "instance", + "task_may_block", + GlobalType { + val_type: ValType::I32, + mutable: true, + shared: false, + }, + CoreDef::TaskMayBlock, + ); + self.task_may_block = Some(task_may_block); + task_may_block + } + } + fn import_transcoder(&mut self, transcoder: transcode::Transcoder) -> FuncIndex { *self .imported_transcoders @@ -717,14 +748,25 @@ impl<'a> Module<'a> { ) } - fn import_check_blocking(&mut self) -> FuncIndex { + fn import_enter_sync_call(&mut self) -> FuncIndex { + self.import_simple( + "async", + "enter-sync-call", + &[ValType::I32; 3], + &[], + Import::EnterSyncCall, + |me| &mut me.imported_enter_sync_call, + ) + } + + fn import_exit_sync_call(&mut self) -> FuncIndex { self.import_simple( "async", - "check-blocking", + "exit-sync-call", &[], &[], - Import::CheckBlocking, - |me| &mut me.imported_check_blocking, + Import::ExitSyncCall, + |me| &mut me.imported_exit_sync_call, ) } @@ -882,11 +924,15 @@ pub enum Import { /// An intrinisic used by FACT-generated modules to (partially or entirely) transfer /// ownership of an `error-context`. ErrorContextTransfer, - /// An intrinsic used by FACT-generated modules to check whether an - /// async-typed function may be called via a sync lower. - CheckBlocking, /// An intrinsic for trapping the instance with a specific trap code. 
Trap, + /// An intrinsic used by FACT-generated modules to check whether an instance + /// may be entered for a sync-to-sync call and push a task onto the stack if + /// so. + EnterSyncCall, + /// An intrinsic used by FACT-generated modules to pop the task previously + /// pushed by `EnterSyncCall`. + ExitSyncCall, } impl Options { diff --git a/crates/environ/src/fact/trampoline.rs b/crates/environ/src/fact/trampoline.rs index 0411e67f78d5..45d2e10f6709 100644 --- a/crates/environ/src/fact/trampoline.rs +++ b/crates/environ/src/fact/trampoline.rs @@ -16,8 +16,8 @@ //! can be somewhat arbitrary, an intentional decision. use crate::component::{ - CanonicalAbiInfo, ComponentTypesBuilder, FLAG_MAY_ENTER, FLAG_MAY_LEAVE, FixedEncoding as FE, - FlatType, InterfaceType, MAX_FLAT_ASYNC_PARAMS, MAX_FLAT_PARAMS, PREPARE_ASYNC_NO_RESULT, + CanonicalAbiInfo, ComponentTypesBuilder, FLAG_MAY_LEAVE, FixedEncoding as FE, FlatType, + InterfaceType, MAX_FLAT_ASYNC_PARAMS, MAX_FLAT_PARAMS, PREPARE_ASYNC_NO_RESULT, PREPARE_ASYNC_WITH_RESULT, START_FLAG_ASYNC_CALLEE, StringEncoding, Transcode, TypeComponentLocalErrorContextTableIndex, TypeEnumIndex, TypeFlagsIndex, TypeFutureTableIndex, TypeListIndex, TypeOptionIndex, TypeRecordIndex, TypeResourceTableIndex, TypeResultIndex, @@ -738,25 +738,63 @@ impl<'a, 'b> Compiler<'a, 'b> { FLAG_MAY_LEAVE, Trap::CannotLeaveComponent, ); - if adapter.called_as_export { - self.trap_if_not_flag( - adapter.lift.flags, - FLAG_MAY_ENTER, - Trap::CannotEnterComponent, - ); - self.set_flag(adapter.lift.flags, FLAG_MAY_ENTER, false); - } else if self.module.debug { - self.assert_not_flag( - adapter.lift.flags, - FLAG_MAY_ENTER, - Trap::DebugAssertMayEnterUnset, - ); - } - - if self.types[adapter.lift.ty].async_ { - let check_blocking = self.module.import_check_blocking(); - self.instruction(Call(check_blocking.as_u32())); - } + let old_task_may_block = + if adapter.called_as_export && self.module.might_recursively_reenter { + // Here we call a runtime intrinsic to check whether the instance + // we're about to enter has already been entered by this task + // (i.e. whether this is a recursive reentrance). If so, we'll trap + // per the component model specification. Otherwise, we'll push a + // task onto the stack and then later pop it off after the call. + // + // Note that, prior to the introduction of concurrency to the + // component model, we were able to handle this via a per-instance + // variable, allowing us to check and set that variable without + // leaving Wasm code. However, now that there may be more than one + // concurrent task running in a given instance, a per-instance + // variable is not sufficient. + // + // Fortunately, we still have a number of opportunities to optimize + // common cases (e.g. components which contain only sync functions, + // and compositions of fewer than 64 transitive subcomponents, for + // which a per-task `uint64` bitset would suffice) and avoid the + // host call. See the discussion of `trap_if_on_the_stack` in + // https://github.com/WebAssembly/component-model/blob/main/design/mvp/CanonicalABI.md + // for details. + // + // TODO: Implement one or more of those optimizations if + // appropriate, but note that we already have a fast path for + // when `self.module.might_recursively_reenter` is false, which + // is true of all real-world components as of this writing. 
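// [editor sketch] One shape the per-task bitset fast path discussed in the
// comment above could take: with at most 64 transitive subcomponents, a `u64`
// per task can record which instances have been entered and detect recursive
// reentrance without a host call. `ReentranceBits` is an illustrative name,
// not part of this change.
#[derive(Default, Clone, Copy)]
struct ReentranceBits(u64);

impl ReentranceBits {
    /// Try to enter the instance with the given index; `false` means the
    /// instance is already on this task's stack (a would-be trap). Indices of
    /// 64 or more would have to fall back to the host call.
    fn try_enter(&mut self, instance: u32) -> bool {
        assert!(instance < 64, "fall back to `enter_sync_call` above 64");
        let bit = 1u64 << instance;
        if self.0 & bit != 0 {
            return false;
        }
        self.0 |= bit;
        true
    }

    /// Clear the bit set by `try_enter` when the call returns.
    fn exit(&mut self, instance: u32) {
        self.0 &= !(1u64 << instance);
    }
}

fn main() {
    let mut bits = ReentranceBits::default();
    assert!(bits.try_enter(3));
    assert!(!bits.try_enter(3)); // reentrance caught without leaving Wasm
    bits.exit(3);
    assert!(bits.try_enter(3));
}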
+ self.instruction(I32Const( + i32::try_from(adapter.lower.instance.as_u32()).unwrap(), + )); + self.instruction(I32Const(if self.types[adapter.lift.ty].async_ { + 1 + } else { + 0 + })); + self.instruction(I32Const( + i32::try_from(adapter.lift.instance.as_u32()).unwrap(), + )); + let enter_sync_call = self.module.import_enter_sync_call(); + self.instruction(Call(enter_sync_call.as_u32())); + None + } else if self.types[adapter.lift.ty].async_ { + let task_may_block = self.module.import_task_may_block(); + self.instruction(GlobalGet(task_may_block.as_u32())); + self.instruction(I32Eqz); + self.instruction(If(BlockType::Empty)); + self.trap(Trap::CannotBlockSyncTask); + self.instruction(End); + None + } else { + let task_may_block = self.module.import_task_may_block(); + self.instruction(GlobalGet(task_may_block.as_u32())); + let old_task_may_block = self.local_set_new_tmp(ValType::I32); + self.instruction(I32Const(0)); + self.instruction(GlobalSet(task_may_block.as_u32())); + Some(old_task_may_block) + }; if self.emit_resource_call { let enter = self.module.import_resource_enter_call(); @@ -818,9 +856,6 @@ impl<'a, 'b> Compiler<'a, 'b> { } self.instruction(Call(func.as_u32())); } - if adapter.called_as_export { - self.set_flag(adapter.lift.flags, FLAG_MAY_ENTER, true); - } for tmp in temps { self.free_temp_local(tmp); @@ -831,6 +866,16 @@ impl<'a, 'b> Compiler<'a, 'b> { self.instruction(Call(exit.as_u32())); } + if adapter.called_as_export && self.module.might_recursively_reenter { + let exit_sync_call = self.module.import_exit_sync_call(); + self.instruction(Call(exit_sync_call.as_u32())); + } else if let Some(old_task_may_block) = old_task_may_block { + let task_may_block = self.module.import_task_may_block(); + self.instruction(LocalGet(old_task_may_block.idx)); + self.instruction(GlobalSet(task_may_block.as_u32())); + self.free_temp_local(old_task_may_block); + } + self.finish() } @@ -3268,15 +3313,6 @@ impl<'a, 'b> Compiler<'a, 'b> { self.instruction(End); } - fn assert_not_flag(&mut self, flags_global: GlobalIndex, flag_to_test: i32, trap: Trap) { - self.instruction(GlobalGet(flags_global.as_u32())); - self.instruction(I32Const(flag_to_test)); - self.instruction(I32And); - self.instruction(If(BlockType::Empty)); - self.trap(trap); - self.instruction(End); - } - fn set_flag(&mut self, flags_global: GlobalIndex, flag_to_set: i32, value: bool) { self.instruction(GlobalGet(flags_global.as_u32())); if value { diff --git a/crates/environ/src/trap_encoding.rs b/crates/environ/src/trap_encoding.rs index 95bc95c70407..56b0ff3cb8a3 100644 --- a/crates/environ/src/trap_encoding.rs +++ b/crates/environ/src/trap_encoding.rs @@ -128,13 +128,6 @@ pub enum Trap { /// encoding operation. DebugAssertEqualCodeUnits, - /// Debug assertion generated for a fused adapter regarding the expected - /// value of the `may_enter` flag for an instance. - /// - /// TODO: Remove this once - /// https://github.com/bytecodealliance/wasmtime/pull/12153 has been merged. - DebugAssertMayEnterUnset, - /// Debug assertion generated for a fused adapter regarding the alignment of /// a pointer. 
DebugAssertPointerAligned, @@ -201,7 +194,6 @@ impl Trap { InvalidChar DebugAssertStringEncodingFinished DebugAssertEqualCodeUnits - DebugAssertMayEnterUnset DebugAssertPointerAligned DebugAssertUpperBitsUnset StringOutOfBounds @@ -248,7 +240,6 @@ impl fmt::Display for Trap { InvalidChar => "invalid `char` bit pattern", DebugAssertStringEncodingFinished => "should have finished string encoding", DebugAssertEqualCodeUnits => "code units should be equal", - DebugAssertMayEnterUnset => "`may_enter` flag should be unset", DebugAssertPointerAligned => "pointer should be aligned", DebugAssertUpperBitsUnset => "upper bits should be unset", StringOutOfBounds => "string content out-of-bounds", diff --git a/crates/wasmtime/src/runtime/component/concurrent.rs b/crates/wasmtime/src/runtime/component/concurrent.rs index bd1a8b6f74dd..1e94c6ac827f 100644 --- a/crates/wasmtime/src/runtime/component/concurrent.rs +++ b/crates/wasmtime/src/runtime/component/concurrent.rs @@ -54,11 +54,12 @@ use crate::component::func::{self, Func}; use crate::component::store::StoreComponentInstanceId; use crate::component::{ ComponentInstanceId, HasData, HasSelf, Instance, Resource, ResourceTable, ResourceTableError, + RuntimeInstance, }; use crate::fiber::{self, StoreFiber, StoreFiberYield}; use crate::store::{Store, StoreId, StoreInner, StoreOpaque, StoreToken}; use crate::vm::component::{ - CallContext, ComponentInstance, HandleTable, InstanceFlags, ResourceTables, + CallContext, ComponentInstance, HandleTable, ResourceTables, TaskMayBlock, }; use crate::vm::{AlwaysMut, SendSyncPtr, VMFuncRef, VMMemoryDefinition, VMStore}; use crate::{AsContext, AsContextMut, FuncType, StoreContext, StoreContextMut, ValRaw, ValType}; @@ -93,6 +94,7 @@ use wasmtime_environ::component::{ TypeComponentGlobalErrorContextTableIndex, TypeComponentLocalErrorContextTableIndex, TypeFuncIndex, TypeFutureTableIndex, TypeStreamTableIndex, TypeTupleIndex, }; +use wasmtime_environ::packed_option::ReservedValue; pub use abort::JoinHandle; pub use future_stream_any::{FutureAny, StreamAny}; @@ -704,12 +706,6 @@ pub(crate) enum WaitResult { Completed, } -/// Raise a trap if the calling task is synchronous and trying to block prior to -/// returning a value. -pub(crate) fn check_blocking(store: &mut dyn VMStore) -> Result<()> { - store.concurrent_state_mut().check_blocking() -} - /// Poll the specified future until it completes on behalf of a guest->host call /// using a sync-lowered import. 
/// @@ -720,7 +716,7 @@ pub(crate) fn check_blocking(store: &mut dyn VMStore) -> Result<()> { pub(crate) fn poll_and_block( store: &mut dyn VMStore, future: impl Future> + Send + 'static, - caller_instance: RuntimeComponentInstanceIndex, + caller_instance: RuntimeInstance, ) -> Result { let state = store.concurrent_state_mut(); @@ -835,7 +831,7 @@ fn handle_guest_call(store: &mut dyn VMStore, call: GuestCall) -> Result<()> { call.thread, ); - let old_thread = state.guest_thread.replace(call.thread); + let old_thread = store.set_thread(Some(call.thread)); log::trace!( "GuestCallKind::DeliverEvent: replaced {old_thread:?} with {:?} as current thread", call.thread @@ -852,7 +848,7 @@ fn handle_guest_call(store: &mut dyn VMStore, call: GuestCall) -> Result<()> { .take() .unwrap(); - let code = callback(store, runtime_instance.index, event, handle)?; + let code = callback(store, event, handle)?; store .concurrent_state_mut() @@ -863,14 +859,14 @@ fn handle_guest_call(store: &mut dyn VMStore, call: GuestCall) -> Result<()> { store.maybe_pop_call_context(call.thread.task)?; + store.set_thread(old_thread); + next = instance.handle_callback_code( store, call.thread, runtime_instance.index, code, )?; - - store.concurrent_state_mut().guest_thread = old_thread; log::trace!( "GuestCallKind::DeliverEvent: restored {old_thread:?} as current thread" ); @@ -1385,12 +1381,6 @@ impl StoreContextMut<'_, T> { } } -#[derive(Debug, Copy, Clone)] -pub struct RuntimeInstance { - pub instance: ComponentInstanceId, - pub index: RuntimeComponentInstanceIndex, -} - impl StoreOpaque { /// Helper function to retrieve the `ConcurrentInstanceState` for the /// specified instance. @@ -1398,7 +1388,6 @@ impl StoreOpaque { StoreComponentInstanceId::new(self.id(), instance.instance) .get_mut(self) .instance_state(instance.index) - .unwrap() .concurrent_state() } @@ -1408,10 +1397,283 @@ impl StoreOpaque { StoreComponentInstanceId::new(self.id(), instance.instance) .get_mut(self) .instance_state(instance.index) - .unwrap() .handle_table() } + fn task_may_block(&mut self, instance: ComponentInstanceId) -> TaskMayBlock { + StoreComponentInstanceId::new(self.id(), instance) + .get_mut(self) + .task_may_block() + } + + /// Set the global variable representing whether the current task may block + /// prior to entering Wasm code. 
+ fn set_task_may_block(&mut self) { + let state = self.concurrent_state_mut(); + let guest_thread = state.guest_thread.unwrap(); + let instance = state.get_mut(guest_thread.task).unwrap().instance.instance; + let mut task_may_block = self.task_may_block(instance); + let may_block = self.concurrent_state_mut().may_block(guest_thread.task); + unsafe { task_may_block.set(may_block) } + } + + fn set_thread(&mut self, thread: Option) -> Option { + let state = self.concurrent_state_mut(); + let old_thread = state.guest_thread.take(); + if let Some(old_thread) = old_thread { + let instance = state.get_mut(old_thread.task).unwrap().instance.instance; + let mut task_may_block = self.task_may_block(instance); + unsafe { task_may_block.set(false) } + } + + self.concurrent_state_mut().guest_thread = thread; + + if thread.is_some() { + self.set_task_may_block(); + } + + old_thread + } + + pub(crate) fn check_blocking(&mut self) -> Result<()> { + let state = self.concurrent_state_mut(); + let task = state.guest_thread.unwrap().task; + let instance = state.get_mut(task).unwrap().instance.instance; + let task_may_block = self.task_may_block(instance); + if unsafe { task_may_block.get() } { + Ok(()) + } else { + Err(Trap::CannotBlockSyncTask.into()) + } + } + + /// Push a `GuestTask` onto the task stack for a sync-to-sync, + /// guest-to-guest call. + /// + /// This task will only be used for the purpose of checking for recursive + /// reentrance; both parameter lowering and result lifting are assumed to be + /// taken care of elsewhere. + fn push_sync_task( + &mut self, + caller: Option, + callee_async: bool, + callee: RuntimeInstance, + ) -> Result<()> { + let state = self.concurrent_state_mut(); + let caller_thread = state.guest_thread; + let task = GuestTask::new( + state, + Box::new(move |_, _| unreachable!()), + LiftResult { + lift: Box::new(move |_, _| unreachable!()), + ty: TypeTupleIndex::reserved_value(), + memory: None, + string_encoding: StringEncoding::Utf8, + }, + if let Some(thread) = caller_thread { + Caller::Guest { + thread, + instance: caller.unwrap(), + } + } else { + Caller::Host { + tx: None, + exit_tx: Arc::new(oneshot::channel().0), + host_future_present: false, + call_post_return_automatically: false, + caller: None, + } + }, + None, + callee, + callee_async, + )?; + + let guest_task = state.push(task)?; + let new_thread = GuestThread::new_implicit(guest_task); + let guest_thread = state.push(new_thread)?; + state.get_mut(guest_task)?.threads.insert(guest_thread); + + if let Some(thread) = caller_thread { + state.get_mut(thread.task)?.subtasks.insert(guest_task); + } + + self.set_thread(Some(QualifiedThreadId { + task: guest_task, + thread: guest_thread, + })); + + Ok(()) + } + + fn pop_sync_task(&mut self) -> Result<()> { + let thread = self.set_thread(None).unwrap(); + let state = self.concurrent_state_mut(); + state.delete(thread.thread)?; + let task = state.get_mut(thread.task)?; + match &task.caller { + &Caller::Guest { + thread: caller_thread, + .. + } => { + task.threads.remove(&thread.thread); + if task.ready_to_delete() { + state.delete(thread.task)?.dispose(state, thread.task)?; + } + let caller = state.get_mut(caller_thread.task)?; + if let Caller::Host { .. } = &caller.caller + && let Some(lift_result) = &caller.lift_result + && lift_result.ty == TypeTupleIndex::reserved_value() + { + // The caller was lazily pushed by `enter_sync_call`, so we + // pop it, also, leaving `state.guest_thread` set to `None` + // as it was originally. 
+ state.delete(caller_thread.thread)?; + let mut caller = state.delete(caller_thread.task)?; + caller.threads.remove(&caller_thread.thread); + caller.dispose(state, caller_thread.task)?; + } else { + self.set_thread(Some(caller_thread)); + } + } + Caller::Host { .. } => unreachable!(), + } + Ok(()) + } + + /// Check for recursive reentrance during a sync-to-sync, guest-to-guest + /// call, and push a guest task on the stack to track future recursion. + /// + /// Note that this may push _two_ tasks on the stack -- one for the caller + /// and another for the callee -- if called during instantiation or via + /// `[Typed]Func::call`, neither of which will have pushed a task for the + /// caller ahead of time. + pub(crate) fn enter_sync_call( + &mut self, + caller: RuntimeInstance, + callee_async: bool, + callee: RuntimeInstance, + ) -> Result<()> { + if self.concurrent_state_mut().guest_thread.is_none() { + // If `self.guest_thread.is_none()`, we're being called from a task + // created either as part of instantiation or using + // `[Typed]Func::call`. In those cases, a task will not already + // have been created, so we lazily create it here. + // + // We'll pop this (along with the callee task) in the corresponding + // call to `exit_sync_call`. + self.push_sync_task(None, false, caller)?; + } + + if callee_async { + self.check_blocking()?; + } + + if self + .concurrent_state_mut() + .may_enter_from_guest(caller, callee) + { + self.push_sync_task(Some(caller), callee_async, callee) + } else { + Err(anyhow!(crate::Trap::CannotEnterComponent)) + } + } + + /// Pop the task(s) pushed by the corresponding call to `enter_sync_call`. + pub(crate) fn exit_sync_call(&mut self) -> Result<()> { + self.pop_sync_task() + } + + /// Determine whether the specified instance may be entered from the host. + /// + /// We return `true` here only if all of the following hold: + /// + /// - The top-level instance is not already on the current task's call stack. + /// - The instance is not in need of a post-return function call. + /// - `self` has not been poisoned due to a trap. + /// + /// See also [ConcurrentState::may_enter_from_guest], which may be used for + /// guest-to-guest calls. + pub(crate) fn may_enter(&mut self, instance: RuntimeInstance) -> bool { + let state = self.concurrent_state_mut(); + if let Some(caller) = state.guest_thread { + instance != state.get_mut(caller.task).unwrap().instance + && self.may_enter_from_caller(caller.task, instance) + } else { + self.may_enter_at_all(instance) + } + } + + fn may_enter_at_all(&self, instance: RuntimeInstance) -> bool { + if self.trapped() { + return false; + } + + let flags = StoreComponentInstanceId::new(self.id(), instance.instance) + .get(self) + .instance_flags(instance.index); + + unsafe { !flags.needs_post_return() } + } + + /// Variation of `may_enter` which takes a `TableId` representing + /// the callee. + fn may_enter_task(&mut self, task: TableId) -> bool { + let instance = self.concurrent_state_mut().get_mut(task).unwrap().instance; + self.may_enter_from_caller(task, instance) + } + + /// Variation of `may_enter` which takes a `TableId` representing + /// the caller, plus a `RuntimeInstance` representing the callee. 
+ fn may_enter_from_caller( + &mut self, + mut guest_task: TableId, + instance: RuntimeInstance, + ) -> bool { + self.may_enter_at_all(instance) && { + let state = self.concurrent_state_mut(); + let guest_instance = instance.instance; + loop { + // Note that, unlike in + // `ConcurrentState::may_enter_from_guest_caller` we only + // compare top-level instance IDs here. The idea is that the + // host is not allowed to recursively enter a top-level instance + // even if the specific leaf instance is not on the stack. + // + // That's important because the stack we walk here might be + // missing frames (and thus might not mention all the leaf + // instances we've actually entered) due to optimizations + // applied when fusing guest-to-guest calls. Those + // optimizations maintain the invariant that every top-level + // instance entered will be represented on this stack even + // though not every leaf instance entered will necessarily be + // represented. + let next_thread = match &state.get_mut(guest_task).unwrap().caller { + Caller::Host { caller: None, .. } => break true, + &Caller::Host { + caller: Some(caller), + .. + } => { + let instance = state.get_mut(caller.task).unwrap().instance; + if instance.instance == guest_instance { + break false; + } else { + caller + } + } + &Caller::Guest { thread, instance } => { + if instance.instance == guest_instance { + break false; + } else { + thread + } + } + }; + guest_task = next_thread.task; + } + } + } + /// Record that we're about to enter a (sub-)component instance which does /// not support more than one concurrent, stackful activation, meaning it /// cannot be entered again until the next call returns. @@ -1477,9 +1739,10 @@ impl StoreOpaque { let fiber = fiber::resolve_or_release(self, fiber).await?; + self.set_thread(old_thread); + let state = self.concurrent_state_mut(); - state.guest_thread = old_thread; if let Some(ref ot) = old_thread { state.get_mut(ot.thread)?.state = GuestThreadState::Running; } @@ -1570,7 +1833,7 @@ impl StoreOpaque { self.with_blocking(|_, cx| cx.suspend(StoreFiberYield::ReleaseStore))?; if let Some(task) = task { - self.concurrent_state_mut().guest_thread = old_guest_thread; + self.set_thread(old_guest_thread); self.maybe_push_call_context(task)?; } @@ -1750,7 +2013,9 @@ impl Instance { if state.may_block(guest_thread.task) { // Push this thread onto the "low priority" queue so it runs // after any other threads have had a chance to run. - state.push_low_priority(WorkItem::GuestCall(call)); + store + .concurrent_state_mut() + .push_low_priority(WorkItem::GuestCall(call)); None } else { // Yielding in a non-blocking context is defined as a no-op @@ -1839,7 +2104,6 @@ impl Instance { callee: SendSyncPtr, param_count: usize, result_count: usize, - flags: Option, async_: bool, callback: Option>, post_return: Option>, @@ -1864,7 +2128,6 @@ impl Instance { callee: SendSyncPtr, param_count: usize, result_count: usize, - flags: Option, ) -> impl FnOnce(&mut dyn VMStore) -> Result<[MaybeUninit; MAX_FLAT_PARAMS]> + Send + Sync @@ -1874,12 +2137,9 @@ impl Instance { move |store: &mut dyn VMStore| { let mut storage = [MaybeUninit::uninit(); MAX_FLAT_PARAMS]; - store - .concurrent_state_mut() - .get_mut(guest_thread.thread)? 
- .state = GuestThreadState::Running; - let task = store.concurrent_state_mut().get_mut(guest_thread.task)?; - let may_enter_after_call = task.call_post_return_automatically(); + let state = store.concurrent_state_mut(); + state.get_mut(guest_thread.thread)?.state = GuestThreadState::Running; + let task = state.get_mut(guest_thread.task).unwrap(); let lower = task.lower_params.take().unwrap(); lower(store, &mut storage[..param_count])?; @@ -1889,9 +2149,6 @@ impl Instance { // SAFETY: Per the contract documented in `make_call's` // documentation, `callee` must be a valid pointer. unsafe { - if let Some(mut flags) = flags { - flags.set_may_enter(false); - } crate::Func::call_unchecked_raw( &mut store, callee.as_non_null(), @@ -1901,9 +2158,6 @@ impl Instance { ) .unwrap(), )?; - if let Some(mut flags) = flags { - flags.set_may_enter(may_enter_after_call); - } } Ok(storage) @@ -1920,7 +2174,6 @@ impl Instance { callee, param_count, result_count, - flags, ) }; @@ -1939,10 +2192,7 @@ impl Instance { store, callee_instance.index, )?; - let old_thread = store - .concurrent_state_mut() - .guest_thread - .replace(guest_thread); + let old_thread = store.set_thread(Some(guest_thread)); log::trace!( "stackless call: replaced {old_thread:?} with {guest_thread:?} as current thread" ); @@ -1963,8 +2213,8 @@ impl Instance { store.maybe_pop_call_context(guest_thread.task)?; + store.set_thread(old_thread); let state = store.concurrent_state_mut(); - state.guest_thread = old_thread; old_thread .map(|t| state.get_mut(t.thread).unwrap().state = GuestThreadState::Running); log::trace!("stackless call: restored {old_thread:?} as current thread"); @@ -1984,14 +2234,10 @@ impl Instance { store, callee_instance.index, )?; - let old_thread = store - .concurrent_state_mut() - .guest_thread - .replace(guest_thread); + let old_thread = store.set_thread(Some(guest_thread)); log::trace!( "sync/async-stackful call: replaced {old_thread:?} with {guest_thread:?} as current thread", ); - let mut flags = self.id().get(store).instance_flags(callee_instance.index); store.maybe_push_call_context(guest_thread.task)?; @@ -2053,6 +2299,8 @@ impl Instance { _ => unreachable!(), }; + let mut flags = self.id().get(store).instance_flags(callee_instance.index); + if store .concurrent_state_mut() .get_mut(guest_thread.task)? @@ -2082,7 +2330,6 @@ impl Instance { unsafe { flags.set_may_leave(true); - flags.set_may_enter(true); } } @@ -2095,6 +2342,8 @@ impl Instance { )?; } + store.set_thread(old_thread); + store.maybe_pop_call_context(guest_thread.task)?; let state = store.concurrent_state_mut(); @@ -2157,7 +2406,7 @@ impl Instance { // A task may only call an async-typed function via a sync lower if // it was created by a call to an async export. Otherwise, we'll // trap. 
- store.0.concurrent_state_mut().check_blocking()?; + store.0.check_blocking()?; } enum ResultInfo { @@ -2195,7 +2444,7 @@ impl Instance { let return_ = SendSyncPtr::new(NonNull::new(return_).unwrap()); let token = StoreToken::new(store.as_context_mut()); let state = store.0.concurrent_state_mut(); - let old_thread = state.guest_thread.take(); + let old_thread = state.guest_thread; let new_task = GuestTask::new( state, Box::new(move |store, dst| { @@ -2294,11 +2543,16 @@ impl Instance { }, Caller::Guest { thread: old_thread.unwrap(), - instance: caller_instance, + instance: RuntimeInstance { + instance: self.id().instance(), + index: caller_instance, + }, }, None, - self, - callee_instance, + RuntimeInstance { + instance: self.id().instance(), + index: callee_instance, + }, callee_async, )?; @@ -2307,9 +2561,9 @@ impl Instance { let guest_thread = state.push(new_thread)?; state.get_mut(guest_task)?.threads.insert(guest_thread); - let state = store.0.concurrent_state_mut(); if let Some(old_thread) = old_thread { - if !state.may_enter(guest_task) { + let state = store.0.concurrent_state_mut(); + if !state.may_enter_task_from_guest(guest_task) { bail!(crate::Trap::CannotEnterComponent); } @@ -2318,10 +2572,10 @@ impl Instance { // Make the new thread the current one so that `Self::start_call` knows // which one to start. - state.guest_thread = Some(QualifiedThreadId { + store.0.set_thread(Some(QualifiedThreadId { task: guest_task, thread: guest_thread, - }); + })); log::trace!( "pushed {guest_task:?}:{guest_thread:?} as current thread; old thread was {old_thread:?}" ); @@ -2336,14 +2590,10 @@ impl Instance { unsafe fn call_callback( self, mut store: StoreContextMut, - callee_instance: RuntimeComponentInstanceIndex, function: SendSyncPtr, event: Event, handle: u32, - may_enter_after_call: bool, ) -> Result { - let mut flags = self.id().get(store.0).instance_flags(callee_instance); - let (ordinal, result) = event.parts(); let params = &mut [ ValRaw::u32(ordinal), @@ -2355,14 +2605,13 @@ impl Instance { // `component::Options`. Per `wasmparser` callback signature // validation, we know it takes three parameters and returns one. unsafe { - flags.set_may_enter(false); crate::Func::call_unchecked_raw( &mut store, function.as_non_null(), params.as_mut_slice().into(), )?; - flags.set_may_enter(may_enter_after_call); } + Ok(params[0].get_u32()) } @@ -2394,9 +2643,6 @@ impl Instance { let state = store.0.concurrent_state_mut(); let guest_thread = state.guest_thread.unwrap(); let callee_async = state.get_mut(guest_thread.task)?.async_function; - let may_enter_after_call = state - .get_mut(guest_thread.task)? - .call_post_return_automatically(); let callee = SendSyncPtr::new(NonNull::new(callee).unwrap()); let param_count = usize::try_from(param_count).unwrap(); assert!(param_count <= MAX_FLAT_PARAMS); @@ -2409,18 +2655,9 @@ impl Instance { // the callback and related context as part of the task so we can // call it later when needed. 
let callback = SendSyncPtr::new(NonNull::new(callback).unwrap()); - task.callback = Some(Box::new(move |store, runtime_instance, event, handle| { + task.callback = Some(Box::new(move |store, event, handle| { let store = token.as_context_mut(store); - unsafe { - self.call_callback::( - store, - runtime_instance, - callback, - event, - handle, - may_enter_after_call, - ) - } + unsafe { self.call_callback::(store, callback, event, handle) } })); } @@ -2436,14 +2673,6 @@ impl Instance { let caller = *caller; let caller_instance = *runtime_instance; - let callee_instance = task.instance; - - let instance_flags = if callback.is_null() { - None - } else { - Some(self.id().get(store.0).instance_flags(callee_instance.index)) - }; - // Queue the call as a "high priority" work item. unsafe { self.queue_call( @@ -2452,7 +2681,6 @@ impl Instance { callee, param_count, result_count, - instance_flags, (flags & START_FLAG_ASYNC_CALLEE) != 0, NonNull::new(callback).map(SendSyncPtr::new), NonNull::new(post_return).map(SendSyncPtr::new), @@ -2516,10 +2744,7 @@ impl Instance { // waitable and return the status. let handle = store .0 - .handle_table(RuntimeInstance { - instance: self.id().instance(), - index: caller_instance, - }) + .handle_table(caller_instance) .subtask_insert_guest(guest_thread.task.rep())?; store .0 @@ -2535,33 +2760,30 @@ impl Instance { } }; - let state = store.0.concurrent_state_mut(); + guest_waitable.join(store.0.concurrent_state_mut(), old_set)?; - guest_waitable.join(state, old_set)?; + // Reset the current thread to point to the caller as it resumes control. + store.0.set_thread(Some(caller)); + store.0.concurrent_state_mut().get_mut(caller.thread)?.state = GuestThreadState::Running; + log::trace!("popped current thread {guest_thread:?}; new thread is {caller:?}"); if let Some(storage) = storage { // The caller used a sync-lowered import to call an async-lifted // export, in which case the result, if any, has been stashed in // `GuestTask::sync_result`. + let state = store.0.concurrent_state_mut(); let task = state.get_mut(guest_thread.task)?; if let Some(result) = task.sync_result.take() { if let Some(result) = result { storage[0] = MaybeUninit::new(result); } - if task.exited { - if task.ready_to_delete() { - Waitable::Guest(guest_thread.task).delete_from(state)?; - } + if task.exited && task.ready_to_delete() { + Waitable::Guest(guest_thread.task).delete_from(state)?; } } } - // Reset the current thread to point to the caller as it resumes control. - state.guest_thread = Some(caller); - state.get_mut(caller.thread)?.state = GuestThreadState::Running; - log::trace!("popped current thread {guest_thread:?}; new thread is {caller:?}"); - Ok(status.pack(waitable)) } @@ -2631,7 +2853,13 @@ impl Instance { // We create a new host task even though it might complete immediately // (in which case we won't need to pass a waitable back to the guest). // If it does complete immediately, we'll remove it before we return. - let task = state.push(HostTask::new(caller_instance, Some(join_handle)))?; + let task = state.push(HostTask::new( + RuntimeInstance { + instance: self.id().instance(), + index: caller_instance, + }, + Some(join_handle), + ))?; log::trace!("new host task child of {caller:?}: {task:?}"); @@ -2763,6 +2991,14 @@ impl Instance { log::trace!("task.return for {guest_thread:?}"); + let instance = store + .concurrent_state_mut() + .get_mut(guest_thread.task)? 
+ .instance + .instance; + + unsafe { store.task_may_block(instance).set(true) }; + let result = (lift.lift)(store, storage)?; self.task_complete( store, @@ -2794,6 +3030,14 @@ impl Instance { log::trace!("task.cancel for {guest_thread:?}"); + let instance = store + .concurrent_state_mut() + .get_mut(guest_thread.task)? + .instance + .instance; + + unsafe { store.task_may_block(instance).set(true) }; + self.task_complete( store, guest_thread.task, @@ -2986,7 +3230,13 @@ impl Instance { // Since waitables can neither be passed between instances nor forged, // this should never fail unless there's a bug in Wasmtime, but we check // here to be sure: - assert_eq!(expected_caller_instance, caller_instance); + assert_eq!( + expected_caller_instance, + RuntimeInstance { + instance: self.id().instance(), + index: caller_instance + } + ); log::trace!("subtask_drop {waitable:?} (handle {task_id})"); Ok(()) } @@ -3006,7 +3256,7 @@ impl Instance { // The caller may only call `waitable-set.wait` from an async task // (i.e. a task created via a call to an async export). // Otherwise, we'll trap. - store.concurrent_state_mut().check_blocking()?; + store.check_blocking()?; } let &CanonicalOptions { @@ -3113,10 +3363,7 @@ impl Instance { let token = StoreToken::new(store.as_context_mut()); let start_func = Box::new( move |store: &mut dyn VMStore, guest_thread: QualifiedThreadId| -> Result<()> { - let old_thread = store - .concurrent_state_mut() - .guest_thread - .replace(guest_thread); + let old_thread = store.set_thread(Some(guest_thread)); log::trace!( "thread start: replaced {old_thread:?} with {guest_thread:?} as current thread" ); @@ -3138,7 +3385,8 @@ impl Instance { if task.threads.is_empty() && !task.returned_or_cancelled() { bail!(Trap::NoAsyncResult); } - state.guest_thread = old_thread; + store.0.set_thread(old_thread); + let state = store.0.concurrent_state_mut(); old_thread .map(|t| state.get_mut(t.thread).unwrap().state = GuestThreadState::Running); if state.get_mut(guest_thread.task)?.ready_to_delete() { @@ -3234,10 +3482,11 @@ impl Instance { self.id().get(store).check_may_leave(caller)?; if to_thread.is_none() { - let state = store.concurrent_state_mut(); if yielding { // This is a `thread.yield` call - if !state.may_block(state.guest_thread.unwrap().task) { + let state = store.concurrent_state_mut(); + let task = state.guest_thread.unwrap().task; + if !state.may_block(task) { // The spec defines `thread.yield` to be a no-op in a // non-blocking context, so we return immediately without giving // any other thread a chance to run. @@ -3247,7 +3496,7 @@ impl Instance { // The caller may only call `thread.suspend` from an async task // (i.e. a task created via a call to an async export). // Otherwise, we'll trap. - state.check_blocking()?; + store.check_blocking()?; } } @@ -3384,7 +3633,7 @@ impl Instance { // The caller may only sync call `subtask.cancel` from an async task // (i.e. a task created via a call to an async export). Otherwise, // we'll trap. 
- store.concurrent_state_mut().check_blocking()?; + store.check_blocking()?; } let (rep, is_host) = store @@ -3412,7 +3661,13 @@ impl Instance { // Since waitables can neither be passed between instances nor forged, // this should never fail unless there's a bug in Wasmtime, but we check // here to be sure: - assert_eq!(expected_caller_instance, caller_instance); + assert_eq!( + expected_caller_instance, + RuntimeInstance { + instance: self.id().instance(), + index: caller_instance + } + ); log::trace!("subtask_cancel {waitable:?} (handle {task_id})"); @@ -4025,15 +4280,12 @@ type HostTaskFuture = Pin> + Send + 'static>> /// Represents the state of a pending host task. struct HostTask { common: WaitableCommon, - caller_instance: RuntimeComponentInstanceIndex, + caller_instance: RuntimeInstance, join_handle: Option, } impl HostTask { - fn new( - caller_instance: RuntimeComponentInstanceIndex, - join_handle: Option, - ) -> Self { + fn new(caller_instance: RuntimeInstance, join_handle: Option) -> Self { Self { common: WaitableCommon::default(), caller_instance, @@ -4048,12 +4300,7 @@ impl TableDebug for HostTask { } } -type CallbackFn = Box< - dyn Fn(&mut dyn VMStore, RuntimeComponentInstanceIndex, Event, u32) -> Result - + Send - + Sync - + 'static, ->; +type CallbackFn = Box Result + Send + Sync + 'static>; /// Represents the caller of a given guest task. enum Caller { @@ -4073,6 +4320,11 @@ enum Caller { host_future_present: bool, /// If true, call `post-return` function (if any) automatically. call_post_return_automatically: bool, + /// If `Some`, represents the `QualifiedThreadId` caller of the host + /// function which called back into a guest. Note that this thread + /// could belong to an entirely unrelated top-level component instance + /// than the one the host called into. + caller: Option, }, /// Another guest thread called the guest task Guest { @@ -4083,7 +4335,7 @@ enum Caller { /// Note that this might not be the same as the instance the caller task /// started executing in given that one or more synchronous guest->guest /// calls may have occurred involving multiple instances. - instance: RuntimeComponentInstanceIndex, + instance: RuntimeInstance, }, } @@ -4323,8 +4575,7 @@ impl GuestTask { lift_result: LiftResult, caller: Caller, callback: Option, - component_instance: Instance, - instance: RuntimeComponentInstanceIndex, + instance: RuntimeInstance, async_function: bool, ) -> Result { let sync_call_set = state.push(WaitableSet::default())?; @@ -4354,10 +4605,7 @@ impl GuestTask { starting_sent: false, subtasks: HashSet::new(), sync_call_set, - instance: RuntimeInstance { - instance: component_instance.id().instance(), - index: instance, - }, + instance, event: None, function_index: None, exited: false, @@ -4406,7 +4654,9 @@ impl GuestTask { }; } } - Caller::Host { exit_tx, .. } => { + Caller::Host { + exit_tx, caller, .. + } => { for subtask in &self.subtasks { state.get_mut(*subtask)?.caller = Caller::Host { tx: None, @@ -4416,13 +4666,15 @@ impl GuestTask { exit_tx: exit_tx.clone(), host_future_present: false, call_post_return_automatically: true, + caller: *caller, }; } } } for subtask in self.subtasks { - if state.get_mut(subtask)?.exited { + let task = state.get_mut(subtask)?; + if task.exited && task.ready_to_delete() { Waitable::Guest(subtask).delete_from(state)?; } } @@ -4859,18 +5111,26 @@ impl ConcurrentState { } } - /// Determine whether the instance associated with the specified guest task - /// may be entered (i.e. is not already on the async call stack). 
+ /// Determine whether the specified instance may be entered from a + /// guest-to-guest call. /// - /// This is an additional check on top of the "may_enter" instance flag; - /// it's needed because async-lifted exports with callback functions must - /// not call their own instances directly or indirectly, and due to the - /// "stackless" nature of callback-enabled guest tasks this may happen even - /// if there are no activation records on the stack (i.e. the "may_enter" - /// field is `true`) for that instance. - fn may_enter(&mut self, mut guest_task: TableId) -> bool { - let guest_instance = self.get_mut(guest_task).unwrap().instance; + /// Note that this should only be used for guest-to-guest calls. For + /// host->guest calls, use [StoreOpaque::may_enter]. + fn may_enter_from_guest(&mut self, caller: RuntimeInstance, callee: RuntimeInstance) -> bool { + let caller_thread = self.guest_thread.unwrap(); + callee != caller && self.may_enter_from_guest_caller(caller_thread.task, callee) + } + fn may_enter_task_from_guest(&mut self, task: TableId) -> bool { + let instance = self.get_mut(task).unwrap().instance; + self.may_enter_from_guest_caller(task, instance) + } + + fn may_enter_from_guest_caller( + &mut self, + mut guest_task: TableId, + guest_instance: RuntimeInstance, + ) -> bool { // Walk the task tree back to the root, looking for potential // reentrance. // @@ -4880,12 +5140,23 @@ impl ConcurrentState { // constant time check. loop { let next_thread = match &self.get_mut(guest_task).unwrap().caller { - Caller::Host { .. } => break true, - Caller::Guest { thread, instance } => { - if *instance == guest_instance.index { + Caller::Host { caller: None, .. } => break true, + &Caller::Host { + caller: Some(caller), + .. + } => { + let instance = self.get_mut(caller.task).unwrap().instance; + if instance == guest_instance { + break false; + } else { + caller + } + } + &Caller::Guest { thread, instance } => { + if instance == guest_instance { break false; } else { - *thread + thread } } }; @@ -4921,11 +5192,6 @@ impl ConcurrentState { } } - fn check_blocking(&mut self) -> Result<()> { - let task = self.guest_thread.unwrap().task; - self.check_blocking_for(task) - } - fn check_blocking_for(&mut self, task: TableId) -> Result<()> { if self.may_block(task) { Ok(()) @@ -4934,8 +5200,8 @@ impl ConcurrentState { } } - fn may_block(&mut self, task: TableId) -> bool { - let task = self.get_mut(task).unwrap(); + fn may_block(&mut self, task_id: TableId) -> bool { + let task = self.get_mut(task_id).unwrap(); task.async_function || task.returned_or_cancelled() } } @@ -5113,6 +5379,7 @@ pub(crate) fn prepare_call( let (tx, rx) = oneshot::channel(); let (exit_tx, exit_rx) = oneshot::channel(); + let caller = state.guest_thread; let mut task = GuestTask::new( state, Box::new(for_any_lower(move |store, params| { @@ -5131,38 +5398,35 @@ pub(crate) fn prepare_call( exit_tx: Arc::new(exit_tx), host_future_present, call_post_return_automatically, + caller, }, callback.map(|callback| { let callback = SendSyncPtr::new(callback); let instance = handle.instance(); - Box::new( - move |store: &mut dyn VMStore, runtime_instance, event, handle| { - let store = token.as_context_mut(store); - // SAFETY: Per the contract of `prepare_call`, the callback - // will remain valid at least as long is this task exists. 
@@ -5113,6 +5379,7 @@ pub(crate) fn prepare_call(
     let (tx, rx) = oneshot::channel();
     let (exit_tx, exit_rx) = oneshot::channel();

+    let caller = state.guest_thread;
     let mut task = GuestTask::new(
         state,
         Box::new(for_any_lower(move |store, params| {
@@ -5131,38 +5398,35 @@ pub(crate) fn prepare_call(
             exit_tx: Arc::new(exit_tx),
             host_future_present,
             call_post_return_automatically,
+            caller,
         },
         callback.map(|callback| {
             let callback = SendSyncPtr::new(callback);
             let instance = handle.instance();
-            Box::new(
-                move |store: &mut dyn VMStore, runtime_instance, event, handle| {
-                    let store = token.as_context_mut(store);
-                    // SAFETY: Per the contract of `prepare_call`, the callback
-                    // will remain valid at least as long is this task exists.
-                    unsafe {
-                        instance.call_callback(
-                            store,
-                            runtime_instance,
-                            callback,
-                            event,
-                            handle,
-                            call_post_return_automatically,
-                        )
-                    }
-                },
-            ) as CallbackFn
+            Box::new(move |store: &mut dyn VMStore, event, handle| {
+                let store = token.as_context_mut(store);
+                // SAFETY: Per the contract of `prepare_call`, the callback
+                // will remain valid at least as long as this task exists.
+                unsafe { instance.call_callback(store, callback, event, handle) }
+            }) as CallbackFn
         }),
-        handle.instance(),
-        component_instance,
+        RuntimeInstance {
+            instance: handle.instance().id().instance(),
+            index: component_instance,
+        },
         async_function,
     )?;
     task.function_index = Some(handle.index());
     let task = state.push(task)?;
+
     let thread = state.push(GuestThread::new_implicit(task))?;
     state.get_mut(task)?.threads.insert(thread);

+    if !store.0.may_enter_task(task) {
+        bail!(crate::Trap::CannotEnterComponent);
+    }
+
     Ok(PreparedCall {
         handle,
         thread: QualifiedThreadId { task, thread },
@@ -5212,7 +5476,7 @@ fn queue_call0(
     guest_thread: QualifiedThreadId,
     param_count: usize,
 ) -> Result<()> {
-    let (_options, flags, _ty, raw_options) = handle.abi_info(store.0);
+    let (_options, _flags, _ty, raw_options) = handle.abi_info(store.0);
     let is_concurrent = raw_options.async_;
     let callback = raw_options.callback;
     let instance = handle.instance();
@@ -5225,12 +5489,6 @@ fn queue_call0(

     log::trace!("queueing call {guest_thread:?}");

-    let instance_flags = if callback.is_none() {
-        None
-    } else {
-        Some(flags)
-    };
-
     // SAFETY: `callee`, `callback`, and `post_return` are valid pointers
     // (with signatures appropriate for this call) and will remain valid as
     // long as this instance is valid.
@@ -5241,7 +5499,6 @@ fn queue_call0(
         SendSyncPtr::new(callee),
         param_count,
         1,
-        instance_flags,
         is_concurrent,
         callback,
         post_return.map(SendSyncPtr::new),
diff --git a/crates/wasmtime/src/runtime/component/concurrent/futures_and_streams.rs b/crates/wasmtime/src/runtime/component/concurrent/futures_and_streams.rs
index 9f59bb89927b..a194717ee408 100644
--- a/crates/wasmtime/src/runtime/component/concurrent/futures_and_streams.rs
+++ b/crates/wasmtime/src/runtime/component/concurrent/futures_and_streams.rs
@@ -3236,7 +3236,7 @@ impl Instance {
             // The caller may only sync call `{stream,future}.write` from an
             // async task (i.e. a task created via a call to an async export).
             // Otherwise, we'll trap.
-            store.0.concurrent_state_mut().check_blocking()?;
+            store.0.check_blocking()?;
         }

         let address = usize::try_from(address).unwrap();
@@ -3471,7 +3471,7 @@ impl Instance {
             // The caller may only sync call `{stream,future}.read` from an
             // async task (i.e. a task created via a call to an async export).
             // Otherwise, we'll trap.
-            store.0.concurrent_state_mut().check_blocking()?;
+            store.0.check_blocking()?;
         }

         let address = usize::try_from(address).unwrap();
@@ -3857,7 +3857,7 @@ impl Instance {
             // The caller may only sync call `{stream,future}.cancel-write` from
             // an async task (i.e. a task created via a call to an async
             // export). Otherwise, we'll trap.
-            store.concurrent_state_mut().check_blocking()?;
+            store.check_blocking()?;
         }

         let (rep, state) =
@@ -3898,7 +3898,7 @@ impl Instance {
             // The caller may only sync call `{stream,future}.cancel-read` from
             // an async task (i.e. a task created via a call to an async
            // export). Otherwise, we'll trap.
-            store.concurrent_state_mut().check_blocking()?;
+            store.check_blocking()?;
         }

         let (rep, state) =
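[Editor's note: the next file shows the fallback used when `component-model-async` is disabled, where reentrance tracking reduces to a stack of instance identifiers. A hedged sketch of that stack discipline, with `u32` standing in for `RuntimeInstance`:]

struct EnterStack {
    tasks: Vec<u32>, // u32 stands in for RuntimeInstance
}

impl EnterStack {
    fn enter_sync_call(&mut self, caller: u32, callee: u32) -> Result<(), &'static str> {
        if self.tasks.is_empty() {
            // Lazily record the root caller; it is popped again in
            // exit_sync_call once the outermost callee returns.
            self.tasks.push(caller);
        }
        if self.tasks.contains(&callee) {
            return Err("cannot enter component instance"); // reentrance: trap
        }
        self.tasks.push(callee);
        Ok(())
    }

    fn exit_sync_call(&mut self) {
        self.tasks.pop();
        if self.tasks.len() == 1 {
            self.tasks.pop(); // also drop the lazily-pushed root caller
        }
    }
}

[A linear `contains` scan is fine here because the stack depth is bounded by the number of component instances on the synchronous call path.]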
diff --git a/crates/wasmtime/src/runtime/component/concurrent_disabled.rs b/crates/wasmtime/src/runtime/component/concurrent_disabled.rs
index 4cc35f93f48a..bb727defbf11 100644
--- a/crates/wasmtime/src/runtime/component/concurrent_disabled.rs
+++ b/crates/wasmtime/src/runtime/component/concurrent_disabled.rs
@@ -1,14 +1,56 @@
 use crate::component::func::{LiftContext, LowerContext};
 use crate::component::matching::InstanceType;
-use crate::component::{ComponentType, Lift, Lower, Val};
-use crate::runtime::vm::VMStore;
+use crate::component::store::StoreComponentInstanceId;
+use crate::component::{ComponentType, Lift, Lower, RuntimeInstance, Val};
+use crate::store::StoreOpaque;
+use alloc::vec::Vec;
 use anyhow::{Result, anyhow, bail};
 use core::convert::Infallible;
 use core::mem::MaybeUninit;
 use wasmtime_environ::component::{CanonicalAbiInfo, InterfaceType};

 #[derive(Default)]
-pub struct ConcurrentState;
+pub struct ConcurrentState {
+    tasks: Vec<RuntimeInstance>,
+}
+
+impl ConcurrentState {
+    fn enter_sync_call(
+        &mut self,
+        caller: RuntimeInstance,
+        _callee_async: bool,
+        callee: RuntimeInstance,
+    ) -> Result<()> {
+        if self.tasks.is_empty() {
+            // In this case, lazily create the root task since the host->guest
+            // call will not have done so.
+            //
+            // We'll pop this (along with the callee task) in the corresponding
+            // call to `exit_sync_call`.
+            self.tasks.push(caller);
+        }
+
+        if self.may_enter_from_guest(callee) {
+            self.tasks.push(callee);
+            Ok(())
+        } else {
+            Err(anyhow!(crate::Trap::CannotEnterComponent))
+        }
+    }
+
+    fn exit_sync_call(&mut self) -> Result<()> {
+        _ = self.tasks.pop().unwrap();
+        if self.tasks.len() == 1 {
+            // Also pop the lazily-created caller task:
+            _ = self.tasks.pop().unwrap();
+        }
+        Ok(())
+    }
+
+    fn may_enter_from_guest(&self, instance: RuntimeInstance) -> bool {
+        !self.tasks.contains(&instance)
+    }
+}

 fn should_have_failed_validation<T>(what: &str) -> Result<T> {
     // This should be unreachable; if we trap here, it indicates a
@@ -19,10 +61,6 @@ fn should_have_failed_validation<T>(what: &str) -> Result<T> {
     ))
 }

-pub(crate) fn check_blocking(_: &mut dyn VMStore) -> Result<()> {
-    Ok(())
-}
-
 pub(crate) fn lower_error_context_to_index<U>(
     _rep: u32,
     _cx: &mut LowerContext<'_, U>,
@@ -154,3 +192,39 @@ unsafe impl Lower for StreamAny {
         match self.0 {}
     }
 }
+
+impl StoreOpaque {
+    pub(crate) fn check_blocking(&mut self) -> Result<()> {
+        Ok(())
+    }
+
+    pub(crate) fn enter_sync_call(
+        &mut self,
+        caller: RuntimeInstance,
+        callee_async: bool,
+        callee: RuntimeInstance,
+    ) -> Result<()> {
+        self.concurrent_state_mut()
+            .enter_sync_call(caller, callee_async, callee)
+    }
+
+    pub(crate) fn exit_sync_call(&mut self) -> Result<()> {
+        self.concurrent_state_mut().exit_sync_call()
+    }
+
+    pub(crate) fn may_enter(&mut self, instance: RuntimeInstance) -> bool {
+        if self.trapped() {
+            return false;
+        }
+
+        let flags = StoreComponentInstanceId::new(self.id(), instance.instance)
+            .get(self)
+            .instance_flags(instance.index);
+
+        if unsafe { flags.needs_post_return() } {
+            return false;
+        }
+
+        self.concurrent_state_mut().may_enter_from_guest(instance)
+    }
+}
diff --git a/crates/wasmtime/src/runtime/component/func.rs b/crates/wasmtime/src/runtime/component/func.rs
index 4b2ead464594..1339ce3b6272 100644
--- a/crates/wasmtime/src/runtime/component/func.rs
+++ b/crates/wasmtime/src/runtime/component/func.rs
@@ -1,3 +1,4 @@
+use crate::component::RuntimeInstance;
 use
crate::component::instance::Instance; use crate::component::matching::InstanceType; use crate::component::storage::storage_as_slice; @@ -581,6 +582,15 @@ impl Func { LowerReturn: Copy, { let export = self.lifted_core_func(store.0); + let (_options, _flags, _ty, raw_options) = self.abi_info(store.0); + let instance = RuntimeInstance { + instance: self.instance.id().instance(), + index: raw_options.instance, + }; + + if !store.0.may_enter(instance) { + bail!(crate::Trap::CannotEnterComponent); + } #[repr(C)] union Union { @@ -755,9 +765,6 @@ impl Func { ); let post_return_arg = post_return_arg.expect("calling post_return on wrong function"); - // This is a sanity-check assert which shouldn't ever trip. - assert!(!flags.may_enter()); - // Unset the "needs post return" flag now that post-return is being // processed. This will cause future invocations of this method to // panic, even if the function call below traps. @@ -769,10 +776,6 @@ impl Func { // If the function actually had a `post-return` configured in its // canonical options that's executed here. - // - // Note that if this traps (returns an error) this function - // intentionally leaves the instance in a "poisoned" state where it - // can no longer be entered because `may_enter` is `false`. if let Some(func) = post_return { crate::Func::call_unchecked_raw( &mut store, @@ -783,9 +786,7 @@ impl Func { } // And finally if everything completed successfully then the "may - // enter" and "may leave" flags are set to `true` again here which - // enables further use of the component. - flags.set_may_enter(true); + // leave" flag is set to `true` again. flags.set_may_leave(true); let (calls, host_table, _, instance) = store @@ -916,20 +917,6 @@ impl Func { let (options_idx, mut flags, ty, options) = self.abi_info(store.0); let async_ = options.async_; - // Test the "may enter" flag which is a "lock" on this instance. - // This is immediately set to `false` afterwards and note that - // there's no on-cleanup setting this flag back to true. That's an - // intentional design aspect where if anything goes wrong internally - // from this point on the instance is considered "poisoned" and can - // never be entered again. The only time this flag is set to `true` - // again is after post-return logic has completed successfully. - unsafe { - if !flags.may_enter() { - bail!(crate::Trap::CannotEnterComponent); - } - flags.set_may_enter(false); - } - // Perform the actual lowering, where while this is running the // component is forbidden from calling imports. unsafe { @@ -947,9 +934,7 @@ impl Func { // post-return call being required as we're about to enter wasm and // afterwards need a post-return. unsafe { - if may_enter && async_ { - flags.set_may_enter(true); - } else { + if !(may_enter && async_) { flags.set_needs_post_return(true); } } diff --git a/crates/wasmtime/src/runtime/component/func/host.rs b/crates/wasmtime/src/runtime/component/func/host.rs index 24aca7e0d9ca..ec34a6e2a0d1 100644 --- a/crates/wasmtime/src/runtime/component/func/host.rs +++ b/crates/wasmtime/src/runtime/component/func/host.rs @@ -1,5 +1,8 @@ //! Implementation of calling Rust-defined functions from components. 
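[Editor's note: taken together, the `func.rs` changes replace the old `FLAG_MAY_ENTER` lock with a store-level predicate. A rough model of the new host-entry decision (assumed shape; the real check is the `StoreOpaque::may_enter` shown above):]

fn may_enter(trapped: bool, needs_post_return: bool, already_on_call_path: bool) -> bool {
    // A store that has trapped is permanently poisoned; an instance that
    // still owes a post-return cannot be re-entered; and an instance that is
    // already on the current call path cannot be entered recursively.
    !trapped && !needs_post_return && !already_on_call_path
}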
+#[cfg(feature = "component-model-async")] +use crate::component::RuntimeInstance; +#[cfg(feature = "component-model-async")] use crate::component::concurrent; #[cfg(feature = "component-model-async")] use crate::component::concurrent::{Accessor, Status}; @@ -327,7 +330,7 @@ where // The caller has synchronously lowered an async function, meaning // the caller can only call it from an async task (i.e. a task // created via a call to an async export). Otherwise, we'll trap. - concurrent::check_blocking(store.0)?; + store.0.check_blocking()?; } let mut lift = LiftContext::new(store.0.store_opaque_mut(), options, instance); @@ -338,9 +341,14 @@ where let ret = match self.run(store.as_context_mut(), params) { HostResult::Done(result) => result?, #[cfg(feature = "component-model-async")] - HostResult::Future(future) => { - concurrent::poll_and_block(store.0, future, caller_instance)? - } + HostResult::Future(future) => concurrent::poll_and_block( + store.0, + future, + RuntimeInstance { + instance: instance.id().instance(), + index: caller_instance, + }, + )?, }; let mut lower = LowerContext::new(store, options, instance); diff --git a/crates/wasmtime/src/runtime/component/func/options.rs b/crates/wasmtime/src/runtime/component/func/options.rs index 02a04df69ac0..55df5005cce7 100644 --- a/crates/wasmtime/src/runtime/component/func/options.rs +++ b/crates/wasmtime/src/runtime/component/func/options.rs @@ -2,12 +2,10 @@ use crate::StoreContextMut; use crate::component::concurrent::ConcurrentState; use crate::component::matching::InstanceType; use crate::component::resources::{HostResourceData, HostResourceIndex, HostResourceTables}; -use crate::component::{Instance, ResourceType}; +use crate::component::{Instance, ResourceType, RuntimeInstance}; use crate::prelude::*; use crate::runtime::vm::VMFuncRef; -use crate::runtime::vm::component::{ - CallContexts, ComponentInstance, HandleTable, InstanceFlags, ResourceTables, -}; +use crate::runtime::vm::component::{CallContexts, ComponentInstance, HandleTable, ResourceTables}; use crate::store::{StoreId, StoreOpaque}; use alloc::sync::Arc; use core::pin::Pin; @@ -260,10 +258,10 @@ impl<'a, T: 'static> LowerContext<'a, T> { &mut self, rep: u32, dtor: Option>, - flags: Option, + instance: Option, ) -> Result { self.resource_tables() - .host_resource_lower_own(rep, dtor, flags) + .host_resource_lower_own(rep, dtor, instance) } /// Returns the underlying resource type for the `ty` table specified. @@ -422,10 +420,10 @@ impl<'a> LiftContext<'a> { &mut self, ty: TypeResourceTableIndex, idx: u32, - ) -> Result<(u32, Option>, Option)> { + ) -> Result<(u32, Option>, Option)> { let idx = self.resource_tables().guest_resource_lift_own(idx, ty)?; - let (dtor, flags) = self.instance.dtor_and_flags(ty); - Ok((idx, dtor, flags)) + let (dtor, instance) = self.instance.dtor_and_instance(ty); + Ok((idx, dtor, instance)) } /// Lifts a `borrow` resource from the guest at the `idx` specified. 
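[Editor's note: the `options.rs` plumbing above swaps `InstanceFlags` for the owning `RuntimeInstance` throughout the resource path, so a host-owned resource remembers which instance defined it. An illustrative picture of what a slot records and how dropping it uses that record (simplified stand-in types; the real `TableSlot` and `ResourceAny` drop path appear in the hunks below):]

struct Slot {
    dtor: Option<fn(u32)>,      // destructor to run when the `own` is dropped
    owner: Option<(u32, u32)>,  // (ComponentInstanceId, RuntimeComponentInstanceIndex)
}

// On drop, the recorded owner pair feeds the same may_enter check used for
// ordinary calls, replacing the old read of the FLAG_MAY_ENTER bit.
fn drop_resource(slot: &Slot, may_enter: impl Fn((u32, u32)) -> bool) -> Result<(), &'static str> {
    if let Some(owner) = slot.owner {
        if !may_enter(owner) {
            return Err("cannot enter component instance");
        }
    }
    if let (Some(dtor), Some((_, _))) = (slot.dtor, slot.owner) {
        dtor(0); // run the destructor with the resource's rep (illustrative)
    }
    Ok(())
}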
@@ -443,10 +441,10 @@ impl<'a> LiftContext<'a> { &mut self, rep: u32, dtor: Option>, - flags: Option, + instance: Option, ) -> Result { self.resource_tables() - .host_resource_lower_own(rep, dtor, flags) + .host_resource_lower_own(rep, dtor, instance) } /// Lowers a resource into the host-owned table, returning the index it was diff --git a/crates/wasmtime/src/runtime/component/instance.rs b/crates/wasmtime/src/runtime/component/instance.rs index caa75ae503db..41c7f6cd8d07 100644 --- a/crates/wasmtime/src/runtime/component/instance.rs +++ b/crates/wasmtime/src/runtime/component/instance.rs @@ -614,6 +614,9 @@ pub(crate) fn lookup_vmdef( // within that store, so it's safe to create a `Func`. vm::Export::Function(unsafe { crate::Func::from_vm_func_ref(store.id(), funcref) }) } + CoreDef::TaskMayBlock => vm::Export::Global(crate::Global::from_task_may_block( + StoreComponentInstanceId::new(store.id(), id), + )), } } diff --git a/crates/wasmtime/src/runtime/component/mod.rs b/crates/wasmtime/src/runtime/component/mod.rs index 43fb2c7ea6b3..b20eebb04f6c 100644 --- a/crates/wasmtime/src/runtime/component/mod.rs +++ b/crates/wasmtime/src/runtime/component/mod.rs @@ -139,7 +139,7 @@ pub use self::values::Val; pub(crate) use self::instance::RuntimeImport; pub(crate) use self::resources::HostResourceData; -pub(crate) use self::store::ComponentInstanceId; +pub(crate) use self::store::{ComponentInstanceId, RuntimeInstance}; // Re-export wasm_wave crate so the compatible version of this dep doesn't have to be // tracked separately from wasmtime. diff --git a/crates/wasmtime/src/runtime/component/resources/any.rs b/crates/wasmtime/src/runtime/component/resources/any.rs index 279038f3837b..e2e686c38cba 100644 --- a/crates/wasmtime/src/runtime/component/resources/any.rs +++ b/crates/wasmtime/src/runtime/component/resources/any.rs @@ -188,15 +188,9 @@ impl ResourceAny { // Implement the reentrance check required by the canonical ABI. Note // that this happens whether or not a destructor is present. - // - // Note that this should be safe because the raw pointer access in - // `flags` is valid due to `store` being the owner of the flags and - // flags are never destroyed within the store. - if let Some(flags) = slot.flags { - unsafe { - if !flags.may_enter() { - bail!(Trap::CannotEnterComponent); - } + if let Some(instance) = slot.instance { + if !store.0.may_enter(instance) { + bail!(Trap::CannotEnterComponent); } } diff --git a/crates/wasmtime/src/runtime/component/resources/host_tables.rs b/crates/wasmtime/src/runtime/component/resources/host_tables.rs index a5e965688156..3ef7253897f9 100644 --- a/crates/wasmtime/src/runtime/component/resources/host_tables.rs +++ b/crates/wasmtime/src/runtime/component/resources/host_tables.rs @@ -21,9 +21,8 @@ //! the own is removed or otherwise taken out. use crate::prelude::*; -use crate::runtime::vm::component::{ - InstanceFlags, ResourceTables, TypedResource, TypedResourceIndex, -}; +use crate::runtime::component::RuntimeInstance; +use crate::runtime::vm::component::{ResourceTables, TypedResource, TypedResourceIndex}; use crate::runtime::vm::{SendSyncPtr, VMFuncRef}; use crate::store::StoreOpaque; use core::ptr::NonNull; @@ -69,7 +68,7 @@ pub struct HostResourceData { #[derive(Copy, Clone)] pub struct TableSlot { generation: u32, - pub(super) flags: Option, + pub(super) instance: Option, pub(super) dtor: Option>, } @@ -144,16 +143,16 @@ impl<'a> HostResourceTables<'a> { /// will point to the `rep` specified. 
The returned index is suitable for
     /// conversion into either [`Resource`] or [`ResourceAny`].
     ///
-    /// The `dtor` and instance `flags` are specified as well to know what
+    /// The `dtor` and owning `instance` are specified as well to know what
     /// destructor to run when this resource is destroyed.
     pub fn host_resource_lower_own(
         &mut self,
         rep: u32,
         dtor: Option<NonNull<VMFuncRef>>,
-        flags: Option<InstanceFlags>,
+        instance: Option<RuntimeInstance>,
     ) -> Result<HostResourceIndex> {
         let idx = self.tables.resource_lower_own(TypedResource::Host(rep))?;
-        Ok(self.new_host_index(idx, dtor, flags))
+        Ok(self.new_host_index(idx, dtor, instance))
     }

     /// See [`HostResourceTables::host_resource_lower_own`].
@@ -208,12 +207,12 @@ impl<'a> HostResourceTables<'a> {
         &mut self,
         idx: u32,
         dtor: Option<NonNull<VMFuncRef>>,
-        flags: Option<InstanceFlags>,
+        instance: Option<RuntimeInstance>,
     ) -> HostResourceIndex {
         let list = &mut self.host_resource_data.table_slot_metadata;
         let info = TableSlot {
             generation: self.host_resource_data.cur_generation,
-            flags,
+            instance,
             dtor: dtor.map(SendSyncPtr::new),
         };
         match list.get_mut(idx as usize) {
@@ -225,7 +224,7 @@ impl<'a> HostResourceTables<'a> {
                 assert_eq!(idx, 1);
                 list.push(TableSlot {
                     generation: 0,
-                    flags: None,
+                    instance: None,
                     dtor: None,
                 });
             }
diff --git a/crates/wasmtime/src/runtime/component/store.rs b/crates/wasmtime/src/runtime/component/store.rs
index c24070c7eed2..87576e9d9255 100644
--- a/crates/wasmtime/src/runtime/component/store.rs
+++ b/crates/wasmtime/src/runtime/component/store.rs
@@ -6,6 +6,7 @@ use crate::store::{StoreData, StoreId, StoreOpaque};
 use alloc::vec::Vec;
 use core::pin::Pin;
 use wasmtime_environ::PrimaryMap;
+use wasmtime_environ::component::RuntimeComponentInstanceIndex;

 #[derive(Default)]
 pub struct ComponentStoreData {
@@ -16,6 +17,13 @@ pub struct ComponentStoreData {
 pub struct ComponentInstanceId(u32);
 wasmtime_environ::entity_impl!(ComponentInstanceId);

+/// Represents a (`ComponentInstanceId`, `RuntimeComponentInstanceIndex`) pair.
+#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub struct RuntimeInstance { + pub instance: ComponentInstanceId, + pub index: RuntimeComponentInstanceIndex, +} + impl StoreData { pub(crate) fn push_component_instance( &mut self, diff --git a/crates/wasmtime/src/runtime/externals/global.rs b/crates/wasmtime/src/runtime/externals/global.rs index 88daa9c32610..23d6775f78b1 100644 --- a/crates/wasmtime/src/runtime/externals/global.rs +++ b/crates/wasmtime/src/runtime/externals/global.rs @@ -338,6 +338,17 @@ impl Global { } } + #[cfg(feature = "component-model")] + pub(crate) fn from_task_may_block( + instance: crate::component::store::StoreComponentInstanceId, + ) -> Global { + Global { + store: instance.store_id(), + instance: instance.instance().as_u32(), + kind: VMGlobalKind::TaskMayBlock, + } + } + pub(crate) fn wasmtime_ty<'a>(&self, store: &'a StoreOpaque) -> &'a wasmtime_environ::Global { self.store.assert_belongs_to(store.id()); match self.kind { @@ -349,7 +360,7 @@ impl Global { } VMGlobalKind::Host(index) => unsafe { &store.host_globals()[index].get().as_ref().ty }, #[cfg(feature = "component-model")] - VMGlobalKind::ComponentFlags(_) => { + VMGlobalKind::ComponentFlags(_) | VMGlobalKind::TaskMayBlock => { const TY: wasmtime_environ::Global = wasmtime_environ::Global { mutability: true, wasm_ty: wasmtime_environ::WasmValType::I32, @@ -367,7 +378,7 @@ impl Global { } VMGlobalKind::Host(_) => None, #[cfg(feature = "component-model")] - VMGlobalKind::ComponentFlags(_) => { + VMGlobalKind::ComponentFlags(_) | VMGlobalKind::TaskMayBlock => { let instance = crate::component::ComponentInstanceId::from_u32(self.instance); Some( VMOpaqueContext::from_vmcomponent(store.component_instance(instance).vmctx()) @@ -414,6 +425,13 @@ impl Global { .instance_flags(index) .as_raw() } + #[cfg(feature = "component-model")] + VMGlobalKind::TaskMayBlock => store + .component_instance(crate::component::ComponentInstanceId::from_u32( + self.instance, + )) + .task_may_block() + .as_raw(), } } } diff --git a/crates/wasmtime/src/runtime/func.rs b/crates/wasmtime/src/runtime/func.rs index 5dc48488bede..5b5da25e073d 100644 --- a/crates/wasmtime/src/runtime/func.rs +++ b/crates/wasmtime/src/runtime/func.rs @@ -1505,14 +1505,20 @@ pub(crate) fn invoke_wasm_and_catch_traps( // stack-allocated `previous_runtime_state`. 
let mut previous_runtime_state = EntryStoreContext::enter_wasm(store, &mut initial_stack_csi); - if let Err(trap) = store.0.call_hook(CallHook::CallingWasm) { - // `previous_runtime_state` implicitly dropped here - return Err(trap); - } - let result = crate::runtime::vm::catch_traps(store, &mut previous_runtime_state, closure); - core::mem::drop(previous_runtime_state); - store.0.call_hook(CallHook::ReturningFromWasm)?; - result + (|| { + if let Err(trap) = store.0.call_hook(CallHook::CallingWasm) { + // `previous_runtime_state` implicitly dropped here + return Err(trap); + } + let result = crate::runtime::vm::catch_traps(store, &mut previous_runtime_state, closure); + core::mem::drop(previous_runtime_state); + store.0.call_hook(CallHook::ReturningFromWasm)?; + result + })() + .inspect_err(|_| { + #[cfg(feature = "component-model")] + store.0.set_trapped(); + }) } /// This type helps managing the state of the runtime when entering and exiting diff --git a/crates/wasmtime/src/runtime/store.rs b/crates/wasmtime/src/runtime/store.rs index 0ce69fc6c341..d55e942bffbb 100644 --- a/crates/wasmtime/src/runtime/store.rs +++ b/crates/wasmtime/src/runtime/store.rs @@ -545,6 +545,10 @@ pub struct StoreOpaque { #[cfg(feature = "component-model")] concurrent_state: concurrent::ConcurrentState, + /// Whether an instance belonging to this store has trapped. + #[cfg(feature = "component-model")] + trapped: bool, + /// State related to the executor of wasm code. /// /// For example if Pulley is enabled and configured then this will store a @@ -764,6 +768,8 @@ impl Store { wasm_val_raw_storage: Vec::new(), pkey, #[cfg(feature = "component-model")] + trapped: false, + #[cfg(feature = "component-model")] component_host_table: Default::default(), #[cfg(feature = "component-model")] component_calls: Default::default(), @@ -2578,7 +2584,7 @@ at https://bytecodealliance.org/security. &mut self.async_state } - #[cfg(feature = "component-model-async")] + #[cfg(feature = "component-model")] pub(crate) fn concurrent_state_mut(&mut self) -> &mut concurrent::ConcurrentState { &mut self.concurrent_state } @@ -2765,6 +2771,16 @@ at https://bytecodealliance.org/security. pub(crate) fn get_epoch_deadline(&mut self) -> u64 { *self.vm_store_context.epoch_deadline.get_mut() } + + #[cfg(feature = "component-model")] + pub(crate) fn trapped(&self) -> bool { + self.trapped + } + + #[cfg(feature = "component-model")] + pub(crate) fn set_trapped(&mut self) { + self.trapped = true; + } } /// Helper parameter to [`StoreOpaque::allocate_instance`]. diff --git a/crates/wasmtime/src/runtime/vm/component.rs b/crates/wasmtime/src/runtime/vm/component.rs index a75bbb303698..72b49055562e 100644 --- a/crates/wasmtime/src/runtime/vm/component.rs +++ b/crates/wasmtime/src/runtime/vm/component.rs @@ -8,9 +8,9 @@ use crate::component::{Component, Instance, InstancePre, ResourceType, RuntimeImport}; use crate::module::ModuleRegistry; -use crate::runtime::component::ComponentInstanceId; #[cfg(feature = "component-model-async")] use crate::runtime::component::concurrent::ConcurrentInstanceState; +use crate::runtime::component::{ComponentInstanceId, RuntimeInstance}; use crate::runtime::vm::instance::{InstanceLayout, OwnedInstance, OwnedVMContext}; use crate::runtime::vm::vmcontext::VMFunctionBody; use crate::runtime::vm::{ @@ -354,8 +354,8 @@ impl ComponentInstance { InstanceLayout::vmctx(self) } - /// Returns a pointer to the "may leave" flag for this instance specified - /// for canonical lowering and lifting operations. 
+ /// Returns a pointer to the flags for the instance specified for canonical + /// lowering and lifting operations. #[inline] pub fn instance_flags(&self, instance: RuntimeComponentInstanceIndex) -> InstanceFlags { unsafe { @@ -365,6 +365,14 @@ impl ComponentInstance { } } + pub fn task_may_block(&self) -> TaskMayBlock { + unsafe { + let ptr = + self.vmctx_plus_offset_raw::(self.offsets.task_may_block()); + TaskMayBlock(SendSyncPtr::new(ptr)) + } + } + /// Returns the runtime memory definition corresponding to the index of the /// memory provided. /// @@ -700,7 +708,7 @@ impl ComponentInstance { // SAFETY: this is a valid initialization of all globals which are // 32-bit values. unsafe { - *def.as_i32_mut() = FLAG_MAY_ENTER | FLAG_MAY_LEAVE; + *def.as_i32_mut() = FLAG_MAY_LEAVE; self.instance_flags(i).as_raw().write(def); } } @@ -871,27 +879,29 @@ impl ComponentInstance { pub fn instance_state( self: Pin<&mut Self>, instance: RuntimeComponentInstanceIndex, - ) -> Option<&mut InstanceState> { - self.instance_states().0.get_mut(instance) + ) -> &mut InstanceState { + &mut self.instance_states().0[instance] } - /// Returns the destructor and instance flags for the specified resource + /// Returns the destructor and instance for the specified resource /// table type. /// /// This will lookup the origin definition of the `ty` table and return the /// destructor/flags for that. - pub fn dtor_and_flags( + pub fn dtor_and_instance( &self, ty: TypeResourceTableIndex, - ) -> (Option>, Option) { + ) -> (Option>, Option) { let resource = self.component.types()[ty].unwrap_concrete_ty(); let dtor = self.resource_destructor(resource); let component = self.component.env_component(); - let flags = component.defined_resource_index(resource).map(|i| { - let instance = component.defined_resource_instances[i]; - self.instance_flags(instance) - }); - (dtor, flags) + let instance = component + .defined_resource_index(resource) + .map(|i| RuntimeInstance { + instance: self.id(), + index: component.defined_resource_instances[i], + }); + (dtor, instance) } /// Returns the store-local id that points to this component. 
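[Editor's note: with `FLAG_MAY_ENTER` gone, the flags word initialized above carries only the remaining bits. An illustrative model follows; the constant values are assumptions consistent with the bit tests visible in the disassembly expectations later in this patch (may-leave is tested with `testl $1`), not definitive definitions:]

const FLAG_MAY_LEAVE: i32 = 1 << 0;         // assumed value
const FLAG_NEEDS_POST_RETURN: i32 = 1 << 2; // assumed value, pre-renumbering

// A fresh instance may leave (call its imports) and owes no post-return;
// there is no longer a may-enter bit to maintain.
fn initial_flags() -> i32 {
    FLAG_MAY_LEAVE
}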
@@ -1108,38 +1118,42 @@ impl InstanceFlags { } #[inline] - pub unsafe fn may_enter(&self) -> bool { - unsafe { *self.as_raw().as_ref().as_i32() & FLAG_MAY_ENTER != 0 } + pub unsafe fn needs_post_return(&self) -> bool { + unsafe { *self.as_raw().as_ref().as_i32() & FLAG_NEEDS_POST_RETURN != 0 } } #[inline] - pub unsafe fn set_may_enter(&mut self, val: bool) { + pub unsafe fn set_needs_post_return(&mut self, val: bool) { unsafe { if val { - *self.as_raw().as_mut().as_i32_mut() |= FLAG_MAY_ENTER; + *self.as_raw().as_mut().as_i32_mut() |= FLAG_NEEDS_POST_RETURN; } else { - *self.as_raw().as_mut().as_i32_mut() &= !FLAG_MAY_ENTER; + *self.as_raw().as_mut().as_i32_mut() &= !FLAG_NEEDS_POST_RETURN; } } } #[inline] - pub unsafe fn needs_post_return(&self) -> bool { - unsafe { *self.as_raw().as_ref().as_i32() & FLAG_NEEDS_POST_RETURN != 0 } + pub fn as_raw(&self) -> NonNull { + self.0.as_non_null() } +} - #[inline] - pub unsafe fn set_needs_post_return(&mut self, val: bool) { +#[repr(transparent)] +#[derive(Copy, Clone)] +pub struct TaskMayBlock(SendSyncPtr); + +impl TaskMayBlock { + pub unsafe fn get(&self) -> bool { + unsafe { *self.as_raw().as_ref().as_i32() != 0 } + } + + pub unsafe fn set(&mut self, val: bool) { unsafe { - if val { - *self.as_raw().as_mut().as_i32_mut() |= FLAG_NEEDS_POST_RETURN; - } else { - *self.as_raw().as_mut().as_i32_mut() &= !FLAG_NEEDS_POST_RETURN; - } + *self.as_raw().as_mut().as_i32_mut() = if val { 1 } else { 0 }; } } - #[inline] pub fn as_raw(&self) -> NonNull { self.0.as_non_null() } diff --git a/crates/wasmtime/src/runtime/vm/component/libcalls.rs b/crates/wasmtime/src/runtime/vm/component/libcalls.rs index d7f8527c3ef1..580b1a593fd8 100644 --- a/crates/wasmtime/src/runtime/vm/component/libcalls.rs +++ b/crates/wasmtime/src/runtime/vm/component/libcalls.rs @@ -1,11 +1,11 @@ //! Implementation of string transcoding required by the component model. 
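[Editor's note: the new `TaskMayBlock` global defined above is a plain i32 cell in the `VMComponentContext`. The adapter code in the disassembly tests later in this patch saves it, zeroes it across a synchronous cross-instance call, and restores it afterward; in pseudo-Rust (illustrative only, the real sequence is generated Cranelift IR):]

// `flag` models the i32 task_may_block cell; `call` is the sync guest->guest call.
fn with_blocking_disallowed(flag: &mut i32, call: impl FnOnce()) {
    let saved = *flag; // remember the caller's may-block state
    *flag = 0;         // a synchronous callee must not block
    call();
    *flag = saved;     // restore on the way back out
}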
-use crate::component::Instance; #[cfg(feature = "component-model-async")] use crate::component::concurrent::WaitResult; +use crate::component::{Instance, RuntimeInstance}; use crate::prelude::*; #[cfg(feature = "component-model-async")] -use crate::runtime::component::concurrent::{ResourcePair, RuntimeInstance}; +use crate::runtime::component::concurrent::ResourcePair; use crate::runtime::vm::component::{ComponentInstance, VMComponentContext}; use crate::runtime::vm::{HostResultHasUnwindSentinel, VMStore, VmSafe}; use core::cell::Cell; @@ -671,6 +671,30 @@ fn trap(_store: &mut dyn VMStore, _instance: Instance, code: u32) -> Result<()> .into()) } +fn enter_sync_call( + store: &mut dyn VMStore, + instance: Instance, + caller_instance: u32, + callee_async: u32, + callee_instance: u32, +) -> Result<()> { + store.enter_sync_call( + RuntimeInstance { + instance: instance.id().instance(), + index: RuntimeComponentInstanceIndex::from_u32(caller_instance), + }, + callee_async != 0, + RuntimeInstance { + instance: instance.id().instance(), + index: RuntimeComponentInstanceIndex::from_u32(callee_instance), + }, + ) +} + +fn exit_sync_call(store: &mut dyn VMStore, _instance: Instance) -> Result<()> { + store.exit_sync_call() +} + #[cfg(feature = "component-model-async")] fn backpressure_modify( store: &mut dyn VMStore, @@ -971,11 +995,6 @@ fn error_context_transfer( instance.error_context_transfer(store, src_idx, src_table, dst_table) } -#[cfg(feature = "component-model-async")] -fn check_blocking(store: &mut dyn VMStore, _instance: Instance) -> Result<()> { - crate::component::concurrent::check_blocking(store) -} - #[cfg(feature = "component-model-async")] unsafe impl HostResultHasUnwindSentinel for ResourcePair { type Abi = u64; diff --git a/crates/wasmtime/src/runtime/vm/instance.rs b/crates/wasmtime/src/runtime/vm/instance.rs index 38b9df44bb61..a574e38033a5 100644 --- a/crates/wasmtime/src/runtime/vm/instance.rs +++ b/crates/wasmtime/src/runtime/vm/instance.rs @@ -675,6 +675,20 @@ impl Instance { index, ) } + #[cfg(feature = "component-model")] + VMGlobalKind::TaskMayBlock => { + // SAFETY: validity of this `&Instance` means validity of its + // imports meaning we can read the id of the vmctx within. + let id = unsafe { + let vmctx = super::component::VMComponentContext::from_opaque( + import.vmctx.unwrap().as_non_null(), + ); + super::component::ComponentInstance::vmctx_instance_id(vmctx) + }; + crate::Global::from_task_may_block( + crate::component::store::StoreComponentInstanceId::new(store, id), + ) + } } } diff --git a/crates/wasmtime/src/runtime/vm/vmcontext.rs b/crates/wasmtime/src/runtime/vm/vmcontext.rs index 61cbba81c714..c3287979a17b 100644 --- a/crates/wasmtime/src/runtime/vm/vmcontext.rs +++ b/crates/wasmtime/src/runtime/vm/vmcontext.rs @@ -292,6 +292,8 @@ pub enum VMGlobalKind { /// Flags for a component instance, stored in `VMComponentContext`. 
#[cfg(feature = "component-model")] ComponentFlags(wasmtime_environ::component::RuntimeComponentInstanceIndex), + #[cfg(feature = "component-model")] + TaskMayBlock, } // SAFETY: the above enum is repr(C) and stores nothing else diff --git a/tests/all/component_model/func.rs b/tests/all/component_model/func.rs index 3e3d6f964edc..87e3d120a679 100644 --- a/tests/all/component_model/func.rs +++ b/tests/all/component_model/func.rs @@ -183,8 +183,7 @@ fn integers() -> Result<()> { let engine = super::engine(); let component = Component::new(&engine, component)?; let mut store = Store::new(&engine, ()); - let new_instance = |store: &mut Store<()>| Linker::new(&engine).instantiate(store, &component); - let instance = new_instance(&mut store)?; + let instance = Linker::new(&engine).instantiate(&mut store, &component)?; // Passing in 100 is valid for all primitives instance @@ -213,46 +212,62 @@ fn integers() -> Result<()> { .call_and_post_return(&mut store, (100,))?; // This specific wasm instance traps if any value other than 100 is passed - new_instance(&mut store)? - .get_typed_func::<(u8,), ()>(&mut store, "take-u8")? - .call(&mut store, (101,)) - .unwrap_err() - .downcast::()?; - new_instance(&mut store)? - .get_typed_func::<(i8,), ()>(&mut store, "take-s8")? - .call(&mut store, (101,)) - .unwrap_err() - .downcast::()?; - new_instance(&mut store)? - .get_typed_func::<(u16,), ()>(&mut store, "take-u16")? - .call(&mut store, (101,)) - .unwrap_err() - .downcast::()?; - new_instance(&mut store)? - .get_typed_func::<(i16,), ()>(&mut store, "take-s16")? - .call(&mut store, (101,)) - .unwrap_err() - .downcast::()?; - new_instance(&mut store)? - .get_typed_func::<(u32,), ()>(&mut store, "take-u32")? - .call(&mut store, (101,)) - .unwrap_err() - .downcast::()?; - new_instance(&mut store)? - .get_typed_func::<(i32,), ()>(&mut store, "take-s32")? - .call(&mut store, (101,)) - .unwrap_err() - .downcast::()?; - new_instance(&mut store)? - .get_typed_func::<(u64,), ()>(&mut store, "take-u64")? - .call(&mut store, (101,)) - .unwrap_err() - .downcast::()?; - new_instance(&mut store)? - .get_typed_func::<(i64,), ()>(&mut store, "take-s64")? - .call(&mut store, (101,)) - .unwrap_err() - .downcast::()?; + with_new_instance(&engine, &component, |store, instance| { + instance + .get_typed_func::<(u8,), ()>(&mut *store, "take-u8")? + .call(store, (101,)) + .unwrap_err() + .downcast::() + })?; + with_new_instance(&engine, &component, |store, instance| { + instance + .get_typed_func::<(i8,), ()>(&mut *store, "take-s8")? + .call(store, (101,)) + .unwrap_err() + .downcast::() + })?; + with_new_instance(&engine, &component, |store, instance| { + instance + .get_typed_func::<(u16,), ()>(&mut *store, "take-u16")? + .call(store, (101,)) + .unwrap_err() + .downcast::() + })?; + with_new_instance(&engine, &component, |store, instance| { + instance + .get_typed_func::<(i16,), ()>(&mut *store, "take-s16")? + .call(store, (101,)) + .unwrap_err() + .downcast::() + })?; + with_new_instance(&engine, &component, |store, instance| { + instance + .get_typed_func::<(u32,), ()>(&mut *store, "take-u32")? + .call(store, (101,)) + .unwrap_err() + .downcast::() + })?; + with_new_instance(&engine, &component, |store, instance| { + instance + .get_typed_func::<(i32,), ()>(&mut *store, "take-s32")? + .call(store, (101,)) + .unwrap_err() + .downcast::() + })?; + with_new_instance(&engine, &component, |store, instance| { + instance + .get_typed_func::<(u64,), ()>(&mut *store, "take-u64")? 
+ .call(store, (101,)) + .unwrap_err() + .downcast::() + })?; + with_new_instance(&engine, &component, |store, instance| { + instance + .get_typed_func::<(i64,), ()>(&mut *store, "take-s64")? + .call(store, (101,)) + .unwrap_err() + .downcast::() + })?; // Zero can be returned as any integer assert_eq!( @@ -1991,35 +2006,39 @@ fn some_traps() -> Result<()> { let engine = super::engine(); let component = Component::new(&engine, component)?; - let mut store = Store::new(&engine, ()); - let instance = |store: &mut Store<()>| Linker::new(&engine).instantiate(store, &component); // This should fail when calling the allocator function for the argument - let err = instance(&mut store)? - .get_typed_func::<(&[u8],), ()>(&mut store, "take-list-unreachable")? - .call(&mut store, (&[],)) - .unwrap_err() - .downcast::()?; + let err = with_new_instance(&engine, &component, |store, instance| { + instance + .get_typed_func::<(&[u8],), ()>(&mut *store, "take-list-unreachable")? + .call(store, (&[],)) + .unwrap_err() + .downcast::() + })?; assert_eq!(err, Trap::UnreachableCodeReached); // This should fail when calling the allocator function for the argument - let err = instance(&mut store)? - .get_typed_func::<(&str,), ()>(&mut store, "take-string-unreachable")? - .call(&mut store, ("",)) - .unwrap_err() - .downcast::()?; + let err = with_new_instance(&engine, &component, |store, instance| { + instance + .get_typed_func::<(&str,), ()>(&mut *store, "take-string-unreachable")? + .call(store, ("",)) + .unwrap_err() + .downcast::() + })?; assert_eq!(err, Trap::UnreachableCodeReached); // This should fail when calling the allocator function for the space // to store the arguments (before arguments are even lowered) - let err = instance(&mut store)? - .get_typed_func::<(&str, &str, &str, &str, &str, &str, &str, &str, &str, &str), ()>( - &mut store, - "take-many-unreachable", - )? - .call(&mut store, ("", "", "", "", "", "", "", "", "", "")) - .unwrap_err() - .downcast::()?; + let err = with_new_instance(&engine, &component, |store, instance| { + instance + .get_typed_func::<(&str, &str, &str, &str, &str, &str, &str, &str, &str, &str), ()>( + &mut *store, + "take-many-unreachable", + )? + .call(store, ("", "", "", "", "", "", "", "", "", "")) + .unwrap_err() + .downcast::() + })?; assert_eq!(err, Trap::UnreachableCodeReached); // Assert that when the base pointer returned by malloc is out of bounds @@ -2036,87 +2055,115 @@ fn some_traps() -> Result<()> { "{err:?}", ); } - let err = instance(&mut store)? - .get_typed_func::<(&[u8],), ()>(&mut store, "take-list-base-oob")? - .call(&mut store, (&[],)) - .unwrap_err(); + let err = with_new_instance(&engine, &component, |store, instance| { + instance + .get_typed_func::<(&[u8],), ()>(&mut *store, "take-list-base-oob")? + .call(store, (&[],)) + }) + .unwrap_err(); assert_oob(&err); - let err = instance(&mut store)? - .get_typed_func::<(&[u8],), ()>(&mut store, "take-list-base-oob")? - .call(&mut store, (&[1],)) - .unwrap_err(); + let err = with_new_instance(&engine, &component, |store, instance| { + instance + .get_typed_func::<(&[u8],), ()>(&mut *store, "take-list-base-oob")? + .call(store, (&[1],)) + }) + .unwrap_err(); assert_oob(&err); - let err = instance(&mut store)? - .get_typed_func::<(&str,), ()>(&mut store, "take-string-base-oob")? - .call(&mut store, ("",)) - .unwrap_err(); + let err = with_new_instance(&engine, &component, |store, instance| { + instance + .get_typed_func::<(&str,), ()>(&mut *store, "take-string-base-oob")? 
+ .call(store, ("",)) + }) + .unwrap_err(); assert_oob(&err); - let err = instance(&mut store)? - .get_typed_func::<(&str,), ()>(&mut store, "take-string-base-oob")? - .call(&mut store, ("x",)) - .unwrap_err(); + let err = with_new_instance(&engine, &component, |store, instance| { + instance + .get_typed_func::<(&str,), ()>(&mut *store, "take-string-base-oob")? + .call(store, ("x",)) + }) + .unwrap_err(); assert_oob(&err); - let err = instance(&mut store)? - .get_typed_func::<(&str, &str, &str, &str, &str, &str, &str, &str, &str, &str), ()>( - &mut store, - "take-many-base-oob", - )? - .call(&mut store, ("", "", "", "", "", "", "", "", "", "")) - .unwrap_err(); + let err = with_new_instance(&engine, &component, |store, instance| { + instance + .get_typed_func::<(&str, &str, &str, &str, &str, &str, &str, &str, &str, &str), ()>( + &mut *store, + "take-many-base-oob", + )? + .call(store, ("", "", "", "", "", "", "", "", "", "")) + }) + .unwrap_err(); assert_oob(&err); // Test here that when the returned pointer from malloc is one byte from the // end of memory that empty things are fine, but larger things are not. - instance(&mut store)? - .get_typed_func::<(&[u8],), ()>(&mut store, "take-list-end-oob")? - .call_and_post_return(&mut store, (&[],))?; - instance(&mut store)? - .get_typed_func::<(&[u8],), ()>(&mut store, "take-list-end-oob")? - .call_and_post_return(&mut store, (&[1, 2, 3, 4],))?; - let err = instance(&mut store)? - .get_typed_func::<(&[u8],), ()>(&mut store, "take-list-end-oob")? - .call(&mut store, (&[1, 2, 3, 4, 5],)) - .unwrap_err(); + with_new_instance(&engine, &component, |store, instance| { + instance + .get_typed_func::<(&[u8],), ()>(&mut *store, "take-list-end-oob")? + .call_and_post_return(store, (&[],)) + })?; + with_new_instance(&engine, &component, |store, instance| { + instance + .get_typed_func::<(&[u8],), ()>(&mut *store, "take-list-end-oob")? + .call_and_post_return(store, (&[1, 2, 3, 4],)) + })?; + let err = with_new_instance(&engine, &component, |store, instance| { + instance + .get_typed_func::<(&[u8],), ()>(&mut *store, "take-list-end-oob")? + .call(store, (&[1, 2, 3, 4, 5],)) + }) + .unwrap_err(); assert_oob(&err); - instance(&mut store)? - .get_typed_func::<(&str,), ()>(&mut store, "take-string-end-oob")? - .call_and_post_return(&mut store, ("",))?; - instance(&mut store)? - .get_typed_func::<(&str,), ()>(&mut store, "take-string-end-oob")? - .call_and_post_return(&mut store, ("abcd",))?; - let err = instance(&mut store)? - .get_typed_func::<(&str,), ()>(&mut store, "take-string-end-oob")? - .call(&mut store, ("abcde",)) - .unwrap_err(); + with_new_instance(&engine, &component, |store, instance| { + instance + .get_typed_func::<(&str,), ()>(&mut *store, "take-string-end-oob")? + .call_and_post_return(store, ("",)) + })?; + with_new_instance(&engine, &component, |store, instance| { + instance + .get_typed_func::<(&str,), ()>(&mut *store, "take-string-end-oob")? + .call_and_post_return(store, ("abcd",)) + })?; + let err = with_new_instance(&engine, &component, |store, instance| { + instance + .get_typed_func::<(&str,), ()>(&mut *store, "take-string-end-oob")? + .call(store, ("abcde",)) + }) + .unwrap_err(); assert_oob(&err); - let err = instance(&mut store)? - .get_typed_func::<(&str, &str, &str, &str, &str, &str, &str, &str, &str, &str), ()>( - &mut store, - "take-many-end-oob", - )? 
- .call(&mut store, ("", "", "", "", "", "", "", "", "", "")) - .unwrap_err(); + let err = with_new_instance(&engine, &component, |store, instance| { + instance + .get_typed_func::<(&str, &str, &str, &str, &str, &str, &str, &str, &str, &str), ()>( + &mut *store, + "take-many-end-oob", + )? + .call(store, ("", "", "", "", "", "", "", "", "", "")) + }) + .unwrap_err(); assert_oob(&err); // For this function the first allocation, the space to store all the // arguments, is in-bounds but then all further allocations, such as for // each individual string, are all out of bounds. - let err = instance(&mut store)? - .get_typed_func::<(&str, &str, &str, &str, &str, &str, &str, &str, &str, &str), ()>( - &mut store, - "take-many-second-oob", - )? - .call(&mut store, ("", "", "", "", "", "", "", "", "", "")) - .unwrap_err(); + let err = with_new_instance(&engine, &component, |store, instance| { + instance + .get_typed_func::<(&str, &str, &str, &str, &str, &str, &str, &str, &str, &str), ()>( + &mut *store, + "take-many-second-oob", + )? + .call(store, ("", "", "", "", "", "", "", "", "", "")) + }) + .unwrap_err(); assert_oob(&err); - let err = instance(&mut store)? - .get_typed_func::<(&str, &str, &str, &str, &str, &str, &str, &str, &str, &str), ()>( - &mut store, - "take-many-second-oob", - )? - .call(&mut store, ("", "", "", "", "", "", "", "", "", "x")) - .unwrap_err(); + let err = with_new_instance(&engine, &component, |store, instance| { + instance + .get_typed_func::<(&str, &str, &str, &str, &str, &str, &str, &str, &str, &str), ()>( + &mut *store, + "take-many-second-oob", + )? + .call(store, ("", "", "", "", "", "", "", "", "", "x")) + }) + .unwrap_err(); assert_oob(&err); Ok(()) } @@ -3223,24 +3270,33 @@ fn errors_that_poison_instance() -> Result<()> { let engine = super::engine(); let component = Component::new(&engine, component)?; - let mut store = Store::new(&engine, ()); let linker = Linker::new(&engine); - let instance = linker.instantiate(&mut store, &component)?; - let f1 = instance.get_typed_func::<(), ()>(&mut store, "f1")?; - let f2 = instance.get_typed_func::<(), ()>(&mut store, "f2")?; - assert_unreachable(f1.call(&mut store, ())); - assert_poisoned(f1.call(&mut store, ())); - assert_poisoned(f2.call(&mut store, ())); - let instance = linker.instantiate(&mut store, &component)?; - let f3 = instance.get_typed_func::<(&str,), ()>(&mut store, "f3")?; - assert_unreachable(f3.call(&mut store, ("x",))); - assert_poisoned(f3.call(&mut store, ("x",))); + { + let mut store = Store::new(&engine, ()); + let instance = linker.instantiate(&mut store, &component)?; + let f1 = instance.get_typed_func::<(), ()>(&mut store, "f1")?; + let f2 = instance.get_typed_func::<(), ()>(&mut store, "f2")?; + assert_unreachable(f1.call(&mut store, ())); + assert_poisoned(f1.call(&mut store, ())); + assert_poisoned(f2.call(&mut store, ())); + } - let instance = linker.instantiate(&mut store, &component)?; - let f4 = instance.get_typed_func::<(), (WasmStr,)>(&mut store, "f4")?; - assert!(f4.call(&mut store, ()).is_err()); - assert_poisoned(f4.call(&mut store, ())); + { + let mut store = Store::new(&engine, ()); + let instance = linker.instantiate(&mut store, &component)?; + let f3 = instance.get_typed_func::<(&str,), ()>(&mut store, "f3")?; + assert_unreachable(f3.call(&mut store, ("x",))); + assert_poisoned(f3.call(&mut store, ("x",))); + } + + { + let mut store = Store::new(&engine, ()); + let instance = linker.instantiate(&mut store, &component)?; + let f4 = instance.get_typed_func::<(), (WasmStr,)>(&mut 
store, "f4")?; + assert!(f4.call(&mut store, ()).is_err()); + assert_poisoned(f4.call(&mut store, ())); + } return Ok(()); @@ -3316,3 +3372,13 @@ fn run_export_with_internal_adapter() -> Result<()> { assert_eq!(run.call(&mut store, ())?, (5,)); Ok(()) } + +fn with_new_instance( + engine: &Engine, + component: &Component, + fun: impl Fn(&mut Store<()>, Instance) -> anyhow::Result, +) -> anyhow::Result { + let mut store = Store::new(engine, ()); + let instance = Linker::new(engine).instantiate(&mut store, component)?; + fun(&mut store, instance) +} diff --git a/tests/all/component_model/import.rs b/tests/all/component_model/import.rs index 6a65dc3a6312..8c9d5e4c0cf7 100644 --- a/tests/all/component_model/import.rs +++ b/tests/all/component_model/import.rs @@ -348,55 +348,63 @@ fn attempt_to_leave_during_malloc() -> Result<()> { Ok(("hello".to_string(),)) })?; let component = Component::new(&engine, component)?; - let mut store = Store::new(&engine, ()); - // Assert that during a host import if we return values to wasm that a trap - // happens if we try to leave the instance. - let trap = linker - .instantiate(&mut store, &component)? - .get_typed_func::<(), ()>(&mut store, "run")? - .call(&mut store, ()) - .unwrap_err(); - assert!( - format!("{trap:?}").contains("cannot leave component instance"), - "bad trap: {trap:?}", - ); + { + let mut store = Store::new(&engine, ()); + + // Assert that during a host import if we return values to wasm that a trap + // happens if we try to leave the instance. + let trap = linker + .instantiate(&mut store, &component)? + .get_typed_func::<(), ()>(&mut store, "run")? + .call(&mut store, ()) + .unwrap_err(); + assert!( + format!("{trap:?}").contains("cannot leave component instance"), + "bad trap: {trap:?}", + ); + + let trace = trap.downcast_ref::().unwrap().frames(); + assert_eq!(trace.len(), 4); + + // This was our entry point... + assert_eq!(trace[3].module().name(), Some("m")); + assert_eq!(trace[3].func_name(), Some("run")); + + // ... which called an imported function which ends up being originally + // defined by the shim instance. The shim instance then does an indirect + // call through a table which goes to the `canon.lower`'d host function + assert_eq!(trace[2].module().name(), Some("host_shim")); + assert_eq!(trace[2].func_name(), Some("shim_ret_string")); + + // ... and the lowered host function will call realloc to allocate space for + // the result + assert_eq!(trace[1].module().name(), Some("m")); + assert_eq!(trace[1].func_name(), Some("realloc")); + + // ... but realloc calls the shim instance and tries to exit the + // component, triggering a dynamic trap + assert_eq!(trace[0].module().name(), Some("host_shim")); + assert_eq!(trace[0].func_name(), Some("shim_thunk")); + } + + { + let mut store = Store::new(&engine, ()); + + // In addition to the above trap also ensure that when we enter a wasm + // component if we try to leave while lowering then that's also a dynamic + // trap. + let trap = linker + .instantiate(&mut store, &component)? + .get_typed_func::<(&str,), ()>(&mut store, "take-string")? + .call(&mut store, ("x",)) + .unwrap_err(); + assert!( + format!("{trap:?}").contains("cannot leave component instance"), + "bad trap: {trap:?}", + ); + } - let trace = trap.downcast_ref::().unwrap().frames(); - assert_eq!(trace.len(), 4); - - // This was our entry point... - assert_eq!(trace[3].module().name(), Some("m")); - assert_eq!(trace[3].func_name(), Some("run")); - - // ... 
which called an imported function which ends up being originally - // defined by the shim instance. The shim instance then does an indirect - // call through a table which goes to the `canon.lower`'d host function - assert_eq!(trace[2].module().name(), Some("host_shim")); - assert_eq!(trace[2].func_name(), Some("shim_ret_string")); - - // ... and the lowered host function will call realloc to allocate space for - // the result - assert_eq!(trace[1].module().name(), Some("m")); - assert_eq!(trace[1].func_name(), Some("realloc")); - - // ... but realloc calls the shim instance and tries to exit the - // component, triggering a dynamic trap - assert_eq!(trace[0].module().name(), Some("host_shim")); - assert_eq!(trace[0].func_name(), Some("shim_thunk")); - - // In addition to the above trap also ensure that when we enter a wasm - // component if we try to leave while lowering then that's also a dynamic - // trap. - let trap = linker - .instantiate(&mut store, &component)? - .get_typed_func::<(&str,), ()>(&mut store, "take-string")? - .call(&mut store, ("x",)) - .unwrap_err(); - assert!( - format!("{trap:?}").contains("cannot leave component instance"), - "bad trap: {trap:?}", - ); Ok(()) } @@ -1048,28 +1056,34 @@ fn bad_import_alignment() -> Result<()> { -> Result<()> { unreachable!() }, )?; let component = Component::new(&engine, component)?; - let mut store = Store::new(&engine, ()); - let trap = linker - .instantiate(&mut store, &component)? - .get_typed_func::<(), ()>(&mut store, "unaligned-retptr2")? - .call(&mut store, ()) - .unwrap_err(); - assert!( - format!("{trap:?}").contains("pointer not aligned"), - "{}", - trap - ); - let trap = linker - .instantiate(&mut store, &component)? - .get_typed_func::<(), ()>(&mut store, "unaligned-argptr2")? - .call(&mut store, ()) - .unwrap_err(); - assert!( - format!("{trap:?}").contains("pointer not aligned"), - "{}", - trap - ); + { + let mut store = Store::new(&engine, ()); + let trap = linker + .instantiate(&mut store, &component)? + .get_typed_func::<(), ()>(&mut store, "unaligned-retptr2")? + .call(&mut store, ()) + .unwrap_err(); + assert!( + format!("{trap:?}").contains("pointer not aligned"), + "{}", + trap + ); + } + + { + let mut store = Store::new(&engine, ()); + let trap = linker + .instantiate(&mut store, &component)? + .get_typed_func::<(), ()>(&mut store, "unaligned-argptr2")? 
+ .call(&mut store, ()) + .unwrap_err(); + assert!( + format!("{trap:?}").contains("pointer not aligned"), + "{}", + trap + ); + } Ok(()) } diff --git a/tests/all/component_model/strings.rs b/tests/all/component_model/strings.rs index a33dd960d205..66d65681cb7b 100644 --- a/tests/all/component_model/strings.rs +++ b/tests/all/component_model/strings.rs @@ -314,10 +314,10 @@ fn test_ptr_overflow(engine: &Engine, src: &str, dst: &str) -> Result<()> { ); let component = Component::new(engine, &component)?; - let mut store = Store::new(engine, ()); - let mut test_overflow = |size: u32| -> Result<()> { + let test_overflow = |size: u32| -> Result<()> { println!("src={src} dst={dst} size={size:#x}"); + let mut store = Store::new(engine, ()); let instance = Linker::new(engine).instantiate(&mut store, &component)?; let func = instance.get_typed_func::<(u32,), ()>(&mut store, "f")?; let trap = func diff --git a/tests/all/pooling_allocator.rs b/tests/all/pooling_allocator.rs index fddd26bf64e8..4cf4a098d98f 100644 --- a/tests/all/pooling_allocator.rs +++ b/tests/all/pooling_allocator.rs @@ -859,7 +859,7 @@ fn component_instance_size_limit() -> Result<()> { match wasmtime::component::Component::new(&engine, "(component)") { Ok(_) => panic!("should have hit limit"), Err(e) => e.assert_contains( - "instance allocation for this component requires 48 bytes of \ + "instance allocation for this component requires 56 bytes of \ `VMComponentContext` space which exceeds the configured maximum of 1 bytes", ), } diff --git a/tests/disas/component-model/direct-adapter-calls-inlining.wat b/tests/disas/component-model/direct-adapter-calls-inlining.wat index 01717f76aee7..c236ca2707ec 100644 --- a/tests/disas/component-model/direct-adapter-calls-inlining.wat +++ b/tests/disas/component-model/direct-adapter-calls-inlining.wat @@ -64,10 +64,11 @@ ;; gv6 = load.i64 notrap aligned gv5+16 ;; gv7 = vmctx ;; gv8 = load.i64 notrap aligned readonly can_move gv7+120 -;; gv9 = load.i64 notrap aligned readonly can_move gv7+96 -;; gv10 = vmctx -;; gv11 = load.i64 notrap aligned readonly gv10+8 -;; gv12 = load.i64 notrap aligned gv11+16 +;; gv9 = load.i64 notrap aligned readonly can_move gv7+144 +;; gv10 = load.i64 notrap aligned readonly can_move gv7+96 +;; gv11 = vmctx +;; gv12 = load.i64 notrap aligned readonly gv11+8 +;; gv13 = load.i64 notrap aligned gv12+16 ;; sig0 = (i64 vmctx, i64, i32) -> i32 tail ;; sig1 = (i64 vmctx, i64, i32) tail ;; sig2 = (i64 vmctx, i64, i32) -> i32 tail @@ -90,68 +91,51 @@ ;; brif v18, block4, block5 ;; ;; block4: -;; v93 = load.i64 notrap aligned readonly can_move v5+72 -;; v94 = load.i64 notrap aligned readonly can_move v5+88 +;; v21 = load.i64 notrap aligned readonly can_move v5+72 +;; v20 = load.i64 notrap aligned readonly can_move v5+88 ;; v19 = iconst.i32 24 -;; call_indirect sig1, v93(v94, v5, v19) ; v19 = 24 +;; call_indirect sig1, v21(v20, v5, v19) ; v19 = 24 ;; trap user11 ;; ;; block5: -;; v22 = load.i64 notrap aligned readonly can_move v5+96 +;; v22 = load.i64 notrap aligned readonly can_move v5+144 ;; v23 = load.i32 notrap aligned table v22 -;; v24 = iconst.i32 2 -;; v25 = band v23, v24 ; v24 = 2 -;; v85 = iconst.i32 0 -;; v86 = icmp eq v25, v85 ; v85 = 0 -;; v28 = uextend.i32 v86 -;; brif v28, block6, block7 +;; v61 = iconst.i32 0 +;; store notrap aligned table v61, v22 ; v61 = 0 +;; v26 = load.i64 notrap aligned readonly can_move v5+96 +;; v27 = load.i32 notrap aligned table v26 +;; v28 = iconst.i32 -2 +;; v29 = band v27, v28 ; v28 = -2 +;; store notrap aligned table v29, v26 +;; 
v62 = iconst.i32 1 +;; v63 = bor v27, v62 ; v62 = 1 +;; store notrap aligned table v63, v26 +;; jump block6 ;; ;; block6: -;; v21 = load.i64 notrap aligned readonly can_move v5+72 -;; v20 = load.i64 notrap aligned readonly can_move v5+88 -;; v29 = iconst.i32 18 -;; call_indirect sig1, v21(v20, v5, v29) ; v29 = 18 -;; trap user11 +;; jump block7 ;; ;; block7: -;; v34 = iconst.i32 -3 -;; v35 = band.i32 v23, v34 ; v34 = -3 -;; store notrap aligned table v35, v22 -;; v66 = iconst.i32 -4 -;; v72 = band.i32 v23, v66 ; v66 = -4 -;; store notrap aligned table v72, v22 -;; v87 = iconst.i32 1 -;; v88 = bor v35, v87 ; v87 = 1 -;; store notrap aligned table v88, v22 ;; jump block8 ;; ;; block8: -;; jump block9 -;; -;; block9: -;; jump block10 -;; -;; block10: -;; v51 = load.i32 notrap aligned table v12 -;; v39 = iconst.i32 -2 -;; v53 = band v51, v39 ; v39 = -2 -;; store notrap aligned table v53, v12 -;; v89 = iconst.i32 1 -;; v90 = bor v51, v89 ; v89 = 1 -;; store notrap aligned table v90, v12 -;; v61 = load.i32 notrap aligned table v22 -;; v91 = iconst.i32 2 -;; v92 = bor v61, v91 ; v91 = 2 -;; store notrap aligned table v92, v22 +;; v40 = load.i32 notrap aligned table v12 +;; v64 = iconst.i32 -2 +;; v65 = band v40, v64 ; v64 = -2 +;; store notrap aligned table v65, v12 +;; v66 = iconst.i32 1 +;; v67 = bor v40, v66 ; v66 = 1 +;; store notrap aligned table v67, v12 +;; store.i32 notrap aligned table v23, v22 ;; jump block3 ;; ;; block3: -;; jump block11 +;; jump block9 ;; -;; block11: +;; block9: ;; @00f0 jump block1 ;; ;; block1: -;; v76 = iconst.i32 1276 -;; @00f0 return v76 ; v76 = 1276 +;; v52 = iconst.i32 1276 +;; @00f0 return v52 ; v52 = 1276 ;; } diff --git a/tests/disas/component-model/direct-adapter-calls-x64.wat b/tests/disas/component-model/direct-adapter-calls-x64.wat index 36096bbf26c7..773cc4425353 100644 --- a/tests/disas/component-model/direct-adapter-calls-x64.wat +++ b/tests/disas/component-model/direct-adapter-calls-x64.wat @@ -85,52 +85,49 @@ ;; movq %rsp, %rbp ;; movq 8(%rdi), %r10 ;; movq 0x10(%r10), %r10 -;; addq $0x20, %r10 +;; addq $0x30, %r10 ;; cmpq %rsp, %r10 -;; ja 0x120 -;; 79: subq $0x10, %rsp +;; ja 0x113 +;; 79: subq $0x20, %rsp ;; movq %rbx, (%rsp) -;; movq %r13, 8(%rsp) -;; movq 0x78(%rdi), %rbx -;; movl (%rbx), %eax -;; testl $1, %eax -;; je 0x10b -;; 97: movq 0x60(%rdi), %r13 -;; movq %rdi, %rsi -;; movl (%r13), %eax -;; testl $2, %eax -;; je 0xf9 -;; ad: movq %rax, %r8 -;; andl $0xfffffffd, %r8d -;; movl %r8d, (%r13) -;; andl $0xfffffffc, %eax -;; movl %eax, (%r13) -;; orl $1, %r8d -;; movl %r8d, (%r13) -;; movq 0x40(%rsi), %rdi +;; movq %r12, 8(%rsp) +;; movq %r14, 0x10(%rsp) +;; movq 0x78(%rdi), %r14 +;; movl (%r14), %esi +;; testl $1, %esi +;; je 0xff +;; 9e: movq 0x90(%rdi), %rbx +;; movl (%rbx), %r12d +;; movl $0, (%rbx) +;; movq 0x60(%rdi), %rcx +;; movq %rdi, %r9 +;; movl (%rcx), %eax +;; movq %rax, %r8 +;; andl $0xfffffffe, %r8d +;; movl %r8d, (%rcx) +;; orl $1, %eax +;; movl %eax, (%rcx) +;; movq 0x40(%r9), %rdi +;; movq %r9, %rsi ;; callq 0 -;; movl (%rbx), %r10d -;; movq %r10, %rsi -;; andl $0xfffffffe, %esi -;; movl %esi, (%rbx) -;; orl $1, %r10d -;; movl %r10d, (%rbx) -;; orl $2, (%r13) +;; movl (%r14), %ecx +;; movq %rcx, %r8 +;; andl $0xfffffffe, %r8d +;; movl %r8d, (%r14) +;; orl $1, %ecx +;; movl %ecx, (%r14) +;; movl %r12d, (%rbx) ;; movq (%rsp), %rbx -;; movq 8(%rsp), %r13 -;; addq $0x10, %rsp +;; movq 8(%rsp), %r12 +;; movq 0x10(%rsp), %r14 +;; addq $0x20, %rsp ;; movq %rbp, %rsp ;; popq %rbp ;; retq -;; f9: movq 0x48(%rsi), %r8 -;; fd: movq 
0x58(%rsi), %rdi -;; 101: movl $0x12, %edx -;; 106: callq *%r8 -;; 109: ud2 -;; 10b: movq %rdi, %rsi -;; 10e: movq 0x48(%rsi), %r10 -;; 112: movq 0x58(%rsi), %rdi -;; 116: movl $0x18, %edx -;; 11b: callq *%r10 -;; 11e: ud2 -;; 120: ud2 +;; ff: movq %rdi, %rsi +;; 102: movq 0x48(%rsi), %rax +;; 106: movq 0x58(%rsi), %rdi +;; 10a: movl $0x18, %edx +;; 10f: callq *%rax +;; 111: ud2 +;; 113: ud2 diff --git a/tests/disas/component-model/direct-adapter-calls.wat b/tests/disas/component-model/direct-adapter-calls.wat index 2d953043117e..6a74df662a60 100644 --- a/tests/disas/component-model/direct-adapter-calls.wat +++ b/tests/disas/component-model/direct-adapter-calls.wat @@ -97,70 +97,53 @@ ;; gv2 = load.i64 notrap aligned gv1+16 ;; gv3 = vmctx ;; gv4 = load.i64 notrap aligned readonly can_move gv3+120 -;; gv5 = load.i64 notrap aligned readonly can_move gv3+96 +;; gv5 = load.i64 notrap aligned readonly can_move gv3+144 +;; gv6 = load.i64 notrap aligned readonly can_move gv3+96 ;; sig0 = (i64 vmctx, i64, i32) tail ;; sig1 = (i64 vmctx, i64, i32) -> i32 tail ;; fn0 = colocated u0:0 sig1 ;; stack_limit = gv2 ;; ;; block0(v0: i64, v1: i64, v2: i32): -;; @0077 v5 = load.i64 notrap aligned readonly can_move v0+120 -;; @0077 v6 = load.i32 notrap aligned table v5 -;; @0079 v7 = iconst.i32 1 -;; @007b v8 = band v6, v7 ; v7 = 1 -;; @0075 v4 = iconst.i32 0 -;; @007c v9 = icmp eq v8, v4 ; v4 = 0 -;; @007c v10 = uextend.i32 v9 -;; @007d brif v10, block2, block3 +;; @0092 v5 = load.i64 notrap aligned readonly can_move v0+120 +;; @0092 v6 = load.i32 notrap aligned table v5 +;; @0094 v7 = iconst.i32 1 +;; @0096 v8 = band v6, v7 ; v7 = 1 +;; @0090 v4 = iconst.i32 0 +;; @0097 v9 = icmp eq v8, v4 ; v4 = 0 +;; @0097 v10 = uextend.i32 v9 +;; @0098 brif v10, block2, block3 ;; ;; block2: -;; v94 = load.i64 notrap aligned readonly can_move v0+72 -;; v95 = load.i64 notrap aligned readonly can_move v0+88 -;; @007f v11 = iconst.i32 24 -;; @0081 call_indirect sig0, v94(v95, v0, v11) ; v11 = 24 -;; @0083 trap user11 +;; @009c v14 = load.i64 notrap aligned readonly can_move v0+72 +;; @009c v13 = load.i64 notrap aligned readonly can_move v0+88 +;; @009a v11 = iconst.i32 24 +;; @009c call_indirect sig0, v14(v13, v0, v11) ; v11 = 24 +;; @009e trap user11 ;; ;; block3: -;; @0085 v15 = load.i64 notrap aligned readonly can_move v0+96 -;; @0085 v16 = load.i32 notrap aligned table v15 -;; @0087 v17 = iconst.i32 2 -;; @0089 v18 = band v16, v17 ; v17 = 2 -;; v87 = iconst.i32 0 -;; v88 = icmp eq v18, v87 ; v87 = 0 -;; @008a v20 = uextend.i32 v88 -;; @008b brif v20, block4, block5 -;; -;; block4: -;; @0081 v14 = load.i64 notrap aligned readonly can_move v0+72 -;; @0081 v13 = load.i64 notrap aligned readonly can_move v0+88 -;; @008d v21 = iconst.i32 18 -;; @008f call_indirect sig0, v14(v13, v0, v21) ; v21 = 18 -;; @0091 trap user11 -;; -;; block5: -;; @0095 v27 = iconst.i32 -3 -;; @0097 v28 = band.i32 v16, v27 ; v27 = -3 -;; @0098 store notrap aligned table v28, v15 -;; v75 = iconst.i32 -4 -;; v81 = band.i32 v16, v75 ; v75 = -4 -;; @009f store notrap aligned table v81, v15 -;; v89 = iconst.i32 1 -;; v90 = bor v28, v89 ; v89 = 1 -;; @00a8 store notrap aligned table v90, v15 -;; @00aa v41 = load.i64 notrap aligned readonly can_move v0+64 -;; @00aa v42 = call fn0(v41, v0, v2) -;; @00ae v44 = load.i32 notrap aligned table v5 -;; @009c v32 = iconst.i32 -2 -;; @00b2 v46 = band v44, v32 ; v32 = -2 -;; @00b3 store notrap aligned table v46, v5 -;; v91 = bor v44, v89 ; v89 = 1 -;; @00bc store notrap aligned table v91, v5 -;; @00be v54 = load.i32 
notrap aligned table v15 -;; v92 = iconst.i32 2 -;; v93 = bor v54, v92 ; v92 = 2 -;; @00c3 store notrap aligned table v93, v15 -;; @00c5 jump block1 +;; @00a0 v15 = load.i64 notrap aligned readonly can_move v0+144 +;; @00a0 v16 = load.i32 notrap aligned table v15 +;; v60 = iconst.i32 0 +;; @00a6 store notrap aligned table v60, v15 ; v60 = 0 +;; @00a8 v19 = load.i64 notrap aligned readonly can_move v0+96 +;; @00a8 v20 = load.i32 notrap aligned table v19 +;; @00aa v21 = iconst.i32 -2 +;; @00ac v22 = band v20, v21 ; v21 = -2 +;; @00ad store notrap aligned table v22, v19 +;; v61 = iconst.i32 1 +;; v62 = bor v20, v61 ; v61 = 1 +;; @00b6 store notrap aligned table v62, v19 +;; @00b8 v30 = load.i64 notrap aligned readonly can_move v0+64 +;; @00b8 v31 = call fn0(v30, v0, v2) +;; @00bc v33 = load.i32 notrap aligned table v5 +;; @00c0 v35 = band v33, v21 ; v21 = -2 +;; @00c1 store notrap aligned table v35, v5 +;; v63 = bor v33, v61 ; v61 = 1 +;; @00ca store notrap aligned table v63, v5 +;; @00ce store notrap aligned table v16, v15 +;; @00d0 jump block1 ;; ;; block1: -;; @00c5 return v42 +;; @00d0 return v31 ;; } diff --git a/tests/misc_testsuite/component-model/async/future-read.wast b/tests/misc_testsuite/component-model/async/future-read.wast index 934f909590e4..ec80f85796b7 100644 --- a/tests/misc_testsuite/component-model/async/future-read.wast +++ b/tests/misc_testsuite/component-model/async/future-read.wast @@ -25,7 +25,7 @@ (export "read" (func $read)) )) )) - (func (export "run") (param "x" $future) + (func (export "run") async (param "x" $future) (canon lift (core func $i "run"))) ) (instance $child (instantiate $child)) diff --git a/tests/misc_testsuite/component-model/async/reenter-during-yield.wast b/tests/misc_testsuite/component-model/async/reenter-during-yield.wast new file mode 100644 index 000000000000..61fcb12ae107 --- /dev/null +++ b/tests/misc_testsuite/component-model/async/reenter-during-yield.wast @@ -0,0 +1,82 @@ +;;! component_model_async = true +;;! reference_types = true +;;! gc_types = true +;;! multi_memory = true + +(component + (component $a + (core module $a + (import "" "yield" (func $yield (result i32))) + + (func (export "yield-loop") (result i32) + ;; simulate `waitable-set.poll` with a yield loop + (loop + call $yield + drop + br 0 + ) + unreachable + ) + + ;; not reached + (func (export "callback") (param i32 i32 i32) (result i32) unreachable) + + (func (export "noop")) + ) + (core func $yield (canon thread.yield)) + (core instance $a (instantiate $a + (with "" (instance + (export "yield" (func $yield)) + )) + )) + (func (export "yield-loop") async + (canon lift + (core func $a "yield-loop") + async + (callback (func $a "callback")) + ) + ) + (func (export "noop") (canon lift (core func $a "noop"))) + ) + (instance $a (instantiate $a)) + + (component $b + (import "yield-loop" (func $yield-loop async)) + (import "noop" (func $noop)) + + (core func $yield-loop (canon lower (func $yield-loop) async)) + (core func $noop (canon lower (func $noop))) + + (core module $b + (import "" "yield-loop" (func $yield-loop (result i32))) + (import "" "noop" (func $noop)) + + (func (export "run") + ;; call `yield-loop`, double-check it's in the "started" state. + call $yield-loop + i32.const 0xf + i32.and + i32.const 1 + i32.ne + if unreachable end + + ;; now try to reenter the other component with some other function. 
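+      ;; this call is expected to complete rather than trap: `yield-loop` is
+      ;; suspended at a yield point, not blocked inside a synchronous call,
+      ;; so the instance should still be enterable (the `assert_return` at
+      ;; the bottom of this file checks that `run` returns successfully).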
+ call $noop + ) + ) + (core instance $b (instantiate $b + (with "" (instance + (export "yield-loop" (func $yield-loop)) + (export "noop" (func $noop)) + )) + )) + (func (export "run") async (canon lift (core func $b "run"))) + ) + (instance $b (instantiate $b + (with "yield-loop" (func $a "yield-loop")) + (with "noop" (func $a "noop")) + )) + (export "run" (func $b "run")) +) + +(assert_return (invoke "run")) diff --git a/tests/misc_testsuite/component-model/async/trap-if-done.wast b/tests/misc_testsuite/component-model/async/trap-if-done.wast index d4220cbc6300..742edac1feef 100644 --- a/tests/misc_testsuite/component-model/async/trap-if-done.wast +++ b/tests/misc_testsuite/component-model/async/trap-if-done.wast @@ -122,22 +122,22 @@ )))) (func (export "start-future") (result (future u8)) (canon lift (core func $cm "start-future"))) (func (export "future-write") (result u32) (canon lift (core func $cm "future-write"))) - (func (export "acknowledge-future-write") (canon lift (core func $cm "acknowledge-future-write"))) + (func (export "acknowledge-future-write") async (canon lift (core func $cm "acknowledge-future-write"))) (func (export "future-drop-writable") (canon lift (core func $cm "future-drop-writable"))) (func (export "start-stream") (result (stream u8)) (canon lift (core func $cm "start-stream"))) (func (export "stream-write") (result u32) (canon lift (core func $cm "stream-write"))) - (func (export "acknowledge-stream-write") (canon lift (core func $cm "acknowledge-stream-write"))) + (func (export "acknowledge-stream-write") async (canon lift (core func $cm "acknowledge-stream-write"))) (func (export "stream-drop-writable") (canon lift (core func $cm "stream-drop-writable"))) ) (component $D (import "c" (instance $c (export "start-future" (func (result (future u8)))) (export "future-write" (func (result u32))) - (export "acknowledge-future-write" (func)) + (export "acknowledge-future-write" (func async)) (export "future-drop-writable" (func)) (export "start-stream" (func (result (stream u8)))) (export "stream-write" (func (result u32))) - (export "acknowledge-stream-write" (func)) + (export "acknowledge-stream-write" (func async)) (export "stream-drop-writable" (func)) ))