diff --git a/crates/component-macro/tests/expanded/char_concurrent.rs b/crates/component-macro/tests/expanded/char_concurrent.rs index 18b9d225a13d..17e218da4d1f 100644 --- a/crates/component-macro/tests/expanded/char_concurrent.rs +++ b/crates/component-macro/tests/expanded/char_concurrent.rs @@ -218,7 +218,7 @@ pub mod foo { "take-char", move |caller: &wasmtime::component::Accessor, (arg0,): (char,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::take_char(accessor, arg0) .await; Ok(r) @@ -229,7 +229,7 @@ pub mod foo { "return-char", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::return_char(accessor).await; Ok((r,)) }) diff --git a/crates/component-macro/tests/expanded/conventions_concurrent.rs b/crates/component-macro/tests/expanded/conventions_concurrent.rs index 9c4e16e970d3..34ad59182a8c 100644 --- a/crates/component-macro/tests/expanded/conventions_concurrent.rs +++ b/crates/component-macro/tests/expanded/conventions_concurrent.rs @@ -286,7 +286,7 @@ pub mod foo { "kebab-case", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::kebab_case(accessor).await; Ok(r) }) @@ -299,7 +299,7 @@ pub mod foo { (arg0,): (LudicrousSpeed,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::foo(accessor, arg0).await; Ok(r) }) @@ -309,7 +309,7 @@ pub mod foo { "function-with-dashes", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let 
accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::function_with_dashes(accessor) .await; Ok(r) @@ -320,7 +320,7 @@ pub mod foo { "function-with-no-weird-characters", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::function_with_no_weird_characters( accessor, ) @@ -333,7 +333,7 @@ pub mod foo { "apple", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::apple(accessor).await; Ok(r) }) @@ -343,7 +343,7 @@ pub mod foo { "apple-pear", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::apple_pear(accessor).await; Ok(r) }) @@ -353,7 +353,7 @@ pub mod foo { "apple-pear-grape", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::apple_pear_grape(accessor) .await; Ok(r) @@ -364,7 +364,7 @@ pub mod foo { "a0", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::a0(accessor).await; Ok(r) }) @@ -374,7 +374,7 @@ pub mod foo { "is-XML", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::is_xml(accessor).await; Ok(r) }) @@ 
-384,7 +384,7 @@ pub mod foo { "explicit", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::explicit(accessor).await; Ok(r) }) @@ -394,7 +394,7 @@ pub mod foo { "explicit-kebab", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::explicit_kebab(accessor).await; Ok(r) }) @@ -404,7 +404,7 @@ pub mod foo { "bool", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::bool(accessor).await; Ok(r) }) diff --git a/crates/component-macro/tests/expanded/dead-code_concurrent.rs b/crates/component-macro/tests/expanded/dead-code_concurrent.rs index 48a3e7be18e3..8e862699aa82 100644 --- a/crates/component-macro/tests/expanded/dead-code_concurrent.rs +++ b/crates/component-macro/tests/expanded/dead-code_concurrent.rs @@ -228,7 +228,7 @@ pub mod a { "f", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::f(accessor).await; Ok((r,)) }) diff --git a/crates/component-macro/tests/expanded/direct-import_concurrent.rs b/crates/component-macro/tests/expanded/direct-import_concurrent.rs index af13e65bc0b8..6cb2a3c72bdd 100644 --- a/crates/component-macro/tests/expanded/direct-import_concurrent.rs +++ b/crates/component-macro/tests/expanded/direct-import_concurrent.rs @@ -184,7 +184,7 @@ const _: () = { "foo", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - 
let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::foo(accessor).await; Ok(r) }) diff --git a/crates/component-macro/tests/expanded/flags_concurrent.rs b/crates/component-macro/tests/expanded/flags_concurrent.rs index dc33af21457b..361019023b8a 100644 --- a/crates/component-macro/tests/expanded/flags_concurrent.rs +++ b/crates/component-macro/tests/expanded/flags_concurrent.rs @@ -351,7 +351,7 @@ pub mod foo { "roundtrip-flag1", move |caller: &wasmtime::component::Accessor, (arg0,): (Flag1,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::roundtrip_flag1(accessor, arg0) .await; Ok((r,)) @@ -362,7 +362,7 @@ pub mod foo { "roundtrip-flag2", move |caller: &wasmtime::component::Accessor, (arg0,): (Flag2,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::roundtrip_flag2(accessor, arg0) .await; Ok((r,)) @@ -373,7 +373,7 @@ pub mod foo { "roundtrip-flag4", move |caller: &wasmtime::component::Accessor, (arg0,): (Flag4,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::roundtrip_flag4(accessor, arg0) .await; Ok((r,)) @@ -384,7 +384,7 @@ pub mod foo { "roundtrip-flag8", move |caller: &wasmtime::component::Accessor, (arg0,): (Flag8,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::roundtrip_flag8(accessor, arg0) .await; Ok((r,)) @@ -395,7 +395,7 @@ pub mod foo { "roundtrip-flag16", move |caller: &wasmtime::component::Accessor, (arg0,): (Flag16,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); 
+ let accessor = &caller.with_getter(host_getter); let r = ::roundtrip_flag16( accessor, arg0, @@ -409,7 +409,7 @@ pub mod foo { "roundtrip-flag32", move |caller: &wasmtime::component::Accessor, (arg0,): (Flag32,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::roundtrip_flag32( accessor, arg0, @@ -423,7 +423,7 @@ pub mod foo { "roundtrip-flag64", move |caller: &wasmtime::component::Accessor, (arg0,): (Flag64,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::roundtrip_flag64( accessor, arg0, diff --git a/crates/component-macro/tests/expanded/floats_concurrent.rs b/crates/component-macro/tests/expanded/floats_concurrent.rs index 4074956a70b7..48b41f84fb8d 100644 --- a/crates/component-macro/tests/expanded/floats_concurrent.rs +++ b/crates/component-macro/tests/expanded/floats_concurrent.rs @@ -225,7 +225,7 @@ pub mod foo { "f32-param", move |caller: &wasmtime::component::Accessor, (arg0,): (f32,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::f32_param(accessor, arg0) .await; Ok(r) @@ -236,7 +236,7 @@ pub mod foo { "f64-param", move |caller: &wasmtime::component::Accessor, (arg0,): (f64,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::f64_param(accessor, arg0) .await; Ok(r) @@ -247,7 +247,7 @@ pub mod foo { "f32-result", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::f32_result(accessor).await; Ok((r,)) }) @@ -257,7 +257,7 
@@ pub mod foo { "f64-result", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::f64_result(accessor).await; Ok((r,)) }) diff --git a/crates/component-macro/tests/expanded/host-world_concurrent.rs b/crates/component-macro/tests/expanded/host-world_concurrent.rs index 4e01b30ad8f6..2b6fcd675bfa 100644 --- a/crates/component-macro/tests/expanded/host-world_concurrent.rs +++ b/crates/component-macro/tests/expanded/host-world_concurrent.rs @@ -184,7 +184,7 @@ const _: () = { "foo", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::foo(accessor).await; Ok(r) }) diff --git a/crates/component-macro/tests/expanded/integers_concurrent.rs b/crates/component-macro/tests/expanded/integers_concurrent.rs index b4013def35d3..22bcecf1377f 100644 --- a/crates/component-macro/tests/expanded/integers_concurrent.rs +++ b/crates/component-macro/tests/expanded/integers_concurrent.rs @@ -281,7 +281,7 @@ pub mod foo { "a1", move |caller: &wasmtime::component::Accessor, (arg0,): (u8,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::a1(accessor, arg0).await; Ok(r) }) @@ -291,7 +291,7 @@ pub mod foo { "a2", move |caller: &wasmtime::component::Accessor, (arg0,): (i8,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::a2(accessor, arg0).await; Ok(r) }) @@ -301,7 +301,7 @@ pub mod foo { "a3", move |caller: &wasmtime::component::Accessor, (arg0,): (u16,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor 
= &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::a3(accessor, arg0).await; Ok(r) }) @@ -311,7 +311,7 @@ pub mod foo { "a4", move |caller: &wasmtime::component::Accessor, (arg0,): (i16,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::a4(accessor, arg0).await; Ok(r) }) @@ -321,7 +321,7 @@ pub mod foo { "a5", move |caller: &wasmtime::component::Accessor, (arg0,): (u32,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::a5(accessor, arg0).await; Ok(r) }) @@ -331,7 +331,7 @@ pub mod foo { "a6", move |caller: &wasmtime::component::Accessor, (arg0,): (i32,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::a6(accessor, arg0).await; Ok(r) }) @@ -341,7 +341,7 @@ pub mod foo { "a7", move |caller: &wasmtime::component::Accessor, (arg0,): (u64,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::a7(accessor, arg0).await; Ok(r) }) @@ -351,7 +351,7 @@ pub mod foo { "a8", move |caller: &wasmtime::component::Accessor, (arg0,): (i64,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::a8(accessor, arg0).await; Ok(r) }) @@ -373,7 +373,7 @@ pub mod foo { ): (u8, i8, u16, i16, u32, i32, u64, i64)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::a9( accessor, arg0, @@ -394,7 +394,7 @@ pub mod foo { "r1", move |caller: 
&wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::r1(accessor).await; Ok((r,)) }) @@ -404,7 +404,7 @@ pub mod foo { "r2", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::r2(accessor).await; Ok((r,)) }) @@ -414,7 +414,7 @@ pub mod foo { "r3", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::r3(accessor).await; Ok((r,)) }) @@ -424,7 +424,7 @@ pub mod foo { "r4", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::r4(accessor).await; Ok((r,)) }) @@ -434,7 +434,7 @@ pub mod foo { "r5", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::r5(accessor).await; Ok((r,)) }) @@ -444,7 +444,7 @@ pub mod foo { "r6", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::r6(accessor).await; Ok((r,)) }) @@ -454,7 +454,7 @@ pub mod foo { "r7", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::r7(accessor).await; Ok((r,)) }) @@ 
-464,7 +464,7 @@ pub mod foo { "r8", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::r8(accessor).await; Ok((r,)) }) @@ -474,7 +474,7 @@ pub mod foo { "pair-ret", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::pair_ret(accessor).await; Ok((r,)) }) diff --git a/crates/component-macro/tests/expanded/lists_concurrent.rs b/crates/component-macro/tests/expanded/lists_concurrent.rs index 11324e1d83ad..d7b6d2ec5bc2 100644 --- a/crates/component-macro/tests/expanded/lists_concurrent.rs +++ b/crates/component-macro/tests/expanded/lists_concurrent.rs @@ -543,7 +543,7 @@ pub mod foo { (arg0,): (wasmtime::component::__internal::Vec,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::list_u8_param(accessor, arg0) .await; Ok(r) @@ -557,7 +557,7 @@ pub mod foo { (arg0,): (wasmtime::component::__internal::Vec,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::list_u16_param(accessor, arg0) .await; Ok(r) @@ -571,7 +571,7 @@ pub mod foo { (arg0,): (wasmtime::component::__internal::Vec,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::list_u32_param(accessor, arg0) .await; Ok(r) @@ -585,7 +585,7 @@ pub mod foo { (arg0,): (wasmtime::component::__internal::Vec,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = 
&caller.with_getter(host_getter); let r = ::list_u64_param(accessor, arg0) .await; Ok(r) @@ -599,7 +599,7 @@ pub mod foo { (arg0,): (wasmtime::component::__internal::Vec,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::list_s8_param(accessor, arg0) .await; Ok(r) @@ -613,7 +613,7 @@ pub mod foo { (arg0,): (wasmtime::component::__internal::Vec,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::list_s16_param(accessor, arg0) .await; Ok(r) @@ -627,7 +627,7 @@ pub mod foo { (arg0,): (wasmtime::component::__internal::Vec,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::list_s32_param(accessor, arg0) .await; Ok(r) @@ -641,7 +641,7 @@ pub mod foo { (arg0,): (wasmtime::component::__internal::Vec,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::list_s64_param(accessor, arg0) .await; Ok(r) @@ -655,7 +655,7 @@ pub mod foo { (arg0,): (wasmtime::component::__internal::Vec,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::list_f32_param(accessor, arg0) .await; Ok(r) @@ -669,7 +669,7 @@ pub mod foo { (arg0,): (wasmtime::component::__internal::Vec,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::list_f64_param(accessor, arg0) .await; Ok(r) @@ -680,7 +680,7 @@ pub mod foo { "list-u8-ret", move |caller: &wasmtime::component::Accessor, (): ()| { 
wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::list_u8_ret(accessor).await; Ok((r,)) }) @@ -690,7 +690,7 @@ pub mod foo { "list-u16-ret", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::list_u16_ret(accessor).await; Ok((r,)) }) @@ -700,7 +700,7 @@ pub mod foo { "list-u32-ret", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::list_u32_ret(accessor).await; Ok((r,)) }) @@ -710,7 +710,7 @@ pub mod foo { "list-u64-ret", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::list_u64_ret(accessor).await; Ok((r,)) }) @@ -720,7 +720,7 @@ pub mod foo { "list-s8-ret", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::list_s8_ret(accessor).await; Ok((r,)) }) @@ -730,7 +730,7 @@ pub mod foo { "list-s16-ret", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::list_s16_ret(accessor).await; Ok((r,)) }) @@ -740,7 +740,7 @@ pub mod foo { "list-s32-ret", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = 
&caller.with_getter(host_getter); let r = ::list_s32_ret(accessor).await; Ok((r,)) }) @@ -750,7 +750,7 @@ pub mod foo { "list-s64-ret", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::list_s64_ret(accessor).await; Ok((r,)) }) @@ -760,7 +760,7 @@ pub mod foo { "list-f32-ret", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::list_f32_ret(accessor).await; Ok((r,)) }) @@ -770,7 +770,7 @@ pub mod foo { "list-f64-ret", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::list_f64_ret(accessor).await; Ok((r,)) }) @@ -783,7 +783,7 @@ pub mod foo { (arg0,): (wasmtime::component::__internal::Vec<(u8, i8)>,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::tuple_list(accessor, arg0) .await; Ok((r,)) @@ -803,7 +803,7 @@ pub mod foo { )| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::string_list_arg(accessor, arg0) .await; Ok(r) @@ -814,7 +814,7 @@ pub mod foo { "string-list-ret", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::string_list_ret(accessor) .await; Ok((r,)) @@ -834,7 +834,7 @@ pub mod foo { )| { wasmtime::component::__internal::Box::pin(async move { - let 
accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::tuple_string_list( accessor, arg0, @@ -857,7 +857,7 @@ pub mod foo { )| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::string_list(accessor, arg0) .await; Ok((r,)) @@ -871,7 +871,7 @@ pub mod foo { (arg0,): (wasmtime::component::__internal::Vec,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::record_list(accessor, arg0) .await; Ok((r,)) @@ -885,7 +885,7 @@ pub mod foo { (arg0,): (wasmtime::component::__internal::Vec,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::record_list_reverse( accessor, arg0, @@ -902,7 +902,7 @@ pub mod foo { (arg0,): (wasmtime::component::__internal::Vec,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::variant_list(accessor, arg0) .await; Ok((r,)) @@ -916,7 +916,7 @@ pub mod foo { (arg0,): (LoadStoreAllSizes,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::load_store_everything( accessor, arg0, diff --git a/crates/component-macro/tests/expanded/many-arguments_concurrent.rs b/crates/component-macro/tests/expanded/many-arguments_concurrent.rs index 46c44fb96a63..3822a8b5f15e 100644 --- a/crates/component-macro/tests/expanded/many-arguments_concurrent.rs +++ b/crates/component-macro/tests/expanded/many-arguments_concurrent.rs @@ -352,7 +352,7 @@ pub mod foo { )| { wasmtime::component::__internal::Box::pin(async move { - let accessor = 
&caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::many_args( accessor, arg0, @@ -384,7 +384,7 @@ pub mod foo { (arg0,): (BigStruct,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::big_argument(accessor, arg0) .await; Ok(r) diff --git a/crates/component-macro/tests/expanded/multiversion_concurrent.rs b/crates/component-macro/tests/expanded/multiversion_concurrent.rs index 866e8184a177..93792c2ed84b 100644 --- a/crates/component-macro/tests/expanded/multiversion_concurrent.rs +++ b/crates/component-macro/tests/expanded/multiversion_concurrent.rs @@ -223,7 +223,7 @@ pub mod my { "x", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::x(accessor).await; Ok(r) }) @@ -259,7 +259,7 @@ pub mod my { "x", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::x(accessor).await; Ok(r) }) diff --git a/crates/component-macro/tests/expanded/records_concurrent.rs b/crates/component-macro/tests/expanded/records_concurrent.rs index f372fca3f501..becdf1917bc1 100644 --- a/crates/component-macro/tests/expanded/records_concurrent.rs +++ b/crates/component-macro/tests/expanded/records_concurrent.rs @@ -408,7 +408,7 @@ pub mod foo { (arg0,): ((char, u32),)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::tuple_arg(accessor, arg0) .await; Ok(r) @@ -419,7 +419,7 @@ pub mod foo { "tuple-result", move |caller: &wasmtime::component::Accessor, (): ()| { 
wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::tuple_result(accessor).await; Ok((r,)) }) @@ -429,7 +429,7 @@ pub mod foo { "empty-arg", move |caller: &wasmtime::component::Accessor, (arg0,): (Empty,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::empty_arg(accessor, arg0) .await; Ok(r) @@ -440,7 +440,7 @@ pub mod foo { "empty-result", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::empty_result(accessor).await; Ok((r,)) }) @@ -453,7 +453,7 @@ pub mod foo { (arg0,): (Scalars,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::scalar_arg(accessor, arg0) .await; Ok(r) @@ -464,7 +464,7 @@ pub mod foo { "scalar-result", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::scalar_result(accessor).await; Ok((r,)) }) @@ -477,7 +477,7 @@ pub mod foo { (arg0,): (ReallyFlags,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::flags_arg(accessor, arg0) .await; Ok(r) @@ -488,7 +488,7 @@ pub mod foo { "flags-result", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::flags_result(accessor).await; Ok((r,)) }) @@ 
-501,7 +501,7 @@ pub mod foo { (arg0,): (Aggregates,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::aggregate_arg(accessor, arg0) .await; Ok(r) @@ -512,7 +512,7 @@ pub mod foo { "aggregate-result", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::aggregate_result(accessor) .await; Ok((r,)) @@ -526,7 +526,7 @@ pub mod foo { (arg0,): (TupleTypedef2,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::typedef_inout(accessor, arg0) .await; Ok((r,)) diff --git a/crates/component-macro/tests/expanded/rename_concurrent.rs b/crates/component-macro/tests/expanded/rename_concurrent.rs index 976fb32856fe..37609b7ab4e8 100644 --- a/crates/component-macro/tests/expanded/rename_concurrent.rs +++ b/crates/component-macro/tests/expanded/rename_concurrent.rs @@ -238,7 +238,7 @@ pub mod foo { "foo", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::foo(accessor).await; Ok((r,)) }) diff --git a/crates/component-macro/tests/expanded/resources-export_concurrent.rs b/crates/component-macro/tests/expanded/resources-export_concurrent.rs index 4f1121af72d0..7060f72591d1 100644 --- a/crates/component-macro/tests/expanded/resources-export_concurrent.rs +++ b/crates/component-macro/tests/expanded/resources-export_concurrent.rs @@ -269,7 +269,7 @@ pub mod foo { wasmtime::component::ResourceType::host::(), move |caller: &wasmtime::component::Accessor, rep| { wasmtime::component::__internal::Box::pin(async move { - let 
accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); HostYWithStore::drop( accessor, wasmtime::component::Resource::new_own(rep), diff --git a/crates/component-macro/tests/expanded/resources-import_concurrent.rs b/crates/component-macro/tests/expanded/resources-import_concurrent.rs index 684c268a32fd..0ce0bbb3768d 100644 --- a/crates/component-macro/tests/expanded/resources-import_concurrent.rs +++ b/crates/component-macro/tests/expanded/resources-import_concurrent.rs @@ -259,7 +259,7 @@ const _: () = { wasmtime::component::ResourceType::host::(), move |caller: &wasmtime::component::Accessor, rep| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); HostWorldResourceWithStore::drop( accessor, wasmtime::component::Resource::new_own(rep), @@ -273,7 +273,7 @@ const _: () = { "some-world-func", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::some_world_func( accessor, ) @@ -287,7 +287,7 @@ const _: () = { "[constructor]world-resource", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::new(accessor) .await; Ok((r,)) @@ -302,7 +302,7 @@ const _: () = { (arg0,): (wasmtime::component::Resource,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::foo( accessor, arg0, @@ -317,7 +317,7 @@ const _: () = { "[static]world-resource.static-foo", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = 
&caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::static_foo( accessor, ) @@ -593,7 +593,7 @@ pub mod foo { wasmtime::component::ResourceType::host::(), move |caller: &wasmtime::component::Accessor, rep| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); HostBarWithStore::drop( accessor, wasmtime::component::Resource::new_own(rep), @@ -607,7 +607,7 @@ pub mod foo { wasmtime::component::ResourceType::host::(), move |caller: &wasmtime::component::Accessor, rep| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); HostFallibleWithStore::drop( accessor, wasmtime::component::Resource::new_own(rep), @@ -620,7 +620,7 @@ pub mod foo { "[constructor]bar", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::new(accessor).await; Ok((r,)) }) @@ -630,7 +630,7 @@ pub mod foo { "[static]bar.static-a", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::static_a(accessor).await; Ok((r,)) }) @@ -643,7 +643,7 @@ pub mod foo { (arg0,): (wasmtime::component::Resource,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::method_a(accessor, arg0) .await; Ok((r,)) @@ -657,7 +657,7 @@ pub mod foo { (arg0,): (wasmtime::component::Resource,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); 
let r = ::bar_own_arg(accessor, arg0) .await; Ok(r) @@ -671,7 +671,7 @@ pub mod foo { (arg0,): (wasmtime::component::Resource,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::bar_borrow_arg(accessor, arg0) .await; Ok(r) @@ -682,7 +682,7 @@ pub mod foo { "bar-result", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::bar_result(accessor).await; Ok((r,)) }) @@ -695,7 +695,7 @@ pub mod foo { (arg0,): ((wasmtime::component::Resource, u32),)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::tuple_own_arg(accessor, arg0) .await; Ok(r) @@ -709,7 +709,7 @@ pub mod foo { (arg0,): ((wasmtime::component::Resource, u32),)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::tuple_borrow_arg( accessor, arg0, @@ -723,7 +723,7 @@ pub mod foo { "tuple-result", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::tuple_result(accessor).await; Ok((r,)) }) @@ -736,7 +736,7 @@ pub mod foo { (arg0,): (Option>,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::option_own_arg(accessor, arg0) .await; Ok(r) @@ -750,7 +750,7 @@ pub mod foo { (arg0,): (Option>,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = 
&caller.with_getter(host_getter); let r = ::option_borrow_arg( accessor, arg0, @@ -764,7 +764,7 @@ pub mod foo { "option-result", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::option_result(accessor).await; Ok((r,)) }) @@ -777,7 +777,7 @@ pub mod foo { (arg0,): (Result, ()>,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::result_own_arg(accessor, arg0) .await; Ok(r) @@ -791,7 +791,7 @@ pub mod foo { (arg0,): (Result, ()>,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::result_borrow_arg( accessor, arg0, @@ -805,7 +805,7 @@ pub mod foo { "result-result", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::result_result(accessor).await; Ok((r,)) }) @@ -824,7 +824,7 @@ pub mod foo { )| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::list_own_arg(accessor, arg0) .await; Ok(r) @@ -844,7 +844,7 @@ pub mod foo { )| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::list_borrow_arg(accessor, arg0) .await; Ok(r) @@ -855,7 +855,7 @@ pub mod foo { "list-result", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); 
let r = ::list_result(accessor).await; Ok((r,)) }) @@ -868,7 +868,7 @@ pub mod foo { (arg0,): (NestedOwn,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::record_own_arg(accessor, arg0) .await; Ok(r) @@ -882,7 +882,7 @@ pub mod foo { (arg0,): (NestedBorrow,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::record_borrow_arg( accessor, arg0, @@ -896,7 +896,7 @@ pub mod foo { "record-result", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::record_result(accessor).await; Ok((r,)) }) @@ -909,7 +909,7 @@ pub mod foo { (arg0,): (SomeHandle,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::func_with_handle_typedef( accessor, arg0, @@ -923,7 +923,7 @@ pub mod foo { "[constructor]fallible", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::new(accessor).await; Ok((r,)) }) @@ -969,7 +969,7 @@ pub mod foo { wasmtime::component::ResourceType::host::(), move |caller: &wasmtime::component::Accessor, rep| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); HostAWithStore::drop( accessor, wasmtime::component::Resource::new_own(rep), @@ -1059,7 +1059,7 @@ pub mod foo { "foo", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - 
let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::foo(accessor).await; Ok((r,)) }) @@ -1106,7 +1106,7 @@ pub mod foo { wasmtime::component::ResourceType::host::(), move |caller: &wasmtime::component::Accessor, rep| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); HostFooWithStore::drop( accessor, wasmtime::component::Resource::new_own(rep), diff --git a/crates/component-macro/tests/expanded/share-types_concurrent.rs b/crates/component-macro/tests/expanded/share-types_concurrent.rs index 2591cdd87c00..9348e23fa839 100644 --- a/crates/component-macro/tests/expanded/share-types_concurrent.rs +++ b/crates/component-macro/tests/expanded/share-types_concurrent.rs @@ -288,7 +288,7 @@ pub mod http_fetch { "fetch-request", move |caller: &wasmtime::component::Accessor, (arg0,): (Request,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::fetch_request(accessor, arg0).await; Ok((r,)) }) diff --git a/crates/component-macro/tests/expanded/simple-functions_concurrent.rs b/crates/component-macro/tests/expanded/simple-functions_concurrent.rs index 843d04ae8e33..f4af4e6153cb 100644 --- a/crates/component-macro/tests/expanded/simple-functions_concurrent.rs +++ b/crates/component-macro/tests/expanded/simple-functions_concurrent.rs @@ -235,7 +235,7 @@ pub mod foo { "f1", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::f1(accessor).await; Ok(r) }) @@ -245,7 +245,7 @@ pub mod foo { "f2", move |caller: &wasmtime::component::Accessor, (arg0,): (u32,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = 
&caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::f2(accessor, arg0).await; Ok(r) }) @@ -258,7 +258,7 @@ pub mod foo { (arg0, arg1): (u32, u32)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::f3(accessor, arg0, arg1).await; Ok(r) }) @@ -268,7 +268,7 @@ pub mod foo { "f4", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::f4(accessor).await; Ok((r,)) }) @@ -278,7 +278,7 @@ pub mod foo { "f5", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::f5(accessor).await; Ok((r,)) }) @@ -291,7 +291,7 @@ pub mod foo { (arg0, arg1, arg2): (u32, u32, u32)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::f6(accessor, arg0, arg1, arg2) .await; Ok((r,)) diff --git a/crates/component-macro/tests/expanded/simple-lists_concurrent.rs b/crates/component-macro/tests/expanded/simple-lists_concurrent.rs index 2dac5ac632c6..6e1e555171a5 100644 --- a/crates/component-macro/tests/expanded/simple-lists_concurrent.rs +++ b/crates/component-macro/tests/expanded/simple-lists_concurrent.rs @@ -243,7 +243,7 @@ pub mod foo { (arg0,): (wasmtime::component::__internal::Vec,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::simple_list1(accessor, arg0) .await; Ok(r) @@ -254,7 +254,7 @@ pub mod foo { "simple-list2", move |caller: &wasmtime::component::Accessor, (): ()| { 
wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::simple_list2(accessor).await; Ok((r,)) }) @@ -273,7 +273,7 @@ pub mod foo { )| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::simple_list3( accessor, arg0, @@ -297,7 +297,7 @@ pub mod foo { )| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::simple_list4(accessor, arg0) .await; Ok((r,)) diff --git a/crates/component-macro/tests/expanded/simple-wasi_concurrent.rs b/crates/component-macro/tests/expanded/simple-wasi_concurrent.rs index 3e2a1f612e11..2f8bc3dcaedd 100644 --- a/crates/component-macro/tests/expanded/simple-wasi_concurrent.rs +++ b/crates/component-macro/tests/expanded/simple-wasi_concurrent.rs @@ -273,7 +273,7 @@ pub mod foo { "create-directory-at", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::create_directory_at(accessor) .await; Ok((r,)) @@ -284,7 +284,7 @@ pub mod foo { "stat", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::stat(accessor).await; Ok((r,)) }) diff --git a/crates/component-macro/tests/expanded/small-anonymous_concurrent.rs b/crates/component-macro/tests/expanded/small-anonymous_concurrent.rs index 6fe5843c0bf1..f709f7da3803 100644 --- a/crates/component-macro/tests/expanded/small-anonymous_concurrent.rs +++ b/crates/component-macro/tests/expanded/small-anonymous_concurrent.rs @@ -262,7 +262,7 @@ pub 
mod foo { "option-test", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::option_test(accessor).await; Ok((r,)) }) diff --git a/crates/component-macro/tests/expanded/smoke_concurrent.rs b/crates/component-macro/tests/expanded/smoke_concurrent.rs index 425d7783cd9f..44b764d5c75b 100644 --- a/crates/component-macro/tests/expanded/smoke_concurrent.rs +++ b/crates/component-macro/tests/expanded/smoke_concurrent.rs @@ -201,7 +201,7 @@ pub mod imports { "y", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::y(accessor).await; Ok(r) }) diff --git a/crates/component-macro/tests/expanded/strings_concurrent.rs b/crates/component-macro/tests/expanded/strings_concurrent.rs index 8c47ecb73616..5de24d6ea275 100644 --- a/crates/component-macro/tests/expanded/strings_concurrent.rs +++ b/crates/component-macro/tests/expanded/strings_concurrent.rs @@ -230,7 +230,7 @@ pub mod foo { (arg0,): (wasmtime::component::__internal::String,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::a(accessor, arg0).await; Ok(r) }) @@ -240,7 +240,7 @@ pub mod foo { "b", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::b(accessor).await; Ok((r,)) }) @@ -259,7 +259,7 @@ pub mod foo { )| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::c(accessor, arg0, arg1).await; 
Ok((r,)) }) diff --git a/crates/component-macro/tests/expanded/unstable-features_concurrent.rs b/crates/component-macro/tests/expanded/unstable-features_concurrent.rs index a45fe341cd0a..3882ab7626c7 100644 --- a/crates/component-macro/tests/expanded/unstable-features_concurrent.rs +++ b/crates/component-macro/tests/expanded/unstable-features_concurrent.rs @@ -283,7 +283,7 @@ const _: () = { wasmtime::component::ResourceType::host::(), move |caller: &wasmtime::component::Accessor, rep| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); HostBazWithStore::drop( accessor, wasmtime::component::Resource::new_own(rep), @@ -299,7 +299,7 @@ const _: () = { "foo", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::foo(accessor) .await; Ok(r) @@ -316,7 +316,7 @@ const _: () = { (arg0,): (wasmtime::component::Resource,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::foo(accessor, arg0).await; Ok(r) }) @@ -434,7 +434,7 @@ pub mod foo { wasmtime::component::ResourceType::host::(), move |caller: &wasmtime::component::Accessor, rep| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); HostBarWithStore::drop( accessor, wasmtime::component::Resource::new_own(rep), @@ -449,7 +449,7 @@ pub mod foo { "foo", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::foo(accessor).await; Ok(r) }) @@ -464,7 +464,7 @@ pub mod foo { 
(arg0,): (wasmtime::component::Resource,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::foo(accessor, arg0).await; Ok(r) }) diff --git a/crates/component-macro/tests/expanded/unversioned-foo_concurrent.rs b/crates/component-macro/tests/expanded/unversioned-foo_concurrent.rs index 096e97ccac46..31fa0ffec0fe 100644 --- a/crates/component-macro/tests/expanded/unversioned-foo_concurrent.rs +++ b/crates/component-macro/tests/expanded/unversioned-foo_concurrent.rs @@ -231,7 +231,7 @@ pub mod foo { "g", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::g(accessor).await; Ok((r,)) }) diff --git a/crates/component-macro/tests/expanded/use-paths_concurrent.rs b/crates/component-macro/tests/expanded/use-paths_concurrent.rs index ef8f1f4cacef..80771ea7384b 100644 --- a/crates/component-macro/tests/expanded/use-paths_concurrent.rs +++ b/crates/component-macro/tests/expanded/use-paths_concurrent.rs @@ -225,7 +225,7 @@ pub mod foo { "a", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::a(accessor).await; Ok((r,)) }) @@ -264,7 +264,7 @@ pub mod foo { "a", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::a(accessor).await; Ok((r,)) }) @@ -303,7 +303,7 @@ pub mod foo { "a", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = 
&caller.with_getter(host_getter); let r = ::a(accessor).await; Ok((r,)) }) @@ -344,7 +344,7 @@ pub mod d { "b", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::b(accessor).await; Ok((r,)) }) diff --git a/crates/component-macro/tests/expanded/variants_concurrent.rs b/crates/component-macro/tests/expanded/variants_concurrent.rs index a63406844f36..3ab97ed3a962 100644 --- a/crates/component-macro/tests/expanded/variants_concurrent.rs +++ b/crates/component-macro/tests/expanded/variants_concurrent.rs @@ -595,7 +595,7 @@ pub mod foo { "e1-arg", move |caller: &wasmtime::component::Accessor, (arg0,): (E1,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::e1_arg(accessor, arg0).await; Ok(r) }) @@ -605,7 +605,7 @@ pub mod foo { "e1-result", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::e1_result(accessor).await; Ok((r,)) }) @@ -615,7 +615,7 @@ pub mod foo { "v1-arg", move |caller: &wasmtime::component::Accessor, (arg0,): (V1,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::v1_arg(accessor, arg0).await; Ok(r) }) @@ -625,7 +625,7 @@ pub mod foo { "v1-result", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::v1_result(accessor).await; Ok((r,)) }) @@ -635,7 +635,7 @@ pub mod foo { "bool-arg", move |caller: 
&wasmtime::component::Accessor, (arg0,): (bool,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::bool_arg(accessor, arg0).await; Ok(r) }) @@ -645,7 +645,7 @@ pub mod foo { "bool-result", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::bool_result(accessor).await; Ok((r,)) }) @@ -672,7 +672,7 @@ pub mod foo { )| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::option_arg( accessor, arg0, @@ -691,7 +691,7 @@ pub mod foo { "option-result", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::option_result(accessor).await; Ok((r,)) }) @@ -711,7 +711,7 @@ pub mod foo { ): (Casts1, Casts2, Casts3, Casts4, Casts5, Casts6)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::casts( accessor, arg0, @@ -750,7 +750,7 @@ pub mod foo { )| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::result_arg( accessor, arg0, @@ -769,7 +769,7 @@ pub mod foo { "result-result", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::result_result(accessor).await; Ok((r,)) }) @@ -779,7 +779,7 @@ pub mod foo { "return-result-sugar", 
move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::return_result_sugar(accessor) .await; Ok((r,)) @@ -790,7 +790,7 @@ pub mod foo { "return-result-sugar2", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::return_result_sugar2(accessor) .await; Ok((r,)) @@ -801,7 +801,7 @@ pub mod foo { "return-result-sugar3", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::return_result_sugar3(accessor) .await; Ok((r,)) @@ -812,7 +812,7 @@ pub mod foo { "return-result-sugar4", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::return_result_sugar4(accessor) .await; Ok((r,)) @@ -823,7 +823,7 @@ pub mod foo { "return-option-sugar", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::return_option_sugar(accessor) .await; Ok((r,)) @@ -834,7 +834,7 @@ pub mod foo { "return-option-sugar2", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::return_option_sugar2(accessor) .await; Ok((r,)) @@ -845,7 +845,7 @@ pub mod foo { "result-simple", move |caller: &wasmtime::component::Accessor, 
(): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::result_simple(accessor).await; Ok((r,)) }) @@ -858,7 +858,7 @@ pub mod foo { (arg0,): (IsClone,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::is_clone_arg(accessor, arg0) .await; Ok(r) @@ -869,7 +869,7 @@ pub mod foo { "is-clone-return", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::is_clone_return(accessor) .await; Ok((r,)) diff --git a/crates/misc/component-async-tests/src/resource_stream.rs b/crates/misc/component-async-tests/src/resource_stream.rs index b545efd34c5d..baf3bdcecfe1 100644 --- a/crates/misc/component-async-tests/src/resource_stream.rs +++ b/crates/misc/component-async-tests/src/resource_stream.rs @@ -1,7 +1,7 @@ +use crate::util::PipeProducer; use anyhow::Result; -use wasmtime::component::{ - Accessor, AccessorTask, GuardedStreamWriter, Resource, StreamReader, StreamWriter, -}; +use futures::channel::mpsc; +use wasmtime::component::{Accessor, Resource, StreamReader}; use super::Ctx; @@ -38,29 +38,15 @@ impl bindings::local::local::resource_stream::HostWithStore for Ctx { accessor: &Accessor, count: u32, ) -> wasmtime::Result>> { - struct Task { - tx: StreamWriter>, - - count: u32, - } - - impl AccessorTask> for Task { - async fn run(self, accessor: &Accessor) -> Result<()> { - let mut tx = GuardedStreamWriter::new(accessor, self.tx); - for _ in 0..self.count { - let item = accessor.with(|mut view| view.get().table.push(ResourceStreamX))?; - tx.write_all(Some(item)).await; - } - Ok(()) + accessor.with(|mut access| { + let (mut tx, rx) = mpsc::channel(usize::try_from(count).unwrap()); 
+ for _ in 0..count { + tx.try_send(access.get().table.push(ResourceStreamX)?) + .unwrap() } - } - - let (tx, rx) = accessor.with(|mut view| { - let instance = view.instance(); - instance.stream(&mut view) - })?; - accessor.spawn(Task { tx, count }); - Ok(rx) + let instance = access.instance(); + Ok(StreamReader::new(instance, access, PipeProducer::new(rx))) + }) } } diff --git a/crates/misc/component-async-tests/src/util.rs b/crates/misc/component-async-tests/src/util.rs index fb1380603e8f..40dc572d5e39 100644 --- a/crates/misc/component-async-tests/src/util.rs +++ b/crates/misc/component-async-tests/src/util.rs @@ -1,5 +1,18 @@ -use futures::channel::oneshot; -use std::thread; +use anyhow::Result; +use futures::{Sink, Stream, channel::oneshot}; +use std::{ + marker::PhantomData, + pin::Pin, + task::{Context, Poll}, + thread, +}; +use wasmtime::{ + StoreContextMut, + component::{ + Accessor, Destination, FutureConsumer, FutureProducer, Lift, Lower, Source, StreamConsumer, + StreamProducer, StreamResult, + }, +}; pub async fn sleep(duration: std::time::Duration) { if cfg!(miri) { @@ -21,3 +34,129 @@ pub async fn sleep(duration: std::time::Duration) { tokio::time::sleep(duration).await; } } + +pub struct PipeProducer(S); + +impl PipeProducer { + pub fn new(rx: S) -> Self { + Self(rx) + } +} + +impl + Send + 'static> StreamProducer + for PipeProducer +{ + type Item = T; + type Buffer = Option; + + fn poll_produce<'a>( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + _: StoreContextMut, + destination: &'a mut Destination<'a, Self::Item, Self::Buffer>, + finish: bool, + ) -> Poll> { + // SAFETY: This is a standard pin-projection, and we never move + // out of `self`. 
+ let stream = unsafe { self.map_unchecked_mut(|v| &mut v.0) }; + + match stream.poll_next(cx) { + Poll::Pending => { + if finish { + Poll::Ready(Ok(StreamResult::Cancelled)) + } else { + Poll::Pending + } + } + Poll::Ready(Some(item)) => { + destination.set_buffer(Some(item)); + Poll::Ready(Ok(StreamResult::Completed)) + } + Poll::Ready(None) => Poll::Ready(Ok(StreamResult::Dropped)), + } + } +} + +pub struct PipeConsumer(S, PhantomData T>); + +impl PipeConsumer { + pub fn new(tx: S) -> Self { + Self(tx, PhantomData) + } +} + +impl + Send + 'static> + StreamConsumer for PipeConsumer +{ + type Item = T; + + fn poll_consume( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + store: StoreContextMut, + source: &mut Source, + finish: bool, + ) -> Poll> { + // SAFETY: This is a standard pin-projection, and we never move + // out of `self`. + let mut sink = unsafe { self.map_unchecked_mut(|v| &mut v.0) }; + + let on_pending = || { + if finish { + Poll::Ready(Ok(StreamResult::Cancelled)) + } else { + Poll::Pending + } + }; + + match sink.as_mut().poll_flush(cx) { + Poll::Pending => on_pending(), + Poll::Ready(result) => { + result?; + match sink.as_mut().poll_ready(cx) { + Poll::Pending => on_pending(), + Poll::Ready(result) => { + result?; + let item = &mut None; + source.read(store, item)?; + sink.start_send(item.take().unwrap())?; + Poll::Ready(Ok(StreamResult::Completed)) + } + } + } + } + } +} + +pub struct OneshotProducer(oneshot::Receiver); + +impl OneshotProducer { + pub fn new(rx: oneshot::Receiver) -> Self { + Self(rx) + } +} + +impl FutureProducer for OneshotProducer { + type Item = T; + + async fn produce(self, _: &Accessor) -> Result { + Ok(self.0.await?) 
+ } +} + +pub struct OneshotConsumer(oneshot::Sender); + +impl OneshotConsumer { + pub fn new(tx: oneshot::Sender) -> Self { + Self(tx) + } +} + +impl FutureConsumer for OneshotConsumer { + type Item = T; + + async fn consume(self, _: &Accessor, value: T) -> Result<()> { + _ = self.0.send(value); + Ok(()) + } +} diff --git a/crates/misc/component-async-tests/tests/scenario/round_trip.rs b/crates/misc/component-async-tests/tests/scenario/round_trip.rs index b420e2fe4f97..840857512708 100644 --- a/crates/misc/component-async-tests/tests/scenario/round_trip.rs +++ b/crates/misc/component-async-tests/tests/scenario/round_trip.rs @@ -236,7 +236,7 @@ pub async fn test_round_trip( component_async_tests::round_trip::bindings::RoundTrip::new(&mut store, &instance)?; if call_style == 0 || !cfg!(miri) { - // Now do it again using `Instance::run_concurrent`: + // Run the test using `Instance::run_concurrent`: instance .run_concurrent(&mut store, { let inputs_and_outputs = inputs_and_outputs diff --git a/crates/misc/component-async-tests/tests/scenario/streams.rs b/crates/misc/component-async-tests/tests/scenario/streams.rs index c024fc1ae5c5..e63f2d303182 100644 --- a/crates/misc/component-async-tests/tests/scenario/streams.rs +++ b/crates/misc/component-async-tests/tests/scenario/streams.rs @@ -1,195 +1,25 @@ use { super::util::{config, make_component}, anyhow::Result, - component_async_tests::{Ctx, closed_streams}, - futures::{ - future::FutureExt, - stream::{FuturesUnordered, StreamExt, TryStreamExt}, + component_async_tests::{ + Ctx, closed_streams, + util::{OneshotConsumer, OneshotProducer, PipeConsumer, PipeProducer}, }, - std::{ - future::{self, Future}, - pin::pin, - sync::{Arc, Mutex}, - task::{Context, Waker}, + futures::{ + SinkExt, StreamExt, + channel::{mpsc, oneshot}, + future, }, + std::sync::{Arc, Mutex}, wasmtime::{ - Engine, Store, Trap, - component::{ - Accessor, GuardedFutureReader, GuardedStreamReader, GuardedStreamWriter, Linker, - ResourceTable, 
VecBuffer, - }, + Engine, Store, + component::{FutureReader, Linker, ResourceTable, StreamReader}, }, wasmtime_wasi::WasiCtxBuilder, }; -#[tokio::test] -pub async fn async_watch_streams() -> Result<()> { - let engine = Engine::new(&config())?; - - let mut store = Store::new( - &engine, - Ctx { - wasi: WasiCtxBuilder::new().inherit_stdio().build(), - table: ResourceTable::default(), - continue_: false, - wakers: Arc::new(Mutex::new(None)), - }, - ); - - let mut linker = Linker::new(&engine); - - wasmtime_wasi::p2::add_to_linker_async(&mut linker)?; - - let component = make_component( - &engine, - &[test_programs_artifacts::ASYNC_CLOSED_STREAMS_COMPONENT], - ) - .await?; - - let instance = linker.instantiate_async(&mut store, &component).await?; - - // Test watching and then dropping the read end of a stream. - let (mut tx, mut rx) = instance.stream::(&mut store)?; - instance - .run_concurrent(&mut store, async |store| { - futures::join!(tx.watch_reader(store), async { rx.close_with(store) }).1 - }) - .await?; - - // Test dropping and then watching the read end of a stream. - let (mut tx, mut rx) = instance.stream::(&mut store)?; - instance - .run_concurrent(&mut store, async |store| { - rx.close_with(store); - tx.watch_reader(store).await; - }) - .await?; - - // Test watching and then dropping the write end of a stream. - let (mut tx, mut rx) = instance.stream::(&mut store)?; - instance - .run_concurrent(&mut store, async |store| { - futures::join!(rx.watch_writer(store), async { tx.close_with(store) }).1 - }) - .await?; - - // Test dropping and then watching the write end of a stream. - let (mut tx, mut rx) = instance.stream::(&mut store)?; - instance - .run_concurrent(&mut store, async |store| { - tx.close_with(store); - rx.watch_writer(store).await; - }) - .await?; - - // Test watching and then dropping the read end of a future. 
- let (mut tx, mut rx) = instance.future::(&mut store, || 42)?; - instance - .run_concurrent(&mut store, async |store| { - futures::join!(tx.watch_reader(store), async { rx.close_with(store) }).1 - }) - .await?; - - // Test dropping and then watching the read end of a future. - let (mut tx, mut rx) = instance.future::(&mut store, || 42)?; - instance - .run_concurrent(&mut store, async |store| { - rx.close_with(store); - tx.watch_reader(store).await; - }) - .await?; - - // Test watching and then dropping the write end of a future. - let (mut tx, mut rx) = instance.future::(&mut store, || 42)?; - instance - .run_concurrent(&mut store, async |store| { - futures::join!(rx.watch_writer(store), async { tx.close_with(store) }).1 - }) - .await?; - - // Test dropping and then watching the write end of a future. - let (mut tx, mut rx) = instance.future::(&mut store, || 42)?; - instance - .run_concurrent(&mut store, async |store| { - tx.close_with(store); - rx.watch_writer(store).await; - }) - .await?; - - enum Event<'a> { - Write(Option>>), - Read( - Option>>, - Option, - ), - } - - // Test watching, then writing to, then dropping, then writing again to the - // read end of a stream. 
- let (tx, rx) = instance.stream(&mut store)?; - instance - .run_concurrent(&mut store, async move |store| -> wasmtime::Result<_> { - let mut tx = GuardedStreamWriter::new(store, tx); - let mut rx = GuardedStreamReader::new(store, rx); - let mut futures = FuturesUnordered::new(); - assert!( - pin!(tx.watch_reader()) - .poll(&mut Context::from_waker(&Waker::noop())) - .is_pending() - ); - futures.push( - async move { - tx.write_all(Some(42)).await; - let w = if tx.is_closed() { None } else { Some(tx) }; - anyhow::Ok(Event::Write(w)) - } - .boxed(), - ); - futures.push( - async move { - let b = rx.read(None).await; - let r = if rx.is_closed() { None } else { Some(rx) }; - Ok(Event::Read(r, b)) - } - .boxed(), - ); - let mut rx = None; - let mut tx = None; - while let Some(event) = futures.try_next().await? { - match event { - Event::Write(None) => unreachable!(), - Event::Write(Some(new_tx)) => tx = Some(new_tx), - Event::Read(None, _) => unreachable!(), - Event::Read(Some(new_rx), mut buffer) => { - assert_eq!(buffer.take(), Some(42)); - rx = Some(new_rx); - } - } - } - drop(rx); - - let mut tx = tx.take().unwrap(); - tx.watch_reader().await; - tx.write_all(Some(42)).await; - assert!(tx.is_closed()); - Ok(()) - }) - .await??; - - Ok(()) -} - #[tokio::test] pub async fn async_closed_streams() -> Result<()> { - test_closed_streams(false).await -} - -#[tokio::test] -pub async fn async_closed_streams_with_watch() -> Result<()> { - test_closed_streams(true).await -} - -pub async fn test_closed_streams(watch: bool) -> Result<()> { let engine = Engine::new(&config())?; let mut store = Store::new( @@ -214,153 +44,53 @@ pub async fn test_closed_streams(watch: bool) -> Result<()> { let instance = linker.instantiate_async(&mut store, &component).await?; - enum StreamEvent<'a> { - FirstWrite(Option>>), - FirstRead(Option>>, Vec), - SecondWrite(Option>>), - GuestCompleted, - } - - enum FutureEvent { - Write(bool), - Read(Option), - WriteIgnored(bool), - GuestCompleted, - } - let 
values = vec![42_u8, 43, 44]; let value = 42_u8; // First, test stream host->host { - let (tx, rx) = instance.stream(&mut store)?; - let values = values.clone(); + let (mut input_tx, input_rx) = mpsc::channel(1); + let (output_tx, mut output_rx) = mpsc::channel(1); + StreamReader::new(instance, &mut store, PipeProducer::new(input_rx)) + .pipe(&mut store, PipeConsumer::new(output_tx)); instance - .run_concurrent(&mut store, async move |store| -> wasmtime::Result<_> { - let mut tx = GuardedStreamWriter::new(store, tx); - let mut rx = GuardedStreamReader::new(store, rx); - - let mut futures = FuturesUnordered::new(); - futures.push({ - let values = values.clone(); - async move { - tx.write_all(VecBuffer::from(values)).await; - anyhow::Ok(StreamEvent::FirstWrite(if tx.is_closed() { - None - } else { - Some(tx) - })) - } - .boxed() - }); - futures.push( - async move { - let b = rx.read(Vec::with_capacity(3)).await; - let r = if rx.is_closed() { None } else { Some(rx) }; - Ok(StreamEvent::FirstRead(r, b)) - } - .boxed(), - ); - - let mut count = 0; - while let Some(event) = futures.try_next().await? 
{ - count += 1; - match event { - StreamEvent::FirstWrite(Some(mut tx)) => { - if watch { - futures.push( - async move { - tx.watch_reader().await; - Ok(StreamEvent::SecondWrite(None)) - } - .boxed(), - ); - } else { - futures.push({ - let values = values.clone(); - async move { - tx.write_all(VecBuffer::from(values)).await; - Ok(StreamEvent::SecondWrite(if tx.is_closed() { - None - } else { - Some(tx) - })) - } - .boxed() - }); - } + .run_concurrent(&mut store, async |_| { + let (a, b) = future::join( + async { + for &value in &values { + input_tx.send(value).await?; } - StreamEvent::FirstWrite(None) => { - panic!("first write should have been accepted") + drop(input_tx); + anyhow::Ok(()) + }, + async { + for &value in &values { + assert_eq!(Some(value), output_rx.next().await); } - StreamEvent::FirstRead(Some(_), results) => { - assert_eq!(values, results); - } - StreamEvent::FirstRead(None, _) => unreachable!(), - StreamEvent::SecondWrite(None) => {} - StreamEvent::SecondWrite(Some(_)) => { - panic!("second write should _not_ have been accepted") - } - StreamEvent::GuestCompleted => unreachable!(), - } - } + assert!(output_rx.next().await.is_none()); + Ok(()) + }, + ) + .await; - assert_eq!(count, 3); - Ok(()) + a.and(b) }) .await??; } // Next, test futures host->host { - let (tx, rx) = instance.future(&mut store, || unreachable!())?; - let (mut tx_ignored, rx_ignored) = instance.future(&mut store, || unreachable!())?; + let (input_tx, input_rx) = oneshot::channel(); + let (output_tx, output_rx) = oneshot::channel(); + FutureReader::new(instance, &mut store, OneshotProducer::new(input_rx)) + .pipe(&mut store, OneshotConsumer::new(output_tx)); instance - .run_concurrent(&mut store, async move |store| { - let rx_ignored = GuardedFutureReader::new(store, rx_ignored); - - let mut futures = FuturesUnordered::new(); - futures.push(tx.write(store, value).map(FutureEvent::Write).boxed()); - futures.push(rx.read(store).map(FutureEvent::Read).boxed()); - if watch { - 
futures.push( - tx_ignored - .watch_reader(store) - .map(|()| FutureEvent::WriteIgnored(false)) - .boxed(), - ); - } else { - futures.push( - tx_ignored - .write(store, value) - .map(FutureEvent::WriteIgnored) - .boxed(), - ); - } - drop(rx_ignored); - - let mut count = 0; - while let Some(event) = futures.next().await { - count += 1; - match event { - FutureEvent::Write(delivered) => { - assert!(delivered); - } - FutureEvent::Read(Some(result)) => { - assert_eq!(value, result); - } - FutureEvent::Read(None) => panic!("read should have succeeded"), - FutureEvent::WriteIgnored(delivered) => { - assert!(!delivered); - } - FutureEvent::GuestCompleted => unreachable!(), - } - } - - assert_eq!(count, 3); + .run_concurrent(&mut store, async |_| { + _ = input_tx.send(value); + assert_eq!(value, output_rx.await?); anyhow::Ok(()) }) .await??; @@ -368,7 +98,8 @@ pub async fn test_closed_streams(watch: bool) -> Result<()> { // Next, test stream host->guest { - let (tx, rx) = instance.stream(&mut store)?; + let (mut tx, rx) = mpsc::channel(1); + let rx = StreamReader::new(instance, &mut store, PipeProducer::new(rx)); let closed_streams = closed_streams::bindings::ClosedStreams::new(&mut store, &instance)?; @@ -376,163 +107,45 @@ pub async fn test_closed_streams(watch: bool) -> Result<()> { instance .run_concurrent(&mut store, async move |accessor| { - let mut tx = GuardedStreamWriter::new(accessor, tx); - - let mut futures = FuturesUnordered::new(); - futures.push( - closed_streams - .local_local_closed() - .call_read_stream(accessor, rx, values.clone()) - .map(|v| v.map(|()| StreamEvent::GuestCompleted)) - .boxed(), - ); - futures.push({ - let values = values.clone(); - async move { - tx.write_all(VecBuffer::from(values)).await; - let w = if tx.is_closed() { None } else { Some(tx) }; - Ok(StreamEvent::FirstWrite(w)) - } - .boxed() - }); - - let mut count = 0; - while let Some(event) = futures.try_next().await? 
{ - count += 1; - match event { - StreamEvent::FirstWrite(Some(mut tx)) => { - if watch { - futures.push( - async move { - tx.watch_reader().await; - Ok(StreamEvent::SecondWrite(None)) - } - .boxed(), - ); - } else { - futures.push({ - let values = values.clone(); - async move { - tx.write_all(VecBuffer::from(values)).await; - let w = if tx.is_closed() { None } else { Some(tx) }; - Ok(StreamEvent::SecondWrite(w)) - } - .boxed() - }); - } - } - StreamEvent::FirstWrite(None) => { - panic!("first write should have been accepted") - } - StreamEvent::FirstRead(_, _) => unreachable!(), - StreamEvent::SecondWrite(None) => {} - StreamEvent::SecondWrite(Some(_)) => { - panic!("second write should _not_ have been accepted") + let (a, b) = future::join( + async { + for &value in &values { + tx.send(value).await?; } - StreamEvent::GuestCompleted => {} - } - } - - assert_eq!(count, 3); - - anyhow::Ok(()) + drop(tx); + Ok(()) + }, + closed_streams.local_local_closed().call_read_stream( + accessor, + rx, + values.clone(), + ), + ) + .await; + + a.and(b) }) .await??; } // Next, test futures host->guest { - let (tx, rx) = instance.future(&mut store, || unreachable!())?; - let (mut tx_ignored, rx_ignored) = instance.future(&mut store, || unreachable!())?; + let (tx, rx) = oneshot::channel(); + let rx = FutureReader::new(instance, &mut store, OneshotProducer::new(rx)); + let (_, rx_ignored) = oneshot::channel(); + let rx_ignored = FutureReader::new(instance, &mut store, OneshotProducer::new(rx_ignored)); let closed_streams = closed_streams::bindings::ClosedStreams::new(&mut store, &instance)?; instance .run_concurrent(&mut store, async move |accessor| { - let mut futures = FuturesUnordered::new(); - futures.push( - closed_streams - .local_local_closed() - .call_read_future(accessor, rx, value, rx_ignored) - .map(|v| v.map(|()| FutureEvent::GuestCompleted)) - .boxed(), - ); - futures.push( - tx.write(accessor, value) - .map(FutureEvent::Write) - .map(Ok) - .boxed(), - ); - if watch { 
- futures.push( - tx_ignored - .watch_reader(accessor) - .map(|()| Ok(FutureEvent::WriteIgnored(false))) - .boxed(), - ); - } else { - futures.push( - tx_ignored - .write(accessor, value) - .map(FutureEvent::WriteIgnored) - .map(Ok) - .boxed(), - ); - } - - let mut count = 0; - while let Some(event) = futures.try_next().await? { - count += 1; - match event { - FutureEvent::Write(delivered) => { - assert!(delivered); - } - FutureEvent::Read(_) => unreachable!(), - FutureEvent::WriteIgnored(delivered) => { - assert!(!delivered); - } - FutureEvent::GuestCompleted => {} - } - } - - assert_eq!(count, 3); - - anyhow::Ok(()) - }) - .await??; - } - - // Next, test futures host->guest again, but this time using the default value when closing the writers. - { - let (mut tx, rx) = instance.future(&mut store, || 42)?; - let (mut tx_ignored, rx_ignored) = instance.future(&mut store, || 42)?; - - let closed_streams = closed_streams::bindings::ClosedStreams::new(&mut store, &instance)?; - - let result = instance - .run_concurrent(&mut store, async move |accessor| { + _ = tx.send(value); closed_streams .local_local_closed() - .call_read_future_post_return(accessor, rx, 42, rx_ignored) - .await?; - - tx.close_with(accessor); - tx_ignored.close_with(accessor); - - future::pending::<()>().await; - - anyhow::Ok(()) + .call_read_future(accessor, rx, value, rx_ignored) + .await }) - .await; - - // As of this writing, passing a future which never resolves to - // `Instance::run_concurrent` and expecting a `Trap::AsyncDeadlock` is - // the only way to join all tasks for the `Instance`, so that's what we - // do: - assert!(matches!( - result.unwrap_err().downcast::(), - Ok(Trap::AsyncDeadlock) - )); + .await??; } Ok(()) diff --git a/crates/misc/component-async-tests/tests/scenario/transmit.rs b/crates/misc/component-async-tests/tests/scenario/transmit.rs index 59a505832fb6..80de705e731d 100644 --- a/crates/misc/component-async-tests/tests/scenario/transmit.rs +++ 
b/crates/misc/component-async-tests/tests/scenario/transmit.rs @@ -1,23 +1,191 @@ -use std::future::Future; +use std::future::{self, Future}; use std::pin::Pin; use std::sync::{Arc, Mutex}; +use std::task::{Context, Poll}; +use std::time::Duration; use super::util::{config, make_component, test_run, test_run_with_count}; use anyhow::{Result, anyhow}; use cancel::exports::local::local::cancel::Mode; use component_async_tests::transmit::bindings::exports::local::local::transmit::Control; +use component_async_tests::util::{OneshotConsumer, OneshotProducer, PipeConsumer, PipeProducer}; use component_async_tests::{Ctx, sleep, transmit}; use futures::{ - future::FutureExt, - stream::{FuturesUnordered, TryStreamExt}, + FutureExt, SinkExt, StreamExt, TryStreamExt, + channel::{mpsc, oneshot}, + stream::FuturesUnordered, }; use wasmtime::component::{ - Accessor, Component, FutureReader, GuardedFutureReader, GuardedStreamReader, - GuardedStreamWriter, HasSelf, Instance, Linker, ResourceTable, StreamReader, Val, + Accessor, Component, Destination, FutureReader, HasSelf, Instance, Linker, ResourceTable, + Source, StreamConsumer, StreamProducer, StreamReader, StreamResult, Val, }; -use wasmtime::{AsContextMut, Engine, Store}; +use wasmtime::{AsContextMut, Engine, Store, StoreContextMut, Trap}; use wasmtime_wasi::WasiCtxBuilder; +mod readiness { + wasmtime::component::bindgen!({ + path: "wit", + world: "readiness-guest" + }); +} + +struct ReadinessProducer { + buffer: Vec, + sleep: Pin + Send>>, +} + +impl StreamProducer for ReadinessProducer { + type Item = u8; + type Buffer = Option; + + fn poll_produce<'a>( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + mut store: StoreContextMut<'a, D>, + destination: &'a mut Destination<'a, Self::Item, Self::Buffer>, + finish: bool, + ) -> Poll> { + let me = self.get_mut(); + + match me.sleep.as_mut().poll(cx) { + Poll::Pending => { + if finish { + Poll::Ready(Ok(StreamResult::Cancelled)) + } else { + Poll::Pending + } + } + 
Poll::Ready(()) => { + me.sleep = async {}.boxed(); + let capacity = destination.remaining(store.as_context_mut()); + if capacity == Some(0) { + Poll::Ready(Ok(StreamResult::Completed)) + } else { + assert_eq!(capacity, Some(me.buffer.len())); + let mut destination = destination.as_direct_destination(store).unwrap(); + destination.remaining().copy_from_slice(&me.buffer); + destination.mark_written(me.buffer.len()); + + Poll::Ready(Ok(StreamResult::Dropped)) + } + } + } + } +} + +struct ReadinessConsumer { + expected: Vec, + sleep: Pin + Send>>, +} + +impl StreamConsumer for ReadinessConsumer { + type Item = u8; + + fn poll_consume( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + mut store: StoreContextMut, + source: &mut Source, + finish: bool, + ) -> Poll> { + let me = self.get_mut(); + + match me.sleep.as_mut().poll(cx) { + Poll::Pending => { + if finish { + Poll::Ready(Ok(StreamResult::Cancelled)) + } else { + Poll::Pending + } + } + Poll::Ready(()) => { + me.sleep = async {}.boxed(); + let available = source.remaining(store.as_context_mut()); + if available == 0 { + Poll::Ready(Ok(StreamResult::Completed)) + } else { + assert_eq!(available, me.expected.len()); + let mut source = source.as_direct_source(store); + assert_eq!(&me.expected, source.remaining()); + source.mark_read(me.expected.len()); + + Poll::Ready(Ok(StreamResult::Dropped)) + } + } + } + } +} + +#[tokio::test] +pub async fn async_readiness() -> Result<()> { + let component = test_programs_artifacts::ASYNC_READINESS_COMPONENT; + + let engine = Engine::new(&config())?; + + let component = make_component(&engine, &[component]).await?; + + let mut linker = Linker::new(&engine); + + wasmtime_wasi::p2::add_to_linker_async(&mut linker)?; + + let mut store = Store::new( + &engine, + Ctx { + wasi: WasiCtxBuilder::new().inherit_stdio().build(), + table: ResourceTable::default(), + continue_: false, + wakers: Arc::new(Mutex::new(None)), + }, + ); + + let instance = linker.instantiate_async(&mut store, 
&component).await?; + let readiness_guest = readiness::ReadinessGuest::new(&mut store, &instance)?; + let expected = vec![2u8, 4, 6, 8, 9]; + let rx = StreamReader::new( + instance, + &mut store, + ReadinessProducer { + buffer: expected.clone(), + sleep: component_async_tests::util::sleep(Duration::from_millis(delay_millis())) + .boxed(), + }, + ); + let result = instance + .run_concurrent(&mut store, async move |accessor| { + let (rx, expected) = readiness_guest + .local_local_readiness() + .call_start(accessor, rx, expected) + .await?; + + accessor.with(|access| { + rx.pipe( + access, + ReadinessConsumer { + expected, + sleep: component_async_tests::util::sleep(Duration::from_millis( + delay_millis(), + )) + .boxed(), + }, + ) + }); + + future::pending::>().await + }) + .await; + + // As of this writing, passing a future which never resolves to + // `Instance::run_concurrent` and expecting a `Trap::AsyncDeadlock` is + // the only way to join all tasks for the `Instance`, so that's what we + // do: + assert!(matches!( + result.unwrap_err().downcast::(), + Ok(Trap::AsyncDeadlock) + )); + + Ok(()) +} + #[tokio::test] pub async fn async_poll_synchronous() -> Result<()> { test_run(&[test_programs_artifacts::ASYNC_POLL_SYNCHRONOUS_COMPONENT]).await @@ -28,7 +196,7 @@ pub async fn async_poll_stackless() -> Result<()> { test_run(&[test_programs_artifacts::ASYNC_POLL_STACKLESS_COMPONENT]).await } -pub mod cancel { +mod cancel { wasmtime::component::bindgen!({ path: "wit", world: "cancel-host", @@ -78,7 +246,7 @@ pub async fn async_trap_cancel_host_after_return() -> Result<()> { test_cancel_trap(Mode::TrapCancelHostAfterReturn).await } -fn cancel_delay() -> u64 { +fn delay_millis() -> u64 { // Miri-based builds are much slower to run, so we delay longer in that case // to ensure that async calls which the test expects to return `BLOCKED` // actually do so. 
@@ -131,7 +299,7 @@ async fn test_cancel(mode: Mode) -> Result<()> { .run_concurrent(&mut store, async move |accessor| { cancel_host .local_local_cancel() - .call_run(accessor, mode, cancel_delay()) + .call_run(accessor, mode, delay_millis()) .await }) .await??; @@ -362,59 +530,60 @@ async fn test_transmit_with(component: &str) -> Re let (test, instance) = Test::instantiate(&mut store, &component, &linker).await?; - enum Event<'a, Test: TransmitTest> { + enum Event { Result(Test::Result), - ControlWriteA(Option>>), - ControlWriteB(Option>>), - ControlWriteC(Option>>), + ControlWriteA(mpsc::Sender), + ControlWriteB(mpsc::Sender), + ControlWriteC(mpsc::Sender), ControlWriteD, WriteA, - WriteB(bool), - ReadC( - Option>>, - Option, - ), - ReadD(Option), - ReadNone(Option>>), + ReadC(mpsc::Receiver, Option), + ReadD(mpsc::Receiver, Option), + ReadNone(Option), } - let (control_tx, control_rx) = instance.stream(&mut store)?; - let (caller_stream_tx, caller_stream_rx) = instance.stream(&mut store)?; - let (caller_future1_tx, caller_future1_rx) = instance.future(&mut store, || unreachable!())?; - let (_caller_future2_tx, caller_future2_rx) = instance.future(&mut store, || unreachable!())?; - + let (mut control_tx, control_rx) = mpsc::channel(1); + let control_rx = StreamReader::new(instance, &mut store, PipeProducer::new(control_rx)); + let (mut caller_stream_tx, caller_stream_rx) = mpsc::channel(1); + let caller_stream_rx = + StreamReader::new(instance, &mut store, PipeProducer::new(caller_stream_rx)); + let (caller_future1_tx, caller_future1_rx) = oneshot::channel(); + let caller_future1_rx = FutureReader::new( + instance, + &mut store, + OneshotProducer::new(caller_future1_rx), + ); + let (_, caller_future2_rx) = oneshot::channel(); + let caller_future2_rx = FutureReader::new( + instance, + &mut store, + OneshotProducer::new(caller_future2_rx), + ); + let (callee_future1_tx, callee_future1_rx) = oneshot::channel(); + let (callee_stream_tx, callee_stream_rx) = 
mpsc::channel(1); instance - .run_concurrent(&mut store, async move |accessor| { - let mut control_tx = GuardedStreamWriter::new(accessor, control_tx); - let control_rx = GuardedStreamReader::new(accessor, control_rx); - let mut caller_stream_tx = GuardedStreamWriter::new(accessor, caller_stream_tx); - - let mut futures = FuturesUnordered::< - Pin>> + Send>>, - >::new(); + .run_concurrent(&mut store, async |accessor| { let mut caller_future1_tx = Some(caller_future1_tx); - let mut callee_stream_rx = None; - let mut callee_future1_rx = None; + let mut callee_future1_tx = Some(callee_future1_tx); + let mut callee_future1_rx = Some(callee_future1_rx); + let mut callee_stream_tx = Some(callee_stream_tx); + let mut callee_stream_rx = Some(callee_stream_rx); let mut complete = false; + let mut futures = FuturesUnordered::< + Pin>> + Send>>, + >::new(); futures.push( async move { - control_tx - .write_all(Some(Control::ReadStream("a".into()))) - .await; - let w = if control_tx.is_closed() { - None - } else { - Some(control_tx) - }; - Ok(Event::ControlWriteA(w)) + control_tx.send(Control::ReadStream("a".into())).await?; + Ok(Event::ControlWriteA(control_tx)) } .boxed(), ); futures.push( async move { - caller_stream_tx.write_all(Some(String::from("a"))).await; + caller_stream_tx.send(String::from("a")).await?; Ok(Event::WriteA) } .boxed(), @@ -425,7 +594,7 @@ async fn test_transmit_with(component: &str) -> Re accessor, &test, Test::into_params( - control_rx.into(), + control_rx, caller_stream_rx, caller_future1_rx, caller_future2_rx, @@ -438,92 +607,76 @@ async fn test_transmit_with(component: &str) -> Re while let Some(event) = futures.try_next().await? 
{ match event { Event::Result(result) => { - let (stream_rx, future_rx, _) = accessor - .with(|mut store| Test::from_result(&mut store, instance, result))?; - callee_stream_rx = Some(GuardedStreamReader::new(accessor, stream_rx)); - callee_future1_rx = Some(GuardedFutureReader::new(accessor, future_rx)); + accessor.with(|mut store| { + let (callee_stream_rx, callee_future1_rx, _) = + Test::from_result(&mut store, instance, result)?; + callee_stream_rx.pipe( + &mut store, + PipeConsumer::new(callee_stream_tx.take().unwrap()), + ); + callee_future1_rx.pipe( + &mut store, + OneshotConsumer::new(callee_future1_tx.take().unwrap()), + ); + anyhow::Ok(()) + })?; } - Event::ControlWriteA(tx) => { + Event::ControlWriteA(mut control_tx) => { futures.push( async move { - let mut tx = tx.unwrap(); - tx.write_all(Some(Control::ReadFuture("b".into()))).await; - let w = if tx.is_closed() { None } else { Some(tx) }; - Ok(Event::ControlWriteB(w)) + control_tx.send(Control::ReadFuture("b".into())).await?; + Ok(Event::ControlWriteB(control_tx)) } .boxed(), ); } Event::WriteA => { - futures.push( - caller_future1_tx - .take() - .unwrap() - .write(accessor, "b".into()) - .map(Event::WriteB) - .map(Ok) - .boxed(), - ); - } - Event::ControlWriteB(tx) => { + _ = caller_future1_tx.take().unwrap().send("b".into()); + let mut callee_stream_rx = callee_stream_rx.take().unwrap(); futures.push( async move { - let mut tx = tx.unwrap(); - tx.write_all(Some(Control::WriteStream("c".into()))).await; - let w = if tx.is_closed() { None } else { Some(tx) }; - Ok(Event::ControlWriteC(w)) + let value = callee_stream_rx.next().await; + Ok(Event::ReadC(callee_stream_rx, value)) } .boxed(), ); } - Event::WriteB(delivered) => { - assert!(delivered); - let mut rx = callee_stream_rx.take().unwrap(); + Event::ControlWriteB(mut control_tx) => { futures.push( async move { - let b = rx.read(None).await; - let r = if rx.is_closed() { None } else { Some(rx) }; - Ok(Event::ReadC(r, b)) + 
control_tx.send(Control::WriteStream("c".into())).await?; + Ok(Event::ControlWriteC(control_tx)) } .boxed(), ); } - Event::ControlWriteC(tx) => { + Event::ControlWriteC(mut control_tx) => { futures.push( async move { - let mut tx = tx.unwrap(); - tx.write_all(Some(Control::WriteFuture("d".into()))).await; + control_tx.send(Control::WriteFuture("d".into())).await?; Ok(Event::ControlWriteD) } .boxed(), ); } - Event::ReadC(None, _) => unreachable!(), - Event::ReadC(Some(rx), mut value) => { + Event::ReadC(callee_stream_rx, mut value) => { assert_eq!(value.take().as_deref(), Some("c")); futures.push( callee_future1_rx .take() .unwrap() - .read() - .map(Event::ReadD) + .map(|v| Event::ReadD(callee_stream_rx, v.ok())) .map(Ok) .boxed(), ); - callee_stream_rx = Some(rx); } Event::ControlWriteD => {} - Event::ReadD(None) => unreachable!(), - Event::ReadD(Some(value)) => { + Event::ReadD(_, None) => unreachable!(), + Event::ReadD(mut callee_stream_rx, Some(value)) => { assert_eq!(&value, "d"); - let mut rx = callee_stream_rx.take().unwrap(); futures.push( - async move { - rx.read(None).await; - let r = if rx.is_closed() { None } else { Some(rx) }; - Ok(Event::ReadNone(r)) - } - .boxed(), + async move { Ok(Event::ReadNone(callee_stream_rx.next().await)) } + .boxed(), ); } Event::ReadNone(Some(_)) => unreachable!(), @@ -537,5 +690,6 @@ async fn test_transmit_with(component: &str) -> Re anyhow::Ok(()) }) - .await? 
+ .await??; + Ok(()) } diff --git a/crates/misc/component-async-tests/tests/test_all.rs b/crates/misc/component-async-tests/tests/test_all.rs index 0553254d34a5..54ddbba3384d 100644 --- a/crates/misc/component-async-tests/tests/test_all.rs +++ b/crates/misc/component-async-tests/tests/test_all.rs @@ -30,7 +30,7 @@ use scenario::round_trip_many::{ use scenario::streams::async_closed_streams; use scenario::transmit::{ async_cancel_callee, async_cancel_caller, async_intertask_communication, async_poll_stackless, - async_poll_synchronous, async_transmit_callee, async_transmit_caller, + async_poll_synchronous, async_readiness, async_transmit_callee, async_transmit_caller, }; use scenario::unit_stream::{async_unit_stream_callee, async_unit_stream_caller}; use scenario::yield_::{ diff --git a/crates/misc/component-async-tests/wit/test.wit b/crates/misc/component-async-tests/wit/test.wit index f074e9bc3167..9a602834ec8a 100644 --- a/crates/misc/component-async-tests/wit/test.wit +++ b/crates/misc/component-async-tests/wit/test.wit @@ -162,6 +162,10 @@ interface intertask { foo: func(fut: future); } +interface readiness { + start: async func(s: stream, expected: list) -> tuple, list>; +} + world yield-caller { import continue; import ready; @@ -307,3 +311,7 @@ world intertask-communication { import intertask; export run; } + +world readiness-guest { + export readiness; +} diff --git a/crates/test-programs/src/bin/async_closed_streams.rs b/crates/test-programs/src/bin/async_closed_streams.rs index 459b542fb290..c794279043ce 100644 --- a/crates/test-programs/src/bin/async_closed_streams.rs +++ b/crates/test-programs/src/bin/async_closed_streams.rs @@ -11,6 +11,7 @@ mod bindings { use { bindings::exports::local::local::closed::Guest, + std::mem, wit_bindgen_rt::async_support::{self, FutureReader, StreamReader, StreamResult}, }; @@ -18,9 +19,15 @@ struct Component; impl Guest for Component { async fn read_stream(mut rx: StreamReader, expected: Vec) { - let (result, buf) = 
rx.read(Vec::with_capacity(expected.len())).await; - assert_eq!(result, StreamResult::Complete(expected.len())); - assert_eq!(buf, expected); + let mut buffer = Vec::with_capacity(expected.len()); + loop { + let (result, buf) = rx.read(mem::replace(&mut buffer, Vec::new())).await; + buffer = buf; + if !matches!(result, StreamResult::Complete(_)) { + break; + } + } + assert_eq!(buffer, expected); } async fn read_future(rx: FutureReader, expected: u8, _rx_ignored: FutureReader) { diff --git a/crates/test-programs/src/bin/async_poll_stackless.rs b/crates/test-programs/src/bin/async_poll_stackless.rs index 7ad3908a3e71..43a860772ee3 100644 --- a/crates/test-programs/src/bin/async_poll_stackless.rs +++ b/crates/test-programs/src/bin/async_poll_stackless.rs @@ -121,7 +121,7 @@ unsafe extern "C" fn callback_run(event0: u32, event1: u32, event2: u32) -> u32 assert_eq!(event0, EVENT_NONE); let set = *set; - assert!(async_when_ready() == STATUS_RETURNED); + assert_eq!(async_when_ready(), STATUS_RETURNED); *state = State::S5 { set }; diff --git a/crates/test-programs/src/bin/async_poll_synchronous.rs b/crates/test-programs/src/bin/async_poll_synchronous.rs index 962b5e7ba70c..f5450ae2a6ce 100644 --- a/crates/test-programs/src/bin/async_poll_synchronous.rs +++ b/crates/test-programs/src/bin/async_poll_synchronous.rs @@ -64,7 +64,7 @@ impl Guest for Component { assert_eq!(waitable_set_poll(set), (EVENT_NONE, 0, 0)); - assert!(async_when_ready() == STATUS_RETURNED); + assert_eq!(async_when_ready(), STATUS_RETURNED); assert_eq!(waitable_set_poll(set), (EVENT_NONE, 0, 0)); diff --git a/crates/test-programs/src/bin/async_readiness.rs b/crates/test-programs/src/bin/async_readiness.rs new file mode 100644 index 000000000000..23a74e858016 --- /dev/null +++ b/crates/test-programs/src/bin/async_readiness.rs @@ -0,0 +1,244 @@ +mod bindings { + wit_bindgen::generate!({ + path: "../misc/component-async-tests/wit", + world: "readiness-guest", + }); +} + +use { + std::{mem, ptr}, + 
test_programs::async_::{ + BLOCKED, CALLBACK_CODE_EXIT, CALLBACK_CODE_WAIT, DROPPED, EVENT_NONE, EVENT_STREAM_READ, + EVENT_STREAM_WRITE, context_get, context_set, waitable_join, waitable_set_drop, + waitable_set_new, + }, +}; + +#[cfg(target_arch = "wasm32")] +#[link(wasm_import_module = "[export]local:local/readiness")] +unsafe extern "C" { + #[link_name = "[task-return][async]start"] + fn task_return_start(_: u32, _: *const u8, _: usize); +} +#[cfg(not(target_arch = "wasm32"))] +unsafe extern "C" fn task_return_start(_: u32, _: *const u8, _: usize) { + unreachable!() +} + +#[cfg(target_arch = "wasm32")] +#[link(wasm_import_module = "[export]local:local/readiness")] +unsafe extern "C" { + #[link_name = "[stream-new-0][async]start"] + fn stream_new() -> u64; +} +#[cfg(not(target_arch = "wasm32"))] +unsafe extern "C" fn stream_new() -> u64 { + unreachable!() +} + +#[cfg(target_arch = "wasm32")] +#[link(wasm_import_module = "[export]local:local/readiness")] +unsafe extern "C" { + #[link_name = "[async-lower][stream-write-0][async]start"] + fn stream_write(_: u32, _: *const u8, _: usize) -> u32; +} +#[cfg(not(target_arch = "wasm32"))] +unsafe extern "C" fn stream_write(_: u32, _: *const u8, _: usize) -> u32 { + unreachable!() +} + +#[cfg(target_arch = "wasm32")] +#[link(wasm_import_module = "[export]local:local/readiness")] +unsafe extern "C" { + #[link_name = "[async-lower][stream-read-0][async]start"] + fn stream_read(_: u32, _: *mut u8, _: usize) -> u32; +} +#[cfg(not(target_arch = "wasm32"))] +unsafe extern "C" fn stream_read(_: u32, _: *mut u8, _: usize) -> u32 { + unreachable!() +} + +#[cfg(target_arch = "wasm32")] +#[link(wasm_import_module = "[export]local:local/readiness")] +unsafe extern "C" { + #[link_name = "[stream-drop-readable-0][async]start"] + fn stream_drop_readable(_: u32); +} +#[cfg(not(target_arch = "wasm32"))] +unsafe extern "C" fn stream_drop_readable(_: u32) { + unreachable!() +} + +#[cfg(target_arch = "wasm32")] +#[link(wasm_import_module = 
"[export]local:local/readiness")] +unsafe extern "C" { + #[link_name = "[stream-drop-writable-0][async]start"] + fn stream_drop_writable(_: u32); +} +#[cfg(not(target_arch = "wasm32"))] +unsafe extern "C" fn stream_drop_writable(_: u32) { + unreachable!() +} + +static BYTES_TO_WRITE: &[u8] = &[1, 3, 5, 7, 11]; + +enum State { + S0 { + rx: u32, + expected: Vec, + }, + S1 { + set: u32, + tx: Option, + rx: Option, + expected: Vec, + }, +} + +#[unsafe(export_name = "[async-lift]local:local/readiness#[async]start")] +unsafe extern "C" fn export_start(rx: u32, expected: u32, expected_len: u32) -> u32 { + let expected_len = usize::try_from(expected_len).unwrap(); + + unsafe { + context_set( + u32::try_from(Box::into_raw(Box::new(State::S0 { + rx, + expected: Vec::from_raw_parts( + expected as usize as *mut u8, + expected_len, + expected_len, + ), + })) as usize) + .unwrap(), + ); + + callback_start(EVENT_NONE, 0, 0) + } +} + +#[unsafe(export_name = "[callback][async-lift]local:local/readiness#[async]start")] +unsafe extern "C" fn callback_start(event0: u32, event1: u32, event2: u32) -> u32 { + unsafe { + let state = &mut *(usize::try_from(context_get()).unwrap() as *mut State); + match state { + State::S0 { rx, expected } => { + assert_eq!(event0, EVENT_NONE); + + // Do a zero-length read to wait until the writer is ready. + // + // Here we assume specific behavior from the writer, namely: + // + // - It is not immediately ready to send us anything. + // + // - When it _is_ ready, it will send us all the bytes it told us to + // expect at once. + let status = stream_read(*rx, ptr::null_mut(), 0); + assert_eq!(status, BLOCKED); + + let set = waitable_set_new(); + + waitable_join(*rx, set); + + let tx = { + let pair = stream_new(); + let tx = u32::try_from(pair >> 32).unwrap(); + let rx = u32::try_from(pair & 0xFFFFFFFF_u64).unwrap(); + + // Do a zero-length write to wait until the reader is ready. 
+ // + // Here we assume specific behavior from the reader, namely: + // + // - It is not immediately ready to receive anything (indeed, it + // can't possibly be ready given that we haven't returned the + // read handle to it yet). + // + // - When it _is_ ready, it will accept all the bytes we told it + // to expect at once. + let status = stream_write(tx, ptr::null(), 0); + assert_eq!(status, BLOCKED); + + waitable_join(tx, set); + + task_return_start(rx, BYTES_TO_WRITE.as_ptr(), BYTES_TO_WRITE.len()); + + tx + }; + + *state = State::S1 { + set, + tx: Some(tx), + rx: Some(*rx), + expected: mem::take(expected), + }; + + CALLBACK_CODE_WAIT | (set << 4) + } + + State::S1 { + set, + tx, + rx, + expected, + } => { + if event0 == EVENT_STREAM_READ { + let rx = rx.take().unwrap(); + assert_eq!(event1, rx); + assert_eq!(event2, 0); + + // The writer is ready now, so this read should not block. + // + // As noted above, we rely on the writer sending us all the + // expected bytes at once. + let received = &mut vec![0_u8; expected.len()]; + let status = stream_read(rx, received.as_mut_ptr(), received.len()); + assert_eq!( + status, + DROPPED | u32::try_from(received.len() << 4).unwrap() + ); + assert_eq!(received, expected); + + waitable_join(rx, 0); + stream_drop_readable(rx); + + if tx.is_none() { + waitable_set_drop(*set); + + CALLBACK_CODE_EXIT + } else { + CALLBACK_CODE_WAIT | (*set << 4) + } + } else if event0 == EVENT_STREAM_WRITE { + let tx = tx.take().unwrap(); + assert_eq!(event1, tx); + assert_eq!(event2, 0); + + // The reader is ready now, so this write should not block. + // + // As noted above, we rely on the reader accepting all the + // expected bytes at once. 
+ let status = stream_write(tx, BYTES_TO_WRITE.as_ptr(), BYTES_TO_WRITE.len()); + assert_eq!( + status, + DROPPED | u32::try_from(BYTES_TO_WRITE.len() << 4).unwrap() + ); + + waitable_join(tx, 0); + stream_drop_writable(tx); + + if rx.is_none() { + waitable_set_drop(*set); + + CALLBACK_CODE_EXIT + } else { + CALLBACK_CODE_WAIT | (*set << 4) + } + } else { + unreachable!() + } + } + } + } +} + +// Unused function; required since this file is built as a `bin`: +fn main() {} diff --git a/crates/test-programs/src/bin/p3_sockets_tcp_bind.rs b/crates/test-programs/src/bin/p3_sockets_tcp_bind.rs index 05628f58a174..8fec2e69c183 100644 --- a/crates/test-programs/src/bin/p3_sockets_tcp_bind.rs +++ b/crates/test-programs/src/bin/p3_sockets_tcp_bind.rs @@ -4,7 +4,6 @@ use test_programs::p3::wasi::sockets::types::{ ErrorCode, IpAddress, IpAddressFamily, IpSocketAddress, TcpSocket, }; use test_programs::p3::wit_stream; -use wit_bindgen::yield_blocking; struct Component; @@ -87,25 +86,10 @@ async fn test_tcp_bind_reuseaddr(ip: IpAddress) { // If SO_REUSEADDR was configured correctly, the following lines // shouldn't be affected by the TIME_WAIT state of the just closed - // `listener1` socket. - // - // Note though that the way things are modeled in Wasmtime right now is that - // the TCP socket is kept alive by a spawned task created in `listen` - // meaning that to fully close the socket it requires the spawned task to - // shut down. That may require yielding to the host or similar so try a few - // times to let the host get around to closing the task while testing each - // time to see if we can reuse the address. This loop is bounded because it - // should complete "quickly". 
- for _ in 0..10 { - let listener2 = TcpSocket::create(ip.family()).unwrap(); - if listener2.bind(bind_addr).is_ok() { - listener2.listen().unwrap(); - return; - } - yield_blocking(); - } - - panic!("looks like REUSEADDR isn't in use?"); + // `listener1` socket: + let listener2 = TcpSocket::create(ip.family()).unwrap(); + listener2.bind(bind_addr).unwrap(); + listener2.listen().unwrap(); } // Try binding to an address that is not configured on the system. diff --git a/crates/test-programs/src/bin/p3_sockets_tcp_sample_application.rs b/crates/test-programs/src/bin/p3_sockets_tcp_sample_application.rs index ab1ff48f1d9a..c2a3602144fe 100644 --- a/crates/test-programs/src/bin/p3_sockets_tcp_sample_application.rs +++ b/crates/test-programs/src/bin/p3_sockets_tcp_sample_application.rs @@ -44,10 +44,14 @@ async fn test_tcp_sample_application(family: IpAddressFamily, bind_address: IpSo let (mut data_rx, fut) = sock.receive(); let (result, data) = data_rx.read(Vec::with_capacity(100)).await; assert_eq!(result, StreamResult::Complete(first_message.len())); - // Check that we sent and received our message! assert_eq!(data, first_message); // Not guaranteed to work but should work in practice. - fut.await.unwrap() + + let (result, data) = data_rx.read(Vec::with_capacity(1)).await; + assert_eq!(result, StreamResult::Dropped); + assert_eq!(data, []); + + fut.await.unwrap(); }, ); @@ -73,10 +77,14 @@ async fn test_tcp_sample_application(family: IpAddressFamily, bind_address: IpSo let (mut data_rx, fut) = sock.receive(); let (result, data) = data_rx.read(Vec::with_capacity(100)).await; assert_eq!(result, StreamResult::Complete(second_message.len())); - // Check that we sent and received our message! assert_eq!(data, second_message); // Not guaranteed to work but should work in practice. 
- fut.await.unwrap() + + let (result, data) = data_rx.read(Vec::with_capacity(1)).await; + assert_eq!(result, StreamResult::Dropped); + assert_eq!(data, []); + + fut.await.unwrap(); } ); } diff --git a/crates/wasi/src/filesystem.rs b/crates/wasi/src/filesystem.rs index cf2dc7b24da9..8f8a281050eb 100644 --- a/crates/wasi/src/filesystem.rs +++ b/crates/wasi/src/filesystem.rs @@ -701,6 +701,12 @@ impl File { } } + /// Returns reference to the underlying [`cap_std::fs::File`] + #[cfg(feature = "p3")] + pub(crate) fn as_file(&self) -> &Arc { + &self.file + } + pub(crate) async fn advise( &self, offset: u64, @@ -746,7 +752,7 @@ pub struct Dir { /// oflags back out using fcntl. pub open_mode: OpenMode, - allow_blocking_current_thread: bool, + pub(crate) allow_blocking_current_thread: bool, } impl Dir { @@ -793,6 +799,12 @@ impl Dir { } } + /// Returns reference to the underlying [`cap_std::fs::Dir`] + #[cfg(feature = "p3")] + pub(crate) fn as_dir(&self) -> &Arc { + &self.dir + } + pub(crate) async fn create_directory_at(&self, path: String) -> Result<(), ErrorCode> { if !self.perms.contains(DirPerms::MUTATE) { return Err(ErrorCode::NotPermitted); diff --git a/crates/wasi/src/p3/cli/host.rs b/crates/wasi/src/p3/cli/host.rs index c86ef58ad615..3915f5754c4e 100644 --- a/crates/wasi/src/p3/cli/host.rs +++ b/crates/wasi/src/p3/cli/host.rs @@ -8,70 +8,110 @@ use crate::p3::bindings::cli::{ use crate::p3::cli::{TerminalInput, TerminalOutput}; use anyhow::{Context as _, anyhow}; use bytes::BytesMut; +use core::pin::Pin; +use core::task::{Context, Poll}; use std::io::Cursor; -use tokio::io::{AsyncRead, AsyncReadExt as _, AsyncWrite, AsyncWriteExt as _}; +use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; +use wasmtime::StoreContextMut; use wasmtime::component::{ - Accessor, AccessorTask, GuardedStreamReader, GuardedStreamWriter, HasData, Resource, - StreamReader, StreamWriter, + Accessor, Destination, Resource, Source, StreamConsumer, StreamProducer, StreamReader, + StreamResult, }; 
-struct InputTask { - rx: T, - tx: StreamWriter, +struct InputStreamProducer { + rx: Pin>, } -impl AccessorTask> for InputTask -where - U: HasData, - V: AsyncRead + Send + Sync + Unpin + 'static, -{ - async fn run(mut self, store: &Accessor) -> wasmtime::Result<()> { - let mut buf = BytesMut::with_capacity(DEFAULT_BUFFER_CAPACITY); - let mut tx = GuardedStreamWriter::new(store, self.tx); - while !tx.is_closed() { - match self.rx.read_buf(&mut buf).await { - Ok(0) => return Ok(()), - Ok(_) => { - buf = tx.write_all(Cursor::new(buf)).await.into_inner(); - buf.clear(); - } - Err(_err) => { - // TODO: Report the error to the guest - return Ok(()); +impl StreamProducer for InputStreamProducer { + type Item = u8; + type Buffer = Cursor; + + fn poll_produce<'a>( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + store: StoreContextMut<'a, D>, + dst: &'a mut Destination<'a, Self::Item, Self::Buffer>, + finish: bool, + ) -> Poll> { + if let Some(mut dst) = dst.as_direct_destination(store) { + if !dst.remaining().is_empty() { + let mut buf = ReadBuf::new(dst.remaining()); + match self.rx.as_mut().poll_read(cx, &mut buf) { + Poll::Ready(Ok(())) if buf.filled().is_empty() => { + return Poll::Ready(Ok(StreamResult::Dropped)); + } + Poll::Ready(Ok(())) => { + let n = buf.filled().len(); + dst.mark_written(n); + return Poll::Ready(Ok(StreamResult::Completed)); + } + Poll::Ready(Err(..)) => { + // TODO: Report the error to the guest + return Poll::Ready(Ok(StreamResult::Dropped)); + } + Poll::Pending if finish => return Poll::Ready(Ok(StreamResult::Cancelled)), + Poll::Pending => return Poll::Pending, } } } - Ok(()) + let mut buf = dst.take_buffer().into_inner(); + buf.clear(); + buf.reserve(DEFAULT_BUFFER_CAPACITY); + let mut rbuf = ReadBuf::uninit(buf.spare_capacity_mut()); + match self.rx.as_mut().poll_read(cx, &mut rbuf) { + Poll::Ready(Ok(())) if rbuf.filled().is_empty() => { + Poll::Ready(Ok(StreamResult::Dropped)) + } + Poll::Ready(Ok(())) => { + let n = 
rbuf.filled().len(); + // SAFETY: `ReadBuf::filled` promised us `n` bytes have + // been initialized. + unsafe { buf.set_len(n) }; + dst.set_buffer(Cursor::new(buf)); + Poll::Ready(Ok(StreamResult::Completed)) + } + Poll::Ready(Err(..)) => { + // TODO: Report the error to the guest + Poll::Ready(Ok(StreamResult::Dropped)) + } + Poll::Pending if finish => Poll::Ready(Ok(StreamResult::Cancelled)), + Poll::Pending => Poll::Pending, + } + } } -struct OutputTask { - rx: StreamReader, - tx: T, +struct OutputStreamConsumer { + tx: Pin>, } -impl AccessorTask> for OutputTask -where - U: HasData, - V: AsyncWrite + Send + Sync + Unpin + 'static, -{ - async fn run(mut self, store: &Accessor) -> wasmtime::Result<()> { - let mut buf = BytesMut::with_capacity(DEFAULT_BUFFER_CAPACITY); - let mut rx = GuardedStreamReader::new(store, self.rx); - while !rx.is_closed() { - buf = rx.read(buf).await; - match self.tx.write_all(&buf).await { - Ok(()) => { - buf.clear(); - continue; - } - Err(_err) => { - // TODO: Report the error to the guest - return Ok(()); - } +impl StreamConsumer for OutputStreamConsumer { + type Item = u8; + + fn poll_consume( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + store: StoreContextMut, + src: &mut Source, + finish: bool, + ) -> Poll> { + let mut src = src.as_direct_source(store); + let buf = src.remaining(); + match self.tx.as_mut().poll_write(cx, buf) { + Poll::Ready(Ok(n)) if buf.is_empty() => { + debug_assert_eq!(n, 0); + Poll::Ready(Ok(StreamResult::Completed)) + } + Poll::Ready(Ok(n)) => { + src.mark_read(n); + Poll::Ready(Ok(StreamResult::Completed)) + } + Poll::Ready(Err(..)) => { + // TODO: Report the error to the guest + Poll::Ready(Ok(StreamResult::Dropped)) + } + Poll::Pending if finish => Poll::Ready(Ok(StreamResult::Cancelled)), + Poll::Pending => Poll::Pending, + } - Ok(()) } } @@ -140,17 +180,16 @@ impl terminal_stderr::Host for WasiCliCtxView<'_> { impl stdin::HostWithStore for WasiCli { async fn get_stdin(store: &Accessor) -> 
wasmtime::Result> { - store.with(|mut view| { - let instance = view.instance(); - let (tx, rx) = instance - .stream(&mut view) - .context("failed to create stream")?; - let stdin = view.get().ctx.stdin.async_stream(); - view.spawn(InputTask { - rx: Box::into_pin(stdin), - tx, - }); - Ok(rx) + let instance = store.instance(); + store.with(|mut store| { + let rx = store.get().ctx.stdin.async_stream(); + Ok(StreamReader::new( + instance, + &mut store, + InputStreamProducer { + rx: Box::into_pin(rx), + }, + )) }) } } @@ -162,12 +201,14 @@ impl stdout::HostWithStore for WasiCli { store: &Accessor, data: StreamReader, ) -> wasmtime::Result<()> { - store.with(|mut view| { - let tx = view.get().ctx.stdout.async_stream(); - view.spawn(OutputTask { - rx: data, - tx: Box::into_pin(tx), - }); + store.with(|mut store| { + let tx = store.get().ctx.stdout.async_stream(); + data.pipe( + store, + OutputStreamConsumer { + tx: Box::into_pin(tx), + }, + ); Ok(()) }) } @@ -180,12 +221,14 @@ impl stderr::HostWithStore for WasiCli { store: &Accessor, data: StreamReader, ) -> wasmtime::Result<()> { - store.with(|mut view| { - let tx = view.get().ctx.stderr.async_stream(); - view.spawn(OutputTask { - rx: data, - tx: Box::into_pin(tx), - }); + store.with(|mut store| { + let tx = store.get().ctx.stderr.async_stream(); + data.pipe( + store, + OutputStreamConsumer { + tx: Box::into_pin(tx), + }, + ); Ok(()) }) } diff --git a/crates/wasi/src/p3/filesystem/host.rs b/crates/wasi/src/p3/filesystem/host.rs index 34e8304ec0d6..a0ffe530e61e 100644 --- a/crates/wasi/src/p3/filesystem/host.rs +++ b/crates/wasi/src/p3/filesystem/host.rs @@ -1,20 +1,28 @@ -use crate::DirPerms; use crate::filesystem::{Descriptor, Dir, File, WasiFilesystem, WasiFilesystemCtxView}; -use crate::p3::DEFAULT_BUFFER_CAPACITY; use crate::p3::bindings::clocks::wall_clock; use crate::p3::bindings::filesystem::types::{ self, Advice, DescriptorFlags, DescriptorStat, DescriptorType, DirectoryEntry, ErrorCode, Filesize, 
MetadataHashValue, NewTimestamp, OpenFlags, PathFlags, }; use crate::p3::filesystem::{FilesystemError, FilesystemResult, preopens}; -use crate::{FilePerms, TrappableError}; -use anyhow::Context as _; +use crate::p3::{ + DEFAULT_BUFFER_CAPACITY, FutureOneshotProducer, FutureReadyProducer, StreamEmptyProducer, +}; +use crate::{DirPerms, FilePerms}; +use anyhow::{Context as _, anyhow}; use bytes::BytesMut; +use core::mem; +use core::pin::Pin; +use core::task::{Context, Poll, ready}; use std::io::Cursor; +use std::sync::Arc; use system_interface::fs::FileIoExt as _; +use tokio::sync::{mpsc, oneshot}; +use tokio::task::{JoinHandle, spawn_blocking}; +use wasmtime::StoreContextMut; use wasmtime::component::{ - Accessor, AccessorTask, FutureReader, FutureWriter, GuardedFutureWriter, GuardedStreamWriter, - Resource, ResourceTable, StreamReader, StreamWriter, + Accessor, Destination, FutureReader, Resource, ResourceTable, Source, StreamConsumer, + StreamProducer, StreamReader, StreamResult, VecBuffer, }; fn get_descriptor<'a>( @@ -24,7 +32,7 @@ fn get_descriptor<'a>( table .get(fd) .context("failed to get descriptor resource from table") - .map_err(TrappableError::trap) + .map_err(FilesystemError::trap) } fn get_file<'a>( @@ -108,125 +116,568 @@ fn systemtimespec_from(t: NewTimestamp) -> Result, - result_tx: FutureWriter>, -} - -impl AccessorTask> for ReadFileTask { - async fn run(self, store: &Accessor) -> wasmtime::Result<()> { - let mut buf = BytesMut::zeroed(DEFAULT_BUFFER_CAPACITY); - let mut offset = self.offset; - let mut data_tx = GuardedStreamWriter::new(store, self.data_tx); - let result_tx = GuardedFutureWriter::new(store, self.result_tx); - let res = loop { - match self - .file - .run_blocking(move |file| { - let n = file.read_at(&mut buf, offset)?; - buf.truncate(n); - std::io::Result::Ok(buf) - }) - .await - { - Ok(chunk) if chunk.is_empty() => { - break Ok(()); + result: Option>>, + task: Option>>, +} + +impl Drop for ReadStreamProducer { + fn drop(&mut self) 
{ + self.close(Ok(())) + } +} + +impl ReadStreamProducer { + fn close(&mut self, res: Result<(), ErrorCode>) { + if let Some(tx) = self.result.take() { + _ = tx.send(res); + } + } +} + +impl StreamProducer for ReadStreamProducer { + type Item = u8; + type Buffer = Cursor; + + fn poll_produce<'a>( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + store: StoreContextMut<'a, D>, + dst: &'a mut Destination<'a, Self::Item, Self::Buffer>, + finish: bool, + ) -> Poll> { + if let Some(task) = self.task.as_mut() { + let res = ready!(Pin::new(task).poll(cx)); + self.task = None; + match res { + Ok(Ok(buf)) if buf.is_empty() => { + self.close(Ok(())); + return Poll::Ready(Ok(StreamResult::Dropped)); } - Ok(chunk) => { - let Ok(n) = chunk.len().try_into() else { - break Err(ErrorCode::Overflow); + Ok(Ok(buf)) => { + let n = buf.len(); + dst.set_buffer(Cursor::new(buf)); + let Ok(n) = n.try_into() else { + self.close(Err(ErrorCode::Overflow)); + return Poll::Ready(Ok(StreamResult::Dropped)); }; - let Some(n) = offset.checked_add(n) else { - break Err(ErrorCode::Overflow); + let Some(n) = self.offset.checked_add(n) else { + self.close(Err(ErrorCode::Overflow)); + return Poll::Ready(Ok(StreamResult::Dropped)); }; - offset = n; - buf = data_tx.write_all(Cursor::new(chunk)).await.into_inner(); - if data_tx.is_closed() { - break Ok(()); + self.offset = n; + return Poll::Ready(Ok(StreamResult::Completed)); + } + Ok(Err(err)) => { + self.close(Err(err.into())); + return Poll::Ready(Ok(StreamResult::Dropped)); + } + Err(err) => { + return Poll::Ready(Err(anyhow!(err).context("failed to join I/O task"))); + } + } + } + if finish { + return Poll::Ready(Ok(StreamResult::Cancelled)); + } + if let Some(file) = self.file.as_blocking_file() { + if let Some(mut dst) = dst.as_direct_destination(store) { + let buf = dst.remaining(); + if !buf.is_empty() { + match file.read_at(buf, self.offset) { + Ok(0) => { + self.close(Ok(())); + return Poll::Ready(Ok(StreamResult::Dropped)); + } + Ok(n) 
=> { + dst.mark_written(n); + let Ok(n) = n.try_into() else { + self.close(Err(ErrorCode::Overflow)); + return Poll::Ready(Ok(StreamResult::Dropped)); + }; + let Some(n) = self.offset.checked_add(n) else { + self.close(Err(ErrorCode::Overflow)); + return Poll::Ready(Ok(StreamResult::Dropped)); + }; + self.offset = n; + return Poll::Ready(Ok(StreamResult::Completed)); + } + Err(err) => { + self.close(Err(err.into())); + return Poll::Ready(Ok(StreamResult::Dropped)); + } } - buf.resize(DEFAULT_BUFFER_CAPACITY, 0); + } + } + let mut buf = dst.take_buffer().into_inner(); + buf.resize(DEFAULT_BUFFER_CAPACITY, 0); + match file.read_at(&mut buf, self.offset) { + Ok(0) => { + self.close(Ok(())); + return Poll::Ready(Ok(StreamResult::Dropped)); + } + Ok(n) => { + buf.truncate(n); + dst.set_buffer(Cursor::new(buf)); + let Ok(n) = n.try_into() else { + self.close(Err(ErrorCode::Overflow)); + return Poll::Ready(Ok(StreamResult::Dropped)); + }; + let Some(n) = self.offset.checked_add(n) else { + self.close(Err(ErrorCode::Overflow)); + return Poll::Ready(Ok(StreamResult::Dropped)); + }; + self.offset = n; + return Poll::Ready(Ok(StreamResult::Completed)); } Err(err) => { - break Err(err.into()); + self.close(Err(err.into())); + return Poll::Ready(Ok(StreamResult::Dropped)); } } + } + let mut buf = dst.take_buffer().into_inner(); + buf.resize(DEFAULT_BUFFER_CAPACITY, 0); + let file = Arc::clone(self.file.as_file()); + let offset = self.offset; + let mut task = spawn_blocking(move || { + file.read_at(&mut buf, offset).map(|n| { + buf.truncate(n); + buf + }) + }); + let res = match Pin::new(&mut task).poll(cx) { + Poll::Ready(res) => res, + Poll::Pending => { + self.task = Some(task); + return Poll::Pending; + } }; - drop(self.file); - drop(data_tx); - result_tx.write(res).await; - Ok(()) + match res { + Ok(Ok(buf)) if buf.is_empty() => { + self.close(Ok(())); + Poll::Ready(Ok(StreamResult::Dropped)) + } + Ok(Ok(buf)) => { + let n = buf.len(); + dst.set_buffer(Cursor::new(buf)); + 
let Ok(n) = n.try_into() else { + self.close(Err(ErrorCode::Overflow)); + return Poll::Ready(Ok(StreamResult::Dropped)); + }; + let Some(n) = self.offset.checked_add(n) else { + self.close(Err(ErrorCode::Overflow)); + return Poll::Ready(Ok(StreamResult::Dropped)); + }; + self.offset = n; + Poll::Ready(Ok(StreamResult::Completed)) + } + Ok(Err(err)) => { + self.close(Err(err.into())); + Poll::Ready(Ok(StreamResult::Dropped)) + } + Err(err) => Poll::Ready(Err(anyhow!(err).context("failed to join I/O task"))), + } } } -struct ReadDirectoryTask { - dir: Dir, - data_tx: StreamWriter, - result_tx: FutureWriter>, -} - -impl AccessorTask> for ReadDirectoryTask { - async fn run(self, store: &Accessor) -> wasmtime::Result<()> { - let mut data_tx = GuardedStreamWriter::new(store, self.data_tx); - let result_tx = GuardedFutureWriter::new(store, self.result_tx); - let res = loop { - let mut entries = match self.dir.run_blocking(cap_std::fs::Dir::entries).await { - Ok(entries) => entries, - Err(err) => break Err(err.into()), +fn map_dir_entry( + entry: std::io::Result, +) -> Result, ErrorCode> { + match entry { + Ok(entry) => { + let meta = entry.metadata()?; + let Ok(name) = entry.file_name().into_string() else { + return Err(ErrorCode::IllegalByteSequence); }; - if let Err(err) = loop { - let Some((res, tail)) = self - .dir - .run_blocking(move |_| entries.next().map(|entry| (entry, entries))) - .await - else { - break Ok(()); + Ok(Some(DirectoryEntry { + type_: meta.file_type().into(), + name, + })) + } + Err(err) => { + // On windows, filter out files like `C:\DumpStack.log.tmp` which we + // can't get full metadata for. + #[cfg(windows)] + { + use windows_sys::Win32::Foundation::{ + ERROR_ACCESS_DENIED, ERROR_SHARING_VIOLATION, }; - entries = tail; - let entry = match res { - Ok(entry) => entry, - Err(err) => { - // On windows, filter out files like `C:\DumpStack.log.tmp` which we - // can't get full metadata for. 
- #[cfg(windows)] - { - use windows_sys::Win32::Foundation::{ - ERROR_ACCESS_DENIED, ERROR_SHARING_VIOLATION, - }; - if err.raw_os_error() == Some(ERROR_SHARING_VIOLATION as i32) - || err.raw_os_error() == Some(ERROR_ACCESS_DENIED as i32) - { - continue; + if err.raw_os_error() == Some(ERROR_SHARING_VIOLATION as i32) + || err.raw_os_error() == Some(ERROR_ACCESS_DENIED as i32) + { + return Ok(None); + } + } + Err(err.into()) + } + } +} + +struct BlockingDirectoryStreamProducer { + dir: Arc, + result: Option>>, +} + +impl Drop for BlockingDirectoryStreamProducer { + fn drop(&mut self) { + self.close(Ok(())) + } +} + +impl BlockingDirectoryStreamProducer { + fn close(&mut self, res: Result<(), ErrorCode>) { + if let Some(tx) = self.result.take() { + _ = tx.send(res); + } + } +} + +impl StreamProducer for BlockingDirectoryStreamProducer { + type Item = DirectoryEntry; + type Buffer = VecBuffer; + + fn poll_produce<'a>( + mut self: Pin<&mut Self>, + _: &mut Context<'_>, + _: StoreContextMut<'a, D>, + dst: &'a mut Destination<'a, Self::Item, Self::Buffer>, + _finish: bool, + ) -> Poll> { + let entries = match self.dir.entries() { + Ok(entries) => entries, + Err(err) => { + self.close(Err(err.into())); + return Poll::Ready(Ok(StreamResult::Dropped)); + } + }; + let res = match entries + .filter_map(|entry| map_dir_entry(entry).transpose()) + .collect::, _>>() + { + Ok(entries) => { + dst.set_buffer(entries.into()); + Ok(()) + } + Err(err) => Err(err), + }; + self.close(res); + Poll::Ready(Ok(StreamResult::Dropped)) + } +} + +struct NonblockingDirectoryStreamProducer(DirStreamState); + +enum DirStreamState { + Init { + dir: Arc, + result: oneshot::Sender>, + }, + InProgress { + rx: mpsc::Receiver, + task: JoinHandle>, + result: oneshot::Sender>, + }, + Closed, +} + +impl Drop for NonblockingDirectoryStreamProducer { + fn drop(&mut self) { + self.close(Ok(())) + } +} + +impl NonblockingDirectoryStreamProducer { + fn close(&mut self, res: Result<(), ErrorCode>) { + if let 
DirStreamState::Init { result, .. } | DirStreamState::InProgress { result, .. } = + mem::replace(&mut self.0, DirStreamState::Closed) + { + _ = result.send(res); + } + } +} + +impl StreamProducer for NonblockingDirectoryStreamProducer { + type Item = DirectoryEntry; + type Buffer = Option; + + fn poll_produce<'a>( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + store: StoreContextMut<'a, D>, + dst: &'a mut Destination<'a, Self::Item, Self::Buffer>, + finish: bool, + ) -> Poll> { + match mem::replace(&mut self.0, DirStreamState::Closed) { + DirStreamState::Init { .. } if finish => Poll::Ready(Ok(StreamResult::Cancelled)), + DirStreamState::Init { dir, result } => { + let (entry_tx, entry_rx) = mpsc::channel(1); + let task = spawn_blocking(move || { + let entries = dir.entries()?; + for entry in entries { + if let Some(entry) = map_dir_entry(entry)? { + if let Err(_) = entry_tx.blocking_send(entry) { + break; } } - break Err(err.into()); } + Ok(()) + }); + self.0 = DirStreamState::InProgress { + rx: entry_rx, + task, + result, }; - let meta = match entry.metadata() { - Ok(meta) => meta, - Err(err) => break Err(err.into()), + self.poll_produce(cx, store, dst, finish) + } + DirStreamState::InProgress { + mut rx, + mut task, + result, + } => { + let Poll::Ready(res) = rx.poll_recv(cx) else { + self.0 = DirStreamState::InProgress { rx, task, result }; + if finish { + return Poll::Ready(Ok(StreamResult::Cancelled)); + } + return Poll::Pending; }; - let Ok(name) = entry.file_name().into_string() else { - break Err(ErrorCode::IllegalByteSequence); + match res { + Some(entry) => { + self.0 = DirStreamState::InProgress { rx, task, result }; + dst.set_buffer(Some(entry)); + Poll::Ready(Ok(StreamResult::Completed)) + } + None => { + let res = ready!(Pin::new(&mut task).poll(cx)) + .context("failed to join I/O task")?; + self.0 = DirStreamState::InProgress { rx, task, result }; + self.close(res); + Poll::Ready(Ok(StreamResult::Dropped)) + } + } + } + DirStreamState::Closed 
=> Poll::Ready(Ok(StreamResult::Dropped)), + } + } +} + +struct WriteStreamConsumer { + file: File, + offset: u64, + result: Option>>, + buffer: BytesMut, + task: Option>>, +} + +impl Drop for WriteStreamConsumer { + fn drop(&mut self) { + self.close(Ok(())) + } +} + +impl WriteStreamConsumer { + fn close(&mut self, res: Result<(), ErrorCode>) { + if let Some(tx) = self.result.take() { + _ = tx.send(res); + } + } +} + +impl StreamConsumer for WriteStreamConsumer { + type Item = u8; + + fn poll_consume( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + store: StoreContextMut, + src: &mut Source, + finish: bool, + ) -> Poll> { + let mut src = src.as_direct_source(store); + if let Some(task) = self.task.as_mut() { + let res = ready!(Pin::new(task).poll(cx)); + self.task = None; + match res { + Ok(Ok((buf, n))) => { + src.mark_read(n); + self.buffer = buf; + self.buffer.clear(); + let Ok(n) = n.try_into() else { + self.close(Err(ErrorCode::Overflow)); + return Poll::Ready(Ok(StreamResult::Dropped)); + }; + let Some(n) = self.offset.checked_add(n) else { + self.close(Err(ErrorCode::Overflow)); + return Poll::Ready(Ok(StreamResult::Dropped)); + }; + self.offset = n; + return Poll::Ready(Ok(StreamResult::Completed)); + } + Ok(Err(err)) => { + self.close(Err(err.into())); + return Poll::Ready(Ok(StreamResult::Dropped)); + } + Err(err) => { + return Poll::Ready(Err(anyhow!(err).context("failed to join I/O task"))); + } + } + } + if finish { + return Poll::Ready(Ok(StreamResult::Cancelled)); + } + if let Some(file) = self.file.as_blocking_file() { + match file.write_at(src.remaining(), self.offset) { + Ok(n) => { + src.mark_read(n); + let Ok(n) = n.try_into() else { + self.close(Err(ErrorCode::Overflow)); + return Poll::Ready(Ok(StreamResult::Dropped)); + }; + let Some(n) = self.offset.checked_add(n) else { + self.close(Err(ErrorCode::Overflow)); + return Poll::Ready(Ok(StreamResult::Dropped)); + }; + self.offset = n; + return Poll::Ready(Ok(StreamResult::Completed)); + 
} + Err(err) => { + self.close(Err(err.into())); + return Poll::Ready(Ok(StreamResult::Dropped)); + } + } + } + debug_assert!(self.buffer.is_empty()); + self.buffer.extend_from_slice(src.remaining()); + let buf = mem::take(&mut self.buffer); + let file = Arc::clone(self.file.as_file()); + let offset = self.offset; + let mut task = spawn_blocking(move || file.write_at(&buf, offset).map(|n| (buf, n))); + let res = match Pin::new(&mut task).poll(cx) { + Poll::Ready(res) => res, + Poll::Pending => { + self.task = Some(task); + return Poll::Pending; + } + }; + match res { + Ok(Ok((buf, n))) => { + src.mark_read(n); + self.buffer = buf; + self.buffer.clear(); + let Ok(n) = n.try_into() else { + self.close(Err(ErrorCode::Overflow)); + return Poll::Ready(Ok(StreamResult::Dropped)); }; - data_tx - .write(Some(DirectoryEntry { - type_: meta.file_type().into(), - name, - })) - .await; - if data_tx.is_closed() { - break Ok(()); + let Some(n) = self.offset.checked_add(n) else { + self.close(Err(ErrorCode::Overflow)); + return Poll::Ready(Ok(StreamResult::Dropped)); + }; + self.offset = n; + Poll::Ready(Ok(StreamResult::Completed)) + } + Ok(Err(err)) => { + self.close(Err(err.into())); + Poll::Ready(Ok(StreamResult::Dropped)) + } + Err(err) => Poll::Ready(Err(anyhow!(err).context("failed to join I/O task"))), + } + } +} + +struct AppendStreamConsumer { + file: File, + result: Option>>, + buffer: BytesMut, + task: Option>>, +} + +impl Drop for AppendStreamConsumer { + fn drop(&mut self) { + self.close(Ok(())) + } +} + +impl AppendStreamConsumer { + fn close(&mut self, res: Result<(), ErrorCode>) { + if let Some(tx) = self.result.take() { + _ = tx.send(res); + } + } +} + +impl StreamConsumer for AppendStreamConsumer { + type Item = u8; + + fn poll_consume( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + store: StoreContextMut, + src: &mut Source, + finish: bool, + ) -> Poll> { + let mut src = src.as_direct_source(store); + if let Some(task) = self.task.as_mut() { + let res = 
ready!(Pin::new(task).poll(cx)); + self.task = None; + match res { + Ok(Ok((buf, n))) => { + src.mark_read(n); + self.buffer = buf; + self.buffer.clear(); + return Poll::Ready(Ok(StreamResult::Completed)); } - } { - break Err(err); - }; + Ok(Err(err)) => { + self.close(Err(err.into())); + return Poll::Ready(Ok(StreamResult::Dropped)); + } + Err(err) => { + return Poll::Ready(Err(anyhow!(err).context("failed to join I/O task"))); + } + } + } + if finish { + return Poll::Ready(Ok(StreamResult::Cancelled)); + } + if let Some(file) = self.file.as_blocking_file() { + match file.append(src.remaining()) { + Ok(n) => { + src.mark_read(n); + return Poll::Ready(Ok(StreamResult::Completed)); + } + Err(err) => { + self.close(Err(err.into())); + return Poll::Ready(Ok(StreamResult::Dropped)); + } + } + } + debug_assert!(self.buffer.is_empty()); + self.buffer.extend_from_slice(src.remaining()); + let buf = mem::take(&mut self.buffer); + let file = Arc::clone(self.file.as_file()); + let mut task = spawn_blocking(move || file.append(&buf).map(|n| (buf, n))); + let res = match Pin::new(&mut task).poll(cx) { + Poll::Ready(res) => res, + Poll::Pending => { + self.task = Some(task); + return Poll::Pending; + } }; - drop(self.dir); - drop(data_tx); - result_tx.write(res).await; - Ok(()) + match res { + Ok(Ok((buf, n))) => { + src.mark_read(n); + self.buffer = buf; + self.buffer.clear(); + Poll::Ready(Ok(StreamResult::Completed)) + } + Ok(Err(err)) => { + self.close(Err(err.into())); + Poll::Ready(Ok(StreamResult::Dropped)) + } + Err(err) => Poll::Ready(Err(anyhow!(err).context("failed to join I/O task"))), + } } } @@ -242,87 +693,97 @@ impl types::HostDescriptorWithStore for WasiFilesystem { fd: Resource, offset: Filesize, ) -> wasmtime::Result<(StreamReader, FutureReader>)> { - let (file, (data_tx, data_rx), (result_tx, result_rx)) = store.with(|mut store| { - let file = get_file(store.get().table, &fd).cloned()?; - let instance = store.instance(); - let data = instance - .stream(&mut 
store) - .context("failed to create stream")?; - let result = if !file.perms.contains(FilePerms::READ) { - instance.future(&mut store, || Err(types::ErrorCode::NotPermitted)) - } else { - instance.future(&mut store, || unreachable!()) + let instance = store.instance(); + store.with(|mut store| { + let file = get_file(store.get().table, &fd)?; + if !file.perms.contains(FilePerms::READ) { + return Ok(( + StreamReader::new(instance, &mut store, StreamEmptyProducer::default()), + FutureReader::new( + instance, + &mut store, + FutureReadyProducer(Err(ErrorCode::NotPermitted)), + ), + )); } - .context("failed to create future")?; - anyhow::Ok((file, data, result)) - })?; - if !file.perms.contains(FilePerms::READ) { - return Ok((data_rx, result_rx)); - } - store.spawn(ReadFileTask { - file, - offset, - data_tx, - result_tx, - }); - Ok((data_rx, result_rx)) + + let file = file.clone(); + let (result_tx, result_rx) = oneshot::channel(); + Ok(( + StreamReader::new( + instance, + &mut store, + ReadStreamProducer { + file, + offset, + result: Some(result_tx), + task: None, + }, + ), + FutureReader::new(instance, &mut store, FutureOneshotProducer(result_rx)), + )) + }) } async fn write_via_stream( store: &Accessor, fd: Resource, - mut data: StreamReader, - mut offset: Filesize, + data: StreamReader, + offset: Filesize, ) -> FilesystemResult<()> { - let file = store.get_file(&fd)?; - if !file.perms.contains(FilePerms::WRITE) { - return Err(types::ErrorCode::NotPermitted.into()); - } - let mut buf = Vec::with_capacity(DEFAULT_BUFFER_CAPACITY); - while !data.is_closed() { - buf = data.read(store, buf).await; - buf = file - .spawn_blocking(move |file| { - let mut pos = 0; - while pos != buf.len() { - let n = file.write_at(&buf[pos..], offset)?; - pos = pos.saturating_add(n); - let n = n.try_into().or(Err(ErrorCode::Overflow))?; - offset = offset.checked_add(n).ok_or(ErrorCode::Overflow)?; - } - FilesystemResult::Ok(buf) - }) - .await?; - offset = offset.saturating_add(buf.len() as 
_); - buf.clear(); - } + let (result_tx, result_rx) = oneshot::channel(); + store.with(|mut store| { + let file = get_file(store.get().table, &fd)?; + if !file.perms.contains(FilePerms::WRITE) { + return Err(ErrorCode::NotPermitted.into()); + } + let file = file.clone(); + data.pipe( + store, + WriteStreamConsumer { + file, + offset, + result: Some(result_tx), + buffer: BytesMut::default(), + task: None, + }, + ); + FilesystemResult::Ok(()) + })?; + result_rx + .await + .context("oneshot sender dropped") + .map_err(FilesystemError::trap)??; Ok(()) } async fn append_via_stream( store: &Accessor, fd: Resource, - mut data: StreamReader, + data: StreamReader, ) -> FilesystemResult<()> { - let file = store.get_file(&fd)?; - if !file.perms.contains(FilePerms::WRITE) { - return Err(types::ErrorCode::NotPermitted.into()); - } - let mut buf = Vec::with_capacity(DEFAULT_BUFFER_CAPACITY); - while !data.is_closed() { - buf = data.read(store, buf).await; - buf = file - .spawn_blocking(move |file| { - let mut pos = 0; - while pos != buf.len() { - let n = file.append(&buf[pos..])?; - pos = pos.saturating_add(n); - } - FilesystemResult::Ok(buf) - }) - .await?; - buf.clear(); - } + let (result_tx, result_rx) = oneshot::channel(); + store.with(|mut store| { + let file = get_file(store.get().table, &fd)?; + if !file.perms.contains(FilePerms::WRITE) { + return Err(ErrorCode::NotPermitted.into()); + } + let file = file.clone(); + data.pipe( + store, + AppendStreamConsumer { + file, + result: Some(result_tx), + buffer: BytesMut::default(), + task: None, + }, + ); + FilesystemResult::Ok(()) + })?; + result_rx + .await + .context("oneshot sender dropped") + .map_err(FilesystemError::trap)??; Ok(()) } @@ -395,29 +856,46 @@ impl types::HostDescriptorWithStore for WasiFilesystem { StreamReader, FutureReader>, )> { - let (dir, (data_tx, data_rx), (result_tx, result_rx)) = store.with(|mut store| { - let dir = get_dir(store.get().table, &fd).cloned()?; - let instance = store.instance(); - let 
data = instance - .stream(&mut store) - .context("failed to create stream")?; - let result = if !dir.perms.contains(DirPerms::READ) { - instance.future(&mut store, || Err(types::ErrorCode::NotPermitted)) - } else { - instance.future(&mut store, || unreachable!()) + let instance = store.instance(); + store.with(|mut store| { + let dir = get_dir(store.get().table, &fd)?; + if !dir.perms.contains(DirPerms::READ) { + return Ok(( + StreamReader::new(instance, &mut store, StreamEmptyProducer::default()), + FutureReader::new( + instance, + &mut store, + FutureReadyProducer(Err(ErrorCode::NotPermitted)), + ), + )); } - .context("failed to create future")?; - anyhow::Ok((dir, data, result)) - })?; - if !dir.perms.contains(DirPerms::READ) { - return Ok((data_rx, result_rx)); - } - store.spawn(ReadDirectoryTask { - dir, - data_tx, - result_tx, - }); - Ok((data_rx, result_rx)) + let allow_blocking_current_thread = dir.allow_blocking_current_thread; + let dir = Arc::clone(dir.as_dir()); + let (result_tx, result_rx) = oneshot::channel(); + let stream = if allow_blocking_current_thread { + StreamReader::new( + instance, + &mut store, + BlockingDirectoryStreamProducer { + dir, + result: Some(result_tx), + }, + ) + } else { + StreamReader::new( + instance, + &mut store, + NonblockingDirectoryStreamProducer(DirStreamState::Init { + dir, + result: result_tx, + }), + ) + }; + Ok(( + stream, + FutureReader::new(instance, &mut store, FutureOneshotProducer(result_rx)), + )) + }) } async fn sync(store: &Accessor, fd: Resource) -> FilesystemResult<()> { diff --git a/crates/wasi/src/p3/mod.rs b/crates/wasi/src/p3/mod.rs index 029cf66f5653..c3cdf63117b9 100644 --- a/crates/wasi/src/p3/mod.rs +++ b/crates/wasi/src/p3/mod.rs @@ -17,11 +17,68 @@ pub mod sockets; use crate::WasiView; use crate::p3::bindings::LinkOptions; -use wasmtime::component::Linker; +use anyhow::Context as _; +use core::marker::PhantomData; +use core::pin::Pin; +use core::task::{Context, Poll}; +use tokio::sync::oneshot; 
+use wasmtime::StoreContextMut; +use wasmtime::component::{ + Accessor, Destination, FutureProducer, Linker, StreamProducer, StreamResult, +}; // Default buffer capacity to use for reads of byte-sized values. const DEFAULT_BUFFER_CAPACITY: usize = 8192; +struct StreamEmptyProducer(PhantomData T>); + +impl Default for StreamEmptyProducer { + fn default() -> Self { + Self(PhantomData) + } +} + +impl StreamProducer for StreamEmptyProducer { + type Item = T; + type Buffer = Option; + + fn poll_produce<'a>( + self: Pin<&mut Self>, + _: &mut Context<'_>, + _: StoreContextMut<'a, D>, + _: &'a mut Destination<'a, Self::Item, Self::Buffer>, + _: bool, + ) -> Poll> { + Poll::Ready(Ok(StreamResult::Dropped)) + } +} + +struct FutureReadyProducer(T); + +impl FutureProducer for FutureReadyProducer +where + T: Send + 'static, +{ + type Item = T; + + async fn produce(self, _: &Accessor) -> wasmtime::Result { + Ok(self.0) + } +} + +struct FutureOneshotProducer(oneshot::Receiver); + +impl FutureProducer for FutureOneshotProducer +where + T: Send + 'static, +{ + type Item = T; + + async fn produce(self, _: &Accessor) -> wasmtime::Result { + self.0.await.context("oneshot sender dropped") + } +} + /// Add all WASI interfaces from this module into the `linker` provided. 
/// /// This function will add all interfaces implemented by this module to the diff --git a/crates/wasi/src/p3/sockets/host/types/tcp.rs b/crates/wasi/src/p3/sockets/host/types/tcp.rs index 532424cf0963..0b755cde134a 100644 --- a/crates/wasi/src/p3/sockets/host/types/tcp.rs +++ b/crates/wasi/src/p3/sockets/host/types/tcp.rs @@ -1,25 +1,27 @@ use super::is_addr_allowed; -use crate::TrappableError; -use crate::p3::DEFAULT_BUFFER_CAPACITY; use crate::p3::bindings::sockets::types::{ Duration, ErrorCode, HostTcpSocket, HostTcpSocketWithStore, IpAddressFamily, IpSocketAddress, TcpSocket, }; -use crate::p3::sockets::{SocketResult, WasiSockets}; +use crate::p3::sockets::{SocketError, SocketResult, WasiSockets}; +use crate::p3::{ + DEFAULT_BUFFER_CAPACITY, FutureOneshotProducer, FutureReadyProducer, StreamEmptyProducer, +}; use crate::sockets::{NonInheritedOptions, SocketAddrUse, SocketAddressFamily, WasiSocketsCtxView}; -use anyhow::Context; +use anyhow::Context as _; use bytes::BytesMut; +use core::pin::Pin; +use core::task::{Context, Poll}; use io_lifetimes::AsSocketlike as _; -use std::future::poll_fn; use std::io::Cursor; use std::net::{Shutdown, SocketAddr}; -use std::pin::pin; use std::sync::Arc; -use std::task::Poll; use tokio::net::{TcpListener, TcpStream}; +use tokio::sync::oneshot; +use wasmtime::StoreContextMut; use wasmtime::component::{ - Accessor, AccessorTask, FutureReader, FutureWriter, GuardedFutureWriter, GuardedStreamWriter, - Resource, ResourceTable, StreamReader, StreamWriter, + Accessor, Destination, FutureReader, Resource, ResourceTable, Source, StreamConsumer, + StreamProducer, StreamReader, StreamResult, }; fn get_socket<'a>( @@ -29,7 +31,7 @@ fn get_socket<'a>( table .get(socket) .context("failed to get socket resource from table") - .map_err(TrappableError::trap) + .map_err(SocketError::trap) } fn get_socket_mut<'a>( @@ -39,106 +41,202 @@ fn get_socket_mut<'a>( table .get_mut(socket) .context("failed to get socket resource from table") - 
.map_err(TrappableError::trap) + .map_err(SocketError::trap) } -struct ListenTask { +struct ListenStreamProducer { listener: Arc, family: SocketAddressFamily, - tx: StreamWriter>, options: NonInheritedOptions, + getter: for<'a> fn(&'a mut T) -> WasiSocketsCtxView<'a>, } -impl AccessorTask> for ListenTask { - async fn run(self, store: &Accessor) -> wasmtime::Result<()> { - let mut tx = GuardedStreamWriter::new(store, self.tx); - while !tx.is_closed() { - let Some(res) = ({ - let mut accept = pin!(self.listener.accept()); - let mut tx = pin!(tx.watch_reader()); - poll_fn(|cx| match tx.as_mut().poll(cx) { - Poll::Ready(()) => return Poll::Ready(None), - Poll::Pending => accept.as_mut().poll(cx).map(Some), - }) - .await - }) else { - return Ok(()); - }; - let socket = TcpSocket::new_accept(res.map(|p| p.0), &self.options, self.family) - .unwrap_or_else(|err| TcpSocket::new_error(err, self.family)); - let socket = store.with(|mut view| { - view.get() - .table - .push(socket) - .context("failed to push socket resource to table") - })?; - if let Some(socket) = tx.write(Some(socket)).await { - debug_assert!(tx.is_closed()); - store.with(|mut view| { - view.get() - .table - .delete(socket) - .context("failed to delete socket resource from table") - })?; - return Ok(()); - } - } - Ok(()) +impl StreamProducer for ListenStreamProducer +where + D: 'static, +{ + type Item = Resource; + type Buffer = Option; + + fn poll_produce<'a>( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + mut store: StoreContextMut<'a, D>, + dst: &'a mut Destination<'a, Self::Item, Self::Buffer>, + finish: bool, + ) -> Poll> { + let res = match self.listener.poll_accept(cx) { + Poll::Ready(res) => res.map(|(stream, _)| stream), + Poll::Pending if finish => return Poll::Ready(Ok(StreamResult::Cancelled)), + Poll::Pending => return Poll::Pending, + }; + let socket = TcpSocket::new_accept(res, &self.options, self.family) + .unwrap_or_else(|err| TcpSocket::new_error(err, self.family)); + let 
WasiSocketsCtxView { table, .. } = (self.getter)(store.data_mut()); + let socket = table + .push(socket) + .context("failed to push socket resource to table")?; + dst.set_buffer(Some(socket)); + Poll::Ready(Ok(StreamResult::Completed)) } } -struct ReceiveTask { +struct ReceiveStreamProducer { stream: Arc, - data_tx: StreamWriter, - result_tx: FutureWriter>, + result: Option>>, } -impl AccessorTask> for ReceiveTask { - async fn run(self, store: &Accessor) -> wasmtime::Result<()> { - let mut buf = BytesMut::with_capacity(DEFAULT_BUFFER_CAPACITY); - let mut data_tx = GuardedStreamWriter::new(store, self.data_tx); - let result_tx = GuardedFutureWriter::new(store, self.result_tx); - let res = loop { - match self.stream.try_read_buf(&mut buf) { - Ok(0) => { - break Ok(()); - } - Ok(..) => { - buf = data_tx.write_all(Cursor::new(buf)).await.into_inner(); - if data_tx.is_closed() { - break Ok(()); +impl Drop for ReceiveStreamProducer { + fn drop(&mut self) { + self.close(Ok(())) + } +} + +impl ReceiveStreamProducer { + fn close(&mut self, res: Result<(), ErrorCode>) { + if let Some(tx) = self.result.take() { + _ = self + .stream + .as_socketlike_view::() + .shutdown(Shutdown::Read); + _ = tx.send(res); + } + } +} + +impl StreamProducer for ReceiveStreamProducer { + type Item = u8; + type Buffer = Cursor; + + fn poll_produce<'a>( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + store: StoreContextMut<'a, D>, + dst: &'a mut Destination<'a, Self::Item, Self::Buffer>, + finish: bool, + ) -> Poll> { + let res = 'result: { + if let Some(mut dst) = dst.as_direct_destination(store) { + let buf = dst.remaining(); + if !buf.is_empty() { + loop { + match self.stream.try_read(buf) { + Ok(0) => break 'result Ok(()), + Ok(n) => { + dst.mark_written(n); + return Poll::Ready(Ok(StreamResult::Completed)); + } + Err(err) if err.kind() == std::io::ErrorKind::WouldBlock => { + match self.stream.poll_read_ready(cx) { + Poll::Ready(Ok(())) => continue, + Poll::Ready(Err(err)) => break 
'result Err(err.into()), + Poll::Pending if finish => { + return Poll::Ready(Ok(StreamResult::Cancelled)); + } + Poll::Pending => return Poll::Pending, + } + } + Err(err) => break 'result Err(err.into()), + } } - buf.clear(); } - Err(err) if err.kind() == std::io::ErrorKind::WouldBlock => { - let Some(res) = ({ - let mut readable = pin!(self.stream.readable()); - let mut tx = pin!(data_tx.watch_reader()); - poll_fn(|cx| match tx.as_mut().poll(cx) { - Poll::Ready(()) => return Poll::Ready(None), - Poll::Pending => readable.as_mut().poll(cx).map(Some), - }) - .await - }) else { - break Ok(()); - }; - if let Err(err) = res { - break Err(err.into()); + } + + let mut buf = dst.take_buffer().into_inner(); + buf.clear(); + buf.reserve(DEFAULT_BUFFER_CAPACITY); + loop { + match self.stream.try_read_buf(&mut buf) { + Ok(0) => break 'result Ok(()), + Ok(..) => { + dst.set_buffer(Cursor::new(buf)); + return Poll::Ready(Ok(StreamResult::Completed)); + } + Err(err) if err.kind() == std::io::ErrorKind::WouldBlock => { + match self.stream.poll_read_ready(cx) { + Poll::Ready(Ok(())) => continue, + Poll::Ready(Err(err)) => break 'result Err(err.into()), + Poll::Pending if finish => { + return Poll::Ready(Ok(StreamResult::Cancelled)); + } + Poll::Pending => return Poll::Pending, + } } + Err(err) => break 'result Err(err.into()), } - Err(err) => { - break Err(err.into()); + } + }; + self.close(res); + Poll::Ready(Ok(StreamResult::Dropped)) + } +} + +struct SendStreamConsumer { + stream: Arc, + result: Option>>, +} + +impl Drop for SendStreamConsumer { + fn drop(&mut self) { + self.close(Ok(())) + } +} + +impl SendStreamConsumer { + fn close(&mut self, res: Result<(), ErrorCode>) { + if let Some(tx) = self.result.take() { + _ = self + .stream + .as_socketlike_view::() + .shutdown(Shutdown::Write); + _ = tx.send(res); + } + } +} + +impl StreamConsumer for SendStreamConsumer { + type Item = u8; + + fn poll_consume( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + store: 
StoreContextMut, + src: &mut Source, + finish: bool, + ) -> Poll> { + let mut src = src.as_direct_source(store); + let res = 'result: { + if src.remaining().is_empty() { + match self.stream.poll_write_ready(cx) { + Poll::Ready(Ok(())) => return Poll::Ready(Ok(StreamResult::Completed)), + Poll::Ready(Err(err)) => break 'result Err(err.into()), + Poll::Pending if finish => return Poll::Ready(Ok(StreamResult::Cancelled)), + Poll::Pending => return Poll::Pending, + } + } + loop { + match self.stream.try_write(src.remaining()) { + Ok(n) => { + debug_assert!(n > 0); + src.mark_read(n); + return Poll::Ready(Ok(StreamResult::Completed)); + } + Err(err) if err.kind() == std::io::ErrorKind::WouldBlock => { + match self.stream.poll_write_ready(cx) { + Poll::Ready(Ok(())) => continue, + Poll::Ready(Err(err)) => break 'result Err(err.into()), + Poll::Pending if finish => { + return Poll::Ready(Ok(StreamResult::Cancelled)); + } + Poll::Pending => return Poll::Pending, + } + } + Err(err) => break 'result Err(err.into()), } } }; - _ = self - .stream - .as_socketlike_view::() - .shutdown(Shutdown::Read); - drop(self.stream); - drop(data_tx); - result_tx.write(res).await; - Ok(()) + self.close(res); + Poll::Ready(Ok(StreamResult::Dropped)) } } @@ -152,8 +250,8 @@ impl HostTcpSocketWithStore for WasiSockets { if !is_addr_allowed(store, local_address, SocketAddrUse::TcpBind).await { return Err(ErrorCode::AccessDenied.into()); } - store.with(|mut view| { - let socket = get_socket_mut(view.get().table, &socket)?; + store.with(|mut store| { + let socket = get_socket_mut(store.get().table, &socket)?; socket.start_bind(local_address)?; socket.finish_bind()?; Ok(()) @@ -169,16 +267,17 @@ impl HostTcpSocketWithStore for WasiSockets { if !is_addr_allowed(store, remote_address, SocketAddrUse::TcpConnect).await { return Err(ErrorCode::AccessDenied.into()); } - let sock = store.with(|mut view| -> SocketResult<_> { - let socket = get_socket_mut(view.get().table, &socket)?; - 
Ok(socket.start_connect(&remote_address)?) + let sock = store.with(|mut store| { + let socket = get_socket_mut(store.get().table, &socket)?; + let socket = socket.start_connect(&remote_address)?; + SocketResult::Ok(socket) })?; // FIXME: handle possible cancellation of the outer `connect` // https://github.com/bytecodealliance/wasmtime/pull/11291#discussion_r2223917986 let res = sock.connect(remote_address).await; - store.with(|mut view| -> SocketResult<_> { - let socket = get_socket_mut(view.get().table, &socket)?; + store.with(|mut store| { + let socket = get_socket_mut(store.get().table, &socket)?; socket.finish_connect(res)?; Ok(()) }) @@ -188,98 +287,85 @@ impl HostTcpSocketWithStore for WasiSockets { store: &Accessor, socket: Resource, ) -> SocketResult>> { - store.with(|mut view| { - let socket = get_socket_mut(view.get().table, &socket)?; + let instance = store.instance(); + let getter = store.getter(); + store.with(|mut store| { + let socket = get_socket_mut(store.get().table, &socket)?; socket.start_listen()?; socket.finish_listen()?; let listener = socket.tcp_listener_arc().unwrap().clone(); let family = socket.address_family(); let options = socket.non_inherited_options().clone(); - let (tx, rx) = view - .instance() - .stream(&mut view) - .context("failed to create stream") - .map_err(TrappableError::trap)?; - let task = ListenTask { - listener, - family, - tx, - options, - }; - view.spawn(task); - Ok(rx) + Ok(StreamReader::new( + instance, + &mut store, + ListenStreamProducer { + listener, + family, + options, + getter, + }, + )) }) } async fn send( store: &Accessor, socket: Resource, - mut data: StreamReader, + data: StreamReader, ) -> SocketResult<()> { - let stream = store.with(|mut view| -> SocketResult<_> { - let sock = get_socket(view.get().table, &socket)?; + let (result_tx, result_rx) = oneshot::channel(); + store.with(|mut store| { + let sock = get_socket(store.get().table, &socket)?; let stream = sock.tcp_stream_arc()?; - 
Ok(Arc::clone(stream)) + let stream = Arc::clone(stream); + data.pipe( + store, + SendStreamConsumer { + stream, + result: Some(result_tx), + }, + ); + SocketResult::Ok(()) })?; - let mut buf = Vec::with_capacity(DEFAULT_BUFFER_CAPACITY); - let mut result = Ok(()); - while !data.is_closed() { - buf = data.read(store, buf).await; - let mut slice = buf.as_slice(); - while !slice.is_empty() { - match stream.try_write(&slice) { - Ok(n) => slice = &slice[n..], - Err(err) if err.kind() == std::io::ErrorKind::WouldBlock => { - if let Err(err) = stream.writable().await { - result = Err(ErrorCode::from(err).into()); - break; - } - } - Err(err) => { - result = Err(ErrorCode::from(err).into()); - break; - } - } - } - buf.clear(); - } - _ = stream - .as_socketlike_view::() - .shutdown(Shutdown::Write); - result + result_rx + .await + .context("oneshot sender dropped") + .map_err(SocketError::trap)??; + Ok(()) } async fn receive( store: &Accessor, socket: Resource, ) -> wasmtime::Result<(StreamReader, FutureReader>)> { - store.with(|mut view| { - let instance = view.instance(); - let (mut data_tx, data_rx) = instance - .stream(&mut view) - .context("failed to create stream")?; - let socket = get_socket_mut(view.get().table, &socket)?; + let instance = store.instance(); + store.with(|mut store| { + let socket = get_socket_mut(store.get().table, &socket)?; match socket.start_receive() { Some(stream) => { - let stream = stream.clone(); - let (result_tx, result_rx) = instance - .future(&mut view, || unreachable!()) - .context("failed to create future")?; - view.spawn(ReceiveTask { - stream, - data_tx, - result_tx, - }); - Ok((data_rx, result_rx)) - } - None => { - let (mut result_tx, result_rx) = instance - .future(&mut view, || Err(ErrorCode::InvalidState)) - .context("failed to create future")?; - result_tx.close(&mut view); - data_tx.close(&mut view); - Ok((data_rx, result_rx)) + let stream = Arc::clone(stream); + let (result_tx, result_rx) = oneshot::channel(); + Ok(( + 
StreamReader::new( + instance, + &mut store, + ReceiveStreamProducer { + stream, + result: Some(result_tx), + }, + ), + FutureReader::new(instance, &mut store, FutureOneshotProducer(result_rx)), + )) } + None => Ok(( + StreamReader::new(instance, &mut store, StreamEmptyProducer::default()), + FutureReader::new( + instance, + &mut store, + FutureReadyProducer(Err(ErrorCode::InvalidState)), + ), + )), } }) } @@ -293,7 +379,7 @@ impl HostTcpSocket for WasiSocketsCtxView<'_> { .table .push(socket) .context("failed to push socket resource to table") - .map_err(TrappableError::trap)?; + .map_err(SocketError::trap)?; Ok(resource) } diff --git a/crates/wasmtime/src/runtime/component/concurrent.rs b/crates/wasmtime/src/runtime/component/concurrent.rs index 8cffc1c5be7e..867be30849e8 100644 --- a/crates/wasmtime/src/runtime/component/concurrent.rs +++ b/crates/wasmtime/src/runtime/component/concurrent.rs @@ -91,9 +91,9 @@ use wasmtime_environ::component::{ pub use abort::JoinHandle; pub use futures_and_streams::{ - ErrorContext, FutureReader, FutureWriter, GuardedFutureReader, GuardedFutureWriter, - GuardedStreamReader, GuardedStreamWriter, ReadBuffer, StreamReader, StreamWriter, VecBuffer, - WriteBuffer, + Destination, DirectDestination, DirectSource, ErrorContext, FutureConsumer, FutureProducer, + FutureReader, GuardedFutureReader, GuardedStreamReader, ReadBuffer, Source, StreamConsumer, + StreamProducer, StreamReader, StreamResult, VecBuffer, WriteBuffer, }; pub(crate) use futures_and_streams::{ ResourcePair, lower_error_context_to_index, lower_future_to_index, lower_stream_to_index, @@ -457,32 +457,32 @@ where }) } + /// Returns the getter this accessor is using to project from `T` into + /// `D::Data`. + pub fn getter(&self) -> fn(&mut T) -> D::Data<'_> { + self.get_data + } + /// Changes this accessor to access `D2` instead of the current type /// parameter `D`. /// /// This changes the underlying data access from `T` to `D2::Data<'_>`. 
/// - /// Note that this is not a public or recommended API because it's easy to - /// cause panics with this by having two `Accessor` values live at the same - /// time. The returned `Accessor` does not refer to this `Accessor` meaning - /// that both can be used. You could, for example, call `Accessor::with` - /// simultaneously on both. That would cause a panic though. - /// - /// In short while there's nothing unsafe about this it's a footgun. It's - /// here for bindings generation where the provided accessor is transformed - /// into a new accessor and then this returned accessor is passed to - /// implementations. + /// # Panics /// - /// Note that one possible fix for this would be a lifetime parameter on - /// `Accessor` itself so the returned value could borrow from the original - /// value (or this could be `self`-by-value instead of `&mut self`) but in - /// attempting that it was found to be a bit too onerous in terms of - /// plumbing things around without a whole lot of benefit. + /// When using this API the returned value is disconnected from `&self` and + /// the lifetime binding the `self` argument. An `Accessor` only works + /// within the context of the closure or async closure that it was + /// originally given to, however. This means that due to the fact that the + /// returned value has no lifetime connection it's possible to use the + /// accessor outside of `&self`, the original accessor, and panic. /// - /// In short, this works, but must be treated with care. The current main - /// user, bindings generation, treats this with care. - #[doc(hidden)] - pub fn with_data(&self, get_data: fn(&mut T) -> D2::Data<'_>) -> Accessor { + /// The returned value should only be used within the scope of the original + /// `Accessor` that `self` refers to. 
+ pub fn with_getter( + &self, + get_data: fn(&mut T) -> D2::Data<'_>, + ) -> Accessor { Accessor { token: self.token, get_data, @@ -1127,12 +1127,9 @@ impl Instance { // Create an "abortable future" here where internally the future will // hook calls to poll and possibly spawn more background tasks on each // iteration. - let (handle, future) = - JoinHandle::run(async move { HostTaskOutput::Result(task.run(&accessor).await) }); + let (handle, future) = JoinHandle::run(async move { task.run(&accessor).await }); self.concurrent_state_mut(store.0) - .push_future(Box::pin(async move { - future.await.unwrap_or(HostTaskOutput::Result(Ok(()))) - })); + .push_future(Box::pin(async move { future.await.unwrap_or(Ok(())) })); handle } @@ -1175,22 +1172,8 @@ impl Instance { let next = match self.set_tls(store.0, || next.as_mut().poll(cx)) { Poll::Ready(Some(output)) => { match output { - HostTaskOutput::Result(Err(e)) => return Poll::Ready(Err(e)), - HostTaskOutput::Result(Ok(())) => {} - HostTaskOutput::Function(fun) => { - // Defer calling this function to a worker fiber - // in case it involves calling a guest realloc - // function as part of a lowering operation. - // - // TODO: This isn't necessary for _all_ - // `HostOutput::Function`s, so we could optimize - // by adding another variant to `HostOutput` to - // distinguish which ones need it and which - // don't. - self.concurrent_state_mut(store.0).push_high_priority( - WorkItem::WorkerFunction(AlwaysMut::new(fun)), - ) - } + Err(e) => return Poll::Ready(Err(e)), + Ok(()) => {} } Poll::Ready(true) } @@ -2360,32 +2343,8 @@ impl Instance { let task = state.push(HostTask::new(caller_instance, Some(join_handle)))?; log::trace!("new host task child of {caller:?}: {task:?}"); - let token = StoreToken::new(store.as_context_mut()); - - // Map the output of the future to a `HostTaskOutput` responsible for - // lowering the result into the guest's stack and memory, as well as - // notifying any waiters that the task returned. 
- let mut future = Box::pin(async move { - let result = match future.await { - Some(result) => result, - // Task was cancelled; nothing left to do. - None => return HostTaskOutput::Result(Ok(())), - }; - HostTaskOutput::Function(Box::new(move |store, instance| { - let mut store = token.as_context_mut(store); - lower(store.as_context_mut(), instance, result?)?; - let state = instance.concurrent_state_mut(store.0); - state.get_mut(task)?.join_handle.take(); - Waitable::Host(task).set_event( - state, - Some(Event::Subtask { - status: Status::Returned, - }), - )?; - Ok(()) - })) - }); + let mut future = Box::pin(future); // Finally, poll the future. We can use a dummy `Waker` here because // we'll add the future to `ConcurrentState::futures` and poll it @@ -2398,10 +2357,11 @@ impl Instance { }); Ok(match poll { - Poll::Ready(output) => { + Poll::Ready(None) => unreachable!(), + Poll::Ready(Some(result)) => { // It finished immediately; lower the result and delete the // task. - output.consume(store.0, self)?; + lower(store.as_context_mut(), self, result?)?; log::trace!("delete host task {task:?} (already ready)"); self.concurrent_state_mut(store.0).delete(task)?; None @@ -2410,6 +2370,39 @@ impl Instance { // It hasn't finished yet; add the future to // `ConcurrentState::futures` so it will be polled by the event // loop and allocate a waitable handle to return to the guest. + + // Wrap the future in a closure responsible for lowering the result into + // the guest's stack and memory, as well as notifying any waiters that + // the task returned. + let future = Box::pin(async move { + let result = match future.await { + Some(result) => result?, + // Task was cancelled; nothing left to do. + None => return Ok(()), + }; + tls::get(move |store| { + // Here we schedule a task to run on a worker fiber to do + // the lowering since it may involve a call to the guest's + // realloc function. 
This is necessary because calling the + // guest while there are host embedder frames on the stack + // is unsound. + self.concurrent_state_mut(store).push_high_priority( + WorkItem::WorkerFunction(AlwaysMut::new(Box::new(move |store, _| { + lower(token.as_context_mut(store), self, result)?; + let state = self.concurrent_state_mut(store); + state.get_mut(task)?.join_handle.take(); + Waitable::Host(task).set_event( + state, + Some(Event::Subtask { + status: Status::Returned, + }), + ) + }))), + ); + Ok(()) + }) + }); + self.concurrent_state_mut(store.0).push_future(future); let handle = self.id().get_mut(store.0).guest_tables().0[caller_instance] .subtask_insert_host(task.rep())?; @@ -2467,13 +2460,14 @@ impl Instance { log::trace!("new host task child of {caller:?}: {task:?}"); - // Map the output of the future to a `HostTaskOutput` which will take - // care of stashing the result in `GuestTask::result` and resuming this - // fiber when the host task completes. - let mut future = Box::pin(future.map(move |result| { - HostTaskOutput::Function(Box::new(move |store, instance| { - let state = instance.concurrent_state_mut(store); - state.get_mut(caller)?.result = Some(Box::new(result?) as _); + // Wrap the future in a closure which will take care of stashing the + // result in `GuestTask::result` and resuming this fiber when the host + // task completes. + let mut future = Box::pin(async move { + let result = future.await?; + tls::get(move |store| { + let state = self.concurrent_state_mut(store); + state.get_mut(caller)?.result = Some(Box::new(result) as _); Waitable::Host(task).set_event( state, @@ -2483,8 +2477,8 @@ impl Instance { )?; Ok(()) - })) - })) as HostTaskFuture; + }) + }) as HostTaskFuture; // Finally, poll the future. 
We can use a dummy `Waker` here because // we'll add the future to `ConcurrentState::futures` and poll it @@ -2497,17 +2491,16 @@ impl Instance { }); match poll { - Poll::Ready(output) => { - // It completed immediately; run the `HostTaskOutput` function - // to stash the result and delete the task. - output.consume(store, self)?; + Poll::Ready(result) => { + // It completed immediately; check the result and delete the task. + result?; log::trace!("delete host task {task:?} (already ready)"); self.concurrent_state_mut(store).delete(task)?; } Poll::Pending => { // It did not complete immediately; add it to // `ConcurrentState::futures` so it will be polled via the event - // loop, then use `GuestTask::sync_call_set` to wait for the + // loop; then use `GuestTask::sync_call_set` to wait for the // task to complete, suspending the current fiber until it does // so. let state = self.concurrent_state_mut(store); @@ -3413,27 +3406,7 @@ impl VMComponentAsyncStore for StoreInner { } } -/// Represents the output of a host task or background task. -pub(crate) enum HostTaskOutput { - /// A plain result - Result(Result<()>), - /// A function to be run after the future completes (e.g. post-processing - /// which requires access to the store and instance). - Function(Box Result<()> + Send>), -} - -impl HostTaskOutput { - /// Retrieve the result of the host or background task, running the - /// post-processing function if present. - fn consume(self, store: &mut dyn VMStore, instance: Instance) -> Result<()> { - match self { - Self::Function(fun) => fun(store, instance), - Self::Result(result) => result, - } - } -} - -type HostTaskFuture = Pin + Send + 'static>>; +type HostTaskFuture = Pin> + Send + 'static>>; /// Represents the state of a pending host task. 
struct HostTask { diff --git a/crates/wasmtime/src/runtime/component/concurrent/futures_and_streams.rs b/crates/wasmtime/src/runtime/component/concurrent/futures_and_streams.rs index 3da9eedf4c05..9ae9dfc5e55f 100644 --- a/crates/wasmtime/src/runtime/component/concurrent/futures_and_streams.rs +++ b/crates/wasmtime/src/runtime/component/concurrent/futures_and_streams.rs @@ -1,6 +1,6 @@ use super::table::{TableDebug, TableId}; use super::{Event, GlobalErrorContextRefCount, Waitable, WaitableCommon}; -use crate::component::concurrent::{ConcurrentState, WorkItem}; +use crate::component::concurrent::{Accessor, ConcurrentState, WorkItem, tls}; use crate::component::func::{self, LiftContext, LowerContext, Options}; use crate::component::matching::InstanceType; use crate::component::values::{ErrorContextAny, FutureAny, StreamAny}; @@ -9,9 +9,10 @@ use crate::store::{StoreOpaque, StoreToken}; use crate::vm::component::{ComponentInstance, HandleTable, TransmitLocalState}; use crate::vm::{AlwaysMut, VMStore}; use crate::{AsContextMut, StoreContextMut, ValRaw}; -use anyhow::{Context, Result, anyhow, bail}; +use anyhow::{Context as _, Result, anyhow, bail}; use buffers::Extender; use buffers::UntypedWriteBuffer; +use futures::FutureExt; use futures::channel::oneshot; use std::boxed::Box; use std::fmt; @@ -21,8 +22,8 @@ use std::marker::PhantomData; use std::mem::{self, MaybeUninit}; use std::pin::Pin; use std::string::{String, ToString}; -use std::sync::Arc; -use std::task::{Poll, Waker}; +use std::sync::{Arc, Mutex}; +use std::task::{Context, Poll, Waker}; use std::vec::Vec; use wasmtime_environ::component::{ CanonicalAbiInfo, ComponentTypes, InterfaceType, OptionsIndex, @@ -108,22 +109,6 @@ impl TransmitIndex { } } -/// Action to take after writing -enum PostWrite { - /// Continue performing writes - Continue, - /// Drop the channel post-write - Drop, -} - -/// Represents the result of a host-initiated stream or future read or write. 
-struct HostResult { - /// The buffer provided when reading or writing. - buffer: B, - /// Whether the other end of the stream or future has been dropped. - dropped: bool, -} - /// Retrieve the payload type of the specified stream or future, or `None` if it /// has no payload type. fn payload(ty: TransmitIndex, types: &Arc) -> Option { @@ -146,198 +131,71 @@ fn get_mut_by_index_from( } } -/// Complete a write initiated by a host-owned future or stream by matching it -/// with the specified `Reader`. -fn accept_reader, U: 'static>( +fn lower, U: 'static>( mut store: StoreContextMut, instance: Instance, - reader: Reader, - mut buffer: B, - kind: TransmitKind, -) -> Result<(HostResult, ReturnCode)> { - Ok(match reader { - Reader::Guest { - options, - ty, - address, - count, - } => { - let types = instance.id().get(store.0).component().types().clone(); - let count = buffer.remaining().len().min(count); - - let lower = &mut if T::MAY_REQUIRE_REALLOC { - LowerContext::new - } else { - LowerContext::new_without_realloc - }(store.as_context_mut(), options, &types, instance); - - if address % usize::try_from(T::ALIGN32)? != 0 { - bail!("read pointer not aligned"); - } - lower - .as_slice_mut() - .get_mut(address..) 
- .and_then(|b| b.get_mut(..T::SIZE32 * count)) - .ok_or_else(|| anyhow::anyhow!("read pointer out of bounds of memory"))?; - - if let Some(ty) = payload(ty, &types) { - T::linear_store_list_to_memory(lower, ty, address, &buffer.remaining()[..count])?; - } - - buffer.skip(count); - ( - HostResult { - buffer, - dropped: false, - }, - ReturnCode::completed(kind, count.try_into().unwrap()), - ) - } - Reader::Host { accept } => { - let count = buffer.remaining().len(); - let mut untyped = UntypedWriteBuffer::new(&mut buffer); - let count = accept(&mut untyped, count); - ( - HostResult { - buffer, - dropped: false, - }, - ReturnCode::completed(kind, count.try_into().unwrap()), - ) + options: &Options, + ty: TransmitIndex, + address: usize, + count: usize, + buffer: &mut B, +) -> Result<()> { + let types = instance.id().get(store.0).component().types().clone(); + let count = buffer.remaining().len().min(count); + + let lower = &mut if T::MAY_REQUIRE_REALLOC { + LowerContext::new + } else { + LowerContext::new_without_realloc + }(store.as_context_mut(), options, &types, instance); + + if address % usize::try_from(T::ALIGN32)? != 0 { + bail!("read pointer not aligned"); + } + lower + .as_slice_mut() + .get_mut(address..) + .and_then(|b| b.get_mut(..T::SIZE32 * count)) + .ok_or_else(|| anyhow::anyhow!("read pointer out of bounds of memory"))?; + + if let Some(ty) = payload(ty, &types) { + T::linear_store_list_to_memory(lower, ty, address, &buffer.remaining()[..count])?; + } + + buffer.skip(count); + + Ok(()) +} + +fn lift, U>( + lift: &mut LiftContext<'_>, + ty: Option, + buffer: &mut B, + address: usize, + count: usize, +) -> Result<()> { + let count = count.min(buffer.remaining_capacity()); + if T::IS_RUST_UNIT_TYPE { + // SAFETY: `T::IS_RUST_UNIT_TYPE` is only true for `()`, a + // zero-sized type, so `MaybeUninit::uninit().assume_init()` + // is a valid way to populate the zero-sized buffer. 
+ buffer.extend( + iter::repeat_with(|| unsafe { MaybeUninit::uninit().assume_init() }).take(count), + ) + } else { + let ty = ty.unwrap(); + if address % usize::try_from(T::ALIGN32)? != 0 { + bail!("write pointer not aligned"); } - Reader::End => ( - HostResult { - buffer, - dropped: true, - }, - ReturnCode::Dropped(0), - ), - }) -} - -/// Complete a read initiated by a host-owned future or stream by matching it with the -/// specified `Writer`. -fn accept_writer, U>( - writer: Writer, - mut buffer: B, - kind: TransmitKind, -) -> Result<(HostResult, ReturnCode)> { - Ok(match writer { - Writer::Guest { - lift, - ty, - address, - count, - } => { - let count = count.min(buffer.remaining_capacity()); - if T::IS_RUST_UNIT_TYPE { - // SAFETY: `T::IS_RUST_UNIT_TYPE` is only true for `()`, a - // zero-sized type, so `MaybeUninit::uninit().assume_init()` - // is a valid way to populate the zero-sized buffer. - buffer.extend( - iter::repeat_with(|| unsafe { MaybeUninit::uninit().assume_init() }) - .take(count), - ) - } else { - let ty = ty.unwrap(); - if address % usize::try_from(T::ALIGN32)? != 0 { - bail!("write pointer not aligned"); - } - lift.memory() - .get(address..) - .and_then(|b| b.get(..T::SIZE32 * count)) - .ok_or_else(|| anyhow::anyhow!("write pointer out of bounds of memory"))?; + lift.memory() + .get(address..) + .and_then(|b| b.get(..T::SIZE32 * count)) + .ok_or_else(|| anyhow::anyhow!("write pointer out of bounds of memory"))?; - let list = &WasmList::new(address, count, lift, ty)?; - T::linear_lift_into_from_memory(lift, list, &mut Extender(&mut buffer))? 
- } - ( - HostResult { - buffer, - dropped: false, - }, - ReturnCode::completed(kind, count.try_into().unwrap()), - ) - } - Writer::Host { - buffer: input, - count, - } => { - let count = count.min(buffer.remaining_capacity()); - buffer.move_from(input.get_mut::(), count); - ( - HostResult { - buffer, - dropped: false, - }, - ReturnCode::completed(kind, count.try_into().unwrap()), - ) - } - Writer::End => ( - HostResult { - buffer, - dropped: true, - }, - ReturnCode::Dropped(0), - ), - }) -} - -/// Return a `Future` which will resolve once the reader end corresponding to -/// the specified writer end of a future or stream is dropped. -async fn watch_reader(accessor: impl AsAccessor, instance: Instance, id: TableId) { - future::poll_fn(|cx| { - accessor - .as_accessor() - .with(|mut access| { - let concurrent_state = instance.concurrent_state_mut(access.as_context_mut().0); - let state_id = concurrent_state.get_mut(id)?.state; - let state = concurrent_state.get_mut(state_id)?; - anyhow::Ok(if matches!(&state.read, ReadState::Dropped) { - Poll::Ready(()) - } else { - state.reader_watcher = Some(cx.waker().clone()); - Poll::Pending - }) - }) - .unwrap_or(Poll::Ready(())) - }) - .await -} - -/// Return a `Future` which will resolve once the writer end corresponding to -/// the specified reader end of a future or stream is dropped. -async fn watch_writer(accessor: impl AsAccessor, instance: Instance, id: TableId) { - future::poll_fn(|cx| { - accessor - .as_accessor() - .with(|mut access| { - let concurrent_state = instance.concurrent_state_mut(access.as_context_mut().0); - let state_id = concurrent_state.get_mut(id)?.state; - let state = concurrent_state.get_mut(state_id)?; - anyhow::Ok( - if matches!( - &state.write, - WriteState::Dropped - | WriteState::GuestReady { - post_write: PostWrite::Drop, - .. - } - | WriteState::HostReady { - post_write: PostWrite::Drop, - .. 
- } - ) { - Poll::Ready(()) - } else { - state.writer_watcher = Some(cx.waker().clone()); - Poll::Pending - }, - ) - }) - .unwrap_or(Poll::Ready(())) - }) - .await + let list = &WasmList::new(address, count, lift, ty)?; + T::linear_lift_into_from_memory(lift, list, &mut Extender(buffer))? + } + Ok(()) } /// Represents the state associated with an error context @@ -355,200 +213,549 @@ pub(super) struct FlatAbi { pub(super) align: u32, } -/// Represents the writable end of a Component Model `future`. -/// -/// Note that `FutureWriter` instances must be disposed of using either `write` -/// or `close`; otherwise the in-store representation will leak and the reader -/// end will hang indefinitely. Consider using [`GuardedFutureWriter`] to -/// ensure that disposal happens automatically. -pub struct FutureWriter { - default: fn() -> T, - id: TableId, +/// Represents the buffer for a host- or guest-initiated stream read. +pub struct Destination<'a, T, B> { instance: Instance, + id: TableId, + buffer: &'a mut B, + _phantom: PhantomData T>, } -impl FutureWriter { - fn new(default: fn() -> T, id: TableId, instance: Instance) -> Self { - Self { - default, - id, - instance, - } +impl<'a, T, B> Destination<'a, T, B> { + /// Take the buffer out of `self`, leaving a default-initialized one in its + /// place. + /// + /// This can be useful for reusing the previously-stored buffer's capacity + /// instead of allocating a fresh one. + pub fn take_buffer(&mut self) -> B + where + B: Default, + { + mem::take(self.buffer) } - /// Write the specified value to this `future`. + /// Store the specified buffer in `self`. /// - /// The returned `Future` will yield `true` if the read end accepted the - /// value; otherwise it will return `false`, meaning the read end was dropped - /// before the value could be delivered. 
+ /// Any items contained in the buffer will be delivered to the reader after + /// the `StreamProducer::poll_produce` call to which this `Destination` was + /// passed returns (unless overwritten by another call to `set_buffer`). /// - /// # Panics + /// If items are stored via a buffer _and_ written via a `DirectDestination` + /// view of `self`, then the items in the buffer will be delivered after the + /// ones written using `DirectDestination`. + pub fn set_buffer(&mut self, buffer: B) { + *self.buffer = buffer; + } + + /// Return the remaining number of items the current read has capacity to + /// accept, if known. /// - /// Panics if the store that the [`Accessor`] is derived from does not own - /// this future. - pub async fn write(self, accessor: impl AsAccessor, value: T) -> bool - where - T: func::Lower + Send + Sync + 'static, - { - self.guard(accessor).write(value).await + /// This will return `Some(_)` if the reader is a guest; it will return + /// `None` if the reader is the host. + /// + /// Note that, if this returns `None(0)`, the producer must still attempt to + /// produce at least one item if the value of `finish` passed to + /// `StreamProducer::poll_produce` is false. In that case, the reader is + /// effectively asking when the producer will be able to produce items + /// without blocking (or reach a terminal state such as end-of-stream), + /// meaning the next non-zero read must complete without blocking. + pub fn remaining(&self, mut store: impl AsContextMut) -> Option { + let transmit = self + .instance + .concurrent_state_mut(store.as_context_mut().0) + .get_mut(self.id) + .unwrap(); + + if let &ReadState::GuestReady { count, .. } = &transmit.read { + let &WriteState::HostReady { guest_offset, .. } = &transmit.write else { + unreachable!() + }; + + Some(count - guest_offset) + } else { + None + } } +} - /// Mut-ref signature instead of by-value signature for - /// `GuardedFutureWriter` to more easily call. 
- async fn write_(&mut self, accessor: impl AsAccessor, value: T) -> bool - where - T: func::Lower + Send + Sync + 'static, - { - let accessor = accessor.as_accessor(); +impl<'a, B> Destination<'a, u8, B> { + /// Return a `DirectDestination` view of `self` if the guest is reading. + pub fn as_direct_destination( + &mut self, + store: StoreContextMut<'a, D>, + ) -> Option> { + if let ReadState::GuestReady { .. } = self + .instance + .concurrent_state_mut(store.0) + .get_mut(self.id) + .unwrap() + .read + { + Some(DirectDestination { + instance: self.instance, + id: self.id, + store, + }) + } else { + None + } + } +} - let result = self +/// Represents a read from a `stream`, providing direct access to the +/// writer's buffer. +pub struct DirectDestination<'a, D: 'static> { + instance: Instance, + id: TableId, + store: StoreContextMut<'a, D>, +} + +impl DirectDestination<'_, D> { + /// Provide direct access to the writer's buffer. + pub fn remaining(&mut self) -> &mut [u8] { + let transmit = self .instance - .host_write_async(accessor, self.id, Some(value), TransmitKind::Future) - .await; + .concurrent_state_mut(self.store.as_context_mut().0) + .get_mut(self.id) + .unwrap(); + + let &ReadState::GuestReady { + address, + count, + options, + .. + } = &transmit.read + else { + unreachable!() + }; + + let &WriteState::HostReady { guest_offset, .. } = &transmit.write else { + unreachable!() + }; + + options + .memory_mut(self.store.0) + .get_mut((address + guest_offset)..) + .and_then(|b| b.get_mut(..(count - guest_offset))) + .unwrap() + } - match result { - Ok(HostResult { dropped, .. }) => !dropped, - Err(_) => todo!("guarantee buffer recovery if `host_write` fails"), + /// Mark the specified number of bytes as written to the writer's buffer. + /// + /// This will panic if the count is larger than the size of the + /// buffer returned by `Self::remaining`. 
+ pub fn mark_written(&mut self, count: usize) { + let transmit = self + .instance + .concurrent_state_mut(self.store.as_context_mut().0) + .get_mut(self.id) + .unwrap(); + + let ReadState::GuestReady { + count: read_count, .. + } = &transmit.read + else { + unreachable!() + }; + + let WriteState::HostReady { guest_offset, .. } = &mut transmit.write else { + unreachable!() + }; + + if *guest_offset + count > *read_count { + panic!("write count ({count}) must be less than or equal to read count ({read_count})") + } else { + *guest_offset += count; } } +} + +/// Represents the state of a `Stream{Producer,Consumer}`. +#[derive(Copy, Clone, Debug)] +pub enum StreamResult { + /// The operation completed normally, and the producer or consumer may be + /// able to produce or consume more items, respectively. + Completed, + /// The operation was interrupted (i.e. it wrapped up early after receiving + /// a `finish` parameter value of true in a call to `poll_produce` or + /// `poll_consume`), and the producer or consumer may be able to produce or + /// consume more items, respectively. + Cancelled, + /// The operation completed normally, but the producer or consumer will + /// _not_ able to produce or consume more items, respectively. + Dropped, +} + +/// Represents the host-owned write end of a stream. +pub trait StreamProducer: Send + 'static { + /// The payload type of this stream. + type Item; - /// Wait for the read end of this `future` is dropped. + /// The `WriteBuffer` type to use when delivering items. + type Buffer: WriteBuffer + Default; + + /// Handle a host- or guest-initiated read by delivering zero or more items + /// to the specified destination. /// - /// The [`Accessor`] provided can be acquired from [`Instance::run_concurrent`] or - /// from within a host function for example. + /// This will be called whenever the reader starts a read. 
/// - /// # Panics + /// If the implementation is able to produce one or more items immediately, + /// it should write them to `destination` and return either + /// `Poll::Ready(Ok(StreamResult::Completed))` if it expects to produce more + /// items, or `Poll::Ready(Ok(StreamResult::Dropped))` if it cannot produce + /// any more items. /// - /// Panics if the store that the [`Accessor`] is derived from does not own - /// this future. - pub async fn watch_reader(&mut self, accessor: impl AsAccessor) { - watch_reader(accessor, self.instance, self.id).await - } - - /// Close this `FutureWriter`, writing the default value. + /// If the implementation is unable to produce any items immediately, but + /// expects to do so later, and `finish` is _false_, it should store the + /// waker from `cx` for later and return `Poll::Pending` without writing + /// anything to `destination`. Later, it should alert the waker when either + /// the items arrive, the stream has ended, or an error occurs. /// - /// # Panics + /// If the implementation is unable to produce any items immediately, but + /// expects to do so later, and `finish` is _true_, it should, if possible, + /// return `Poll::Ready(Ok(StreamResult::Cancelled))` immediately without + /// writing anything to `destination`. However, that might not be possible + /// if an earlier call to `poll_produce` kicked off an asynchronous + /// operation which needs to be completed (and possibly interrupted) + /// gracefully, in which case the implementation may return `Poll::Pending` + /// and later alert the waker as described above. In other words, when + /// `finish` is true, the implementation should prioritize returning a + /// result to the reader (even if no items can be produced) rather than wait + /// indefinitely for at least one item to arrive. /// - /// Panics if the store that the [`Accessor`] is derived from does not own - /// this future. Usage of this future after calling `close` will also cause - /// a panic. 
- pub fn close(&mut self, mut store: impl AsContextMut) + /// In all of the above cases, the implementation may alternatively choose + /// to return `Err(_)` to indicate an unrecoverable error. This will cause + /// the guest (if any) to trap and render the component instance (if any) + /// unusable. The implementation should report errors that _are_ + /// recoverable by other means (e.g. by writing to a `future`) and return + /// `Poll::Ready(Ok(StreamResult::Dropped))`. + /// + /// Note that the implementation should never return `Poll::Pending` after + /// writing one or more items to `destination`; if it does, the caller will + /// trap as if `Err(_)` was returned. Conversely, it should only return + /// `Poll::Ready(Ok(StreamResult::Cancelled))` without writing any items to + /// `destination` if called with `finish` set to true. If it does so when + /// `finish` is false, the caller will trap. Additionally, it should only + /// return `Poll::Ready(Ok(StreamResult::Completed))` after writing at least + /// one item to `destination` if it has capacity to accept that item; + /// otherwise, the caller will trap. + /// + /// If more items are written to `destination` than the reader has immediate + /// capacity to accept, they will be retained in memory by the caller and + /// used to satisify future reads, in which case `poll_produce` will only be + /// called again once all those items have been delivered. This is + /// particularly important for zero-length reads, in which case the + /// implementation is expected to either: + /// + /// 1. Produce at least one item (if possible, and if `finish` is false) so + /// that it is ready to be delivered immediately upon the next + /// non-zero-length read. + /// + /// 2. Produce at least one item the next time `poll_produce` is called with + /// non-zero capacity and `finish` set to false. 
+ fn poll_produce<'a>( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + store: StoreContextMut<'a, D>, + destination: &'a mut Destination<'a, Self::Item, Self::Buffer>, + finish: bool, + ) -> Poll>; +} + +/// Represents the buffer for a host- or guest-initiated stream write. +pub struct Source<'a, T> { + instance: Instance, + id: TableId, + host_buffer: Option<&'a mut dyn WriteBuffer>, +} + +impl Source<'_, T> { + /// Accept zero or more items from the writer. + pub fn read(&mut self, mut store: S, buffer: &mut B) -> Result<()> where - T: func::Lower + Send + Sync + 'static, + T: func::Lift + 'static, + B: ReadBuffer, { - let id = mem::replace(&mut self.id, TableId::new(u32::MAX)); - let default = self.default; - self.instance - .host_drop_writer(store.as_context_mut(), id, Some(&move || Ok(default()))) - .unwrap(); + if let Some(input) = &mut self.host_buffer { + let count = input.remaining().len().min(buffer.remaining_capacity()); + buffer.move_from(*input, count); + } else { + let store = store.as_context_mut(); + let transmit = self + .instance + .concurrent_state_mut(store.0) + .get_mut(self.id)?; + + let &ReadState::HostReady { guest_offset, .. } = &transmit.read else { + unreachable!(); + }; + + let &WriteState::GuestReady { + ty, + address, + count, + options, + .. + } = &transmit.write + else { + unreachable!() + }; + + let cx = &mut LiftContext::new(store.0.store_opaque_mut(), &options, self.instance); + let ty = payload(ty, cx.types); + let old_remaining = buffer.remaining_capacity(); + lift::( + cx, + ty, + buffer, + address + (T::SIZE32 * guest_offset), + count - guest_offset, + )?; + + let transmit = self + .instance + .concurrent_state_mut(store.0) + .get_mut(self.id)?; + + let ReadState::HostReady { guest_offset, .. } = &mut transmit.read else { + unreachable!(); + }; + + *guest_offset += old_remaining - buffer.remaining_capacity(); + } + + Ok(()) } - /// Convenience method around [`Self::close`]. 
- pub fn close_with(&mut self, accessor: impl AsAccessor) + /// Return the number of items remaining to be read from the current write + /// operation. + pub fn remaining(&self, mut store: impl AsContextMut) -> usize where - T: func::Lower + Send + Sync + 'static, + T: 'static, { - accessor.as_accessor().with(|access| self.close(access)) + let transmit = self + .instance + .concurrent_state_mut(store.as_context_mut().0) + .get_mut(self.id) + .unwrap(); + + if let &WriteState::GuestReady { count, .. } = &transmit.write { + let &ReadState::HostReady { guest_offset, .. } = &transmit.read else { + unreachable!() + }; + + count - guest_offset + } else if let Some(host_buffer) = &self.host_buffer { + host_buffer.remaining().len() + } else { + unreachable!() + } } +} - /// Returns a [`GuardedFutureWriter`] which will auto-close this future on - /// drop and clean it up from the store. - /// - /// Note that the `accessor` provided must own this future and is - /// additionally transferred to the `GuardedFutureWriter` return value. - pub fn guard(self, accessor: A) -> GuardedFutureWriter - where - T: func::Lower + Send + Sync + 'static, - A: AsAccessor, - { - GuardedFutureWriter::new(accessor, self) +impl Source<'_, u8> { + /// Return a `DirectSource` view of `self`. + pub fn as_direct_source<'a, D>( + &mut self, + store: StoreContextMut<'a, D>, + ) -> DirectSource<'a, D> { + DirectSource { + instance: self.instance, + id: self.id, + store, + } } } -/// A [`FutureWriter`] paired with an [`Accessor`]. -/// -/// This is an RAII wrapper around [`FutureWriter`] that ensures it is closed -/// when dropped. This can be created through [`GuardedFutureWriter::new`] or -/// [`FutureWriter::guard`]. -pub struct GuardedFutureWriter -where - T: func::Lower + Send + Sync + 'static, - A: AsAccessor, -{ - // This field is `None` to implement the conversion from this guard back to - // `FutureWriter`. When `None` is seen in the destructor it will cause the - // destructor to do nothing. 
- writer: Option>, - accessor: A, +/// Represents a write to a `stream`, providing direct access to the +/// writer's buffer. +pub struct DirectSource<'a, D: 'static> { + instance: Instance, + id: TableId, + store: StoreContextMut<'a, D>, } -impl GuardedFutureWriter -where - T: func::Lower + Send + Sync + 'static, - A: AsAccessor, -{ - /// Create a new `GuardedFutureWriter` with the specified `accessor` and - /// `writer`. - pub fn new(accessor: A, writer: FutureWriter) -> Self { - Self { - writer: Some(writer), - accessor, - } - } +impl DirectSource<'_, D> { + /// Provide direct access to the writer's buffer. + pub fn remaining(&mut self) -> &[u8] { + let transmit = self + .instance + .concurrent_state_mut(self.store.as_context_mut().0) + .get_mut(self.id) + .unwrap(); - /// Wrapper for [`FutureWriter::write`]. - pub async fn write(mut self, value: T) -> bool - where - T: func::Lower + Send + Sync + 'static, - { - self.writer - .as_mut() - .unwrap() - .write_(&self.accessor, value) - .await - } + let &WriteState::GuestReady { + address, + count, + options, + .. + } = &transmit.write + else { + unreachable!() + }; - /// Wrapper for [`FutureWriter::watch_reader`] - pub async fn watch_reader(&mut self) { - self.writer - .as_mut() + let &ReadState::HostReady { guest_offset, .. } = &transmit.read else { + unreachable!() + }; + + options + .memory(self.store.0) + .get((address + guest_offset)..) + .and_then(|b| b.get(..(count - guest_offset))) .unwrap() - .watch_reader(&self.accessor) - .await } - /// Extracts the underlying [`FutureWriter`] from this guard, returning it - /// back. - pub fn into_future(self) -> FutureWriter { - self.into() + /// Mark the specified number of bytes as read from the writer's buffer. + /// + /// This will panic if the count is larger than the size of the buffer + /// returned by `Self::remaining`. 
+ pub fn mark_read(&mut self, count: usize) { + let transmit = self + .instance + .concurrent_state_mut(self.store.as_context_mut().0) + .get_mut(self.id) + .unwrap(); + + let WriteState::GuestReady { + count: write_count, .. + } = &transmit.write + else { + unreachable!() + }; + + let ReadState::HostReady { guest_offset, .. } = &mut transmit.read else { + unreachable!() + }; + + if *guest_offset + count > *write_count { + panic!("read count ({count}) must be less than or equal to write count ({write_count})") + } else { + *guest_offset += count; + } } } -impl From> for FutureWriter -where - T: func::Lower + Send + Sync + 'static, - A: AsAccessor, -{ - fn from(mut guard: GuardedFutureWriter) -> Self { - guard.writer.take().unwrap() - } +/// Represents the host-owned read end of a stream. +pub trait StreamConsumer: Send + 'static { + /// The payload type of this stream. + type Item; + + /// Handle a host- or guest-initiated write by accepting zero or more items + /// from the specified source. + /// + /// This will be called whenever the writer starts a write. + /// + /// If the implementation is able to consume one or more items immediately, + /// it should take them from `source` and return either + /// `Poll::Ready(Ok(StreamResult::Completed))` if it expects to be able to consume + /// more items, or `Poll::Ready(Ok(StreamResult::Dropped))` if it cannot + /// accept any more items. Alternatively, it may return `Poll::Pending` to + /// indicate that the caller should delay sending a `COMPLETED` event to the + /// writer until a later call to this function returns `Poll::Ready(_)`. + /// For more about that, see the `Backpressure` section below. + /// + /// If the implementation cannot consume any items immediately and `finish` + /// is _false_, it should store the waker from `cx` for later and return + /// `Poll::Pending` without writing anything to `destination`. 
Later, it + /// should alert the waker when either (1) the items arrive, (2) the stream + /// has ended, or (3) an error occurs. + /// + /// If the implementation cannot consume any items immediately and `finish` + /// is _true_, it should, if possible, return + /// `Poll::Ready(Ok(StreamResult::Cancelled))` immediately without taking + /// anything from `source`. However, that might not be possible if an + /// earlier call to `poll_consume` kicked off an asynchronous operation + /// which needs to be completed (and possibly interrupted) gracefully, in + /// which case the implementation may return `Poll::Pending` and later alert + /// the waker as described above. In other words, when `finish` is true, + /// the implementation should prioritize returning a result to the reader + /// (even if no items can be consumed) rather than wait indefinitely for at + /// capacity to free up. + /// + /// In all of the above cases, the implementation may alternatively choose + /// to return `Err(_)` to indicate an unrecoverable error. This will cause + /// the guest (if any) to trap and render the component instance (if any) + /// unusable. The implementation should report errors that _are_ + /// recoverable by other means (e.g. by writing to a `future`) and return + /// `Poll::Ready(Ok(StreamResult::Dropped))`. + /// + /// Note that the implementation should only return + /// `Poll::Ready(Ok(StreamResult::Cancelled))` without having taken any + /// items from `source` if called with `finish` set to true. If it does so + /// when `finish` is false, the caller will trap. Additionally, it should + /// only return `Poll::Ready(Ok(StreamResult::Completed))` after taking at + /// least one item from `source` if there is an item available; otherwise, + /// the caller will trap. If `poll_consume` is called with no items in + /// `source`, it should only return `Poll::Ready(_)` once it is able to + /// accept at least one item during the next call to `poll_consume`. 
+ /// + /// Note that any items which the implementation of this trait takes from + /// `source` become the responsibility of that implementation. For that + /// reason, an implementation which forwards items to an upstream sink + /// should reserve capacity in that sink before taking items out of + /// `source`, if possible. Alternatively, it might buffer items which can't + /// be forwarded immediately and send them once capacity is freed up. + /// + /// ## Backpressure + /// + /// As mentioned above, an implementation might choose to return + /// `Poll::Pending` after taking items from `source`, which tells the caller + /// to delay sending a `COMPLETED` event to the writer. This can be used as + /// a form of backpressure when the items are forwarded to an upstream sink + /// asynchronously. Note, however, that it's not possible to "put back" + /// items into `source` once they've been taken out, so if the upstream sink + /// is unable to accept all the items, that cannot be communicated to the + /// writer at this level of abstraction. Just as with application-specific, + /// recoverable errors, information about which items could be forwarded and + /// which could not must be communicated out-of-band, e.g. by writing to an + /// application-specific `future`. + /// + /// Similarly, if the writer cancels the write after items have been taken + /// from `source` but before the items have all been forwarded to an + /// upstream sink, `poll_consume` will be called with `finish` set to true, + /// and the implementation may either: + /// + /// - Interrupt the forwarding process gracefully. This may be preferrable + /// if there is an out-of-band channel for communicating to the writer how + /// many items were forwarded before being interrupted. + /// + /// - Allow the forwarding to complete without interrupting it. This is + /// usually preferable if there's no out-of-band channel for reporting back + /// to the writer how many items were forwarded. 
+ fn poll_consume( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + store: StoreContextMut, + source: &mut Source, + finish: bool, + ) -> Poll>; } -impl Drop for GuardedFutureWriter -where - T: func::Lower + Send + Sync + 'static, - A: AsAccessor, -{ - fn drop(&mut self) { - if let Some(writer) = &mut self.writer { - writer.close_with(&self.accessor) - } - } +/// Represents a host-owned write end of a future. +pub trait FutureProducer: Send + 'static { + /// The payload type of this future. + type Item; + + /// Handle a host- or guest-initiated read by producing a value. + fn produce(self, accessor: &Accessor) -> impl Future> + Send; +} + +/// Represents a host-owned read end of a future. +pub trait FutureConsumer: Send + 'static { + /// The payload type of this future. + type Item; + + /// Handle a host- or guest-initiated write by consuming a value. + fn consume( + self, + accessor: &Accessor, + value: Self::Item, + ) -> impl Future> + Send; } /// Represents the readable end of a Component Model `future`. /// -/// Note that `FutureReader` instances must be disposed of using either `read` +/// Note that `FutureReader` instances must be disposed of using either `pipe` /// or `close`; otherwise the in-store representation will leak and the writer /// end will hang indefinitely. Consider using [`GuardedFutureReader`] to /// ensure that disposal happens automatically. @@ -559,7 +766,74 @@ pub struct FutureReader { } impl FutureReader { - fn new(id: TableId, instance: Instance) -> Self { + /// Create a new future with the specified producer. 
+ pub fn new( + instance: Instance, + mut store: S, + producer: impl FutureProducer, + ) -> Self + where + T: func::Lower + func::Lift + Send + Sync + 'static, + { + struct Producer(F); + + impl> + Send + 'static> + StreamProducer for Producer + { + type Item = T; + type Buffer = Option; + + fn poll_produce<'a>( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + store: StoreContextMut, + destination: &'a mut Destination<'a, Self::Item, Self::Buffer>, + finish: bool, + ) -> Poll> { + // SAFETY: This is a standard pin-projection, and we never move + // out of `self`. + let future = unsafe { self.map_unchecked_mut(|v| &mut v.0) }; + + match tls::set(store.0, || future.poll(cx)) { + Poll::Pending => { + if finish { + Poll::Ready(Ok(StreamResult::Cancelled)) + } else { + Poll::Pending + } + } + Poll::Ready(value) => { + destination.set_buffer(Some(value?)); + + // Here we return `StreamResult::Completed` even though + // we've produced the last item we'll ever produce. + // That's because the ABI expects + // `ReturnCode::Completed(1)` rather than + // `ReturnCode::Dropped(1)`. In any case, we won't be + // called again since the future will have resolved. + Poll::Ready(Ok(StreamResult::Completed)) + } + } + } + } + + let mut store = store.as_context_mut(); + let token = StoreToken::new(store.as_context_mut()); + Self::new_( + instance.new_transmit( + store, + TransmitKind::Future, + Producer(async move { + producer + .produce(&Accessor::new(token, Some(instance))) + .await + }), + ), + instance, + ) + } + + fn new_(id: TableId, instance: Instance) -> Self { Self { instance, id, @@ -567,58 +841,79 @@ impl FutureReader { } } - /// Read the value from this `future`. - /// - /// The returned `Future` will yield `Err` if the guest has trapped - /// before it could produce a result. - /// - /// The [`Accessor`] provided can be acquired from [`Instance::run_concurrent`] or - /// from within a host function for example. 
- /// - /// # Panics - /// - /// Panics if the store that the [`Accessor`] is derived from does not own - /// this future. - pub async fn read(self, accessor: impl AsAccessor) -> Option - where - T: func::Lift + Send + 'static, + /// Set the consumer that accepts the result of this future. + pub fn pipe( + self, + store: S, + consumer: impl FutureConsumer + Unpin, + ) where + T: func::Lift + 'static, { - self.guard(accessor).read().await - } + enum Consumer { + Start(C, Instance), + Poll(Pin> + Send>>), + Invalid, + } - async fn read_(&mut self, accessor: impl AsAccessor) -> Option - where - T: func::Lift + Send + 'static, - { - let accessor = accessor.as_accessor(); + impl + Unpin> + StreamConsumer for Consumer + { + type Item = T; + + fn poll_consume( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + mut store: StoreContextMut, + source: &mut Source, + finish: bool, + ) -> Poll> { + let me = self.get_mut(); + + if let Consumer::Start(consumer, instance) = mem::replace(me, Consumer::Invalid) { + let token = StoreToken::new(store.as_context_mut()); + let value = &mut None; + source.read(store.as_context_mut(), value)?; + let value = value.take().unwrap(); + *me = Consumer::Poll(Box::pin(async move { + consumer + .consume(&Accessor::new(token, Some(instance)), value) + .await + })); + } - let result = self - .instance - .host_read_async(accessor, self.id, None, TransmitKind::Future) - .await; + let Consumer::Poll(future) = me else { + unreachable!(); + }; - if let Ok(HostResult { - mut buffer, - dropped: false, - }) = result - { - buffer.take() - } else { - None + match tls::set(store.0, || future.as_mut().poll(cx)) { + Poll::Pending => { + if finish { + Poll::Ready(Ok(StreamResult::Cancelled)) + } else { + Poll::Pending + } + } + Poll::Ready(result) => { + result?; + + // Here we return `StreamResult::Completed` even though + // we've consumed the last item we'll ever consume. 
+ // That's because the ABI expects + // `ReturnCode::Completed(1)` rather than + // `ReturnCode::Dropped(1)`. In any case, we won't be + // called again since the future will have resolved. + Poll::Ready(Ok(StreamResult::Completed)) + } + } + } } - } - /// Wait for the write end of this `future` to be dropped. - /// - /// The [`Accessor`] provided can be acquired from - /// [`Instance::run_concurrent`] or from within a host function for example. - /// - /// # Panics - /// - /// Panics if the store that the [`Accessor`] is derived from does not own - /// this future. - pub async fn watch_writer(&mut self, accessor: impl AsAccessor) { - watch_writer(accessor, self.instance, self.id).await; + self.instance.set_consumer( + store, + self.id, + TransmitKind::Future, + Consumer::Start(consumer, self.instance), + ); } /// Convert this `FutureReader` into a [`Val`]. @@ -639,7 +934,7 @@ impl FutureReader { let store = store.as_context_mut(); let id = TableId::::new(*rep); instance.concurrent_state_mut(store.0).get_mut(id)?; // Just make sure it's present - Ok(Self::new(id, instance)) + Ok(Self::new_(id, instance)) } /// Transfer ownership of the read end of a future from a guest to the host. @@ -663,7 +958,7 @@ impl FutureReader { bail!("cannot lift future after previous read succeeded"); } - Ok(Self::new(id, cx.instance_handle())) + Ok(Self::new_(id, cx.instance_handle())) } _ => func::bad_type_info(), } @@ -735,380 +1030,128 @@ pub(crate) fn lower_future_to_index( .common .handle = Some(handle); - Ok(handle) - } - _ => func::bad_type_info(), - } -} - -// SAFETY: This relies on the `ComponentType` implementation for `u32` being -// safe and correct since we lift and lower future handles as `u32`s. 
-unsafe impl func::ComponentType for FutureReader { - const ABI: CanonicalAbiInfo = CanonicalAbiInfo::SCALAR4; - - type Lower = ::Lower; - - fn typecheck(ty: &InterfaceType, _types: &InstanceType<'_>) -> Result<()> { - match ty { - InterfaceType::Future(_) => Ok(()), - other => bail!("expected `future`, found `{}`", func::desc(other)), - } - } -} - -// SAFETY: See the comment on the `ComponentType` `impl` for this type. -unsafe impl func::Lower for FutureReader { - fn linear_lower_to_flat( - &self, - cx: &mut LowerContext<'_, U>, - ty: InterfaceType, - dst: &mut MaybeUninit, - ) -> Result<()> { - lower_future_to_index(self.id.rep(), cx, ty)?.linear_lower_to_flat( - cx, - InterfaceType::U32, - dst, - ) - } - - fn linear_lower_to_memory( - &self, - cx: &mut LowerContext<'_, U>, - ty: InterfaceType, - offset: usize, - ) -> Result<()> { - lower_future_to_index(self.id.rep(), cx, ty)?.linear_lower_to_memory( - cx, - InterfaceType::U32, - offset, - ) - } -} - -// SAFETY: See the comment on the `ComponentType` `impl` for this type. -unsafe impl func::Lift for FutureReader { - fn linear_lift_from_flat( - cx: &mut LiftContext<'_>, - ty: InterfaceType, - src: &Self::Lower, - ) -> Result { - let index = u32::linear_lift_from_flat(cx, InterfaceType::U32, src)?; - Self::lift_from_index(cx, ty, index) - } - - fn linear_lift_from_memory( - cx: &mut LiftContext<'_>, - ty: InterfaceType, - bytes: &[u8], - ) -> Result { - let index = u32::linear_lift_from_memory(cx, InterfaceType::U32, bytes)?; - Self::lift_from_index(cx, ty, index) - } -} - -/// A [`FutureReader`] paired with an [`Accessor`]. -/// -/// This is an RAII wrapper around [`FutureReader`] that ensures it is closed -/// when dropped. This can be created through [`GuardedFutureReader::new`] or -/// [`FutureReader::guard`]. -pub struct GuardedFutureReader -where - A: AsAccessor, -{ - // This field is `None` to implement the conversion from this guard back to - // `FutureReader`. 
When `None` is seen in the destructor it will cause the - // destructor to do nothing. - reader: Option>, - accessor: A, -} - -impl GuardedFutureReader -where - A: AsAccessor, -{ - /// Create a new `GuardedFutureReader` with the specified `accessor` and `reader`. - pub fn new(accessor: A, reader: FutureReader) -> Self { - Self { - reader: Some(reader), - accessor, - } - } - - /// Wrapper for [`FutureReader::read`]. - pub async fn read(mut self) -> Option - where - T: func::Lift + Send + 'static, - { - self.reader.as_mut().unwrap().read_(&self.accessor).await - } - - /// Wrapper for [`FutureReader::watch_writer`]. - pub async fn watch_writer(&mut self) { - self.reader - .as_mut() - .unwrap() - .watch_writer(&self.accessor) - .await - } - - /// Extracts the underlying [`FutureReader`] from this guard, returning it - /// back. - pub fn into_future(self) -> FutureReader { - self.into() - } -} - -impl From> for FutureReader -where - A: AsAccessor, -{ - fn from(mut guard: GuardedFutureReader) -> Self { - guard.reader.take().unwrap() - } -} - -impl Drop for GuardedFutureReader -where - A: AsAccessor, -{ - fn drop(&mut self) { - if let Some(reader) = &mut self.reader { - reader.close_with(&self.accessor) - } - } -} - -/// Represents the writable end of a Component Model `stream`. -/// -/// Note that `StreamWriter` instances must be disposed of using `close`; -/// otherwise the in-store representation will leak and the reader end will hang -/// indefinitely. Consider using [`GuardedStreamWriter`] to ensure that -/// disposal happens automatically. -pub struct StreamWriter { - instance: Instance, - id: TableId, - closed: bool, - _phantom: PhantomData, -} - -impl StreamWriter { - fn new(id: TableId, instance: Instance) -> Self { - Self { - instance, - id, - closed: false, - _phantom: PhantomData, - } - } - - /// Returns whether this stream is "closed" meaning that the other end of - /// the stream has been dropped. 
- pub fn is_closed(&self) -> bool { - self.closed - } - - /// Write the specified items to the `stream`. - /// - /// Note that this will only write as many items as the reader accepts - /// during its current or next read. Use `write_all` to loop until the - /// buffer is drained or the read end is dropped. - /// - /// The returned `Future` will yield the input buffer back, - /// possibly consuming a subset of the items or nothing depending on the - /// number of items the reader accepted. - /// - /// The [`is_closed`](Self::is_closed) method can be used to determine - /// whether the stream was learned to be closed after this operation completes. - /// - /// # Panics - /// - /// Panics if the store that the [`Accessor`] is derived from does not own - /// this future. - pub async fn write(&mut self, accessor: impl AsAccessor, buffer: B) -> B - where - T: func::Lower + 'static, - B: WriteBuffer, - { - let result = self - .instance - .host_write_async( - accessor.as_accessor(), - self.id, - buffer, - TransmitKind::Stream, - ) - .await; - - match result { - Ok(HostResult { buffer, dropped }) => { - if self.closed { - debug_assert!(dropped); - } - self.closed = dropped; - buffer - } - Err(_) => todo!("guarantee buffer recovery if `host_write` fails"), + Ok(handle) } + _ => func::bad_type_info(), } +} - /// Write the specified values until either the buffer is drained or the - /// read end is dropped. - /// - /// The buffer is returned back to the caller and may still contain items - /// within it if the other end of this stream was dropped. Use the - /// [`is_closed`](Self::is_closed) method to determine if the other end is - /// dropped. - /// - /// # Panics - /// - /// Panics if the store that the [`Accessor`] is derived from does not own - /// this future. 
- pub async fn write_all(&mut self, accessor: impl AsAccessor, mut buffer: B) -> B - where - T: func::Lower + 'static, - B: WriteBuffer, - { - let accessor = accessor.as_accessor(); - while !self.is_closed() && buffer.remaining().len() > 0 { - buffer = self.write(accessor, buffer).await; +// SAFETY: This relies on the `ComponentType` implementation for `u32` being +// safe and correct since we lift and lower future handles as `u32`s. +unsafe impl func::ComponentType for FutureReader { + const ABI: CanonicalAbiInfo = CanonicalAbiInfo::SCALAR4; + + type Lower = ::Lower; + + fn typecheck(ty: &InterfaceType, _types: &InstanceType<'_>) -> Result<()> { + match ty { + InterfaceType::Future(_) => Ok(()), + other => bail!("expected `future`, found `{}`", func::desc(other)), } - buffer } +} - /// Wait for the read end of this `stream` to be dropped. - /// - /// # Panics - /// - /// Panics if the store that the [`Accessor`] is derived from does not own - /// this future. - pub async fn watch_reader(&mut self, accessor: impl AsAccessor) { - watch_reader(accessor, self.instance, self.id).await +// SAFETY: See the comment on the `ComponentType` `impl` for this type. +unsafe impl func::Lower for FutureReader { + fn linear_lower_to_flat( + &self, + cx: &mut LowerContext<'_, U>, + ty: InterfaceType, + dst: &mut MaybeUninit, + ) -> Result<()> { + lower_future_to_index(self.id.rep(), cx, ty)?.linear_lower_to_flat( + cx, + InterfaceType::U32, + dst, + ) } - /// Close this `StreamWriter`, writing the default value. - /// - /// # Panics - /// - /// Panics if the store that the [`Accessor`] is derived from does not own - /// this future. Usage of this future after calling `close` will also cause - /// a panic. - pub fn close(&mut self, mut store: impl AsContextMut) { - // `self` should never be used again, but leave an invalid handle there just in case. 
- let id = mem::replace(&mut self.id, TableId::new(u32::MAX)); - self.instance - .host_drop_writer(store.as_context_mut(), id, None::<&dyn Fn() -> Result<()>>) - .unwrap() + fn linear_lower_to_memory( + &self, + cx: &mut LowerContext<'_, U>, + ty: InterfaceType, + offset: usize, + ) -> Result<()> { + lower_future_to_index(self.id.rep(), cx, ty)?.linear_lower_to_memory( + cx, + InterfaceType::U32, + offset, + ) } +} - /// Convenience method around [`Self::close`]. - pub fn close_with(&mut self, accessor: impl AsAccessor) { - accessor.as_accessor().with(|access| self.close(access)) +// SAFETY: See the comment on the `ComponentType` `impl` for this type. +unsafe impl func::Lift for FutureReader { + fn linear_lift_from_flat( + cx: &mut LiftContext<'_>, + ty: InterfaceType, + src: &Self::Lower, + ) -> Result { + let index = u32::linear_lift_from_flat(cx, InterfaceType::U32, src)?; + Self::lift_from_index(cx, ty, index) } - /// Returns a [`GuardedStreamWriter`] which will auto-close this stream on - /// drop and clean it up from the store. - /// - /// Note that the `accessor` provided must own this future and is - /// additionally transferred to the `GuardedStreamWriter` return value. - pub fn guard(self, accessor: A) -> GuardedStreamWriter - where - A: AsAccessor, - { - GuardedStreamWriter::new(accessor, self) + fn linear_lift_from_memory( + cx: &mut LiftContext<'_>, + ty: InterfaceType, + bytes: &[u8], + ) -> Result { + let index = u32::linear_lift_from_memory(cx, InterfaceType::U32, bytes)?; + Self::lift_from_index(cx, ty, index) } } -/// A [`StreamWriter`] paired with an [`Accessor`]. +/// A [`FutureReader`] paired with an [`Accessor`]. /// -/// This is an RAII wrapper around [`StreamWriter`] that ensures it is closed -/// when dropped. This can be created through [`GuardedStreamWriter::new`] or -/// [`StreamWriter::guard`]. -pub struct GuardedStreamWriter +/// This is an RAII wrapper around [`FutureReader`] that ensures it is closed +/// when dropped. 
This can be created through [`GuardedFutureReader::new`] or +/// [`FutureReader::guard`]. +pub struct GuardedFutureReader where A: AsAccessor, { // This field is `None` to implement the conversion from this guard back to - // `StreamWriter`. When `None` is seen in the destructor it will cause the + // `FutureReader`. When `None` is seen in the destructor it will cause the // destructor to do nothing. - writer: Option>, + reader: Option>, accessor: A, } -impl GuardedStreamWriter +impl GuardedFutureReader where A: AsAccessor, { - /// Create a new `GuardedStreamWriter` with the specified `accessor` and `writer`. - pub fn new(accessor: A, writer: StreamWriter) -> Self { + /// Create a new `GuardedFutureReader` with the specified `accessor` and `reader`. + pub fn new(accessor: A, reader: FutureReader) -> Self { Self { - writer: Some(writer), + reader: Some(reader), accessor, } } - /// Wrapper for [`StreamWriter::is_closed`]. - pub fn is_closed(&self) -> bool { - self.writer.as_ref().unwrap().is_closed() - } - - /// Wrapper for [`StreamWriter::write`]. - pub async fn write(&mut self, buffer: B) -> B - where - T: func::Lower + 'static, - B: WriteBuffer, - { - self.writer - .as_mut() - .unwrap() - .write(&self.accessor, buffer) - .await - } - - /// Wrapper for [`StreamWriter::write_all`]. - pub async fn write_all(&mut self, buffer: B) -> B - where - T: func::Lower + 'static, - B: WriteBuffer, - { - self.writer - .as_mut() - .unwrap() - .write_all(&self.accessor, buffer) - .await - } - - /// Wrapper for [`StreamWriter::watch_reader`]. - pub async fn watch_reader(&mut self) { - self.writer - .as_mut() - .unwrap() - .watch_reader(&self.accessor) - .await - } - - /// Extracts the underlying [`StreamWriter`] from this guard, returning it + /// Extracts the underlying [`FutureReader`] from this guard, returning it /// back. 
- pub fn into_stream(self) -> StreamWriter { + pub fn into_future(self) -> FutureReader { self.into() } } -impl From> for StreamWriter +impl From> for FutureReader where A: AsAccessor, { - fn from(mut guard: GuardedStreamWriter) -> Self { - guard.writer.take().unwrap() + fn from(mut guard: GuardedFutureReader) -> Self { + guard.reader.take().unwrap() } } -impl Drop for GuardedStreamWriter +impl Drop for GuardedFutureReader where A: AsAccessor, { fn drop(&mut self) { - if let Some(writer) = &mut self.writer { - writer.close_with(&self.accessor) + if let Some(reader) = &mut self.reader { + reader.close_with(&self.accessor) } } } @@ -1122,75 +1165,40 @@ where pub struct StreamReader { instance: Instance, id: TableId, - closed: bool, _phantom: PhantomData, } impl StreamReader { - fn new(id: TableId, instance: Instance) -> Self { + /// Create a new stream with the specified producer. + pub fn new( + instance: Instance, + store: S, + producer: impl StreamProducer, + ) -> Self + where + T: func::Lower + func::Lift + Send + Sync + 'static, + { + Self::new_( + instance.new_transmit(store, TransmitKind::Stream, producer), + instance, + ) + } + + fn new_(id: TableId, instance: Instance) -> Self { Self { instance, id, - closed: false, _phantom: PhantomData, } } - /// Returns whether this stream is "closed" meaning that the other end of - /// the stream has been dropped. - pub fn is_closed(&self) -> bool { - self.closed - } - - /// Read values from this `stream`. - /// - /// The returned `Future` will yield a `(Some(_), _)` if the read completed - /// (possibly with zero items if the write was empty). It will return - /// `(None, _)` if the read failed due to the closure of the write end. In - /// either case, the returned buffer will be the same one passed as a - /// parameter, with zero or more items added. - /// - /// # Panics - /// - /// Panics if the store that the [`Accessor`] is derived from does not own - /// this future. 
- pub async fn read(&mut self, accessor: impl AsAccessor, buffer: B) -> B + /// Set the consumer that accepts the items delivered to this stream. + pub fn pipe(self, store: S, consumer: impl StreamConsumer) where - T: func::Lift + 'static, - B: ReadBuffer + Send + 'static, + T: 'static, { - let result = self - .instance - .host_read_async( - accessor.as_accessor(), - self.id, - buffer, - TransmitKind::Stream, - ) - .await; - - match result { - Ok(HostResult { buffer, dropped }) => { - if self.closed { - debug_assert!(dropped); - } - self.closed = dropped; - buffer - } - Err(_) => { - todo!("guarantee buffer recovery if `host_read` fails") - } - } - } - - /// Wait until the write end of this `stream` is dropped. - /// - /// # Panics - /// - /// Panics if the store that the [`Accessor`] is derived from does not own - /// this future. - pub async fn watch_writer(&mut self, accessor: impl AsAccessor) { - watch_writer(accessor, self.instance, self.id).await + self.instance + .set_consumer(store, self.id, TransmitKind::Stream, consumer); } /// Convert this `StreamReader` into a [`Val`]. @@ -1211,7 +1219,7 @@ impl StreamReader { let store = store.as_context_mut(); let id = TableId::::new(*rep); instance.concurrent_state_mut(store.0).get_mut(id)?; // Just make sure it's present - Ok(Self::new(id, instance)) + Ok(Self::new_(id, instance)) } /// Transfer ownership of the read end of a stream from a guest to the host. @@ -1231,7 +1239,7 @@ impl StreamReader { .get_mut(id)? .common .handle = None; - Ok(Self::new(id, cx.instance_handle())) + Ok(Self::new_(id, cx.instance_handle())) } _ => func::bad_type_info(), } @@ -1403,33 +1411,6 @@ where } } - /// Wrapper for `StreamReader::is_closed` - pub fn is_closed(&self) -> bool { - self.reader.as_ref().unwrap().is_closed() - } - - /// Wrapper for `StreamReader::read`. 
- pub async fn read(&mut self, buffer: B) -> B - where - T: func::Lift + 'static, - B: ReadBuffer + Send + 'static, - { - self.reader - .as_mut() - .unwrap() - .read(&self.accessor, buffer) - .await - } - - /// Wrapper for `StreamReader::watch_writer`. - pub async fn watch_writer(&mut self) { - self.reader - .as_mut() - .unwrap() - .watch_writer(&self.accessor) - .await - } - /// Extracts the underlying [`StreamReader`] from this guard, returning it /// back. pub fn into_stream(self) -> StreamReader { @@ -1605,14 +1586,6 @@ struct TransmitState { write: WriteState, /// See `ReadState` read: ReadState, - /// The `Waker`, if any, to be woken when the write end of the stream or - /// future is dropped. - /// - /// This will signal to the host-owned read end that the write end has been - /// dropped. - writer_watcher: Option, - /// Like `writer_watcher`, but for the reverse direction. - reader_watcher: Option, /// Whether futher values may be transmitted via this stream or future. done: bool, } @@ -1624,8 +1597,6 @@ impl Default for TransmitState { read_handle: TableId::new(u32::MAX), read: ReadState::Open, write: WriteState::Open, - reader_watcher: None, - writer_watcher: None, done: false, } } @@ -1649,13 +1620,17 @@ enum WriteState { address: usize, count: usize, handle: u32, - post_write: PostWrite, }, - /// The write end is owned by a host task and a write is pending. + /// The write end is owned by the host, which is ready to produce items. HostReady { - accept: - Box Result + Send + Sync>, - post_write: PostWrite, + produce: Box< + dyn Fn() -> Pin> + Send + 'static>> + + Send + + Sync, + >, + guest_offset: usize, + cancel: bool, + cancel_waker: Option, }, /// The write end has been dropped. Dropped, @@ -1685,9 +1660,27 @@ enum ReadState { count: usize, handle: u32, }, - /// The read end is owned by a host task and a read is pending. + /// The read end is owned by a host task, and it is ready to consume items. 
HostReady { - accept: Box Result + Send + Sync>, + consume: Box< + dyn Fn() -> Pin> + Send + 'static>> + + Send + + Sync, + >, + guest_offset: usize, + cancel: bool, + cancel_waker: Option, + }, + /// Both the read and write ends are owned by the host. + HostToHost { + accept: Box< + dyn for<'a> Fn( + &'a mut UntypedWriteBuffer<'a>, + ) + -> Pin> + Send + 'a>> + + Send + + Sync, + >, }, /// The read end has been dropped. Dropped, @@ -1699,391 +1692,565 @@ impl fmt::Debug for ReadState { Self::Open => f.debug_tuple("Open").finish(), Self::GuestReady { .. } => f.debug_tuple("GuestReady").finish(), Self::HostReady { .. } => f.debug_tuple("HostReady").finish(), + Self::HostToHost { .. } => f.debug_tuple("HostToHost").finish(), Self::Dropped => f.debug_tuple("Dropped").finish(), } } } -/// Parameter type to pass to a `ReadState::HostReady` closure. -/// -/// See also `accept_writer`. -enum Writer<'a> { - /// The write end is owned by a guest task. - Guest { - lift: &'a mut LiftContext<'a>, - ty: Option, - address: usize, - count: usize, - }, - /// The write end is owned by the host. - Host { - buffer: &'a mut UntypedWriteBuffer<'a>, - count: usize, - }, - /// The write end has been dropped. - End, -} - -/// Parameter type to pass to a `WriteState::HostReady` closure. -/// -/// See also `accept_reader`. -enum Reader<'a> { - /// The read end is owned by a guest task. - Guest { - options: &'a Options, - ty: TransmitIndex, - address: usize, - count: usize, - }, - /// The read end is owned by the host. - Host { - accept: Box usize + 'a>, - }, - /// The read end has been dropped. 
- End, +fn return_code(kind: TransmitKind, state: StreamResult, guest_offset: usize) -> ReturnCode { + let count = guest_offset.try_into().unwrap(); + match state { + StreamResult::Dropped => ReturnCode::Dropped(count), + StreamResult::Completed => ReturnCode::completed(kind, count), + StreamResult::Cancelled => ReturnCode::Cancelled(count), + } } impl Instance { - /// Create a new Component Model `future` as pair of writable and readable ends, - /// the latter of which may be passed to guest code. - /// - /// `default` is a callback to be used if the writable end of the future is - /// closed without having written a value. You may supply e.g. `|| - /// unreachable!()` if you're sure that won't happen. - pub fn future( + fn new_transmit>( self, - mut store: impl AsContextMut, - default: fn() -> T, - ) -> Result<(FutureWriter, FutureReader)> { - let (write, read) = self - .concurrent_state_mut(store.as_context_mut().0) - .new_transmit()?; + mut store: S, + kind: TransmitKind, + producer: P, + ) -> TableId + where + P::Item: func::Lower, + { + let mut store = store.as_context_mut(); + let token = StoreToken::new(store.as_context_mut()); + let state = self.concurrent_state_mut(store.0); + let (_, read) = state.new_transmit().unwrap(); + let producer = Arc::new(Mutex::new(Some((Box::pin(producer), P::Buffer::default())))); + let id = state.get_mut(read).unwrap().state; + let produce = Box::new(move || { + let producer = producer.clone(); + async move { + let (mut mine, mut buffer) = producer.lock().unwrap().take().unwrap(); + + let (result, cancelled) = if buffer.remaining().is_empty() { + future::poll_fn(|cx| { + tls::get(|store| { + let &WriteState::HostReady { cancel, .. 
} = + &self.concurrent_state_mut(store).get_mut(id).unwrap().write + else { + unreachable!(); + }; + + let poll = mine.as_mut().poll_produce( + cx, + token.as_context_mut(store), + &mut Destination { + instance: self, + id, + buffer: &mut buffer, + _phantom: PhantomData, + }, + cancel, + ); + + { + let WriteState::HostReady { + guest_offset, + cancel, + cancel_waker, + .. + } = &mut self + .concurrent_state_mut(store) + .get_mut(id) + .unwrap() + .write + else { + unreachable!(); + }; + + if let Poll::Pending = &poll { + if !buffer.remaining().is_empty() || *guest_offset > 0 { + return Poll::Ready(Err(anyhow!( + "StreamProducer::poll_produce returned Poll::Pending \ + after producing at least one item" + ))); + } + + *cancel_waker = Some(cx.waker().clone()); + } else { + *cancel_waker = None; + *cancel = false; + } + } + + poll.map(|v| v.map(|result| (result, cancel))) + }) + }) + .await? + } else { + (StreamResult::Completed, false) + }; + + let (guest_offset, count) = tls::get(|store| { + let transmit = self.concurrent_state_mut(store).get_mut(id).unwrap(); + ( + match &transmit.write { + &WriteState::HostReady { guest_offset, .. } => guest_offset, + _ => unreachable!(), + }, + match &transmit.read { + &ReadState::GuestReady { count, .. } => count, + ReadState::HostToHost { .. 
} => 1, + _ => unreachable!(), + }, + ) + }); + + match result { + StreamResult::Completed => { + if count > 1 && buffer.remaining().is_empty() && guest_offset == 0 { + bail!( + "StreamProducer::poll_produce returned StreamResult::Completed \ + without producing any items" + ); + } + } + StreamResult::Cancelled => { + if !cancelled { + bail!( + "StreamProducer::poll_produce returned StreamResult::Cancelled \ + without being given a `finish` parameter value of true" + ); + } + } + StreamResult::Dropped => {} + } + + let write = !buffer.remaining().is_empty(); + + *producer.lock().unwrap() = Some((mine, buffer)); + + if write { + self.write(token, id, producer, kind).await?; + } - Ok(( - FutureWriter::new(default, write, self), - FutureReader::new(read, self), - )) + Ok(result) + } + .boxed() + }); + state.get_mut(id).unwrap().write = WriteState::HostReady { + produce, + guest_offset: 0, + cancel: false, + cancel_waker: None, + }; + read } - /// Create a new Component Model `stream` as pair of writable and readable ends, - /// the latter of which may be passed to guest code. 
- pub fn stream( + fn set_consumer>( self, - mut store: impl AsContextMut, - ) -> Result<(StreamWriter, StreamReader)> { - let (write, read) = self - .concurrent_state_mut(store.as_context_mut().0) - .new_transmit()?; + mut store: S, + id: TableId, + kind: TransmitKind, + consumer: C, + ) { + let mut store = store.as_context_mut(); + let token = StoreToken::new(store.as_context_mut()); + let state = self.concurrent_state_mut(store.0); + let id = state.get_mut(id).unwrap().state; + let transmit = state.get_mut(id).unwrap(); + let consumer = Arc::new(Mutex::new(Some(Box::pin(consumer)))); + let consume_with_buffer = { + let consumer = consumer.clone(); + async move |mut host_buffer: Option<&mut dyn WriteBuffer>| { + let mut mine = consumer.lock().unwrap().take().unwrap(); + + let host_buffer_remaining_before = + host_buffer.as_deref_mut().map(|v| v.remaining().len()); + + let (result, cancelled) = future::poll_fn(|cx| { + tls::get(|store| { + let cancel = + match &self.concurrent_state_mut(store).get_mut(id).unwrap().read { + &ReadState::HostReady { cancel, .. } => cancel, + ReadState::Open => false, + _ => unreachable!(), + }; + + let poll = mine.as_mut().poll_consume( + cx, + token.as_context_mut(store), + &mut Source { + instance: self, + id, + host_buffer: host_buffer.as_deref_mut(), + }, + cancel, + ); + + if let ReadState::HostReady { + cancel_waker, + cancel, + .. + } = &mut self.concurrent_state_mut(store).get_mut(id).unwrap().read + { + if let Poll::Pending = &poll { + *cancel_waker = Some(cx.waker().clone()); + } else { + *cancel_waker = None; + *cancel = false; + } + } - Ok(( - StreamWriter::new(write, self), - StreamReader::new(read, self), - )) - } + poll.map(|v| v.map(|result| (result, cancel))) + }) + }) + .await?; + + let (guest_offset, count) = tls::get(|store| { + let transmit = self.concurrent_state_mut(store).get_mut(id).unwrap(); + ( + match &transmit.read { + &ReadState::HostReady { guest_offset, .. 
} => guest_offset, + ReadState::Open => 0, + _ => unreachable!(), + }, + match &transmit.write { + &WriteState::GuestReady { count, .. } => count, + WriteState::HostReady { .. } => host_buffer_remaining_before.unwrap(), + _ => unreachable!(), + }, + ) + }); + + match result { + StreamResult::Completed => { + if count > 0 + && guest_offset == 0 + && host_buffer_remaining_before + .zip(host_buffer.map(|v| v.remaining().len())) + .map(|(before, after)| before == after) + .unwrap_or(false) + { + bail!( + "StreamConsumer::poll_consume returned StreamResult::Completed \ + without consuming any items" + ); + } + } + StreamResult::Cancelled => { + if !cancelled { + bail!( + "StreamConsumer::poll_consume returned StreamResult::Cancelled \ + without being given a `finish` parameter value of true" + ); + } + } + StreamResult::Dropped => {} + } + + *consumer.lock().unwrap() = Some(mine); + + Ok(result) + } + }; + let consume = { + let consume = consume_with_buffer.clone(); + Box::new(move || { + let consume = consume.clone(); + async move { consume(None).await }.boxed() + }) + }; + + match &transmit.write { + WriteState::Open => { + transmit.read = ReadState::HostReady { + consume, + guest_offset: 0, + cancel: false, + cancel_waker: None, + }; + } + WriteState::GuestReady { .. } => { + let future = consume(); + transmit.read = ReadState::HostReady { + consume, + guest_offset: 0, + cancel: false, + cancel_waker: None, + }; + self.pipe_from_guest(store, kind, id, future); + } + WriteState::HostReady { .. } => { + let WriteState::HostReady { produce, .. 
} = mem::replace( + &mut transmit.write, + WriteState::HostReady { + produce: Box::new(|| unreachable!()), + guest_offset: 0, + cancel: false, + cancel_waker: None, + }, + ) else { + unreachable!(); + }; + + transmit.read = ReadState::HostToHost { + accept: Box::new(move |input| { + let consume = consume_with_buffer.clone(); + async move { consume(Some(input.get_mut::())).await }.boxed() + }), + }; + + let future = async move { + loop { + if tls::get(|store| { + anyhow::Ok(matches!( + self.concurrent_state_mut(store).get_mut(id)?.read, + ReadState::Dropped + )) + })? { + break Ok(()); + } - /// Write to the specified stream or future from the host. - fn host_write, U>( - self, - mut store: StoreContextMut, - id: TableId, - mut buffer: B, - kind: TransmitKind, - post_write: PostWrite, - ) -> Result, oneshot::Receiver>>> { - let transmit_id = self.concurrent_state_mut(store.0).get_mut(id)?.state; - let transmit = self - .concurrent_state_mut(store.0) - .get_mut(transmit_id) - .with_context(|| format!("retrieving state for transmit [{transmit_id:?}]"))?; - log::trace!("host_write state {transmit_id:?}; {:?}", transmit.read); + match produce().await? 
{ + StreamResult::Completed | StreamResult::Cancelled => {} + StreamResult::Dropped => break Ok(()), + } - let new_state = if let ReadState::Dropped = &transmit.read { - ReadState::Dropped - } else { - ReadState::Open - }; + if let TransmitKind::Future = kind { + break Ok(()); + } + } + } + .map(move |result| { + tls::get(|store| self.concurrent_state_mut(store).delete_transmit(id))?; + result + }); - if matches!(post_write, PostWrite::Drop) && !matches!(transmit.read, ReadState::Open) { - transmit.write = WriteState::Dropped; + state.push_future(Box::pin(future)); + } + WriteState::Dropped => unreachable!(), } + } - Ok(match mem::replace(&mut transmit.read, new_state) { - ReadState::Open => { - assert!(matches!(&transmit.write, WriteState::Open)); + async fn write>( + self, + token: StoreToken, + id: TableId, + pair: Arc>>, + kind: TransmitKind, + ) -> Result<()> { + let (read, guest_offset) = tls::get(|store| { + let transmit = self.concurrent_state_mut(store).get_mut(id)?; - let token = StoreToken::new(store.as_context_mut()); - let (tx, rx) = oneshot::channel(); - let state = WriteState::HostReady { - accept: Box::new(move |store, instance, reader| { - let (result, code) = accept_reader::( - token.as_context_mut(store), - instance, - reader, - buffer, - kind, - )?; - _ = tx.send(result); - Ok(code) - }), - post_write, - }; - self.concurrent_state_mut(store.0) - .get_mut(transmit_id)? - .write = state; + let guest_offset = if let &WriteState::HostReady { guest_offset, .. } = &transmit.write + { + Some(guest_offset) + } else { + None + }; - Err(rx) - } + anyhow::Ok(( + mem::replace(&mut transmit.read, ReadState::Open), + guest_offset, + )) + })?; + match read { ReadState::GuestReady { ty, - flat_abi: _, + flat_abi, options, address, count, handle, - .. 
} => { + let guest_offset = guest_offset.unwrap(); + if let TransmitKind::Future = kind { - transmit.done = true; + tls::get(|store| { + self.concurrent_state_mut(store).get_mut(id)?.done = true; + anyhow::Ok(()) + })?; } - let read_handle = transmit.read_handle; - let accept = move |mut store: StoreContextMut| { - let (result, code) = accept_reader::( - store.as_context_mut(), - self, - Reader::Guest { - options: &options, + let old_remaining = pair.lock().unwrap().as_mut().unwrap().1.remaining().len(); + let accept = { + let pair = pair.clone(); + move |mut store: StoreContextMut| { + lower::( + store.as_context_mut(), + self, + &options, ty, - address, - count, - }, - buffer, - kind, - )?; - - self.concurrent_state_mut(store.0).set_event( - read_handle.rep(), - match ty { - TransmitIndex::Future(ty) => Event::FutureRead { - code, - pending: Some((ty, handle)), - }, - TransmitIndex::Stream(ty) => Event::StreamRead { - code, - pending: Some((ty, handle)), - }, - }, - )?; - - anyhow::Ok(result) + address + (T::SIZE32 * guest_offset), + count - guest_offset, + &mut pair.lock().unwrap().as_mut().unwrap().1, + )?; + anyhow::Ok(()) + } }; - if T::MAY_REQUIRE_REALLOC { - // For payloads which may require a realloc call, use a - // oneshot::channel and background task. This is necessary - // because calling the guest while there are host embedder - // frames on the stack is unsound. - let (tx, rx) = oneshot::channel(); - let token = StoreToken::new(store.as_context_mut()); - self.concurrent_state_mut(store.0).push_high_priority( - WorkItem::WorkerFunction(AlwaysMut::new(Box::new(move |store, _| { - _ = tx.send(accept(token.as_context_mut(store))?); - Ok(()) - }))), - ); - Err(rx) - } else { - // Optimize flat payloads (i.e. those which do not require - // calling the guest's realloc function) by lowering - // directly instead of using a oneshot::channel and - // background task. - Ok(accept(store)?) 
+ if guest_offset < count { + if T::MAY_REQUIRE_REALLOC { + // For payloads which may require a realloc call, use a + // oneshot::channel and background task. This is + // necessary because calling the guest while there are + // host embedder frames on the stack is unsound. + let (tx, rx) = oneshot::channel(); + tls::get(move |store| { + self.concurrent_state_mut(store).push_high_priority( + WorkItem::WorkerFunction(AlwaysMut::new(Box::new( + move |store, _| { + _ = tx.send(accept(token.as_context_mut(store))?); + Ok(()) + }, + ))), + ) + }); + rx.await? + } else { + // Optimize flat payloads (i.e. those which do not + // require calling the guest's realloc function) by + // lowering directly instead of using a oneshot::channel + // and background task. + tls::get(|store| accept(token.as_context_mut(store)))? + }; } - } - - ReadState::HostReady { accept } => { - let count = buffer.remaining().len(); - let mut untyped = UntypedWriteBuffer::new(&mut buffer); - let code = accept(Writer::Host { - buffer: &mut untyped, - count, - })?; - let (ReturnCode::Completed(_) | ReturnCode::Dropped(_)) = code else { - unreachable!() - }; - - Ok(HostResult { - buffer, - dropped: false, - }) - } - ReadState::Dropped => Ok(HostResult { - buffer, - dropped: true, - }), - }) - } + tls::get(|store| { + let count = + old_remaining - pair.lock().unwrap().as_mut().unwrap().1.remaining().len(); - /// Async wrapper around `Self::host_write`. - async fn host_write_async>( - self, - accessor: impl AsAccessor, - id: TableId, - buffer: B, - kind: TransmitKind, - ) -> Result> { - match accessor.as_accessor().with(move |mut access| { - self.host_write( - access.as_context_mut(), - id, - buffer, - kind, - PostWrite::Continue, - ) - })? { - Ok(result) => Ok(result), - Err(rx) => Ok(rx.await?), - } - } + let transmit = self.concurrent_state_mut(store).get_mut(id)?; - /// Read from the specified stream or future from the host. 
- fn host_read, U>( - self, - store: StoreContextMut, - id: TableId, - mut buffer: B, - kind: TransmitKind, - ) -> Result, oneshot::Receiver>>> { - let transmit_id = self.concurrent_state_mut(store.0).get_mut(id)?.state; - let transmit = self - .concurrent_state_mut(store.0) - .get_mut(transmit_id) - .with_context(|| format!("retrieving state for transmit [{transmit_id:?}]"))?; - log::trace!("host_read state {transmit_id:?}; {:?}", transmit.write); + let WriteState::HostReady { guest_offset, .. } = &mut transmit.write else { + unreachable!(); + }; - let new_state = if let WriteState::Dropped = &transmit.write { - WriteState::Dropped - } else { - WriteState::Open - }; + *guest_offset += count; - Ok(match mem::replace(&mut transmit.write, new_state) { - WriteState::Open => { - assert!(matches!(&transmit.read, ReadState::Open)); + transmit.read = ReadState::GuestReady { + ty, + flat_abi, + options, + address, + count, + handle, + }; - let (tx, rx) = oneshot::channel(); - transmit.read = ReadState::HostReady { - accept: Box::new(move |writer| { - let (result, code) = accept_writer::(writer, buffer, kind)?; - _ = tx.send(result); - Ok(code) - }), - }; + anyhow::Ok(()) + })?; - Err(rx) + Ok(()) } - WriteState::GuestReady { - ty, - flat_abi: _, - options, - address, - count, - handle, - post_write, - .. 
- } => { - if let TransmitIndex::Future(_) = ty { - transmit.done = true; - } + ReadState::HostToHost { accept } => { + let (mine, mut buffer) = pair.lock().unwrap().take().unwrap(); - let write_handle = transmit.write_handle; - let lift = &mut LiftContext::new(store.0.store_opaque_mut(), &options, self); - let (result, code) = accept_writer::( - Writer::Guest { - ty: payload(ty, lift.types), - lift, - address, - count, - }, - buffer, - kind, - )?; + let state = accept(&mut UntypedWriteBuffer::new(&mut buffer)).await?; - let state = self.concurrent_state_mut(store.0); - let pending = if let PostWrite::Drop = post_write { - state.get_mut(transmit_id)?.write = WriteState::Dropped; - false - } else { - true - }; + *pair.lock().unwrap() = Some((mine, buffer)); - state.set_event( - write_handle.rep(), - match ty { - TransmitIndex::Future(ty) => Event::FutureWrite { - code, - pending: pending.then_some((ty, handle)), - }, - TransmitIndex::Stream(ty) => Event::StreamWrite { - code, - pending: pending.then_some((ty, handle)), - }, - }, - )?; + tls::get(|store| { + self.concurrent_state_mut(store).get_mut(id)?.read = match state { + StreamResult::Dropped => ReadState::Dropped, + StreamResult::Completed | StreamResult::Cancelled => { + ReadState::HostToHost { accept } + } + }; - Ok(result) + anyhow::Ok(()) + })?; + Ok(()) } - WriteState::HostReady { accept, post_write } => { - accept( - store.0, - self, - Reader::Host { - accept: Box::new(|input, count| { - let count = count.min(buffer.remaining_capacity()); - buffer.move_from(input.get_mut::(), count); - count - }), - }, - )?; - - if let PostWrite::Drop = post_write { - self.concurrent_state_mut(store.0) - .get_mut(transmit_id)? 
- .write = WriteState::Dropped; - } + _ => unreachable!(), + } + } - Ok(HostResult { - buffer, - dropped: false, - }) - } + fn pipe_from_guest( + self, + mut store: impl AsContextMut, + kind: TransmitKind, + id: TableId, + future: Pin> + Send + 'static>>, + ) { + let future = async move { + let stream_state = future.await?; + tls::get(|store| { + let state = self.concurrent_state_mut(store); + let transmit = state.get_mut(id)?; + let ReadState::HostReady { + consume, + guest_offset, + .. + } = mem::replace(&mut transmit.read, ReadState::Open) + else { + unreachable!(); + }; + let code = return_code(kind, stream_state, guest_offset); + transmit.read = match stream_state { + StreamResult::Dropped => ReadState::Dropped, + StreamResult::Completed | StreamResult::Cancelled => ReadState::HostReady { + consume, + guest_offset: 0, + cancel: false, + cancel_waker: None, + }, + }; + let WriteState::GuestReady { ty, handle, .. } = + mem::replace(&mut transmit.write, WriteState::Open) + else { + unreachable!(); + }; + state.send_write_result(ty, id, handle, code)?; + Ok(()) + }) + }; - WriteState::Dropped => Ok(HostResult { - buffer, - dropped: true, - }), - }) + self.concurrent_state_mut(store.as_context_mut().0) + .push_future(future.boxed()); } - /// Async wrapper around `Self::host_read`. - async fn host_read_async>( + fn pipe_to_guest( self, - accessor: impl AsAccessor, - id: TableId, - buffer: B, + mut store: impl AsContextMut, kind: TransmitKind, - ) -> Result> { - match accessor - .as_accessor() - .with(move |mut access| self.host_read(access.as_context_mut(), id, buffer, kind))? - { - Ok(result) => Ok(result), - Err(rx) => Ok(rx.await?), - } + id: TableId, + future: Pin> + Send + 'static>>, + ) { + let future = async move { + let stream_state = future.await?; + tls::get(|store| { + let state = self.concurrent_state_mut(store); + let transmit = state.get_mut(id)?; + let WriteState::HostReady { + produce, + guest_offset, + .. 
+ } = mem::replace(&mut transmit.write, WriteState::Open) + else { + unreachable!(); + }; + let code = return_code(kind, stream_state, guest_offset); + transmit.write = match stream_state { + StreamResult::Dropped => WriteState::Dropped, + StreamResult::Completed | StreamResult::Cancelled => WriteState::HostReady { + produce, + guest_offset: 0, + cancel: false, + cancel_waker: None, + }, + }; + let ReadState::GuestReady { ty, handle, .. } = + mem::replace(&mut transmit.read, ReadState::Open) + else { + unreachable!(); + }; + state.send_read_result(ty, id, handle, code)?; + Ok(()) + }) + }; + + self.concurrent_state_mut(store.as_context_mut().0) + .push_future(future.boxed()); } /// Drop the read end of a stream or future read from the host. @@ -2105,9 +2272,6 @@ impl Instance { ); transmit.read = ReadState::Dropped; - if let Some(waker) = transmit.reader_watcher.take() { - waker.wake(); - } // If the write end is already dropped, it should stay dropped, // otherwise, it should be opened. @@ -2122,34 +2286,23 @@ impl Instance { match mem::replace(&mut transmit.write, new_state) { // If a guest is waiting to write, notify it that the read end has // been dropped. - WriteState::GuestReady { - ty, - handle, - post_write, - .. - } => { - if let PostWrite::Drop = post_write { - state.delete_transmit(transmit_id)?; - } else { - state.update_event( - write_handle.rep(), - match ty { - TransmitIndex::Future(ty) => Event::FutureWrite { - code: ReturnCode::Dropped(0), - pending: Some((ty, handle)), - }, - TransmitIndex::Stream(ty) => Event::StreamWrite { - code: ReturnCode::Dropped(0), - pending: Some((ty, handle)), - }, + WriteState::GuestReady { ty, handle, .. 
} => { + state.update_event( + write_handle.rep(), + match ty { + TransmitIndex::Future(ty) => Event::FutureWrite { + code: ReturnCode::Dropped(0), + pending: Some((ty, handle)), }, - )?; - }; + TransmitIndex::Stream(ty) => Event::StreamWrite { + code: ReturnCode::Dropped(0), + pending: Some((ty, handle)), + }, + }, + )?; } - WriteState::HostReady { accept, .. } => { - accept(store, self, Reader::End)?; - } + WriteState::HostReady { .. } => {} WriteState::Open => { state.update_event( @@ -2176,11 +2329,11 @@ impl Instance { } /// Drop the write end of a stream or future read from the host. - fn host_drop_writer( + fn host_drop_writer( self, - mut store: StoreContextMut, + store: StoreContextMut, id: TableId, - default: Option<&dyn Fn() -> Result>, + on_drop_open: Option Result<()>>, ) -> Result<()> { let transmit_id = self.concurrent_state_mut(store.0).get_mut(id)?.state; let transmit = self @@ -2193,32 +2346,18 @@ impl Instance { transmit.write ); - if let Some(waker) = transmit.writer_watcher.take() { - waker.wake(); - } - // Existing queued transmits must be updated with information for the impending writer closure match &mut transmit.write { WriteState::GuestReady { .. } => { unreachable!("can't call `host_drop_writer` on a guest-owned writer"); } - WriteState::HostReady { post_write, .. } => { - *post_write = PostWrite::Drop; - } + WriteState::HostReady { .. } => {} v @ WriteState::Open => { - if let (Some(default), false) = ( - default, + if let (Some(on_drop_open), false) = ( + on_drop_open, transmit.done || matches!(transmit.read, ReadState::Dropped), ) { - // This is a future, and we haven't written a value yet -- - // write the default value. - _ = self.host_write( - store.as_context_mut(), - id, - Some(default()?), - TransmitKind::Future, - PostWrite::Drop, - )?; + on_drop_open()?; } else { *v = WriteState::Dropped; } @@ -2263,17 +2402,13 @@ impl Instance { )?; } - // If the host was ready to read, and the writer end is being dropped (host->host write?) 
- // signal to the reader that we've reached the end of the stream - ReadState::HostReady { accept } => { - accept(Writer::End)?; - } + ReadState::HostReady { .. } | ReadState::HostToHost { .. } => {} // If the read state is open, then there are no registered readers of the stream/future ReadState::Open => { self.concurrent_state_mut(store.0).update_event( read_handle.rep(), - match default { + match on_drop_open { Some(_) => Event::FutureRead { code: ReturnCode::Dropped(0), pending: None, @@ -2313,14 +2448,12 @@ impl Instance { let id = TableId::::new(transmit_rep); log::trace!("guest_drop_writable: drop writer {id:?}"); match ty { - TransmitIndex::Stream(_) => { - self.host_drop_writer(store, id, None::<&dyn Fn() -> Result<()>>) - } + TransmitIndex::Stream(_) => self.host_drop_writer(store, id, None), TransmitIndex::Future(_) => self.host_drop_writer( store, id, - Some(&|| { - Err::<(), _>(anyhow!( + Some(|| { + Err(anyhow!( "cannot drop future write end without first writing a value" )) }), @@ -2467,6 +2600,40 @@ impl Instance { Ok(()) } + fn check_bounds( + self, + store: &StoreOpaque, + options: &Options, + ty: TransmitIndex, + address: usize, + count: usize, + ) -> Result<()> { + let types = self.id().get(store).component().types().clone(); + let size = usize::try_from( + match ty { + TransmitIndex::Future(ty) => types[types[ty].ty] + .payload + .map(|ty| types.canonical_abi(&ty).size32), + TransmitIndex::Stream(ty) => types[types[ty].ty] + .payload + .map(|ty| types.canonical_abi(&ty).size32), + } + .unwrap_or(0), + ) + .unwrap(); + + if count > 0 && size > 0 { + options + .memory(store) + .get(address..) + .and_then(|b| b.get(..(size * count))) + .map(drop) + .ok_or_else(|| anyhow::anyhow!("read pointer out of bounds of memory")) + } else { + Ok(()) + } + } + /// Write to the specified stream or future from the guest. 
pub(super) fn guest_write( self, @@ -2481,6 +2648,7 @@ impl Instance { let address = usize::try_from(address).unwrap(); let count = usize::try_from(count).unwrap(); let options = Options::new_index(store.0, self, options); + self.check_bounds(store.0, &options, ty, address, count)?; if !options.async_() { bail!("synchronous stream and future writes not yet supported"); } @@ -2502,7 +2670,7 @@ impl Instance { let transmit_id = concurrent_state.get_mut(transmit_handle)?.state; let transmit = concurrent_state.get_mut(transmit_id)?; log::trace!( - "guest_write {transmit_handle:?} (handle {handle}; state {transmit_id:?}); {:?}", + "guest_write {count} to {transmit_handle:?} (handle {handle}; state {transmit_id:?}); {:?}", transmit.read ); @@ -2526,7 +2694,6 @@ impl Instance { address, count, handle, - post_write: PostWrite::Continue, }; Ok::<_, crate::Error>(()) }; @@ -2608,19 +2775,7 @@ impl Instance { let code = ReturnCode::completed(ty.kind(), total); - concurrent_state.set_event( - read_handle_rep, - match read_ty { - TransmitIndex::Future(ty) => Event::FutureRead { - code, - pending: Some((ty, read_handle)), - }, - TransmitIndex::Stream(ty) => Event::StreamRead { - code, - pending: Some((ty, read_handle)), - }, - }, - )?; + concurrent_state.send_read_result(read_ty, transmit_id, read_handle, code)?; } if read_buffer_remaining { @@ -2643,20 +2798,58 @@ impl Instance { } } - ReadState::HostReady { accept } => { + ReadState::HostReady { + consume, + guest_offset, + cancel, + cancel_waker, + } => { + assert!(cancel_waker.is_none()); + assert!(!cancel); + assert_eq!(0, guest_offset); + if let TransmitIndex::Future(_) = ty { transmit.done = true; } - let lift = &mut LiftContext::new(store.0.store_opaque_mut(), &options, self); - accept(Writer::Guest { - ty: payload(ty, lift.types), - lift, - address, - count, - })? 
+ let mut future = consume(); + transmit.read = ReadState::HostReady { + consume, + guest_offset: 0, + cancel: false, + cancel_waker: None, + }; + set_guest_ready(concurrent_state)?; + let poll = self.set_tls(store.0, || { + future + .as_mut() + .poll(&mut Context::from_waker(&Waker::noop())) + }); + + match poll { + Poll::Ready(state) => { + let transmit = self.concurrent_state_mut(store.0).get_mut(transmit_id)?; + let ReadState::HostReady { guest_offset, .. } = &mut transmit.read else { + unreachable!(); + }; + let code = return_code(ty.kind(), state?, mem::replace(guest_offset, 0)); + transmit.write = WriteState::Open; + code + } + Poll::Pending => { + self.pipe_from_guest( + store.as_context_mut(), + ty.kind(), + transmit_id, + future, + ); + ReturnCode::Blocked + } + } } + ReadState::HostToHost { .. } => unreachable!(), + ReadState::Open => { set_guest_ready(concurrent_state)?; ReturnCode::Blocked @@ -2696,7 +2889,9 @@ impl Instance { count: u32, ) -> Result { let address = usize::try_from(address).unwrap(); + let count = usize::try_from(count).unwrap(); let options = Options::new_index(store.0, self, options); + self.check_bounds(store.0, &options, ty, address, count)?; if !options.async_() { bail!("synchronous stream and future reads not yet supported"); } @@ -2715,7 +2910,7 @@ impl Instance { let transmit_id = concurrent_state.get_mut(transmit_handle)?.state; let transmit = concurrent_state.get_mut(transmit_id)?; log::trace!( - "guest_read {transmit_handle:?} (handle {handle}; state {transmit_id:?}); {:?}", + "guest_read {count} from {transmit_handle:?} (handle {handle}; state {transmit_id:?}); {:?}", transmit.write ); @@ -2737,7 +2932,7 @@ impl Instance { flat_abi, options, address, - count: usize::try_from(count).unwrap(), + count, handle, }; Ok::<_, crate::Error>(()) @@ -2751,7 +2946,6 @@ impl Instance { address: write_address, count: write_count, handle: write_handle, - post_write, } => { assert_eq!(flat_abi, write_flat_abi); @@ -2765,8 +2959,6 @@ impl 
Instance { // `ReadState::GuestReady` case concerning zero-length reads and // writes. - let count = usize::try_from(count).unwrap(); - let write_complete = write_count == 0 || count > 0; let read_complete = write_count > 0; let write_buffer_remaining = count < write_count; @@ -2792,12 +2984,6 @@ impl Instance { .map(|ty| usize::try_from(types.canonical_abi(&ty).size32).unwrap()) .unwrap_or(0); let concurrent_state = instance.concurrent_state_mut(); - let pending = if let PostWrite::Drop = post_write { - concurrent_state.get_mut(transmit_id)?.write = WriteState::Dropped; - false - } else { - true - }; if write_complete { let count = u32::try_from(count).unwrap(); @@ -2813,18 +2999,11 @@ impl Instance { let code = ReturnCode::completed(ty.kind(), total); - concurrent_state.set_event( - write_handle_rep, - match write_ty { - TransmitIndex::Future(ty) => Event::FutureWrite { - code, - pending: pending.then_some((ty, write_handle)), - }, - TransmitIndex::Stream(ty) => Event::StreamWrite { - code, - pending: pending.then_some((ty, write_handle)), - }, - }, + concurrent_state.send_write_result( + write_ty, + transmit_id, + write_handle, + code, )?; } @@ -2837,7 +3016,6 @@ impl Instance { address: write_address + (count * item_size), count: write_count - count, handle: write_handle, - post_write, }; } @@ -2849,29 +3027,49 @@ impl Instance { } } - WriteState::HostReady { accept, post_write } => { + WriteState::HostReady { + produce, + guest_offset, + cancel, + cancel_waker, + } => { + assert!(cancel_waker.is_none()); + assert!(!cancel); + assert_eq!(0, guest_offset); + if let TransmitIndex::Future(_) = ty { transmit.done = true; } - let code = accept( - store.0, - self, - Reader::Guest { - options: &options, - ty, - address, - count: count.try_into().unwrap(), - }, - )?; - - if let PostWrite::Drop = post_write { - self.concurrent_state_mut(store.0) - .get_mut(transmit_id)? 
- .write = WriteState::Dropped; + let mut future = produce(); + transmit.write = WriteState::HostReady { + produce, + guest_offset: 0, + cancel: false, + cancel_waker: None, + }; + set_guest_ready(concurrent_state)?; + let poll = self.set_tls(store.0, || { + future + .as_mut() + .poll(&mut Context::from_waker(&Waker::noop())) + }); + + match poll { + Poll::Ready(state) => { + let transmit = self.concurrent_state_mut(store.0).get_mut(transmit_id)?; + let WriteState::HostReady { guest_offset, .. } = &mut transmit.write else { + unreachable!(); + }; + let code = return_code(ty.kind(), state?, mem::replace(guest_offset, 0)); + transmit.read = ReadState::Open; + code + } + Poll::Pending => { + self.pipe_to_guest(store.as_context_mut(), ty.kind(), transmit_id, future); + ReturnCode::Blocked + } } - - code } WriteState::Open => { @@ -3319,6 +3517,52 @@ impl ComponentInstance { } impl ConcurrentState { + fn send_write_result( + &mut self, + ty: TransmitIndex, + id: TableId, + handle: u32, + code: ReturnCode, + ) -> Result<()> { + let write_handle = self.get_mut(id)?.write_handle.rep(); + self.set_event( + write_handle, + match ty { + TransmitIndex::Future(ty) => Event::FutureWrite { + code, + pending: Some((ty, handle)), + }, + TransmitIndex::Stream(ty) => Event::StreamWrite { + code, + pending: Some((ty, handle)), + }, + }, + ) + } + + fn send_read_result( + &mut self, + ty: TransmitIndex, + id: TableId, + handle: u32, + code: ReturnCode, + ) -> Result<()> { + let read_handle = self.get_mut(id)?.read_handle.rep(); + self.set_event( + read_handle, + match ty { + TransmitIndex::Future(ty) => Event::FutureRead { + code, + pending: Some((ty, handle)), + }, + TransmitIndex::Stream(ty) => Event::StreamRead { + code, + pending: Some((ty, handle)), + }, + }, + ) + } + fn take_event(&mut self, waitable: u32) -> Result> { Waitable::Transmit(TableId::::new(waitable)).take_event(self) } @@ -3444,6 +3688,17 @@ impl ConcurrentState { (ReturnCode::Dropped(_) | ReturnCode::Completed(_), 
_) => code, _ => unreachable!(), } + } else if let ReadState::HostReady { + cancel, + cancel_waker, + .. + } = &mut self.get_mut(transmit_id)?.read + { + *cancel = true; + if let Some(waker) = cancel_waker.take() { + waker.wake() + } + ReturnCode::Blocked } else { ReturnCode::Cancelled(0) }; @@ -3451,10 +3706,10 @@ impl ConcurrentState { let transmit = self.get_mut(transmit_id)?; match &transmit.write { - WriteState::GuestReady { .. } | WriteState::HostReady { .. } => { + WriteState::GuestReady { .. } => { transmit.write = WriteState::Open; } - + WriteState::HostReady { .. } => todo!("support host write cancellation"), WriteState::Open | WriteState::Dropped => {} } @@ -3488,6 +3743,17 @@ impl ConcurrentState { (ReturnCode::Dropped(_) | ReturnCode::Completed(_), _) => code, _ => unreachable!(), } + } else if let WriteState::HostReady { + cancel, + cancel_waker, + .. + } = &mut self.get_mut(transmit_id)?.write + { + *cancel = true; + if let Some(waker) = cancel_waker.take() { + waker.wake() + } + ReturnCode::Blocked } else { ReturnCode::Cancelled(0) }; @@ -3495,10 +3761,12 @@ impl ConcurrentState { let transmit = self.get_mut(transmit_id)?; match &transmit.read { - ReadState::GuestReady { .. } | ReadState::HostReady { .. } => { + ReadState::GuestReady { .. } => { transmit.read = ReadState::Open; } - + ReadState::HostReady { .. } | ReadState::HostToHost { .. 
} => { + todo!("support host read cancellation") + } ReadState::Open | ReadState::Dropped => {} } diff --git a/crates/wasmtime/src/runtime/component/concurrent/futures_and_streams/buffers.rs b/crates/wasmtime/src/runtime/component/concurrent/futures_and_streams/buffers.rs index b71a99863f31..4f9dea603fe5 100644 --- a/crates/wasmtime/src/runtime/component/concurrent/futures_and_streams/buffers.rs +++ b/crates/wasmtime/src/runtime/component/concurrent/futures_and_streams/buffers.rs @@ -11,9 +11,11 @@ use std::vec::Vec; pub use untyped::*; mod untyped { use super::WriteBuffer; + use crate::vm::SendSyncPtr; use std::any::TypeId; use std::marker; use std::mem; + use std::ptr::NonNull; /// Helper structure to type-erase the `T` in `WriteBuffer`. /// @@ -25,7 +27,7 @@ mod untyped { /// borrow on the original buffer passed in. pub struct UntypedWriteBuffer<'a> { element_type_id: TypeId, - buf: *mut dyn WriteBuffer<()>, + buf: SendSyncPtr>, _marker: marker::PhantomData<&'a mut dyn WriteBuffer<()>>, } @@ -48,11 +50,14 @@ mod untyped { // is safe here because `typed` and `untyped` have the same size // and we're otherwise reinterpreting a raw pointer with a type // parameter to one without one. - buf: unsafe { - let r = ReinterpretWriteBuffer { typed: buf }; - assert_eq!(mem::size_of_val(&r.typed), mem::size_of_val(&r.untyped)); - r.untyped - }, + buf: SendSyncPtr::new( + NonNull::new(unsafe { + let r = ReinterpretWriteBuffer { typed: buf }; + assert_eq!(mem::size_of_val(&r.typed), mem::size_of_val(&r.untyped)); + r.untyped + }) + .unwrap(), + ), _marker: marker::PhantomData, } } @@ -68,7 +73,12 @@ mod untyped { // structure also is proof of valid existence of the original // `&mut WriteBuffer`, so taking the raw pointer back to a safe // reference is valid. 
- unsafe { &mut *ReinterpretWriteBuffer { untyped: self.buf }.typed } + unsafe { + &mut *ReinterpretWriteBuffer { + untyped: self.buf.as_ptr(), + } + .typed + } } } } @@ -210,6 +220,12 @@ pub struct VecBuffer { offset: usize, } +impl Default for VecBuffer { + fn default() -> Self { + Self::with_capacity(0) + } +} + impl VecBuffer { /// Create a new instance with the specified capacity. pub fn with_capacity(capacity: usize) -> Self { diff --git a/crates/wasmtime/src/runtime/component/mod.rs b/crates/wasmtime/src/runtime/component/mod.rs index 4ed9e306f4a8..35f1a27880f6 100644 --- a/crates/wasmtime/src/runtime/component/mod.rs +++ b/crates/wasmtime/src/runtime/component/mod.rs @@ -119,9 +119,10 @@ mod values; pub use self::component::{Component, ComponentExportIndex}; #[cfg(feature = "component-model-async")] pub use self::concurrent::{ - Access, Accessor, AccessorTask, AsAccessor, ErrorContext, FutureReader, FutureWriter, - GuardedFutureReader, GuardedFutureWriter, GuardedStreamReader, GuardedStreamWriter, JoinHandle, - ReadBuffer, StreamReader, StreamWriter, VMComponentAsyncStore, VecBuffer, WriteBuffer, + Access, Accessor, AccessorTask, AsAccessor, Destination, DirectDestination, DirectSource, + ErrorContext, FutureConsumer, FutureProducer, FutureReader, GuardedFutureReader, + GuardedStreamReader, JoinHandle, ReadBuffer, Source, StreamConsumer, StreamProducer, + StreamReader, StreamResult, VMComponentAsyncStore, VecBuffer, WriteBuffer, }; pub use self::func::{ ComponentNamedList, ComponentType, Func, Lift, Lower, TypedFunc, WasmList, WasmStr, diff --git a/crates/wit-bindgen/src/lib.rs b/crates/wit-bindgen/src/lib.rs index 535603298003..f5388d4c6efa 100644 --- a/crates/wit-bindgen/src/lib.rs +++ b/crates/wit-bindgen/src/lib.rs @@ -1527,7 +1527,7 @@ impl Wasmtime { {wt}::component::ResourceType::host::<{camel}>(), move |caller: &{wt}::component::Accessor::, rep| {{ {wt}::component::__internal::Box::pin(async move {{ - let accessor = &caller.with_data(host_getter); + 
let accessor = &caller.with_getter(host_getter); Host{camel}WithStore::drop(accessor, {wt}::component::Resource::new_own(rep)).await }}) }}, @@ -2490,7 +2490,7 @@ impl<'a> InterfaceGenerator<'a> { } if flags.contains(FunctionFlags::STORE) { - uwriteln!(self.src, "let accessor = &caller.with_data(host_getter);"); + uwriteln!(self.src, "let accessor = &caller.with_getter(host_getter);"); } else { self.src .push_str("let host = &mut host_getter(caller.data_mut());\n");