From 548e3989a1c51ec0d2c40a738e63cbfeef83c7d4 Mon Sep 17 00:00:00 2001 From: Joel Dice Date: Fri, 15 Aug 2025 12:05:19 -0600 Subject: [PATCH 01/32] Revamp component model stream/future host API (again) This changes the host APIs for dealing with futures and streams from a "rendezvous"-style API to a callback-oriented one. Previously you would create e.g. a `StreamReader`/`StreamWriter` pair and call their `read` and `write` methods, respectively, and those methods would return `Future`s that resolved when the operation was matched with a corresponding `write` or `read` operation on the other end. With the new API, you instead provide a `StreamProducer` trait implementation when creating the stream, whose `produce` method will be called as soon as a read happens, giving the implementation a chance to respond immediately without making the reader wait for a rendezvous. Likewise, you can match the read end of a stream to a `StreamConsumer` to respond immediately to writes. This model should reduce scheduling overhead and make it easier to e.g. pipe items to/from `AsyncWrite`/`AsyncRead` or `Sink`/`Stream` implementations without needing to explicitly spawn background tasks. In addition, the new API provides direct access to guest read and write buffers for `stream` operations, enabling zero-copy operations. Other changes: - I've removed the `HostTaskOutput`; we were using it to run extra code with access to the store after a host task completes, but we can do that more elegantly inside the future using `tls::get`. This also allowed me to simplify `Instance::poll_until` a bit. - I've removed the `watch_{reader,writer}` functionality; it's not needed now given that the runtime will automatically dispose of the producer or consumer when the other end of the stream or future is closed -- no need for embedder code to manage that. - In order to make `UntypedWriteBuffer` `Send`, I had to wrap its raw pointer `buf` field in a `SendSyncPtr`. 
- I've removed `{Future,Stream}Writer` entirely and moved `Instance::{future,stream}` to `{Future,Stream}Reader::new`, respectively. - I've added a bounds check to the beginnings of `Instance::guest_read` and `Instance::guest_write` so that we need not do it later in `Guest{Source,Destination}::remaining`, meaning those functions can be infallible. Note that I haven't updated `wasmtime-wasi` yet to match; that will happen in one or more follow-up commits. Signed-off-by: Joel Dice --- .../src/resource_stream.rs | 36 +- crates/misc/component-async-tests/src/util.rs | 118 +- .../tests/scenario/round_trip.rs | 2 +- .../tests/scenario/streams.rs | 515 +--- .../tests/scenario/transmit.rs | 165 +- .../src/bin/async_closed_streams.rs | 13 +- .../src/bin/async_poll_stackless.rs | 2 +- .../src/bin/async_poll_synchronous.rs | 2 +- .../src/runtime/component/concurrent.rs | 144 +- .../concurrent/futures_and_streams.rs | 2485 ++++++++--------- .../concurrent/futures_and_streams/buffers.rs | 24 +- crates/wasmtime/src/runtime/component/mod.rs | 7 +- 12 files changed, 1510 insertions(+), 2003 deletions(-) diff --git a/crates/misc/component-async-tests/src/resource_stream.rs b/crates/misc/component-async-tests/src/resource_stream.rs index b545efd34c5d..cb2c1e405e70 100644 --- a/crates/misc/component-async-tests/src/resource_stream.rs +++ b/crates/misc/component-async-tests/src/resource_stream.rs @@ -1,7 +1,7 @@ +use crate::util::MpscProducer; use anyhow::Result; -use wasmtime::component::{ - Accessor, AccessorTask, GuardedStreamWriter, Resource, StreamReader, StreamWriter, -}; +use futures::channel::mpsc; +use wasmtime::component::{Accessor, Resource, StreamReader}; use super::Ctx; @@ -38,29 +38,15 @@ impl bindings::local::local::resource_stream::HostWithStore for Ctx { accessor: &Accessor, count: u32, ) -> wasmtime::Result>> { - struct Task { - tx: StreamWriter>, - - count: u32, - } - - impl AccessorTask> for Task { - async fn run(self, accessor: &Accessor) -> Result<()> { - let 
mut tx = GuardedStreamWriter::new(accessor, self.tx); - for _ in 0..self.count { - let item = accessor.with(|mut view| view.get().table.push(ResourceStreamX))?; - tx.write_all(Some(item)).await; - } - Ok(()) + accessor.with(|mut access| { + let (mut tx, rx) = mpsc::channel(usize::try_from(count).unwrap()); + for _ in 0..count { + tx.try_send(access.get().table.push(ResourceStreamX)?) + .unwrap() } - } - - let (tx, rx) = accessor.with(|mut view| { - let instance = view.instance(); - instance.stream(&mut view) - })?; - accessor.spawn(Task { tx, count }); - Ok(rx) + let instance = access.instance(); + Ok(StreamReader::new(instance, access, MpscProducer::new(rx))) + }) } } diff --git a/crates/misc/component-async-tests/src/util.rs b/crates/misc/component-async-tests/src/util.rs index fb1380603e8f..f7551dad42ae 100644 --- a/crates/misc/component-async-tests/src/util.rs +++ b/crates/misc/component-async-tests/src/util.rs @@ -1,5 +1,13 @@ -use futures::channel::oneshot; +use anyhow::Result; +use futures::{ + SinkExt, StreamExt, + channel::{mpsc, oneshot}, +}; use std::thread; +use wasmtime::component::{ + Accessor, Destination, FutureConsumer, FutureProducer, Lift, Lower, Source, StreamConsumer, + StreamProducer, StreamState, +}; pub async fn sleep(duration: std::time::Duration) { if cfg!(miri) { @@ -21,3 +29,111 @@ pub async fn sleep(duration: std::time::Duration) { tokio::time::sleep(duration).await; } } + +pub struct MpscProducer { + rx: mpsc::Receiver, + closed: bool, +} + +impl MpscProducer { + pub fn new(rx: mpsc::Receiver) -> Self { + Self { rx, closed: false } + } + + fn state(&self) -> StreamState { + if self.closed { + StreamState::Closed + } else { + StreamState::Open + } + } +} + +impl StreamProducer for MpscProducer { + async fn produce( + &mut self, + accessor: &Accessor, + destination: &mut Destination, + ) -> Result { + if let Some(item) = self.rx.next().await { + let item = destination.write(accessor, Some(item)).await?; + assert!(item.is_none()); + } 
else { + self.closed = true; + } + + Ok(self.state()) + } + + async fn when_ready(&mut self, _: &Accessor) -> Result { + Ok(self.state()) + } +} + +pub struct MpscConsumer { + tx: mpsc::Sender, +} + +impl MpscConsumer { + pub fn new(tx: mpsc::Sender) -> Self { + Self { tx } + } + + fn state(&self) -> StreamState { + if self.tx.is_closed() { + StreamState::Closed + } else { + StreamState::Open + } + } +} + +impl StreamConsumer for MpscConsumer { + async fn consume( + &mut self, + accessor: &Accessor, + source: &mut Source<'_, T>, + ) -> Result { + let item = &mut None; + accessor.with(|access| source.read(access, item))?; + _ = self.tx.send(item.take().unwrap()).await; + Ok(self.state()) + } + + async fn when_ready(&mut self, _: &Accessor) -> Result { + Ok(self.state()) + } +} + +pub struct OneshotProducer { + rx: oneshot::Receiver, +} + +impl OneshotProducer { + pub fn new(rx: oneshot::Receiver) -> Self { + Self { rx } + } +} + +impl FutureProducer for OneshotProducer { + async fn produce(self, _: &Accessor) -> Result { + Ok(self.rx.await?) 
+ } +} + +pub struct OneshotConsumer { + tx: oneshot::Sender, +} + +impl OneshotConsumer { + pub fn new(tx: oneshot::Sender) -> Self { + Self { tx } + } +} + +impl FutureConsumer for OneshotConsumer { + async fn consume(self, _: &Accessor, value: T) -> Result<()> { + _ = self.tx.send(value); + Ok(()) + } +} diff --git a/crates/misc/component-async-tests/tests/scenario/round_trip.rs b/crates/misc/component-async-tests/tests/scenario/round_trip.rs index a40dd26f9b15..13e1670f8ec1 100644 --- a/crates/misc/component-async-tests/tests/scenario/round_trip.rs +++ b/crates/misc/component-async-tests/tests/scenario/round_trip.rs @@ -237,7 +237,7 @@ pub async fn test_round_trip( component_async_tests::round_trip::bindings::RoundTrip::new(&mut store, &instance)?; if call_style == 0 || !cfg!(miri) { - // Now do it again using `Instance::run_concurrent`: + // Run the test using `Instance::run_concurrent`: instance .run_concurrent(&mut store, { let inputs_and_outputs = inputs_and_outputs diff --git a/crates/misc/component-async-tests/tests/scenario/streams.rs b/crates/misc/component-async-tests/tests/scenario/streams.rs index c024fc1ae5c5..122153ee44af 100644 --- a/crates/misc/component-async-tests/tests/scenario/streams.rs +++ b/crates/misc/component-async-tests/tests/scenario/streams.rs @@ -1,195 +1,25 @@ use { super::util::{config, make_component}, anyhow::Result, - component_async_tests::{Ctx, closed_streams}, - futures::{ - future::FutureExt, - stream::{FuturesUnordered, StreamExt, TryStreamExt}, + component_async_tests::{ + Ctx, closed_streams, + util::{MpscConsumer, MpscProducer, OneshotConsumer, OneshotProducer}, }, - std::{ - future::{self, Future}, - pin::pin, - sync::{Arc, Mutex}, - task::{Context, Waker}, + futures::{ + SinkExt, StreamExt, + channel::{mpsc, oneshot}, + future, }, + std::sync::{Arc, Mutex}, wasmtime::{ - Engine, Store, Trap, - component::{ - Accessor, GuardedFutureReader, GuardedStreamReader, GuardedStreamWriter, Linker, - ResourceTable, VecBuffer, - 
}, + Engine, Store, + component::{FutureReader, Linker, ResourceTable, StreamReader}, }, wasmtime_wasi::WasiCtxBuilder, }; -#[tokio::test] -pub async fn async_watch_streams() -> Result<()> { - let engine = Engine::new(&config())?; - - let mut store = Store::new( - &engine, - Ctx { - wasi: WasiCtxBuilder::new().inherit_stdio().build(), - table: ResourceTable::default(), - continue_: false, - wakers: Arc::new(Mutex::new(None)), - }, - ); - - let mut linker = Linker::new(&engine); - - wasmtime_wasi::p2::add_to_linker_async(&mut linker)?; - - let component = make_component( - &engine, - &[test_programs_artifacts::ASYNC_CLOSED_STREAMS_COMPONENT], - ) - .await?; - - let instance = linker.instantiate_async(&mut store, &component).await?; - - // Test watching and then dropping the read end of a stream. - let (mut tx, mut rx) = instance.stream::(&mut store)?; - instance - .run_concurrent(&mut store, async |store| { - futures::join!(tx.watch_reader(store), async { rx.close_with(store) }).1 - }) - .await?; - - // Test dropping and then watching the read end of a stream. - let (mut tx, mut rx) = instance.stream::(&mut store)?; - instance - .run_concurrent(&mut store, async |store| { - rx.close_with(store); - tx.watch_reader(store).await; - }) - .await?; - - // Test watching and then dropping the write end of a stream. - let (mut tx, mut rx) = instance.stream::(&mut store)?; - instance - .run_concurrent(&mut store, async |store| { - futures::join!(rx.watch_writer(store), async { tx.close_with(store) }).1 - }) - .await?; - - // Test dropping and then watching the write end of a stream. - let (mut tx, mut rx) = instance.stream::(&mut store)?; - instance - .run_concurrent(&mut store, async |store| { - tx.close_with(store); - rx.watch_writer(store).await; - }) - .await?; - - // Test watching and then dropping the read end of a future. 
- let (mut tx, mut rx) = instance.future::(&mut store, || 42)?; - instance - .run_concurrent(&mut store, async |store| { - futures::join!(tx.watch_reader(store), async { rx.close_with(store) }).1 - }) - .await?; - - // Test dropping and then watching the read end of a future. - let (mut tx, mut rx) = instance.future::(&mut store, || 42)?; - instance - .run_concurrent(&mut store, async |store| { - rx.close_with(store); - tx.watch_reader(store).await; - }) - .await?; - - // Test watching and then dropping the write end of a future. - let (mut tx, mut rx) = instance.future::(&mut store, || 42)?; - instance - .run_concurrent(&mut store, async |store| { - futures::join!(rx.watch_writer(store), async { tx.close_with(store) }).1 - }) - .await?; - - // Test dropping and then watching the write end of a future. - let (mut tx, mut rx) = instance.future::(&mut store, || 42)?; - instance - .run_concurrent(&mut store, async |store| { - tx.close_with(store); - rx.watch_writer(store).await; - }) - .await?; - - enum Event<'a> { - Write(Option>>), - Read( - Option>>, - Option, - ), - } - - // Test watching, then writing to, then dropping, then writing again to the - // read end of a stream. 
- let (tx, rx) = instance.stream(&mut store)?; - instance - .run_concurrent(&mut store, async move |store| -> wasmtime::Result<_> { - let mut tx = GuardedStreamWriter::new(store, tx); - let mut rx = GuardedStreamReader::new(store, rx); - let mut futures = FuturesUnordered::new(); - assert!( - pin!(tx.watch_reader()) - .poll(&mut Context::from_waker(&Waker::noop())) - .is_pending() - ); - futures.push( - async move { - tx.write_all(Some(42)).await; - let w = if tx.is_closed() { None } else { Some(tx) }; - anyhow::Ok(Event::Write(w)) - } - .boxed(), - ); - futures.push( - async move { - let b = rx.read(None).await; - let r = if rx.is_closed() { None } else { Some(rx) }; - Ok(Event::Read(r, b)) - } - .boxed(), - ); - let mut rx = None; - let mut tx = None; - while let Some(event) = futures.try_next().await? { - match event { - Event::Write(None) => unreachable!(), - Event::Write(Some(new_tx)) => tx = Some(new_tx), - Event::Read(None, _) => unreachable!(), - Event::Read(Some(new_rx), mut buffer) => { - assert_eq!(buffer.take(), Some(42)); - rx = Some(new_rx); - } - } - } - drop(rx); - - let mut tx = tx.take().unwrap(); - tx.watch_reader().await; - tx.write_all(Some(42)).await; - assert!(tx.is_closed()); - Ok(()) - }) - .await??; - - Ok(()) -} - #[tokio::test] pub async fn async_closed_streams() -> Result<()> { - test_closed_streams(false).await -} - -#[tokio::test] -pub async fn async_closed_streams_with_watch() -> Result<()> { - test_closed_streams(true).await -} - -pub async fn test_closed_streams(watch: bool) -> Result<()> { let engine = Engine::new(&config())?; let mut store = Store::new( @@ -214,153 +44,53 @@ pub async fn test_closed_streams(watch: bool) -> Result<()> { let instance = linker.instantiate_async(&mut store, &component).await?; - enum StreamEvent<'a> { - FirstWrite(Option>>), - FirstRead(Option>>, Vec), - SecondWrite(Option>>), - GuestCompleted, - } - - enum FutureEvent { - Write(bool), - Read(Option), - WriteIgnored(bool), - GuestCompleted, - } - let 
values = vec![42_u8, 43, 44]; let value = 42_u8; // First, test stream host->host { - let (tx, rx) = instance.stream(&mut store)?; - let values = values.clone(); + let (mut input_tx, input_rx) = mpsc::channel(1); + let (output_tx, mut output_rx) = mpsc::channel(1); + StreamReader::new(instance, &mut store, MpscProducer::new(input_rx)) + .pipe(&mut store, MpscConsumer::new(output_tx)); instance - .run_concurrent(&mut store, async move |store| -> wasmtime::Result<_> { - let mut tx = GuardedStreamWriter::new(store, tx); - let mut rx = GuardedStreamReader::new(store, rx); - - let mut futures = FuturesUnordered::new(); - futures.push({ - let values = values.clone(); - async move { - tx.write_all(VecBuffer::from(values)).await; - anyhow::Ok(StreamEvent::FirstWrite(if tx.is_closed() { - None - } else { - Some(tx) - })) - } - .boxed() - }); - futures.push( - async move { - let b = rx.read(Vec::with_capacity(3)).await; - let r = if rx.is_closed() { None } else { Some(rx) }; - Ok(StreamEvent::FirstRead(r, b)) - } - .boxed(), - ); - - let mut count = 0; - while let Some(event) = futures.try_next().await? 
{ - count += 1; - match event { - StreamEvent::FirstWrite(Some(mut tx)) => { - if watch { - futures.push( - async move { - tx.watch_reader().await; - Ok(StreamEvent::SecondWrite(None)) - } - .boxed(), - ); - } else { - futures.push({ - let values = values.clone(); - async move { - tx.write_all(VecBuffer::from(values)).await; - Ok(StreamEvent::SecondWrite(if tx.is_closed() { - None - } else { - Some(tx) - })) - } - .boxed() - }); - } + .run_concurrent(&mut store, async |_| { + let (a, b) = future::join( + async { + for &value in &values { + input_tx.send(value).await?; } - StreamEvent::FirstWrite(None) => { - panic!("first write should have been accepted") + drop(input_tx); + anyhow::Ok(()) + }, + async { + for &value in &values { + assert_eq!(Some(value), output_rx.next().await); } - StreamEvent::FirstRead(Some(_), results) => { - assert_eq!(values, results); - } - StreamEvent::FirstRead(None, _) => unreachable!(), - StreamEvent::SecondWrite(None) => {} - StreamEvent::SecondWrite(Some(_)) => { - panic!("second write should _not_ have been accepted") - } - StreamEvent::GuestCompleted => unreachable!(), - } - } + assert!(output_rx.next().await.is_none()); + Ok(()) + }, + ) + .await; - assert_eq!(count, 3); - Ok(()) + a.and(b) }) .await??; } // Next, test futures host->host { - let (tx, rx) = instance.future(&mut store, || unreachable!())?; - let (mut tx_ignored, rx_ignored) = instance.future(&mut store, || unreachable!())?; + let (input_tx, input_rx) = oneshot::channel(); + let (output_tx, output_rx) = oneshot::channel(); + FutureReader::new(instance, &mut store, OneshotProducer::new(input_rx)) + .pipe(&mut store, OneshotConsumer::new(output_tx)); instance - .run_concurrent(&mut store, async move |store| { - let rx_ignored = GuardedFutureReader::new(store, rx_ignored); - - let mut futures = FuturesUnordered::new(); - futures.push(tx.write(store, value).map(FutureEvent::Write).boxed()); - futures.push(rx.read(store).map(FutureEvent::Read).boxed()); - if watch { - 
futures.push( - tx_ignored - .watch_reader(store) - .map(|()| FutureEvent::WriteIgnored(false)) - .boxed(), - ); - } else { - futures.push( - tx_ignored - .write(store, value) - .map(FutureEvent::WriteIgnored) - .boxed(), - ); - } - drop(rx_ignored); - - let mut count = 0; - while let Some(event) = futures.next().await { - count += 1; - match event { - FutureEvent::Write(delivered) => { - assert!(delivered); - } - FutureEvent::Read(Some(result)) => { - assert_eq!(value, result); - } - FutureEvent::Read(None) => panic!("read should have succeeded"), - FutureEvent::WriteIgnored(delivered) => { - assert!(!delivered); - } - FutureEvent::GuestCompleted => unreachable!(), - } - } - - assert_eq!(count, 3); + .run_concurrent(&mut store, async |_| { + _ = input_tx.send(value); + assert_eq!(value, output_rx.await?); anyhow::Ok(()) }) .await??; @@ -368,7 +98,8 @@ pub async fn test_closed_streams(watch: bool) -> Result<()> { // Next, test stream host->guest { - let (tx, rx) = instance.stream(&mut store)?; + let (mut tx, rx) = mpsc::channel(1); + let rx = StreamReader::new(instance, &mut store, MpscProducer::new(rx)); let closed_streams = closed_streams::bindings::ClosedStreams::new(&mut store, &instance)?; @@ -376,163 +107,45 @@ pub async fn test_closed_streams(watch: bool) -> Result<()> { instance .run_concurrent(&mut store, async move |accessor| { - let mut tx = GuardedStreamWriter::new(accessor, tx); - - let mut futures = FuturesUnordered::new(); - futures.push( - closed_streams - .local_local_closed() - .call_read_stream(accessor, rx, values.clone()) - .map(|v| v.map(|()| StreamEvent::GuestCompleted)) - .boxed(), - ); - futures.push({ - let values = values.clone(); - async move { - tx.write_all(VecBuffer::from(values)).await; - let w = if tx.is_closed() { None } else { Some(tx) }; - Ok(StreamEvent::FirstWrite(w)) - } - .boxed() - }); - - let mut count = 0; - while let Some(event) = futures.try_next().await? 
{ - count += 1; - match event { - StreamEvent::FirstWrite(Some(mut tx)) => { - if watch { - futures.push( - async move { - tx.watch_reader().await; - Ok(StreamEvent::SecondWrite(None)) - } - .boxed(), - ); - } else { - futures.push({ - let values = values.clone(); - async move { - tx.write_all(VecBuffer::from(values)).await; - let w = if tx.is_closed() { None } else { Some(tx) }; - Ok(StreamEvent::SecondWrite(w)) - } - .boxed() - }); - } - } - StreamEvent::FirstWrite(None) => { - panic!("first write should have been accepted") - } - StreamEvent::FirstRead(_, _) => unreachable!(), - StreamEvent::SecondWrite(None) => {} - StreamEvent::SecondWrite(Some(_)) => { - panic!("second write should _not_ have been accepted") + let (a, b) = future::join( + async { + for &value in &values { + tx.send(value).await?; } - StreamEvent::GuestCompleted => {} - } - } - - assert_eq!(count, 3); - - anyhow::Ok(()) + drop(tx); + Ok(()) + }, + closed_streams.local_local_closed().call_read_stream( + accessor, + rx, + values.clone(), + ), + ) + .await; + + a.and(b) }) .await??; } // Next, test futures host->guest { - let (tx, rx) = instance.future(&mut store, || unreachable!())?; - let (mut tx_ignored, rx_ignored) = instance.future(&mut store, || unreachable!())?; + let (tx, rx) = oneshot::channel(); + let rx = FutureReader::new(instance, &mut store, OneshotProducer::new(rx)); + let (_, rx_ignored) = oneshot::channel(); + let rx_ignored = FutureReader::new(instance, &mut store, OneshotProducer::new(rx_ignored)); let closed_streams = closed_streams::bindings::ClosedStreams::new(&mut store, &instance)?; instance .run_concurrent(&mut store, async move |accessor| { - let mut futures = FuturesUnordered::new(); - futures.push( - closed_streams - .local_local_closed() - .call_read_future(accessor, rx, value, rx_ignored) - .map(|v| v.map(|()| FutureEvent::GuestCompleted)) - .boxed(), - ); - futures.push( - tx.write(accessor, value) - .map(FutureEvent::Write) - .map(Ok) - .boxed(), - ); - if watch { 
- futures.push( - tx_ignored - .watch_reader(accessor) - .map(|()| Ok(FutureEvent::WriteIgnored(false))) - .boxed(), - ); - } else { - futures.push( - tx_ignored - .write(accessor, value) - .map(FutureEvent::WriteIgnored) - .map(Ok) - .boxed(), - ); - } - - let mut count = 0; - while let Some(event) = futures.try_next().await? { - count += 1; - match event { - FutureEvent::Write(delivered) => { - assert!(delivered); - } - FutureEvent::Read(_) => unreachable!(), - FutureEvent::WriteIgnored(delivered) => { - assert!(!delivered); - } - FutureEvent::GuestCompleted => {} - } - } - - assert_eq!(count, 3); - - anyhow::Ok(()) - }) - .await??; - } - - // Next, test futures host->guest again, but this time using the default value when closing the writers. - { - let (mut tx, rx) = instance.future(&mut store, || 42)?; - let (mut tx_ignored, rx_ignored) = instance.future(&mut store, || 42)?; - - let closed_streams = closed_streams::bindings::ClosedStreams::new(&mut store, &instance)?; - - let result = instance - .run_concurrent(&mut store, async move |accessor| { + _ = tx.send(value); closed_streams .local_local_closed() - .call_read_future_post_return(accessor, rx, 42, rx_ignored) - .await?; - - tx.close_with(accessor); - tx_ignored.close_with(accessor); - - future::pending::<()>().await; - - anyhow::Ok(()) + .call_read_future(accessor, rx, value, rx_ignored) + .await }) - .await; - - // As of this writing, passing a future which never resolves to - // `Instance::run_concurrent` and expecting a `Trap::AsyncDeadlock` is - // the only way to join all tasks for the `Instance`, so that's what we - // do: - assert!(matches!( - result.unwrap_err().downcast::(), - Ok(Trap::AsyncDeadlock) - )); + .await??; } Ok(()) diff --git a/crates/misc/component-async-tests/tests/scenario/transmit.rs b/crates/misc/component-async-tests/tests/scenario/transmit.rs index 59a505832fb6..d1d9d0de3804 100644 --- a/crates/misc/component-async-tests/tests/scenario/transmit.rs +++ 
b/crates/misc/component-async-tests/tests/scenario/transmit.rs @@ -6,14 +6,15 @@ use super::util::{config, make_component, test_run, test_run_with_count}; use anyhow::{Result, anyhow}; use cancel::exports::local::local::cancel::Mode; use component_async_tests::transmit::bindings::exports::local::local::transmit::Control; +use component_async_tests::util::{MpscConsumer, MpscProducer, OneshotConsumer, OneshotProducer}; use component_async_tests::{Ctx, sleep, transmit}; use futures::{ - future::FutureExt, - stream::{FuturesUnordered, TryStreamExt}, + FutureExt, SinkExt, StreamExt, TryStreamExt, + channel::{mpsc, oneshot}, + stream::FuturesUnordered, }; use wasmtime::component::{ - Accessor, Component, FutureReader, GuardedFutureReader, GuardedStreamReader, - GuardedStreamWriter, HasSelf, Instance, Linker, ResourceTable, StreamReader, Val, + Accessor, Component, FutureReader, HasSelf, Instance, Linker, ResourceTable, StreamReader, Val, }; use wasmtime::{AsContextMut, Engine, Store}; use wasmtime_wasi::WasiCtxBuilder; @@ -362,59 +363,60 @@ async fn test_transmit_with(component: &str) -> Re let (test, instance) = Test::instantiate(&mut store, &component, &linker).await?; - enum Event<'a, Test: TransmitTest> { + enum Event { Result(Test::Result), - ControlWriteA(Option>>), - ControlWriteB(Option>>), - ControlWriteC(Option>>), + ControlWriteA(mpsc::Sender), + ControlWriteB(mpsc::Sender), + ControlWriteC(mpsc::Sender), ControlWriteD, WriteA, - WriteB(bool), - ReadC( - Option>>, - Option, - ), - ReadD(Option), - ReadNone(Option>>), + ReadC(mpsc::Receiver, Option), + ReadD(mpsc::Receiver, Option), + ReadNone(Option), } - let (control_tx, control_rx) = instance.stream(&mut store)?; - let (caller_stream_tx, caller_stream_rx) = instance.stream(&mut store)?; - let (caller_future1_tx, caller_future1_rx) = instance.future(&mut store, || unreachable!())?; - let (_caller_future2_tx, caller_future2_rx) = instance.future(&mut store, || unreachable!())?; - + let (mut control_tx, 
control_rx) = mpsc::channel(1); + let control_rx = StreamReader::new(instance, &mut store, MpscProducer::new(control_rx)); + let (mut caller_stream_tx, caller_stream_rx) = mpsc::channel(1); + let caller_stream_rx = + StreamReader::new(instance, &mut store, MpscProducer::new(caller_stream_rx)); + let (caller_future1_tx, caller_future1_rx) = oneshot::channel(); + let caller_future1_rx = FutureReader::new( + instance, + &mut store, + OneshotProducer::new(caller_future1_rx), + ); + let (_, caller_future2_rx) = oneshot::channel(); + let caller_future2_rx = FutureReader::new( + instance, + &mut store, + OneshotProducer::new(caller_future2_rx), + ); + let (callee_future1_tx, callee_future1_rx) = oneshot::channel(); + let (callee_stream_tx, callee_stream_rx) = mpsc::channel(1); instance - .run_concurrent(&mut store, async move |accessor| { - let mut control_tx = GuardedStreamWriter::new(accessor, control_tx); - let control_rx = GuardedStreamReader::new(accessor, control_rx); - let mut caller_stream_tx = GuardedStreamWriter::new(accessor, caller_stream_tx); - - let mut futures = FuturesUnordered::< - Pin>> + Send>>, - >::new(); + .run_concurrent(&mut store, async |accessor| { let mut caller_future1_tx = Some(caller_future1_tx); - let mut callee_stream_rx = None; - let mut callee_future1_rx = None; + let mut callee_future1_tx = Some(callee_future1_tx); + let mut callee_future1_rx = Some(callee_future1_rx); + let mut callee_stream_tx = Some(callee_stream_tx); + let mut callee_stream_rx = Some(callee_stream_rx); let mut complete = false; + let mut futures = FuturesUnordered::< + Pin>> + Send>>, + >::new(); futures.push( async move { - control_tx - .write_all(Some(Control::ReadStream("a".into()))) - .await; - let w = if control_tx.is_closed() { - None - } else { - Some(control_tx) - }; - Ok(Event::ControlWriteA(w)) + control_tx.send(Control::ReadStream("a".into())).await?; + Ok(Event::ControlWriteA(control_tx)) } .boxed(), ); futures.push( async move { - 
caller_stream_tx.write_all(Some(String::from("a"))).await; + caller_stream_tx.send(String::from("a")).await?; Ok(Event::WriteA) } .boxed(), @@ -425,7 +427,7 @@ async fn test_transmit_with(component: &str) -> Re accessor, &test, Test::into_params( - control_rx.into(), + control_rx, caller_stream_rx, caller_future1_rx, caller_future2_rx, @@ -438,92 +440,76 @@ async fn test_transmit_with(component: &str) -> Re while let Some(event) = futures.try_next().await? { match event { Event::Result(result) => { - let (stream_rx, future_rx, _) = accessor - .with(|mut store| Test::from_result(&mut store, instance, result))?; - callee_stream_rx = Some(GuardedStreamReader::new(accessor, stream_rx)); - callee_future1_rx = Some(GuardedFutureReader::new(accessor, future_rx)); + accessor.with(|mut store| { + let (callee_stream_rx, callee_future1_rx, _) = + Test::from_result(&mut store, instance, result)?; + callee_stream_rx.pipe( + &mut store, + MpscConsumer::new(callee_stream_tx.take().unwrap()), + ); + callee_future1_rx.pipe( + &mut store, + OneshotConsumer::new(callee_future1_tx.take().unwrap()), + ); + anyhow::Ok(()) + })?; } - Event::ControlWriteA(tx) => { + Event::ControlWriteA(mut control_tx) => { futures.push( async move { - let mut tx = tx.unwrap(); - tx.write_all(Some(Control::ReadFuture("b".into()))).await; - let w = if tx.is_closed() { None } else { Some(tx) }; - Ok(Event::ControlWriteB(w)) + control_tx.send(Control::ReadFuture("b".into())).await?; + Ok(Event::ControlWriteB(control_tx)) } .boxed(), ); } Event::WriteA => { - futures.push( - caller_future1_tx - .take() - .unwrap() - .write(accessor, "b".into()) - .map(Event::WriteB) - .map(Ok) - .boxed(), - ); - } - Event::ControlWriteB(tx) => { + _ = caller_future1_tx.take().unwrap().send("b".into()); + let mut callee_stream_rx = callee_stream_rx.take().unwrap(); futures.push( async move { - let mut tx = tx.unwrap(); - tx.write_all(Some(Control::WriteStream("c".into()))).await; - let w = if tx.is_closed() { None } else { 
Some(tx) }; - Ok(Event::ControlWriteC(w)) + let value = callee_stream_rx.next().await; + Ok(Event::ReadC(callee_stream_rx, value)) } .boxed(), ); } - Event::WriteB(delivered) => { - assert!(delivered); - let mut rx = callee_stream_rx.take().unwrap(); + Event::ControlWriteB(mut control_tx) => { futures.push( async move { - let b = rx.read(None).await; - let r = if rx.is_closed() { None } else { Some(rx) }; - Ok(Event::ReadC(r, b)) + control_tx.send(Control::WriteStream("c".into())).await?; + Ok(Event::ControlWriteC(control_tx)) } .boxed(), ); } - Event::ControlWriteC(tx) => { + Event::ControlWriteC(mut control_tx) => { futures.push( async move { - let mut tx = tx.unwrap(); - tx.write_all(Some(Control::WriteFuture("d".into()))).await; + control_tx.send(Control::WriteFuture("d".into())).await?; Ok(Event::ControlWriteD) } .boxed(), ); } - Event::ReadC(None, _) => unreachable!(), - Event::ReadC(Some(rx), mut value) => { + Event::ReadC(callee_stream_rx, mut value) => { assert_eq!(value.take().as_deref(), Some("c")); futures.push( callee_future1_rx .take() .unwrap() - .read() - .map(Event::ReadD) + .map(|v| Event::ReadD(callee_stream_rx, v.ok())) .map(Ok) .boxed(), ); - callee_stream_rx = Some(rx); } Event::ControlWriteD => {} - Event::ReadD(None) => unreachable!(), - Event::ReadD(Some(value)) => { + Event::ReadD(_, None) => unreachable!(), + Event::ReadD(mut callee_stream_rx, Some(value)) => { assert_eq!(&value, "d"); - let mut rx = callee_stream_rx.take().unwrap(); futures.push( - async move { - rx.read(None).await; - let r = if rx.is_closed() { None } else { Some(rx) }; - Ok(Event::ReadNone(r)) - } - .boxed(), + async move { Ok(Event::ReadNone(callee_stream_rx.next().await)) } + .boxed(), ); } Event::ReadNone(Some(_)) => unreachable!(), @@ -537,5 +523,6 @@ async fn test_transmit_with(component: &str) -> Re anyhow::Ok(()) }) - .await? 
+ .await??; + Ok(()) } diff --git a/crates/test-programs/src/bin/async_closed_streams.rs b/crates/test-programs/src/bin/async_closed_streams.rs index 459b542fb290..c794279043ce 100644 --- a/crates/test-programs/src/bin/async_closed_streams.rs +++ b/crates/test-programs/src/bin/async_closed_streams.rs @@ -11,6 +11,7 @@ mod bindings { use { bindings::exports::local::local::closed::Guest, + std::mem, wit_bindgen_rt::async_support::{self, FutureReader, StreamReader, StreamResult}, }; @@ -18,9 +19,15 @@ struct Component; impl Guest for Component { async fn read_stream(mut rx: StreamReader, expected: Vec) { - let (result, buf) = rx.read(Vec::with_capacity(expected.len())).await; - assert_eq!(result, StreamResult::Complete(expected.len())); - assert_eq!(buf, expected); + let mut buffer = Vec::with_capacity(expected.len()); + loop { + let (result, buf) = rx.read(mem::replace(&mut buffer, Vec::new())).await; + buffer = buf; + if !matches!(result, StreamResult::Complete(_)) { + break; + } + } + assert_eq!(buffer, expected); } async fn read_future(rx: FutureReader, expected: u8, _rx_ignored: FutureReader) { diff --git a/crates/test-programs/src/bin/async_poll_stackless.rs b/crates/test-programs/src/bin/async_poll_stackless.rs index 7ad3908a3e71..43a860772ee3 100644 --- a/crates/test-programs/src/bin/async_poll_stackless.rs +++ b/crates/test-programs/src/bin/async_poll_stackless.rs @@ -121,7 +121,7 @@ unsafe extern "C" fn callback_run(event0: u32, event1: u32, event2: u32) -> u32 assert_eq!(event0, EVENT_NONE); let set = *set; - assert!(async_when_ready() == STATUS_RETURNED); + assert_eq!(async_when_ready(), STATUS_RETURNED); *state = State::S5 { set }; diff --git a/crates/test-programs/src/bin/async_poll_synchronous.rs b/crates/test-programs/src/bin/async_poll_synchronous.rs index 962b5e7ba70c..f5450ae2a6ce 100644 --- a/crates/test-programs/src/bin/async_poll_synchronous.rs +++ b/crates/test-programs/src/bin/async_poll_synchronous.rs @@ -64,7 +64,7 @@ impl Guest for Component 
{ assert_eq!(waitable_set_poll(set), (EVENT_NONE, 0, 0)); - assert!(async_when_ready() == STATUS_RETURNED); + assert_eq!(async_when_ready(), STATUS_RETURNED); assert_eq!(waitable_set_poll(set), (EVENT_NONE, 0, 0)); diff --git a/crates/wasmtime/src/runtime/component/concurrent.rs b/crates/wasmtime/src/runtime/component/concurrent.rs index b34ba9ca10bf..aaed977f8ed1 100644 --- a/crates/wasmtime/src/runtime/component/concurrent.rs +++ b/crates/wasmtime/src/runtime/component/concurrent.rs @@ -89,9 +89,9 @@ use wasmtime_environ::component::{ pub use abort::JoinHandle; pub use futures_and_streams::{ - ErrorContext, FutureReader, FutureWriter, GuardedFutureReader, GuardedFutureWriter, - GuardedStreamReader, GuardedStreamWriter, ReadBuffer, StreamReader, StreamWriter, VecBuffer, - WriteBuffer, + Destination, ErrorContext, FutureConsumer, FutureProducer, FutureReader, GuardedFutureReader, + GuardedStreamReader, GuestDestination, GuestSource, ReadBuffer, Source, StreamConsumer, + StreamProducer, StreamReader, StreamState, VecBuffer, WriteBuffer, }; pub(crate) use futures_and_streams::{ ResourcePair, lower_error_context_to_index, lower_future_to_index, lower_stream_to_index, @@ -1146,12 +1146,9 @@ impl Instance { // Create an "abortable future" here where internally the future will // hook calls to poll and possibly spawn more background tasks on each // iteration. 
- let (handle, future) = - JoinHandle::run(async move { HostTaskOutput::Result(task.run(&accessor).await) }); + let (handle, future) = JoinHandle::run(async move { task.run(&accessor).await }); self.concurrent_state_mut(store.0) - .push_future(Box::pin(async move { - future.await.unwrap_or(HostTaskOutput::Result(Ok(()))) - })); + .push_future(Box::pin(async move { future.await.unwrap_or(Ok(())) })); handle } @@ -1195,21 +1192,8 @@ impl Instance { let next = match self.set_tls(store.0, || next.as_mut().poll(cx)) { Poll::Ready(Some(output)) => { match output { - HostTaskOutput::Result(Err(e)) => return Poll::Ready(Err(e)), - HostTaskOutput::Result(Ok(())) => {} - HostTaskOutput::Function(fun) => { - // Defer calling this function to a worker fiber - // in case it involves calling a guest realloc - // function as part of a lowering operation. - // - // TODO: This isn't necessary for _all_ - // `HostOutput::Function`s, so we could optimize - // by adding another variant to `HostOutput` to - // distinguish which ones need it and which - // don't. - self.concurrent_state_mut(store.0) - .push_high_priority(WorkItem::WorkerFunction(Mutex::new(fun))) - } + Err(e) => return Poll::Ready(Err(e)), + Ok(()) => {} } Poll::Ready(true) } @@ -2381,32 +2365,8 @@ impl Instance { let task = state.push(HostTask::new(caller_instance, Some(join_handle)))?; log::trace!("new host task child of {caller:?}: {task:?}"); - let token = StoreToken::new(store.as_context_mut()); - - // Map the output of the future to a `HostTaskOutput` responsible for - // lowering the result into the guest's stack and memory, as well as - // notifying any waiters that the task returned. - let mut future = Box::pin(async move { - let result = match future.await { - Some(result) => result, - // Task was cancelled; nothing left to do. 
- None => return HostTaskOutput::Result(Ok(())), - }; - HostTaskOutput::Function(Box::new(move |store, instance| { - let mut store = token.as_context_mut(store); - lower(store.as_context_mut(), instance, result?)?; - let state = instance.concurrent_state_mut(store.0); - state.get_mut(task)?.join_handle.take(); - Waitable::Host(task).set_event( - state, - Some(Event::Subtask { - status: Status::Returned, - }), - )?; - Ok(()) - })) - }); + let mut future = Box::pin(future); // Finally, poll the future. We can use a dummy `Waker` here because // we'll add the future to `ConcurrentState::futures` and poll it @@ -2419,10 +2379,11 @@ impl Instance { }); Ok(match poll { - Poll::Ready(output) => { + Poll::Ready(None) => unreachable!(), + Poll::Ready(Some(result)) => { // It finished immediately; lower the result and delete the // task. - output.consume(store.0, self)?; + lower(store.as_context_mut(), self, result?)?; log::trace!("delete host task {task:?} (already ready)"); self.concurrent_state_mut(store.0).delete(task)?; None @@ -2431,6 +2392,39 @@ impl Instance { // It hasn't finished yet; add the future to // `ConcurrentState::futures` so it will be polled by the event // loop and allocate a waitable handle to return to the guest. + + // Wrap the future in a closure responsible for lowering the result into + // the guest's stack and memory, as well as notifying any waiters that + // the task returned. + let future = Box::pin(async move { + let result = match future.await { + Some(result) => result?, + // Task was cancelled; nothing left to do. + None => return Ok(()), + }; + tls::get(move |store| { + // Here we schedule a task to run on a worker fiber to do + // the lowering since it may involve a call to the guest's + // realloc function. This is necessary because calling the + // guest while there are host embedder frames on the stack + // is unsound. 
+ self.concurrent_state_mut(store).push_high_priority( + WorkItem::WorkerFunction(Mutex::new(Box::new(move |store, _| { + lower(token.as_context_mut(store), self, result)?; + let state = self.concurrent_state_mut(store); + state.get_mut(task)?.join_handle.take(); + Waitable::Host(task).set_event( + state, + Some(Event::Subtask { + status: Status::Returned, + }), + ) + }))), + ); + Ok(()) + }) + }); + self.concurrent_state_mut(store.0).push_future(future); let handle = self.id().get_mut(store.0).guest_tables().0[caller_instance] .subtask_insert_host(task.rep())?; @@ -2488,13 +2482,14 @@ impl Instance { log::trace!("new host task child of {caller:?}: {task:?}"); - // Map the output of the future to a `HostTaskOutput` which will take - // care of stashing the result in `GuestTask::result` and resuming this - // fiber when the host task completes. - let mut future = Box::pin(future.map(move |result| { - HostTaskOutput::Function(Box::new(move |store, instance| { - let state = instance.concurrent_state_mut(store); - state.get_mut(caller)?.result = Some(Box::new(result?) as _); + // Wrap the future in a closure which will take care of stashing the + // result in `GuestTask::result` and resuming this fiber when the host + // task completes. + let mut future = Box::pin(async move { + let result = future.await?; + tls::get(move |store| { + let state = self.concurrent_state_mut(store); + state.get_mut(caller)?.result = Some(Box::new(result) as _); Waitable::Host(task).set_event( state, @@ -2504,8 +2499,8 @@ impl Instance { )?; Ok(()) - })) - })) as HostTaskFuture; + }) + }) as HostTaskFuture; // Finally, poll the future. We can use a dummy `Waker` here because // we'll add the future to `ConcurrentState::futures` and poll it @@ -2518,17 +2513,16 @@ impl Instance { }); match poll { - Poll::Ready(output) => { - // It completed immediately; run the `HostTaskOutput` function - // to stash the result and delete the task. 
- output.consume(store, self)?; + Poll::Ready(result) => { + // It completed immediately; check the result and delete the task. + result?; log::trace!("delete host task {task:?} (already ready)"); self.concurrent_state_mut(store).delete(task)?; } Poll::Pending => { // It did not complete immediately; add it to // `ConcurrentState::futures` so it will be polled via the event - // loop, then use `GuestTask::sync_call_set` to wait for the + // loop; then use `GuestTask::sync_call_set` to wait for the // task to complete, suspending the current fiber until it does // so. let state = self.concurrent_state_mut(store); @@ -3431,27 +3425,7 @@ impl VMComponentAsyncStore for StoreInner { } } -/// Represents the output of a host task or background task. -pub(crate) enum HostTaskOutput { - /// A plain result - Result(Result<()>), - /// A function to be run after the future completes (e.g. post-processing - /// which requires access to the store and instance). - Function(Box Result<()> + Send>), -} - -impl HostTaskOutput { - /// Retrieve the result of the host or background task, running the - /// post-processing function if present. - fn consume(self, store: &mut dyn VMStore, instance: Instance) -> Result<()> { - match self { - Self::Function(fun) => fun(store, instance), - Self::Result(result) => result, - } - } -} - -type HostTaskFuture = Pin + Send + 'static>>; +type HostTaskFuture = Pin> + Send + 'static>>; /// Represents the state of a pending host task. 
struct HostTask { diff --git a/crates/wasmtime/src/runtime/component/concurrent/futures_and_streams.rs b/crates/wasmtime/src/runtime/component/concurrent/futures_and_streams.rs index 327431a24fa3..b679ba3c8a8b 100644 --- a/crates/wasmtime/src/runtime/component/concurrent/futures_and_streams.rs +++ b/crates/wasmtime/src/runtime/component/concurrent/futures_and_streams.rs @@ -1,6 +1,6 @@ use super::table::{TableDebug, TableId}; use super::{Event, GlobalErrorContextRefCount, Waitable, WaitableCommon}; -use crate::component::concurrent::{ConcurrentState, WorkItem}; +use crate::component::concurrent::{Accessor, ConcurrentState, JoinHandle, WorkItem, tls}; use crate::component::func::{self, LiftContext, LowerContext, Options}; use crate::component::matching::InstanceType; use crate::component::values::{ErrorContextAny, FutureAny, StreamAny}; @@ -9,20 +9,20 @@ use crate::store::{StoreOpaque, StoreToken}; use crate::vm::VMStore; use crate::vm::component::{ComponentInstance, HandleTable, TransmitLocalState}; use crate::{AsContextMut, StoreContextMut, ValRaw}; -use anyhow::{Context, Result, anyhow, bail}; +use anyhow::{Context as _, Result, anyhow, bail}; use buffers::Extender; use buffers::UntypedWriteBuffer; +use futures::FutureExt; use futures::channel::oneshot; use std::boxed::Box; use std::fmt; -use std::future; use std::iter; use std::marker::PhantomData; use std::mem::{self, MaybeUninit}; use std::pin::Pin; use std::string::{String, ToString}; use std::sync::{Arc, Mutex}; -use std::task::{Poll, Waker}; +use std::task::{Context, Poll, Waker}; use std::vec::Vec; use wasmtime_environ::component::{ CanonicalAbiInfo, ComponentTypes, InterfaceType, OptionsIndex, @@ -108,22 +108,6 @@ impl TransmitIndex { } } -/// Action to take after writing -enum PostWrite { - /// Continue performing writes - Continue, - /// Drop the channel post-write - Drop, -} - -/// Represents the result of a host-initiated stream or future read or write. 
-struct HostResult { - /// The buffer provided when reading or writing. - buffer: B, - /// Whether the other end of the stream or future has been dropped. - dropped: bool, -} - /// Retrieve the payload type of the specified stream or future, or `None` if it /// has no payload type. fn payload(ty: TransmitIndex, types: &Arc) -> Option { @@ -146,198 +130,71 @@ fn get_mut_by_index_from( } } -/// Complete a write initiated by a host-owned future or stream by matching it -/// with the specified `Reader`. -fn accept_reader, U: 'static>( +fn lower, U: 'static>( mut store: StoreContextMut, instance: Instance, - reader: Reader, - mut buffer: B, - kind: TransmitKind, -) -> Result<(HostResult, ReturnCode)> { - Ok(match reader { - Reader::Guest { - options, - ty, - address, - count, - } => { - let types = instance.id().get(store.0).component().types().clone(); - let count = buffer.remaining().len().min(count); - - let lower = &mut if T::MAY_REQUIRE_REALLOC { - LowerContext::new - } else { - LowerContext::new_without_realloc - }(store.as_context_mut(), options, &types, instance); - - if address % usize::try_from(T::ALIGN32)? != 0 { - bail!("read pointer not aligned"); - } - lower - .as_slice_mut() - .get_mut(address..) 
- .and_then(|b| b.get_mut(..T::SIZE32 * count)) - .ok_or_else(|| anyhow::anyhow!("read pointer out of bounds of memory"))?; - - if let Some(ty) = payload(ty, &types) { - T::linear_store_list_to_memory(lower, ty, address, &buffer.remaining()[..count])?; - } - - buffer.skip(count); - ( - HostResult { - buffer, - dropped: false, - }, - ReturnCode::completed(kind, count.try_into().unwrap()), - ) - } - Reader::Host { accept } => { - let count = buffer.remaining().len(); - let mut untyped = UntypedWriteBuffer::new(&mut buffer); - let count = accept(&mut untyped, count); - ( - HostResult { - buffer, - dropped: false, - }, - ReturnCode::completed(kind, count.try_into().unwrap()), - ) + options: &Options, + ty: TransmitIndex, + address: usize, + count: usize, + buffer: &mut B, +) -> Result<()> { + let types = instance.id().get(store.0).component().types().clone(); + let count = buffer.remaining().len().min(count); + + let lower = &mut if T::MAY_REQUIRE_REALLOC { + LowerContext::new + } else { + LowerContext::new_without_realloc + }(store.as_context_mut(), options, &types, instance); + + if address % usize::try_from(T::ALIGN32)? != 0 { + bail!("read pointer not aligned"); + } + lower + .as_slice_mut() + .get_mut(address..) + .and_then(|b| b.get_mut(..T::SIZE32 * count)) + .ok_or_else(|| anyhow::anyhow!("read pointer out of bounds of memory"))?; + + if let Some(ty) = payload(ty, &types) { + T::linear_store_list_to_memory(lower, ty, address, &buffer.remaining()[..count])?; + } + + buffer.skip(count); + + Ok(()) +} + +fn lift, U>( + lift: &mut LiftContext<'_>, + ty: Option, + buffer: &mut B, + address: usize, + count: usize, +) -> Result<()> { + let count = count.min(buffer.remaining_capacity()); + if T::IS_RUST_UNIT_TYPE { + // SAFETY: `T::IS_RUST_UNIT_TYPE` is only true for `()`, a + // zero-sized type, so `MaybeUninit::uninit().assume_init()` + // is a valid way to populate the zero-sized buffer. 
+ buffer.extend( + iter::repeat_with(|| unsafe { MaybeUninit::uninit().assume_init() }).take(count), + ) + } else { + let ty = ty.unwrap(); + if address % usize::try_from(T::ALIGN32)? != 0 { + bail!("write pointer not aligned"); } - Reader::End => ( - HostResult { - buffer, - dropped: true, - }, - ReturnCode::Dropped(0), - ), - }) -} - -/// Complete a read initiated by a host-owned future or stream by matching it with the -/// specified `Writer`. -fn accept_writer, U>( - writer: Writer, - mut buffer: B, - kind: TransmitKind, -) -> Result<(HostResult, ReturnCode)> { - Ok(match writer { - Writer::Guest { - lift, - ty, - address, - count, - } => { - let count = count.min(buffer.remaining_capacity()); - if T::IS_RUST_UNIT_TYPE { - // SAFETY: `T::IS_RUST_UNIT_TYPE` is only true for `()`, a - // zero-sized type, so `MaybeUninit::uninit().assume_init()` - // is a valid way to populate the zero-sized buffer. - buffer.extend( - iter::repeat_with(|| unsafe { MaybeUninit::uninit().assume_init() }) - .take(count), - ) - } else { - let ty = ty.unwrap(); - if address % usize::try_from(T::ALIGN32)? != 0 { - bail!("write pointer not aligned"); - } - lift.memory() - .get(address..) - .and_then(|b| b.get(..T::SIZE32 * count)) - .ok_or_else(|| anyhow::anyhow!("write pointer out of bounds of memory"))?; + lift.memory() + .get(address..) + .and_then(|b| b.get(..T::SIZE32 * count)) + .ok_or_else(|| anyhow::anyhow!("write pointer out of bounds of memory"))?; - let list = &WasmList::new(address, count, lift, ty)?; - T::linear_lift_into_from_memory(lift, list, &mut Extender(&mut buffer))? 
- } - ( - HostResult { - buffer, - dropped: false, - }, - ReturnCode::completed(kind, count.try_into().unwrap()), - ) - } - Writer::Host { - buffer: input, - count, - } => { - let count = count.min(buffer.remaining_capacity()); - buffer.move_from(input.get_mut::(), count); - ( - HostResult { - buffer, - dropped: false, - }, - ReturnCode::completed(kind, count.try_into().unwrap()), - ) - } - Writer::End => ( - HostResult { - buffer, - dropped: true, - }, - ReturnCode::Dropped(0), - ), - }) -} - -/// Return a `Future` which will resolve once the reader end corresponding to -/// the specified writer end of a future or stream is dropped. -async fn watch_reader(accessor: impl AsAccessor, instance: Instance, id: TableId) { - future::poll_fn(|cx| { - accessor - .as_accessor() - .with(|mut access| { - let concurrent_state = instance.concurrent_state_mut(access.as_context_mut().0); - let state_id = concurrent_state.get(id)?.state; - let state = concurrent_state.get_mut(state_id)?; - anyhow::Ok(if matches!(&state.read, ReadState::Dropped) { - Poll::Ready(()) - } else { - state.reader_watcher = Some(cx.waker().clone()); - Poll::Pending - }) - }) - .unwrap_or(Poll::Ready(())) - }) - .await -} - -/// Return a `Future` which will resolve once the writer end corresponding to -/// the specified reader end of a future or stream is dropped. -async fn watch_writer(accessor: impl AsAccessor, instance: Instance, id: TableId) { - future::poll_fn(|cx| { - accessor - .as_accessor() - .with(|mut access| { - let concurrent_state = instance.concurrent_state_mut(access.as_context_mut().0); - let state_id = concurrent_state.get(id)?.state; - let state = concurrent_state.get_mut(state_id)?; - anyhow::Ok( - if matches!( - &state.write, - WriteState::Dropped - | WriteState::GuestReady { - post_write: PostWrite::Drop, - .. - } - | WriteState::HostReady { - post_write: PostWrite::Drop, - .. 
- } - ) { - Poll::Ready(()) - } else { - state.writer_watcher = Some(cx.waker().clone()); - Poll::Pending - }, - ) - }) - .unwrap_or(Poll::Ready(())) - }) - .await + let list = &WasmList::new(address, count, lift, ty)?; + T::linear_lift_into_from_memory(lift, list, &mut Extender(buffer))? + } + Ok(()) } /// Represents the state associated with an error context @@ -355,200 +212,473 @@ pub(super) struct FlatAbi { pub(super) align: u32, } -/// Represents the writable end of a Component Model `future`. -/// -/// Note that `FutureWriter` instances must be disposed of using either `write` -/// or `close`; otherwise the in-store representation will leak and the reader -/// end will hang indefinitely. Consider using [`GuardedFutureWriter`] to -/// ensure that disposal happens automatically. -pub struct FutureWriter { - default: fn() -> T, - id: TableId, +/// Represents the buffer for a host- or guest-initiated stream read. +pub struct Destination { instance: Instance, + kind: TransmitKind, + id: TableId, + _phantom: PhantomData T>, } -impl FutureWriter { - fn new(default: fn() -> T, id: TableId, instance: Instance) -> Self { - Self { - default, - id, - instance, +impl Destination { + /// Deliver zero or more items to the reader. + pub async fn write(&mut self, accessor: A, mut buffer: B) -> Result + where + T: func::Lower + 'static, + B: WriteBuffer, + { + let accessor = accessor.as_accessor(); + let (read, guest_offset) = accessor.with(|mut access| { + let transmit = self + .instance + .concurrent_state_mut(access.as_context_mut().0) + .get_mut(self.id)?; + + let guest_offset = if let &WriteState::HostReady { guest_offset, .. 
} = &transmit.write + { + Some(guest_offset) + } else { + None + }; + + anyhow::Ok(( + mem::replace(&mut transmit.read, ReadState::Open), + guest_offset, + )) + })?; + + match read { + ReadState::GuestReady { + ty, + flat_abi, + options, + address, + count, + handle, + } => { + let guest_offset = guest_offset.unwrap(); + + if let TransmitKind::Future = self.kind { + accessor.with(|mut access| { + self.instance + .concurrent_state_mut(access.as_context_mut().0) + .get_mut(self.id)? + .done = true; + anyhow::Ok(()) + })?; + } + + let old_remaining = buffer.remaining().len(); + let instance = self.instance; + let accept = move |mut store: StoreContextMut| { + lower::( + store.as_context_mut(), + instance, + &options, + ty, + address + (T::SIZE32 * guest_offset), + count - guest_offset, + &mut buffer, + )?; + anyhow::Ok(buffer) + }; + + let buffer = if T::MAY_REQUIRE_REALLOC { + // For payloads which may require a realloc call, use a + // oneshot::channel and background task. This is necessary + // because calling the guest while there are host embedder + // frames on the stack is unsound. + let (tx, rx) = oneshot::channel(); + accessor.with(move |mut access| { + let mut store = access.as_context_mut(); + let token = StoreToken::new(store.as_context_mut()); + instance.concurrent_state_mut(store.0).push_high_priority( + WorkItem::WorkerFunction(Mutex::new(Box::new(move |store, _| { + _ = tx.send(accept(token.as_context_mut(store))?); + Ok(()) + }))), + ) + }); + rx.await? + } else { + // Optimize flat payloads (i.e. those which do not require + // calling the guest's realloc function) by lowering + // directly instead of using a oneshot::channel and + // background task. + accessor.with(|mut access| accept(access.as_context_mut()))? 
+ }; + + accessor.with(|mut access| { + let count = old_remaining - buffer.remaining().len(); + + let transmit = self + .instance + .concurrent_state_mut(access.as_context_mut().0) + .get_mut(self.id)?; + + let WriteState::HostReady { guest_offset, .. } = &mut transmit.write else { + unreachable!(); + }; + + *guest_offset += count; + + transmit.read = ReadState::GuestReady { + ty, + flat_abi, + options, + address, + count, + handle, + }; + + anyhow::Ok(()) + })?; + + Ok(buffer) + } + + ReadState::HostToHost { accept } => { + let state = accept(&mut UntypedWriteBuffer::new(&mut buffer)).await?; + accessor.with(|mut access| { + self.instance + .concurrent_state_mut(access.as_context_mut().0) + .get_mut(self.id)? + .read = match state { + StreamState::Closed => ReadState::Dropped, + StreamState::Open => ReadState::HostToHost { accept }, + }; + + anyhow::Ok(()) + })?; + Ok(buffer) + } + + _ => unreachable!(), } } +} - /// Write the specified value to this `future`. - /// - /// The returned `Future` will yield `true` if the read end accepted the - /// value; otherwise it will return `false`, meaning the read end was dropped - /// before the value could be delivered. - /// - /// # Panics - /// - /// Panics if the store that the [`Accessor`] is derived from does not own - /// this future. - pub async fn write(self, accessor: impl AsAccessor, value: T) -> bool - where - T: func::Lower + Send + Sync + 'static, - { - self.guard(accessor).write(value).await +impl Destination { + /// Return a `GuestDestination` view of `self` if the guest is reading. + pub fn as_guest_destination<'a, D>( + &'a mut self, + store: StoreContextMut<'a, D>, + ) -> Option> { + if let ReadState::GuestReady { .. 
} = self + .instance + .concurrent_state_mut(store.0) + .get_mut(self.id) + .unwrap() + .read + { + Some(GuestDestination { + instance: self.instance, + id: self.id, + store, + }) + } else { + None + } } +} - /// Mut-ref signature instead of by-value signature for - /// `GuardedFutureWriter` to more easily call. - async fn write_(&mut self, accessor: impl AsAccessor, value: T) -> bool - where - T: func::Lower + Send + Sync + 'static, - { - let accessor = accessor.as_accessor(); +/// Represents a guest read from a `stream`, providing direct access to the +/// guest's buffer. +pub struct GuestDestination<'a, D: 'static> { + instance: Instance, + id: TableId, + store: StoreContextMut<'a, D>, +} - let result = self +impl GuestDestination<'_, D> { + /// Provide direct access to the guest's buffer. + pub fn remaining(&mut self) -> &mut [u8] { + let transmit = self .instance - .host_write_async(accessor, self.id, Some(value), TransmitKind::Future) - .await; + .concurrent_state_mut(self.store.as_context_mut().0) + .get_mut(self.id) + .unwrap(); - match result { - Ok(HostResult { dropped, .. }) => !dropped, - Err(_) => todo!("guarantee buffer recovery if `host_write` fails"), - } - } + let &ReadState::GuestReady { + address, + count, + options, + .. + } = &transmit.read + else { + unreachable!() + }; - /// Wait for the read end of this `future` is dropped. - /// - /// The [`Accessor`] provided can be acquired from [`Instance::run_concurrent`] or - /// from within a host function for example. - /// - /// # Panics - /// - /// Panics if the store that the [`Accessor`] is derived from does not own - /// this future. - pub async fn watch_reader(&mut self, accessor: impl AsAccessor) { - watch_reader(accessor, self.instance, self.id).await + let &WriteState::HostReady { guest_offset, .. } = &transmit.write else { + unreachable!() + }; + + options + .memory_mut(self.store.0) + .get_mut((address + guest_offset)..) 
+ .and_then(|b| b.get_mut(..(count - guest_offset))) + .unwrap() } - /// Close this `FutureWriter`, writing the default value. - /// - /// # Panics + /// Mark the specified number of bytes as written to the guest's buffer. /// - /// Panics if the store that the [`Accessor`] is derived from does not own - /// this future. Usage of this future after calling `close` will also cause - /// a panic. - pub fn close(&mut self, mut store: impl AsContextMut) - where - T: func::Lower + Send + Sync + 'static, - { - let id = mem::replace(&mut self.id, TableId::new(0)); - let default = self.default; - self.instance - .host_drop_writer(store.as_context_mut(), id, Some(&move || Ok(default()))) + /// This will panic if the count is larger than the size of the + /// buffer returned by `Self::remaining`. + pub fn mark_written(&mut self, count: usize) { + let transmit = self + .instance + .concurrent_state_mut(self.store.as_context_mut().0) + .get_mut(self.id) .unwrap(); - } - /// Convenience method around [`Self::close`]. - pub fn close_with(&mut self, accessor: impl AsAccessor) - where - T: func::Lower + Send + Sync + 'static, - { - accessor.as_accessor().with(|access| self.close(access)) + let ReadState::GuestReady { + count: read_count, .. + } = &transmit.read + else { + unreachable!() + }; + + let WriteState::HostReady { guest_offset, .. } = &mut transmit.write else { + unreachable!() + }; + + if *guest_offset + count > *read_count { + panic!("write count ({count}) must be less than or equal to read count ({read_count})") + } else { + *guest_offset += count; + } } +} - /// Returns a [`GuardedFutureWriter`] which will auto-close this future on - /// drop and clean it up from the store. +/// Represents the state of a `Stream{Producer,Consumer}`. +#[derive(Copy, Clone, Debug)] +pub enum StreamState { + /// The producer or consumer may be able to produce or consume more items, + /// respectively. 
+ Open, + /// The producer or consumer is _not_ able to produce or consume more items, + /// respectively. + Closed, +} + +/// Represents the host-owned write end of a stream. +pub trait StreamProducer: Send + 'static { + /// Handle a host- or guest-initiated read by delivering zero or more items + /// to the specified destination. /// - /// Note that the `accessor` provided must own this future and is - /// additionally transferred to the `GuardedFutureWriter` return value. - pub fn guard(self, accessor: A) -> GuardedFutureWriter + /// The returned future will resolve to `Ok(StreamState::Closed)` if and + /// when this producer cannot produce any more items. + fn produce( + &mut self, + accessor: &Accessor, + destination: &mut Destination, + ) -> impl Future> + Send; + + /// Handle a guest-initiated zero-length read by returning a future which + /// resolves once this producer is either ready to produce more items or is + /// closed. + fn when_ready( + &mut self, + accessor: &Accessor, + ) -> impl Future> + Send; +} + +/// Represents the buffer for a host- or guest-initiated stream write. +pub struct Source<'a, T> { + instance: Instance, + id: TableId, + host_buffer: Option<&'a mut dyn WriteBuffer>, +} + +impl Source<'_, T> { + /// Accept zero or more items from the writer. + pub fn read(&mut self, mut store: S, buffer: &mut B) -> Result<()> where - T: func::Lower + Send + Sync + 'static, - A: AsAccessor, + T: func::Lift + 'static, + B: ReadBuffer, { - GuardedFutureWriter::new(accessor, self) - } -} + if let Some(input) = &mut self.host_buffer { + let count = input.remaining().len().min(buffer.remaining_capacity()); + buffer.move_from(*input, count); + } else { + let store = store.as_context_mut(); + let transmit = self + .instance + .concurrent_state_mut(store.0) + .get_mut(self.id)?; -/// A [`FutureWriter`] paired with an [`Accessor`]. -/// -/// This is an RAII wrapper around [`FutureWriter`] that ensures it is closed -/// when dropped. 
This can be created through [`GuardedFutureWriter::new`] or -/// [`FutureWriter::guard`]. -pub struct GuardedFutureWriter -where - T: func::Lower + Send + Sync + 'static, - A: AsAccessor, -{ - // This field is `None` to implement the conversion from this guard back to - // `FutureWriter`. When `None` is seen in the destructor it will cause the - // destructor to do nothing. - writer: Option>, - accessor: A, -} + let &ReadState::HostReady { guest_offset, .. } = &transmit.read else { + unreachable!(); + }; -impl GuardedFutureWriter -where - T: func::Lower + Send + Sync + 'static, - A: AsAccessor, -{ - /// Create a new `GuardedFutureWriter` with the specified `accessor` and - /// `writer`. - pub fn new(accessor: A, writer: FutureWriter) -> Self { - Self { - writer: Some(writer), - accessor, + let &WriteState::GuestReady { + ty, + address, + count, + options, + .. + } = &transmit.write + else { + unreachable!() + }; + + let cx = &mut LiftContext::new(store.0.store_opaque_mut(), &options, self.instance); + let ty = payload(ty, cx.types); + let old_remaining = buffer.remaining_capacity(); + lift::( + cx, + ty, + buffer, + address + (T::SIZE32 * guest_offset), + count - guest_offset, + )?; + + let transmit = self + .instance + .concurrent_state_mut(store.0) + .get_mut(self.id)?; + + let ReadState::HostReady { guest_offset, .. } = &mut transmit.read else { + unreachable!(); + }; + + *guest_offset += old_remaining - buffer.remaining_capacity(); } + + Ok(()) } +} - /// Wrapper for [`FutureWriter::write`]. - pub async fn write(mut self, value: T) -> bool - where - T: func::Lower + Send + Sync + 'static, - { - self.writer - .as_mut() +impl Source<'_, u8> { + /// Return a `GuestSource` view of `self` if the guest is writing. + pub fn as_guest_source<'a, D>( + &'a mut self, + store: StoreContextMut<'a, D>, + ) -> Option> { + if let WriteState::GuestReady { .. 
} = self + .instance + .concurrent_state_mut(store.0) + .get_mut(self.id) .unwrap() - .write_(&self.accessor, value) - .await + .write + { + assert!(self.host_buffer.is_none()); + Some(GuestSource { + instance: self.instance, + id: self.id, + store, + }) + } else { + None + } } +} - /// Wrapper for [`FutureWriter::watch_reader`] - pub async fn watch_reader(&mut self) { - self.writer - .as_mut() +/// Represents a guest write to a `stream`, providing direct access to the +/// guest's buffer. +pub struct GuestSource<'a, D: 'static> { + instance: Instance, + id: TableId, + store: StoreContextMut<'a, D>, +} + +impl GuestSource<'_, D> { + /// Provide direct access to the guest's buffer. + pub fn remaining(&mut self) -> &[u8] { + let transmit = self + .instance + .concurrent_state_mut(self.store.as_context_mut().0) + .get_mut(self.id) + .unwrap(); + + let &ReadState::GuestReady { + address, + count, + options, + .. + } = &transmit.read + else { + unreachable!() + }; + + let &WriteState::HostReady { guest_offset, .. } = &transmit.write else { + unreachable!() + }; + + options + .memory(self.store.0) + .get((address + guest_offset)..) + .and_then(|b| b.get(..(count - guest_offset))) .unwrap() - .watch_reader(&self.accessor) - .await } - /// Extracts the underlying [`FutureWriter`] from this guard, returning it - /// back. - pub fn into_future(self) -> FutureWriter { - self.into() + /// Mark the specified number of bytes as read from the guest's buffer. + /// + /// This will panic if the count is larger than the size of the buffer + /// returned by `Self::remaining`. + pub fn mark_read(&mut self, count: usize) { + let transmit = self + .instance + .concurrent_state_mut(self.store.as_context_mut().0) + .get_mut(self.id) + .unwrap(); + + let WriteState::GuestReady { + count: write_count, .. + } = &transmit.write + else { + unreachable!() + }; + + let ReadState::HostReady { guest_offset, .. 
} = &mut transmit.read else { + unreachable!() + }; + + if *guest_offset + count > *write_count { + panic!("read count ({count}) must be less than or equal to write count ({write_count})") + } else { + *guest_offset += count; + } } } -impl From> for FutureWriter -where - T: func::Lower + Send + Sync + 'static, - A: AsAccessor, -{ - fn from(mut guard: GuardedFutureWriter) -> Self { - guard.writer.take().unwrap() - } +/// Represents the host-owned read end of a stream. +pub trait StreamConsumer: Send + 'static { + /// Handle a host- or guest-initiated write by accepting zero or more items + /// from the specified source. + /// + /// The returned future will resolve to `Ok(StreamState::Closed)` if and + /// when this consumer cannot accept any more items. + fn consume( + &mut self, + accessor: &Accessor, + source: &mut Source, + ) -> impl Future> + Send; + + /// Handle a guest-initiated zero-length write by returning a future which + /// resolves once this consumer is either ready to consume more items or is + /// closed. + fn when_ready( + &mut self, + accessor: &Accessor, + ) -> impl Future> + Send; } -impl Drop for GuardedFutureWriter -where - T: func::Lower + Send + Sync + 'static, - A: AsAccessor, -{ - fn drop(&mut self) { - if let Some(writer) = &mut self.writer { - writer.close_with(&self.accessor) - } - } +/// Represents a host-owned write end of a future. +pub trait FutureProducer: Send + 'static { + /// Handle a host- or guest-initiated read by producing a value. + fn produce(self, accessor: &Accessor) -> impl Future> + Send; +} + +/// Represents a host-owned read end of a future. +pub trait FutureConsumer: Send + 'static { + /// Handle a host- or guest-initiated write by consuming a value. + fn consume(self, accessor: &Accessor, value: T) -> impl Future> + Send; } /// Represents the readable end of a Component Model `future`. 
/// -/// Note that `FutureReader` instances must be disposed of using either `read` +/// Note that `FutureReader` instances must be disposed of using either `pipe` /// or `close`; otherwise the in-store representation will leak and the writer /// end will hang indefinitely. Consider using [`GuardedFutureReader`] to /// ensure that disposal happens automatically. @@ -559,7 +689,41 @@ pub struct FutureReader { } impl FutureReader { - fn new(id: TableId, instance: Instance) -> Self { + /// Create a new future with the specified producer. + pub fn new( + instance: Instance, + store: S, + producer: impl FutureProducer, + ) -> Self + where + T: func::Lower + func::Lift + Send + Sync + 'static, + { + struct Producer

(Option

); + + impl> StreamProducer for Producer

{ + async fn produce( + &mut self, + accessor: &Accessor, + destination: &mut Destination, + ) -> Result { + let value = self.0.take().unwrap().produce(accessor).await?; + let value = destination.write(accessor, Some(value)).await?; + assert!(value.is_none()); + Ok(StreamState::Open) + } + + async fn when_ready(&mut self, _: &Accessor) -> Result { + Ok(StreamState::Open) + } + } + + Self::new_( + instance.new_transmit(store, TransmitKind::Future, Producer(Some(producer))), + instance, + ) + } + + fn new_(id: TableId, instance: Instance) -> Self { Self { instance, id, @@ -567,58 +731,42 @@ impl FutureReader { } } - /// Read the value from this `future`. - /// - /// The returned `Future` will yield `Err` if the guest has trapped - /// before it could produce a result. - /// - /// The [`Accessor`] provided can be acquired from [`Instance::run_concurrent`] or - /// from within a host function for example. - /// - /// # Panics - /// - /// Panics if the store that the [`Accessor`] is derived from does not own - /// this future. - pub async fn read(self, accessor: impl AsAccessor) -> Option + /// Set the consumer that accepts the result of this future. 
+ pub fn pipe(self, store: S, consumer: impl FutureConsumer) where - T: func::Lift + Send + 'static, - { - self.guard(accessor).read().await - } - - async fn read_(&mut self, accessor: impl AsAccessor) -> Option - where - T: func::Lift + Send + 'static, + T: func::Lift + 'static, { - let accessor = accessor.as_accessor(); - - let result = self - .instance - .host_read_async(accessor, self.id, None, TransmitKind::Future) - .await; + struct Consumer(Option); - if let Ok(HostResult { - mut buffer, - dropped: false, - }) = result + impl> StreamConsumer + for Consumer { - buffer.take() - } else { - None + async fn consume( + &mut self, + accessor: &Accessor, + source: &mut Source<'_, T>, + ) -> Result { + let value = &mut None; + accessor.with(|access| source.read(access, value))?; + self.0 + .take() + .unwrap() + .consume(accessor, value.take().unwrap()) + .await?; + Ok(StreamState::Open) + } + + async fn when_ready(&mut self, _: &Accessor) -> Result { + Ok(StreamState::Open) + } } - } - /// Wait for the write end of this `future` to be dropped. - /// - /// The [`Accessor`] provided can be acquired from - /// [`Instance::run_concurrent`] or from within a host function for example. - /// - /// # Panics - /// - /// Panics if the store that the [`Accessor`] is derived from does not own - /// this future. - pub async fn watch_writer(&mut self, accessor: impl AsAccessor) { - watch_writer(accessor, self.instance, self.id).await; + self.instance.set_consumer( + store, + self.id, + TransmitKind::Future, + Consumer(Some(consumer)), + ); } /// Convert this `FutureReader` into a [`Val`]. @@ -639,7 +787,7 @@ impl FutureReader { let store = store.as_context_mut(); let id = TableId::::new(*rep); instance.concurrent_state_mut(store.0).get(id)?; // Just make sure it's present - Ok(Self::new(id, instance)) + Ok(Self::new_(id, instance)) } /// Transfer ownership of the read end of a future from a guest to the host. 
@@ -663,7 +811,7 @@ impl FutureReader { bail!("cannot lift future after previous read succeeded"); } - Ok(Self::new(id, cx.instance_handle())) + Ok(Self::new_(id, cx.instance_handle())) } _ => func::bad_type_info(), } @@ -772,343 +920,91 @@ unsafe impl func::Lower for FutureReader { } fn linear_lower_to_memory( - &self, - cx: &mut LowerContext<'_, U>, - ty: InterfaceType, - offset: usize, - ) -> Result<()> { - lower_future_to_index(self.id.rep(), cx, ty)?.linear_lower_to_memory( - cx, - InterfaceType::U32, - offset, - ) - } -} - -// SAFETY: See the comment on the `ComponentType` `impl` for this type. -unsafe impl func::Lift for FutureReader { - fn linear_lift_from_flat( - cx: &mut LiftContext<'_>, - ty: InterfaceType, - src: &Self::Lower, - ) -> Result { - let index = u32::linear_lift_from_flat(cx, InterfaceType::U32, src)?; - Self::lift_from_index(cx, ty, index) - } - - fn linear_lift_from_memory( - cx: &mut LiftContext<'_>, - ty: InterfaceType, - bytes: &[u8], - ) -> Result { - let index = u32::linear_lift_from_memory(cx, InterfaceType::U32, bytes)?; - Self::lift_from_index(cx, ty, index) - } -} - -/// A [`FutureReader`] paired with an [`Accessor`]. -/// -/// This is an RAII wrapper around [`FutureReader`] that ensures it is closed -/// when dropped. This can be created through [`GuardedFutureReader::new`] or -/// [`FutureReader::guard`]. -pub struct GuardedFutureReader -where - A: AsAccessor, -{ - // This field is `None` to implement the conversion from this guard back to - // `FutureReader`. When `None` is seen in the destructor it will cause the - // destructor to do nothing. - reader: Option>, - accessor: A, -} - -impl GuardedFutureReader -where - A: AsAccessor, -{ - /// Create a new `GuardedFutureReader` with the specified `accessor` and `reader`. - pub fn new(accessor: A, reader: FutureReader) -> Self { - Self { - reader: Some(reader), - accessor, - } - } - - /// Wrapper for [`FutureReader::read`]. 
- pub async fn read(mut self) -> Option - where - T: func::Lift + Send + 'static, - { - self.reader.as_mut().unwrap().read_(&self.accessor).await - } - - /// Wrapper for [`FutureReader::watch_writer`]. - pub async fn watch_writer(&mut self) { - self.reader - .as_mut() - .unwrap() - .watch_writer(&self.accessor) - .await - } - - /// Extracts the underlying [`FutureReader`] from this guard, returning it - /// back. - pub fn into_future(self) -> FutureReader { - self.into() - } -} - -impl From> for FutureReader -where - A: AsAccessor, -{ - fn from(mut guard: GuardedFutureReader) -> Self { - guard.reader.take().unwrap() - } -} - -impl Drop for GuardedFutureReader -where - A: AsAccessor, -{ - fn drop(&mut self) { - if let Some(reader) = &mut self.reader { - reader.close_with(&self.accessor) - } - } -} - -/// Represents the writable end of a Component Model `stream`. -/// -/// Note that `StreamWriter` instances must be disposed of using `close`; -/// otherwise the in-store representation will leak and the reader end will hang -/// indefinitely. Consider using [`GuardedStreamWriter`] to ensure that -/// disposal happens automatically. -pub struct StreamWriter { - instance: Instance, - id: TableId, - closed: bool, - _phantom: PhantomData, -} - -impl StreamWriter { - fn new(id: TableId, instance: Instance) -> Self { - Self { - instance, - id, - closed: false, - _phantom: PhantomData, - } - } - - /// Returns whether this stream is "closed" meaning that the other end of - /// the stream has been dropped. - pub fn is_closed(&self) -> bool { - self.closed - } - - /// Write the specified items to the `stream`. - /// - /// Note that this will only write as many items as the reader accepts - /// during its current or next read. Use `write_all` to loop until the - /// buffer is drained or the read end is dropped. 
- /// - /// The returned `Future` will yield the input buffer back, - /// possibly consuming a subset of the items or nothing depending on the - /// number of items the reader accepted. - /// - /// The [`is_closed`](Self::is_closed) method can be used to determine - /// whether the stream was learned to be closed after this operation completes. - /// - /// # Panics - /// - /// Panics if the store that the [`Accessor`] is derived from does not own - /// this future. - pub async fn write(&mut self, accessor: impl AsAccessor, buffer: B) -> B - where - T: func::Lower + 'static, - B: WriteBuffer, - { - let result = self - .instance - .host_write_async( - accessor.as_accessor(), - self.id, - buffer, - TransmitKind::Stream, - ) - .await; - - match result { - Ok(HostResult { buffer, dropped }) => { - if self.closed { - debug_assert!(dropped); - } - self.closed = dropped; - buffer - } - Err(_) => todo!("guarantee buffer recovery if `host_write` fails"), - } - } - - /// Write the specified values until either the buffer is drained or the - /// read end is dropped. - /// - /// The buffer is returned back to the caller and may still contain items - /// within it if the other end of this stream was dropped. Use the - /// [`is_closed`](Self::is_closed) method to determine if the other end is - /// dropped. - /// - /// # Panics - /// - /// Panics if the store that the [`Accessor`] is derived from does not own - /// this future. - pub async fn write_all(&mut self, accessor: impl AsAccessor, mut buffer: B) -> B - where - T: func::Lower + 'static, - B: WriteBuffer, - { - let accessor = accessor.as_accessor(); - while !self.is_closed() && buffer.remaining().len() > 0 { - buffer = self.write(accessor, buffer).await; - } - buffer - } - - /// Wait for the read end of this `stream` to be dropped. - /// - /// # Panics - /// - /// Panics if the store that the [`Accessor`] is derived from does not own - /// this future. 
- pub async fn watch_reader(&mut self, accessor: impl AsAccessor) { - watch_reader(accessor, self.instance, self.id).await - } - - /// Close this `StreamWriter`, writing the default value. - /// - /// # Panics - /// - /// Panics if the store that the [`Accessor`] is derived from does not own - /// this future. Usage of this future after calling `close` will also cause - /// a panic. - pub fn close(&mut self, mut store: impl AsContextMut) { - // `self` should never be used again, but leave an invalid handle there just in case. - let id = mem::replace(&mut self.id, TableId::new(0)); - self.instance - .host_drop_writer(store.as_context_mut(), id, None::<&dyn Fn() -> Result<()>>) - .unwrap() + &self, + cx: &mut LowerContext<'_, U>, + ty: InterfaceType, + offset: usize, + ) -> Result<()> { + lower_future_to_index(self.id.rep(), cx, ty)?.linear_lower_to_memory( + cx, + InterfaceType::U32, + offset, + ) } +} - /// Convenience method around [`Self::close`]. - pub fn close_with(&mut self, accessor: impl AsAccessor) { - accessor.as_accessor().with(|access| self.close(access)) +// SAFETY: See the comment on the `ComponentType` `impl` for this type. +unsafe impl func::Lift for FutureReader { + fn linear_lift_from_flat( + cx: &mut LiftContext<'_>, + ty: InterfaceType, + src: &Self::Lower, + ) -> Result { + let index = u32::linear_lift_from_flat(cx, InterfaceType::U32, src)?; + Self::lift_from_index(cx, ty, index) } - /// Returns a [`GuardedStreamWriter`] which will auto-close this stream on - /// drop and clean it up from the store. - /// - /// Note that the `accessor` provided must own this future and is - /// additionally transferred to the `GuardedStreamWriter` return value. 
- pub fn guard(self, accessor: A) -> GuardedStreamWriter - where - A: AsAccessor, - { - GuardedStreamWriter::new(accessor, self) + fn linear_lift_from_memory( + cx: &mut LiftContext<'_>, + ty: InterfaceType, + bytes: &[u8], + ) -> Result { + let index = u32::linear_lift_from_memory(cx, InterfaceType::U32, bytes)?; + Self::lift_from_index(cx, ty, index) } } -/// A [`StreamWriter`] paired with an [`Accessor`]. +/// A [`FutureReader`] paired with an [`Accessor`]. /// -/// This is an RAII wrapper around [`StreamWriter`] that ensures it is closed -/// when dropped. This can be created through [`GuardedStreamWriter::new`] or -/// [`StreamWriter::guard`]. -pub struct GuardedStreamWriter +/// This is an RAII wrapper around [`FutureReader`] that ensures it is closed +/// when dropped. This can be created through [`GuardedFutureReader::new`] or +/// [`FutureReader::guard`]. +pub struct GuardedFutureReader where A: AsAccessor, { // This field is `None` to implement the conversion from this guard back to - // `StreamWriter`. When `None` is seen in the destructor it will cause the + // `FutureReader`. When `None` is seen in the destructor it will cause the // destructor to do nothing. - writer: Option>, + reader: Option>, accessor: A, } -impl GuardedStreamWriter +impl GuardedFutureReader where A: AsAccessor, { - /// Create a new `GuardedStreamWriter` with the specified `accessor` and `writer`. - pub fn new(accessor: A, writer: StreamWriter) -> Self { + /// Create a new `GuardedFutureReader` with the specified `accessor` and `reader`. + pub fn new(accessor: A, reader: FutureReader) -> Self { Self { - writer: Some(writer), + reader: Some(reader), accessor, } } - /// Wrapper for [`StreamWriter::is_closed`]. - pub fn is_closed(&self) -> bool { - self.writer.as_ref().unwrap().is_closed() - } - - /// Wrapper for [`StreamWriter::write`]. 
- pub async fn write(&mut self, buffer: B) -> B - where - T: func::Lower + 'static, - B: WriteBuffer, - { - self.writer - .as_mut() - .unwrap() - .write(&self.accessor, buffer) - .await - } - - /// Wrapper for [`StreamWriter::write_all`]. - pub async fn write_all(&mut self, buffer: B) -> B - where - T: func::Lower + 'static, - B: WriteBuffer, - { - self.writer - .as_mut() - .unwrap() - .write_all(&self.accessor, buffer) - .await - } - - /// Wrapper for [`StreamWriter::watch_reader`]. - pub async fn watch_reader(&mut self) { - self.writer - .as_mut() - .unwrap() - .watch_reader(&self.accessor) - .await - } - - /// Extracts the underlying [`StreamWriter`] from this guard, returning it + /// Extracts the underlying [`FutureReader`] from this guard, returning it /// back. - pub fn into_stream(self) -> StreamWriter { + pub fn into_future(self) -> FutureReader { self.into() } } -impl From> for StreamWriter +impl From> for FutureReader where A: AsAccessor, { - fn from(mut guard: GuardedStreamWriter) -> Self { - guard.writer.take().unwrap() + fn from(mut guard: GuardedFutureReader) -> Self { + guard.reader.take().unwrap() } } -impl Drop for GuardedStreamWriter +impl Drop for GuardedFutureReader where A: AsAccessor, { fn drop(&mut self) { - if let Some(writer) = &mut self.writer { - writer.close_with(&self.accessor) + if let Some(reader) = &mut self.reader { + reader.close_with(&self.accessor) } } } @@ -1122,75 +1018,40 @@ where pub struct StreamReader { instance: Instance, id: TableId, - closed: bool, _phantom: PhantomData, } impl StreamReader { - fn new(id: TableId, instance: Instance) -> Self { + /// Create a new stream with the specified producer. 
+ pub fn new( + instance: Instance, + store: S, + producer: impl StreamProducer, + ) -> Self + where + T: func::Lower + func::Lift + Send + Sync + 'static, + { + Self::new_( + instance.new_transmit(store, TransmitKind::Stream, producer), + instance, + ) + } + + fn new_(id: TableId, instance: Instance) -> Self { Self { instance, id, - closed: false, _phantom: PhantomData, } } - /// Returns whether this stream is "closed" meaning that the other end of - /// the stream has been dropped. - pub fn is_closed(&self) -> bool { - self.closed - } - - /// Read values from this `stream`. - /// - /// The returned `Future` will yield a `(Some(_), _)` if the read completed - /// (possibly with zero items if the write was empty). It will return - /// `(None, _)` if the read failed due to the closure of the write end. In - /// either case, the returned buffer will be the same one passed as a - /// parameter, with zero or more items added. - /// - /// # Panics - /// - /// Panics if the store that the [`Accessor`] is derived from does not own - /// this future. - pub async fn read(&mut self, accessor: impl AsAccessor, buffer: B) -> B + /// Set the consumer that accepts the items delivered to this stream. + pub fn pipe(self, store: S, consumer: impl StreamConsumer) where - T: func::Lift + 'static, - B: ReadBuffer + Send + 'static, + T: 'static, { - let result = self - .instance - .host_read_async( - accessor.as_accessor(), - self.id, - buffer, - TransmitKind::Stream, - ) - .await; - - match result { - Ok(HostResult { buffer, dropped }) => { - if self.closed { - debug_assert!(dropped); - } - self.closed = dropped; - buffer - } - Err(_) => { - todo!("guarantee buffer recovery if `host_read` fails") - } - } - } - - /// Wait until the write end of this `stream` is dropped. - /// - /// # Panics - /// - /// Panics if the store that the [`Accessor`] is derived from does not own - /// this future. 
- pub async fn watch_writer(&mut self, accessor: impl AsAccessor) { - watch_writer(accessor, self.instance, self.id).await + self.instance + .set_consumer(store, self.id, TransmitKind::Stream, consumer); } /// Convert this `StreamReader` into a [`Val`]. @@ -1211,7 +1072,7 @@ impl StreamReader { let store = store.as_context_mut(); let id = TableId::::new(*rep); instance.concurrent_state_mut(store.0).get(id)?; // Just make sure it's present - Ok(Self::new(id, instance)) + Ok(Self::new_(id, instance)) } /// Transfer ownership of the read end of a stream from a guest to the host. @@ -1231,7 +1092,7 @@ impl StreamReader { .get_mut(id)? .common .handle = None; - Ok(Self::new(id, cx.instance_handle())) + Ok(Self::new_(id, cx.instance_handle())) } _ => func::bad_type_info(), } @@ -1403,33 +1264,6 @@ where } } - /// Wrapper for `StreamReader::is_closed` - pub fn is_closed(&self) -> bool { - self.reader.as_ref().unwrap().is_closed() - } - - /// Wrapper for `StreamReader::read`. - pub async fn read(&mut self, buffer: B) -> B - where - T: func::Lift + 'static, - B: ReadBuffer + Send + 'static, - { - self.reader - .as_mut() - .unwrap() - .read(&self.accessor, buffer) - .await - } - - /// Wrapper for `StreamReader::watch_writer`. - pub async fn watch_writer(&mut self) { - self.reader - .as_mut() - .unwrap() - .watch_writer(&self.accessor) - .await - } - /// Extracts the underlying [`StreamReader`] from this guard, returning it /// back. pub fn into_stream(self) -> StreamReader { @@ -1605,14 +1439,6 @@ struct TransmitState { write: WriteState, /// See `ReadState` read: ReadState, - /// The `Waker`, if any, to be woken when the write end of the stream or - /// future is dropped. - /// - /// This will signal to the host-owned read end that the write end has been - /// dropped. - writer_watcher: Option, - /// Like `writer_watcher`, but for the reverse direction. - reader_watcher: Option, /// Whether futher values may be transmitted via this stream or future. 
done: bool, } @@ -1624,8 +1450,6 @@ impl Default for TransmitState { read_handle: TableId::new(0), read: ReadState::Open, write: WriteState::Open, - reader_watcher: None, - writer_watcher: None, done: false, } } @@ -1649,441 +1473,341 @@ enum WriteState { address: usize, count: usize, handle: u32, - post_write: PostWrite, - }, - /// The write end is owned by a host task and a write is pending. - HostReady { - accept: - Box Result + Send + Sync>, - post_write: PostWrite, - }, - /// The write end has been dropped. - Dropped, -} - -impl fmt::Debug for WriteState { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - Self::Open => f.debug_tuple("Open").finish(), - Self::GuestReady { .. } => f.debug_tuple("GuestReady").finish(), - Self::HostReady { .. } => f.debug_tuple("HostReady").finish(), - Self::Dropped => f.debug_tuple("Dropped").finish(), - } - } -} - -/// Represents the state of the read end of a stream or future. -enum ReadState { - /// The read end is open, but no read is pending. - Open, - /// The read end is owned by a guest task and a read is pending. - GuestReady { - ty: TransmitIndex, - flat_abi: Option, - options: Options, - address: usize, - count: usize, - handle: u32, - }, - /// The read end is owned by a host task and a read is pending. - HostReady { - accept: Box Result + Send + Sync>, - }, - /// The read end has been dropped. - Dropped, -} - -impl fmt::Debug for ReadState { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - Self::Open => f.debug_tuple("Open").finish(), - Self::GuestReady { .. } => f.debug_tuple("GuestReady").finish(), - Self::HostReady { .. } => f.debug_tuple("HostReady").finish(), - Self::Dropped => f.debug_tuple("Dropped").finish(), - } - } -} - -/// Parameter type to pass to a `ReadState::HostReady` closure. -/// -/// See also `accept_writer`. -enum Writer<'a> { - /// The write end is owned by a guest task. 
- Guest { - lift: &'a mut LiftContext<'a>, - ty: Option, - address: usize, - count: usize, - }, - /// The write end is owned by the host. - Host { - buffer: &'a mut UntypedWriteBuffer<'a>, - count: usize, - }, - /// The write end has been dropped. - End, -} - -/// Parameter type to pass to a `WriteState::HostReady` closure. -/// -/// See also `accept_reader`. -enum Reader<'a> { - /// The read end is owned by a guest task. - Guest { - options: &'a Options, - ty: TransmitIndex, - address: usize, - count: usize, - }, - /// The read end is owned by the host. - Host { - accept: Box usize + 'a>, - }, - /// The read end has been dropped. - End, -} - -impl Instance { - /// Create a new Component Model `future` as pair of writable and readable ends, - /// the latter of which may be passed to guest code. - /// - /// `default` is a callback to be used if the writable end of the future is - /// closed without having written a value. You may supply e.g. `|| - /// unreachable!()` if you're sure that won't happen. - pub fn future( - self, - mut store: impl AsContextMut, - default: fn() -> T, - ) -> Result<(FutureWriter, FutureReader)> { - let (write, read) = self - .concurrent_state_mut(store.as_context_mut().0) - .new_transmit()?; - - Ok(( - FutureWriter::new(default, write, self), - FutureReader::new(read, self), - )) - } - - /// Create a new Component Model `stream` as pair of writable and readable ends, - /// the latter of which may be passed to guest code. - pub fn stream( - self, - mut store: impl AsContextMut, - ) -> Result<(StreamWriter, StreamReader)> { - let (write, read) = self - .concurrent_state_mut(store.as_context_mut().0) - .new_transmit()?; - - Ok(( - StreamWriter::new(write, self), - StreamReader::new(read, self), - )) - } - - /// Write to the specified stream or future from the host. 
- fn host_write, U>( - self, - mut store: StoreContextMut, - id: TableId, - mut buffer: B, - kind: TransmitKind, - post_write: PostWrite, - ) -> Result, oneshot::Receiver>>> { - let transmit_id = self.concurrent_state_mut(store.0).get(id)?.state; - let transmit = self - .concurrent_state_mut(store.0) - .get_mut(transmit_id) - .with_context(|| format!("retrieving state for transmit [{transmit_id:?}]"))?; - log::trace!("host_write state {transmit_id:?}; {:?}", transmit.read); - - let new_state = if let ReadState::Dropped = &transmit.read { - ReadState::Dropped - } else { - ReadState::Open - }; - - if matches!(post_write, PostWrite::Drop) && !matches!(transmit.read, ReadState::Open) { - transmit.write = WriteState::Dropped; - } - - Ok(match mem::replace(&mut transmit.read, new_state) { - ReadState::Open => { - assert!(matches!(&transmit.write, WriteState::Open)); - - let token = StoreToken::new(store.as_context_mut()); - let (tx, rx) = oneshot::channel(); - let state = WriteState::HostReady { - accept: Box::new(move |store, instance, reader| { - let (result, code) = accept_reader::( - token.as_context_mut(store), - instance, - reader, - buffer, - kind, - )?; - _ = tx.send(result); - Ok(code) - }), - post_write, - }; - self.concurrent_state_mut(store.0) - .get_mut(transmit_id)? - .write = state; - - Err(rx) - } - - ReadState::GuestReady { - ty, - flat_abi: _, - options, - address, - count, - handle, - .. 
- } => { - if let TransmitKind::Future = kind { - transmit.done = true; - } - - let read_handle = transmit.read_handle; - let accept = move |mut store: StoreContextMut| { - let (result, code) = accept_reader::( - store.as_context_mut(), - self, - Reader::Guest { - options: &options, - ty, - address, - count, - }, - buffer, - kind, - )?; - - self.concurrent_state_mut(store.0).set_event( - read_handle.rep(), - match ty { - TransmitIndex::Future(ty) => Event::FutureRead { - code, - pending: Some((ty, handle)), - }, - TransmitIndex::Stream(ty) => Event::StreamRead { - code, - pending: Some((ty, handle)), - }, - }, - )?; - - anyhow::Ok(result) - }; - - if T::MAY_REQUIRE_REALLOC { - // For payloads which may require a realloc call, use a - // oneshot::channel and background task. This is necessary - // because calling the guest while there are host embedder - // frames on the stack is unsound. - let (tx, rx) = oneshot::channel(); - let token = StoreToken::new(store.as_context_mut()); - self.concurrent_state_mut(store.0).push_high_priority( - WorkItem::WorkerFunction(Mutex::new(Box::new(move |store, _| { - _ = tx.send(accept(token.as_context_mut(store))?); - Ok(()) - }))), - ); - Err(rx) - } else { - // Optimize flat payloads (i.e. those which do not require - // calling the guest's realloc function) by lowering - // directly instead of using a oneshot::channel and - // background task. - Ok(accept(store)?) - } - } + }, + /// The write end is owned by the host, which is ready to produce items. + HostReady { + produce: Box< + dyn Fn() -> Pin> + Send + 'static>> + + Send + + Sync, + >, + guest_offset: usize, + join: Option, + }, + /// The write end has been dropped. 
+ Dropped, +} - ReadState::HostReady { accept } => { - let count = buffer.remaining().len(); - let mut untyped = UntypedWriteBuffer::new(&mut buffer); - let code = accept(Writer::Host { - buffer: &mut untyped, - count, - })?; - let (ReturnCode::Completed(_) | ReturnCode::Dropped(_)) = code else { - unreachable!() - }; +impl fmt::Debug for WriteState { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Self::Open => f.debug_tuple("Open").finish(), + Self::GuestReady { .. } => f.debug_tuple("GuestReady").finish(), + Self::HostReady { .. } => f.debug_tuple("HostReady").finish(), + Self::Dropped => f.debug_tuple("Dropped").finish(), + } + } +} - Ok(HostResult { - buffer, - dropped: false, - }) - } +/// Represents the state of the read end of a stream or future. +enum ReadState { + /// The read end is open, but no read is pending. + Open, + /// The read end is owned by a guest task and a read is pending. + GuestReady { + ty: TransmitIndex, + flat_abi: Option, + options: Options, + address: usize, + count: usize, + handle: u32, + }, + /// The read end is owned by a host task, and it is ready to consume items. + HostReady { + consume: Box< + dyn Fn() -> Pin> + Send + 'static>> + + Send + + Sync, + >, + guest_offset: usize, + join: Option, + }, + /// Both the read and write ends are owned by the host. + HostToHost { + accept: Box< + dyn for<'a> Fn( + &'a mut UntypedWriteBuffer<'a>, + ) + -> Pin> + Send + 'a>> + + Send + + Sync, + >, + }, + /// The read end has been dropped. + Dropped, +} - ReadState::Dropped => Ok(HostResult { - buffer, - dropped: true, - }), - }) +impl fmt::Debug for ReadState { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Self::Open => f.debug_tuple("Open").finish(), + Self::GuestReady { .. } => f.debug_tuple("GuestReady").finish(), + Self::HostReady { .. } => f.debug_tuple("HostReady").finish(), + Self::HostToHost { .. 
} => f.debug_tuple("HostToHost").finish(), + Self::Dropped => f.debug_tuple("Dropped").finish(), + } + } +} + +fn return_code(kind: TransmitKind, state: StreamState, guest_offset: usize) -> ReturnCode { + let count = guest_offset.try_into().unwrap(); + match state { + StreamState::Closed => ReturnCode::Dropped(count), + StreamState::Open => ReturnCode::completed(kind, count), } +} - /// Async wrapper around `Self::host_write`. - async fn host_write_async>( +impl Instance { + fn new_transmit( self, - accessor: impl AsAccessor, - id: TableId, - buffer: B, + mut store: S, kind: TransmitKind, - ) -> Result> { - match accessor.as_accessor().with(move |mut access| { - self.host_write( - access.as_context_mut(), - id, - buffer, - kind, - PostWrite::Continue, - ) - })? { - Ok(result) => Ok(result), - Err(rx) => Ok(rx.await?), - } + producer: impl StreamProducer, + ) -> TableId { + let mut store = store.as_context_mut(); + let token = StoreToken::new(store.as_context_mut()); + let state = self.concurrent_state_mut(store.0); + let (_, read) = state.new_transmit().unwrap(); + let producer = Arc::new(Mutex::new(Some(producer))); + let transmit_id = state.get(read).unwrap().state; + let produce = Box::new(move || { + let producer = producer.clone(); + async move { + let mut mine = producer.lock().unwrap().take().unwrap(); + // TODO: call `StreamProducer::when_ready` instead of `consume` + // for zero-length reads. + let result = mine + .produce( + &Accessor::new(token, Some(self)), + &mut Destination { + instance: self, + id: transmit_id, + kind, + _phantom: PhantomData, + }, + ) + .await; + *producer.lock().unwrap() = Some(mine); + result + } + .boxed() + }); + state.get_mut(transmit_id).unwrap().write = WriteState::HostReady { + produce, + guest_offset: 0, + join: None, + }; + read } - /// Read from the specified stream or future from the host. 
- fn host_read, U>( + fn set_consumer( self, - store: StoreContextMut, + mut store: S, id: TableId, - mut buffer: B, kind: TransmitKind, - ) -> Result, oneshot::Receiver>>> { - let transmit_id = self.concurrent_state_mut(store.0).get(id)?.state; - let transmit = self - .concurrent_state_mut(store.0) - .get_mut(transmit_id) - .with_context(|| format!("retrieving state for transmit [{transmit_id:?}]"))?; - log::trace!("host_read state {transmit_id:?}; {:?}", transmit.write); - - let new_state = if let WriteState::Dropped = &transmit.write { - WriteState::Dropped - } else { - WriteState::Open + consumer: impl StreamConsumer, + ) { + let mut store = store.as_context_mut(); + let token = StoreToken::new(store.as_context_mut()); + let state = self.concurrent_state_mut(store.0); + let id = state.get(id).unwrap().state; + let transmit = state.get_mut(id).unwrap(); + let consumer = Arc::new(Mutex::new(Some(consumer))); + let consume = { + let consumer = consumer.clone(); + Box::new(move || { + let consumer = consumer.clone(); + async move { + let mut mine = consumer.lock().unwrap().take().unwrap(); + // TODO: call `StreamConsumer::when_ready` instead of + // `consume` for zero-length writes. 
+ let result = mine + .consume( + &Accessor::new(token, Some(self)), + &mut Source { + instance: self, + id, + host_buffer: None, + }, + ) + .await; + *consumer.lock().unwrap() = Some(mine); + result + } + .boxed() + }) }; - Ok(match mem::replace(&mut transmit.write, new_state) { + match mem::replace(&mut transmit.write, WriteState::Open) { WriteState::Open => { - assert!(matches!(&transmit.read, ReadState::Open)); - - let (tx, rx) = oneshot::channel(); transmit.read = ReadState::HostReady { - accept: Box::new(move |writer| { - let (result, code) = accept_writer::(writer, buffer, kind)?; - _ = tx.send(result); - Ok(code) - }), + consume, + guest_offset: 0, + join: None, }; - - Err(rx) } - - WriteState::GuestReady { - ty, - flat_abi: _, - options, - address, - count, - handle, - post_write, - .. - } => { - if let TransmitIndex::Future(_) = ty { - transmit.done = true; - } - - let write_handle = transmit.write_handle; - let lift = &mut LiftContext::new(store.0.store_opaque_mut(), &options, self); - let (result, code) = accept_writer::( - Writer::Guest { - ty: payload(ty, lift.types), - lift, - address, - count, - }, - buffer, - kind, - )?; - - let state = self.concurrent_state_mut(store.0); - let pending = if let PostWrite::Drop = post_write { - state.get_mut(transmit_id)?.write = WriteState::Dropped; - false - } else { - true + WriteState::GuestReady { .. } => { + let future = consume(); + transmit.read = ReadState::HostReady { + consume, + guest_offset: 0, + join: None, }; - - state.set_event( - write_handle.rep(), - match ty { - TransmitIndex::Future(ty) => Event::FutureWrite { - code, - pending: pending.then_some((ty, handle)), - }, - TransmitIndex::Stream(ty) => Event::StreamWrite { - code, - pending: pending.then_some((ty, handle)), - }, - }, - )?; - - Ok(result) + self.pipe_from_guest(store, kind, id, future).unwrap(); } + WriteState::HostReady { produce, .. 
} => { + transmit.read = ReadState::HostToHost { + accept: Box::new(move |input| { + let consumer = consumer.clone(); + async move { + let mut mine = consumer.lock().unwrap().take().unwrap(); + let result = mine + .consume( + &Accessor::new(token, Some(self)), + &mut Source { + instance: self, + id, + host_buffer: Some(input.get_mut::()), + }, + ) + .await; + *consumer.lock().unwrap() = Some(mine); + result + } + .boxed() + }), + }; - WriteState::HostReady { accept, post_write } => { - accept( - store.0, - self, - Reader::Host { - accept: Box::new(|input, count| { - let count = count.min(buffer.remaining_capacity()); - buffer.move_from(input.get_mut::(), count); - count - }), - }, - )?; + let future = async move { + loop { + if tls::get(|store| { + anyhow::Ok(matches!( + self.concurrent_state_mut(store).get(id)?.read, + ReadState::Dropped + )) + })? { + break Ok(()); + } + + match produce().await? { + StreamState::Open => {} + StreamState::Closed => break Ok(()), + } - if let PostWrite::Drop = post_write { - self.concurrent_state_mut(store.0) - .get_mut(transmit_id)? - .write = WriteState::Dropped; + if let TransmitKind::Future = kind { + break Ok(()); + } + } } + .map(move |result| { + tls::get(|store| self.concurrent_state_mut(store).delete_transmit(id))?; + result + }); - Ok(HostResult { - buffer, - dropped: false, - }) + state.push_future(Box::pin(future)); } + WriteState::Dropped => unreachable!(), + } + } - WriteState::Dropped => Ok(HostResult { - buffer, - dropped: true, - }), - }) + fn pipe_from_guest( + self, + mut store: impl AsContextMut, + kind: TransmitKind, + id: TableId, + future: Pin> + Send + 'static>>, + ) -> Result<()> { + let (join, future) = JoinHandle::run(async move { + let stream_state = future.await?; + tls::get(|store| { + let state = self.concurrent_state_mut(store); + let transmit = state.get_mut(id)?; + let ReadState::HostReady { + consume, + guest_offset, + .. 
+ } = mem::replace(&mut transmit.read, ReadState::Open) + else { + unreachable!(); + }; + let code = return_code(kind, stream_state, guest_offset); + transmit.read = match stream_state { + StreamState::Closed => ReadState::Dropped, + StreamState::Open => ReadState::HostReady { + consume, + guest_offset: 0, + join: None, + }, + }; + let WriteState::GuestReady { ty, handle, .. } = + mem::replace(&mut transmit.write, WriteState::Open) + else { + unreachable!(); + }; + state.send_write_result(ty, id, handle, code)?; + Ok(()) + }) + }); + let state = self.concurrent_state_mut(store.as_context_mut().0); + state.push_future(future.map(|result| result.unwrap_or(Ok(()))).boxed()); + let ReadState::HostReady { + join: state_join, .. + } = &mut state.get_mut(id)?.read + else { + unreachable!() + }; + *state_join = Some(join); + Ok(()) } - /// Async wrapper around `Self::host_read`. - async fn host_read_async>( + fn pipe_to_guest( self, - accessor: impl AsAccessor, - id: TableId, - buffer: B, + mut store: impl AsContextMut, kind: TransmitKind, - ) -> Result> { - match accessor - .as_accessor() - .with(move |mut access| self.host_read(access.as_context_mut(), id, buffer, kind))? - { - Ok(result) => Ok(result), - Err(rx) => Ok(rx.await?), - } + id: TableId, + future: Pin> + Send + 'static>>, + ) -> Result<()> { + let (join, future) = JoinHandle::run(async move { + let stream_state = future.await?; + tls::get(|store| { + let state = self.concurrent_state_mut(store); + let transmit = state.get_mut(id)?; + let WriteState::HostReady { + produce, + guest_offset, + .. + } = mem::replace(&mut transmit.write, WriteState::Open) + else { + unreachable!(); + }; + let code = return_code(kind, stream_state, guest_offset); + transmit.write = match stream_state { + StreamState::Closed => WriteState::Dropped, + StreamState::Open => WriteState::HostReady { + produce, + guest_offset: 0, + join: None, + }, + }; + let ReadState::GuestReady { ty, handle, .. 
} = + mem::replace(&mut transmit.read, ReadState::Open) + else { + unreachable!(); + }; + state.send_read_result(ty, id, handle, code)?; + Ok(()) + }) + }); + let state = self.concurrent_state_mut(store.as_context_mut().0); + state.push_future(future.map(|result| result.unwrap_or(Ok(()))).boxed()); + let WriteState::HostReady { + join: state_join, .. + } = &mut state.get_mut(id)?.write + else { + unreachable!() + }; + *state_join = Some(join); + Ok(()) } /// Drop the read end of a stream or future read from the host. @@ -2105,9 +1829,6 @@ impl Instance { ); transmit.read = ReadState::Dropped; - if let Some(waker) = transmit.reader_watcher.take() { - waker.wake(); - } // If the write end is already dropped, it should stay dropped, // otherwise, it should be opened. @@ -2122,34 +1843,23 @@ impl Instance { match mem::replace(&mut transmit.write, new_state) { // If a guest is waiting to write, notify it that the read end has // been dropped. - WriteState::GuestReady { - ty, - handle, - post_write, - .. - } => { - if let PostWrite::Drop = post_write { - state.delete_transmit(transmit_id)?; - } else { - state.update_event( - write_handle.rep(), - match ty { - TransmitIndex::Future(ty) => Event::FutureWrite { - code: ReturnCode::Dropped(0), - pending: Some((ty, handle)), - }, - TransmitIndex::Stream(ty) => Event::StreamWrite { - code: ReturnCode::Dropped(0), - pending: Some((ty, handle)), - }, + WriteState::GuestReady { ty, handle, .. } => { + state.update_event( + write_handle.rep(), + match ty { + TransmitIndex::Future(ty) => Event::FutureWrite { + code: ReturnCode::Dropped(0), + pending: Some((ty, handle)), }, - )?; - }; + TransmitIndex::Stream(ty) => Event::StreamWrite { + code: ReturnCode::Dropped(0), + pending: Some((ty, handle)), + }, + }, + )?; } - WriteState::HostReady { accept, .. } => { - accept(store, self, Reader::End)?; - } + WriteState::HostReady { .. 
} => {} WriteState::Open => { state.update_event( @@ -2176,11 +1886,11 @@ impl Instance { } /// Drop the write end of a stream or future read from the host. - fn host_drop_writer( + fn host_drop_writer( self, - mut store: StoreContextMut, + store: StoreContextMut, id: TableId, - default: Option<&dyn Fn() -> Result>, + on_drop_open: Option Result<()>>, ) -> Result<()> { let transmit_id = self.concurrent_state_mut(store.0).get(id)?.state; let transmit = self @@ -2193,32 +1903,18 @@ impl Instance { transmit.write ); - if let Some(waker) = transmit.writer_watcher.take() { - waker.wake(); - } - // Existing queued transmits must be updated with information for the impending writer closure match &mut transmit.write { WriteState::GuestReady { .. } => { unreachable!("can't call `host_drop_writer` on a guest-owned writer"); } - WriteState::HostReady { post_write, .. } => { - *post_write = PostWrite::Drop; - } + WriteState::HostReady { .. } => {} v @ WriteState::Open => { - if let (Some(default), false) = ( - default, + if let (Some(on_drop_open), false) = ( + on_drop_open, transmit.done || matches!(transmit.read, ReadState::Dropped), ) { - // This is a future, and we haven't written a value yet -- - // write the default value. - _ = self.host_write( - store.as_context_mut(), - id, - Some(default()?), - TransmitKind::Future, - PostWrite::Drop, - )?; + on_drop_open()?; } else { *v = WriteState::Dropped; } @@ -2263,17 +1959,13 @@ impl Instance { )?; } - // If the host was ready to read, and the writer end is being dropped (host->host write?) - // signal to the reader that we've reached the end of the stream - ReadState::HostReady { accept } => { - accept(Writer::End)?; - } + ReadState::HostReady { .. } | ReadState::HostToHost { .. 
} => {} // If the read state is open, then there are no registered readers of the stream/future ReadState::Open => { self.concurrent_state_mut(store.0).update_event( read_handle.rep(), - match default { + match on_drop_open { Some(_) => Event::FutureRead { code: ReturnCode::Dropped(0), pending: None, @@ -2313,14 +2005,12 @@ impl Instance { let id = TableId::::new(transmit_rep); log::trace!("guest_drop_writable: drop writer {id:?}"); match ty { - TransmitIndex::Stream(_) => { - self.host_drop_writer(store, id, None::<&dyn Fn() -> Result<()>>) - } + TransmitIndex::Stream(_) => self.host_drop_writer(store, id, None), TransmitIndex::Future(_) => self.host_drop_writer( store, id, - Some(&|| { - Err::<(), _>(anyhow!( + Some(|| { + Err(anyhow!( "cannot drop future write end without first writing a value" )) }), @@ -2467,6 +2157,40 @@ impl Instance { Ok(()) } + fn check_bounds( + self, + store: &StoreOpaque, + options: &Options, + ty: TransmitIndex, + address: usize, + count: usize, + ) -> Result<()> { + let types = self.id().get(store).component().types().clone(); + let size = usize::try_from( + match ty { + TransmitIndex::Future(ty) => types[types[ty].ty] + .payload + .map(|ty| types.canonical_abi(&ty).size32), + TransmitIndex::Stream(ty) => types[types[ty].ty] + .payload + .map(|ty| types.canonical_abi(&ty).size32), + } + .unwrap_or(0), + ) + .unwrap(); + + if count > 0 && size > 0 { + options + .memory(store) + .get(address..) + .and_then(|b| b.get(..(size * count))) + .map(drop) + .ok_or_else(|| anyhow::anyhow!("read pointer out of bounds of memory")) + } else { + Ok(()) + } + } + /// Write to the specified stream or future from the guest. 
pub(super) fn guest_write( self, @@ -2481,6 +2205,7 @@ impl Instance { let address = usize::try_from(address).unwrap(); let count = usize::try_from(count).unwrap(); let options = Options::new_index(store.0, self, options); + self.check_bounds(store.0, &options, ty, address, count)?; if !options.async_() { bail!("synchronous stream and future writes not yet supported"); } @@ -2502,7 +2227,7 @@ impl Instance { let transmit_id = concurrent_state.get(transmit_handle)?.state; let transmit = concurrent_state.get_mut(transmit_id)?; log::trace!( - "guest_write {transmit_handle:?} (handle {handle}; state {transmit_id:?}); {:?}", + "guest_write {count} to {transmit_handle:?} (handle {handle}; state {transmit_id:?}); {:?}", transmit.read ); @@ -2526,7 +2251,6 @@ impl Instance { address, count, handle, - post_write: PostWrite::Continue, }; Ok::<_, crate::Error>(()) }; @@ -2608,19 +2332,7 @@ impl Instance { let code = ReturnCode::completed(ty.kind(), total); - concurrent_state.set_event( - read_handle_rep, - match read_ty { - TransmitIndex::Future(ty) => Event::FutureRead { - code, - pending: Some((ty, read_handle)), - }, - TransmitIndex::Stream(ty) => Event::StreamRead { - code, - pending: Some((ty, read_handle)), - }, - }, - )?; + concurrent_state.send_read_result(read_ty, transmit_id, read_handle, code)?; } if read_buffer_remaining { @@ -2643,20 +2355,55 @@ impl Instance { } } - ReadState::HostReady { accept } => { + ReadState::HostReady { + consume, + guest_offset, + join, + } => { + assert!(join.is_none()); + assert_eq!(0, guest_offset); + if let TransmitIndex::Future(_) = ty { transmit.done = true; } - let lift = &mut LiftContext::new(store.0.store_opaque_mut(), &options, self); - accept(Writer::Guest { - ty: payload(ty, lift.types), - lift, - address, - count, - })? 
+ let mut future = consume(); + transmit.read = ReadState::HostReady { + consume, + guest_offset: 0, + join: None, + }; + set_guest_ready(concurrent_state)?; + let poll = self.set_tls(store.0, || { + future + .as_mut() + .poll(&mut Context::from_waker(&Waker::noop())) + }); + + match poll { + Poll::Ready(state) => { + let transmit = self.concurrent_state_mut(store.0).get_mut(transmit_id)?; + let ReadState::HostReady { guest_offset, .. } = &mut transmit.read else { + unreachable!(); + }; + let code = return_code(ty.kind(), state?, mem::replace(guest_offset, 0)); + transmit.write = WriteState::Open; + code + } + Poll::Pending => { + self.pipe_from_guest( + store.as_context_mut(), + ty.kind(), + transmit_id, + future, + )?; + ReturnCode::Blocked + } + } } + ReadState::HostToHost { .. } => unreachable!(), + ReadState::Open => { set_guest_ready(concurrent_state)?; ReturnCode::Blocked @@ -2696,7 +2443,9 @@ impl Instance { count: u32, ) -> Result { let address = usize::try_from(address).unwrap(); + let count = usize::try_from(count).unwrap(); let options = Options::new_index(store.0, self, options); + self.check_bounds(store.0, &options, ty, address, count)?; if !options.async_() { bail!("synchronous stream and future reads not yet supported"); } @@ -2715,7 +2464,7 @@ impl Instance { let transmit_id = concurrent_state.get(transmit_handle)?.state; let transmit = concurrent_state.get_mut(transmit_id)?; log::trace!( - "guest_read {transmit_handle:?} (handle {handle}; state {transmit_id:?}); {:?}", + "guest_read {count} from {transmit_handle:?} (handle {handle}; state {transmit_id:?}); {:?}", transmit.write ); @@ -2737,7 +2486,7 @@ impl Instance { flat_abi, options, address, - count: usize::try_from(count).unwrap(), + count, handle, }; Ok::<_, crate::Error>(()) @@ -2751,7 +2500,6 @@ impl Instance { address: write_address, count: write_count, handle: write_handle, - post_write, } => { assert_eq!(flat_abi, write_flat_abi); @@ -2765,8 +2513,6 @@ impl Instance { // 
`ReadState::GuestReady` case concerning zero-length reads and // writes. - let count = usize::try_from(count).unwrap(); - let write_complete = write_count == 0 || count > 0; let read_complete = write_count > 0; let write_buffer_remaining = count < write_count; @@ -2792,12 +2538,6 @@ impl Instance { .map(|ty| usize::try_from(types.canonical_abi(&ty).size32).unwrap()) .unwrap_or(0); let concurrent_state = instance.concurrent_state_mut(); - let pending = if let PostWrite::Drop = post_write { - concurrent_state.get_mut(transmit_id)?.write = WriteState::Dropped; - false - } else { - true - }; if write_complete { let count = u32::try_from(count).unwrap(); @@ -2813,18 +2553,11 @@ impl Instance { let code = ReturnCode::completed(ty.kind(), total); - concurrent_state.set_event( - write_handle_rep, - match write_ty { - TransmitIndex::Future(ty) => Event::FutureWrite { - code, - pending: pending.then_some((ty, write_handle)), - }, - TransmitIndex::Stream(ty) => Event::StreamWrite { - code, - pending: pending.then_some((ty, write_handle)), - }, - }, + concurrent_state.send_write_result( + write_ty, + transmit_id, + write_handle, + code, )?; } @@ -2837,7 +2570,6 @@ impl Instance { address: write_address + (count * item_size), count: write_count - count, handle: write_handle, - post_write, }; } @@ -2849,29 +2581,46 @@ impl Instance { } } - WriteState::HostReady { accept, post_write } => { + WriteState::HostReady { + produce, + guest_offset, + join, + } => { + assert!(join.is_none()); + assert_eq!(0, guest_offset); + if let TransmitIndex::Future(_) = ty { transmit.done = true; } - let code = accept( - store.0, - self, - Reader::Guest { - options: &options, - ty, - address, - count: count.try_into().unwrap(), - }, - )?; - - if let PostWrite::Drop = post_write { - self.concurrent_state_mut(store.0) - .get_mut(transmit_id)? 
- .write = WriteState::Dropped; + let mut future = produce(); + transmit.write = WriteState::HostReady { + produce, + guest_offset: 0, + join: None, + }; + set_guest_ready(concurrent_state)?; + let poll = self.set_tls(store.0, || { + future + .as_mut() + .poll(&mut Context::from_waker(&Waker::noop())) + }); + + match poll { + Poll::Ready(state) => { + let transmit = self.concurrent_state_mut(store.0).get_mut(transmit_id)?; + let WriteState::HostReady { guest_offset, .. } = &mut transmit.write else { + unreachable!(); + }; + let code = return_code(ty.kind(), state?, mem::replace(guest_offset, 0)); + transmit.read = ReadState::Open; + code + } + Poll::Pending => { + self.pipe_to_guest(store.as_context_mut(), ty.kind(), transmit_id, future)?; + ReturnCode::Blocked + } } - - code } WriteState::Open => { @@ -3319,6 +3068,52 @@ impl ComponentInstance { } impl ConcurrentState { + fn send_write_result( + &mut self, + ty: TransmitIndex, + id: TableId, + handle: u32, + code: ReturnCode, + ) -> Result<()> { + let write_handle = self.get_mut(id)?.write_handle.rep(); + self.set_event( + write_handle, + match ty { + TransmitIndex::Future(ty) => Event::FutureWrite { + code, + pending: Some((ty, handle)), + }, + TransmitIndex::Stream(ty) => Event::StreamWrite { + code, + pending: Some((ty, handle)), + }, + }, + ) + } + + fn send_read_result( + &mut self, + ty: TransmitIndex, + id: TableId, + handle: u32, + code: ReturnCode, + ) -> Result<()> { + let read_handle = self.get_mut(id)?.read_handle.rep(); + self.set_event( + read_handle, + match ty { + TransmitIndex::Future(ty) => Event::FutureRead { + code, + pending: Some((ty, handle)), + }, + TransmitIndex::Stream(ty) => Event::StreamRead { + code, + pending: Some((ty, handle)), + }, + }, + ) + } + fn take_event(&mut self, waitable: u32) -> Result> { Waitable::Transmit(TableId::::new(waitable)).take_event(self) } @@ -3444,6 +3239,14 @@ impl ConcurrentState { (ReturnCode::Dropped(_) | ReturnCode::Completed(_), _) => code, _ => 
unreachable!(), } + } else if let ReadState::HostReady { + join, guest_offset, .. + } = &mut self.get_mut(transmit_id)?.read + { + if let Some(join) = join.take() { + join.abort(); + } + ReturnCode::Cancelled(u32::try_from(mem::replace(guest_offset, 0)).unwrap()) } else { ReturnCode::Cancelled(0) }; @@ -3451,10 +3254,10 @@ impl ConcurrentState { let transmit = self.get_mut(transmit_id)?; match &transmit.write { - WriteState::GuestReady { .. } | WriteState::HostReady { .. } => { + WriteState::GuestReady { .. } => { transmit.write = WriteState::Open; } - + WriteState::HostReady { .. } => todo!("support host write cancellation"), WriteState::Open | WriteState::Dropped => {} } @@ -3488,6 +3291,14 @@ impl ConcurrentState { (ReturnCode::Dropped(_) | ReturnCode::Completed(_), _) => code, _ => unreachable!(), } + } else if let WriteState::HostReady { + join, guest_offset, .. + } = &mut self.get_mut(transmit_id)?.write + { + if let Some(join) = join.take() { + join.abort(); + } + ReturnCode::Cancelled(u32::try_from(mem::replace(guest_offset, 0)).unwrap()) } else { ReturnCode::Cancelled(0) }; @@ -3495,10 +3306,12 @@ impl ConcurrentState { let transmit = self.get_mut(transmit_id)?; match &transmit.read { - ReadState::GuestReady { .. } | ReadState::HostReady { .. } => { + ReadState::GuestReady { .. } => { transmit.read = ReadState::Open; } - + ReadState::HostReady { .. } | ReadState::HostToHost { .. 
} => { + todo!("support host read cancellation") + } ReadState::Open | ReadState::Dropped => {} } diff --git a/crates/wasmtime/src/runtime/component/concurrent/futures_and_streams/buffers.rs b/crates/wasmtime/src/runtime/component/concurrent/futures_and_streams/buffers.rs index b71a99863f31..c3f08b063095 100644 --- a/crates/wasmtime/src/runtime/component/concurrent/futures_and_streams/buffers.rs +++ b/crates/wasmtime/src/runtime/component/concurrent/futures_and_streams/buffers.rs @@ -11,9 +11,11 @@ use std::vec::Vec; pub use untyped::*; mod untyped { use super::WriteBuffer; + use crate::vm::SendSyncPtr; use std::any::TypeId; use std::marker; use std::mem; + use std::ptr::NonNull; /// Helper structure to type-erase the `T` in `WriteBuffer`. /// @@ -25,7 +27,7 @@ mod untyped { /// borrow on the original buffer passed in. pub struct UntypedWriteBuffer<'a> { element_type_id: TypeId, - buf: *mut dyn WriteBuffer<()>, + buf: SendSyncPtr>, _marker: marker::PhantomData<&'a mut dyn WriteBuffer<()>>, } @@ -48,11 +50,14 @@ mod untyped { // is safe here because `typed` and `untyped` have the same size // and we're otherwise reinterpreting a raw pointer with a type // parameter to one without one. - buf: unsafe { - let r = ReinterpretWriteBuffer { typed: buf }; - assert_eq!(mem::size_of_val(&r.typed), mem::size_of_val(&r.untyped)); - r.untyped - }, + buf: SendSyncPtr::new( + NonNull::new(unsafe { + let r = ReinterpretWriteBuffer { typed: buf }; + assert_eq!(mem::size_of_val(&r.typed), mem::size_of_val(&r.untyped)); + r.untyped + }) + .unwrap(), + ), _marker: marker::PhantomData, } } @@ -68,7 +73,12 @@ mod untyped { // structure also is proof of valid existence of the original // `&mut WriteBuffer`, so taking the raw pointer back to a safe // reference is valid. 
- unsafe { &mut *ReinterpretWriteBuffer { untyped: self.buf }.typed } + unsafe { + &mut *ReinterpretWriteBuffer { + untyped: self.buf.as_ptr(), + } + .typed + } } } } diff --git a/crates/wasmtime/src/runtime/component/mod.rs b/crates/wasmtime/src/runtime/component/mod.rs index 4ed9e306f4a8..fd5afa11767a 100644 --- a/crates/wasmtime/src/runtime/component/mod.rs +++ b/crates/wasmtime/src/runtime/component/mod.rs @@ -119,9 +119,10 @@ mod values; pub use self::component::{Component, ComponentExportIndex}; #[cfg(feature = "component-model-async")] pub use self::concurrent::{ - Access, Accessor, AccessorTask, AsAccessor, ErrorContext, FutureReader, FutureWriter, - GuardedFutureReader, GuardedFutureWriter, GuardedStreamReader, GuardedStreamWriter, JoinHandle, - ReadBuffer, StreamReader, StreamWriter, VMComponentAsyncStore, VecBuffer, WriteBuffer, + Access, Accessor, AccessorTask, AsAccessor, Destination, ErrorContext, FutureConsumer, + FutureProducer, FutureReader, GuardedFutureReader, GuardedStreamReader, GuestDestination, + GuestSource, JoinHandle, ReadBuffer, Source, StreamConsumer, StreamProducer, StreamReader, + StreamState, VMComponentAsyncStore, VecBuffer, WriteBuffer, }; pub use self::func::{ ComponentNamedList, ComponentType, Func, Lift, Lower, TypedFunc, WasmList, WasmStr, From a7343392ff7916d7e23f77193527f821590d0637 Mon Sep 17 00:00:00 2001 From: Alex Crichton Date: Mon, 25 Aug 2025 10:48:51 -0700 Subject: [PATCH 02/32] Add `Accessor::getter`, rename `with_data` to `with_getter` --- .../src/runtime/component/concurrent.rs | 38 +++++++++---------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/crates/wasmtime/src/runtime/component/concurrent.rs b/crates/wasmtime/src/runtime/component/concurrent.rs index aaed977f8ed1..2e09d1204949 100644 --- a/crates/wasmtime/src/runtime/component/concurrent.rs +++ b/crates/wasmtime/src/runtime/component/concurrent.rs @@ -455,32 +455,32 @@ where }) } + /// Returns the getter this accessor is using to project from 
`T` into + /// `D::Data`. + pub fn getter(&self) -> fn(&mut T) -> D::Data<'_> { + self.get_data + } + /// Changes this accessor to access `D2` instead of the current type /// parameter `D`. /// /// This changes the underlying data access from `T` to `D2::Data<'_>`. /// - /// Note that this is not a public or recommended API because it's easy to - /// cause panics with this by having two `Accessor` values live at the same - /// time. The returned `Accessor` does not refer to this `Accessor` meaning - /// that both can be used. You could, for example, call `Accessor::with` - /// simultaneously on both. That would cause a panic though. - /// - /// In short while there's nothing unsafe about this it's a footgun. It's - /// here for bindings generation where the provided accessor is transformed - /// into a new accessor and then this returned accessor is passed to - /// implementations. + /// # Panics /// - /// Note that one possible fix for this would be a lifetime parameter on - /// `Accessor` itself so the returned value could borrow from the original - /// value (or this could be `self`-by-value instead of `&mut self`) but in - /// attempting that it was found to be a bit too onerous in terms of - /// plumbing things around without a whole lot of benefit. + /// When using this API the returned value is disconnected from `&self` and + /// the lifetime binding the `self` argument. An `Accessor` only works + /// within the context of the closure or async closure that it was + /// originally given to, however. This means that due to the fact that the + /// returned value has no lifetime connection it's possible to use the + /// accessor outside of `&self`, the original accessor, and panic. /// - /// In short, this works, but must be treated with care. The current main - /// user, bindings generation, treats this with care. 
- #[doc(hidden)] - pub fn with_data(&self, get_data: fn(&mut T) -> D2::Data<'_>) -> Accessor { + /// The returned value should only be used within the scope of the original + /// `Accessor` that `self` refers to. + pub fn with_getter( + &self, + get_data: fn(&mut T) -> D2::Data<'_>, + ) -> Accessor { Accessor { token: self.token, get_data, From c3204ca8a8ba48adf696558aabca3cc7109e5b6e Mon Sep 17 00:00:00 2001 From: Roman Volosatovs Date: Mon, 25 Aug 2025 19:59:16 +0200 Subject: [PATCH 03/32] fixup bindgen invocation Signed-off-by: Roman Volosatovs --- crates/wit-bindgen/src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/wit-bindgen/src/lib.rs b/crates/wit-bindgen/src/lib.rs index 535603298003..f5388d4c6efa 100644 --- a/crates/wit-bindgen/src/lib.rs +++ b/crates/wit-bindgen/src/lib.rs @@ -1527,7 +1527,7 @@ impl Wasmtime { {wt}::component::ResourceType::host::<{camel}>(), move |caller: &{wt}::component::Accessor::, rep| {{ {wt}::component::__internal::Box::pin(async move {{ - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); Host{camel}WithStore::drop(accessor, {wt}::component::Resource::new_own(rep)).await }}) }}, @@ -2490,7 +2490,7 @@ impl<'a> InterfaceGenerator<'a> { } if flags.contains(FunctionFlags::STORE) { - uwriteln!(self.src, "let accessor = &caller.with_data(host_getter);"); + uwriteln!(self.src, "let accessor = &caller.with_getter(host_getter);"); } else { self.src .push_str("let host = &mut host_getter(caller.data_mut());\n"); From 61c5a24ebb6fac0e879077f47a43fea5a55b4af1 Mon Sep 17 00:00:00 2001 From: Joel Dice Date: Mon, 25 Aug 2025 15:30:14 -0600 Subject: [PATCH 04/32] add support for zero-length writes/reads to/from host I've added a test to cover this; it also tests direct buffer access for `stream`, which I realized I forgot to cover earlier. And of course there was a bug :facepalm:. 
Signed-off-by: Joel Dice --- crates/misc/component-async-tests/src/util.rs | 29 ++- .../tests/scenario/transmit.rs | 181 ++++++++++++- .../component-async-tests/tests/test_all.rs | 2 +- .../misc/component-async-tests/wit/test.wit | 8 + .../test-programs/src/bin/async_readiness.rs | 244 ++++++++++++++++++ .../concurrent/futures_and_streams.rs | 64 +++-- 6 files changed, 498 insertions(+), 30 deletions(-) create mode 100644 crates/test-programs/src/bin/async_readiness.rs diff --git a/crates/misc/component-async-tests/src/util.rs b/crates/misc/component-async-tests/src/util.rs index f7551dad42ae..558434c1f12a 100644 --- a/crates/misc/component-async-tests/src/util.rs +++ b/crates/misc/component-async-tests/src/util.rs @@ -3,7 +3,7 @@ use futures::{ SinkExt, StreamExt, channel::{mpsc, oneshot}, }; -use std::thread; +use std::{future, thread}; use wasmtime::component::{ Accessor, Destination, FutureConsumer, FutureProducer, Lift, Lower, Source, StreamConsumer, StreamProducer, StreamState, @@ -32,12 +32,17 @@ pub async fn sleep(duration: std::time::Duration) { pub struct MpscProducer { rx: mpsc::Receiver, + next: Option, closed: bool, } impl MpscProducer { pub fn new(rx: mpsc::Receiver) -> Self { - Self { rx, closed: false } + Self { + rx, + next: None, + closed: false, + } } fn state(&self) -> StreamState { @@ -55,7 +60,15 @@ impl StreamProducer for MpscProducer< accessor: &Accessor, destination: &mut Destination, ) -> Result { - if let Some(item) = self.rx.next().await { + let item = if let Some(item) = self.next.take() { + Some(item) + } else if let Some(item) = self.rx.next().await { + Some(item) + } else { + None + }; + + if let Some(item) = item { let item = destination.write(accessor, Some(item)).await?; assert!(item.is_none()); } else { @@ -66,6 +79,14 @@ impl StreamProducer for MpscProducer< } async fn when_ready(&mut self, _: &Accessor) -> Result { + if !self.closed && self.next.is_none() { + if let Some(item) = self.rx.next().await { + self.next = Some(item); 
+ } else { + self.closed = true; + } + } + Ok(self.state()) } } @@ -101,6 +122,8 @@ impl StreamConsumer for MpscConsumer { } async fn when_ready(&mut self, _: &Accessor) -> Result { + future::poll_fn(|cx| self.tx.poll_ready(cx)).await?; + Ok(self.state()) } } diff --git a/crates/misc/component-async-tests/tests/scenario/transmit.rs b/crates/misc/component-async-tests/tests/scenario/transmit.rs index d1d9d0de3804..2b8fd88cdc89 100644 --- a/crates/misc/component-async-tests/tests/scenario/transmit.rs +++ b/crates/misc/component-async-tests/tests/scenario/transmit.rs @@ -1,6 +1,7 @@ -use std::future::Future; +use std::future::{self, Future}; use std::pin::Pin; use std::sync::{Arc, Mutex}; +use std::time::Duration; use super::util::{config, make_component, test_run, test_run_with_count}; use anyhow::{Result, anyhow}; @@ -14,11 +15,179 @@ use futures::{ stream::FuturesUnordered, }; use wasmtime::component::{ - Accessor, Component, FutureReader, HasSelf, Instance, Linker, ResourceTable, StreamReader, Val, + Accessor, Component, Destination, FutureReader, HasSelf, Instance, Linker, ResourceTable, + Source, StreamConsumer, StreamProducer, StreamReader, StreamState, Val, }; -use wasmtime::{AsContextMut, Engine, Store}; +use wasmtime::{AsContextMut, Engine, Store, Trap}; use wasmtime_wasi::WasiCtxBuilder; +mod readiness { + wasmtime::component::bindgen!({ + path: "wit", + world: "readiness-guest" + }); +} + +struct ReadinessProducer { + buffer: Vec, + slept: bool, + closed: bool, +} + +impl ReadinessProducer { + async fn maybe_sleep(&mut self) { + if !self.slept { + self.slept = true; + component_async_tests::util::sleep(Duration::from_millis(delay_millis())).await; + } + } + + fn state(&self) -> StreamState { + if self.closed { + StreamState::Closed + } else { + StreamState::Open + } + } +} + +impl StreamProducer for ReadinessProducer { + async fn produce( + &mut self, + accessor: &Accessor, + destination: &mut Destination, + ) -> Result { + self.maybe_sleep().await; + 
accessor.with(|mut access| { + let mut destination = destination + .as_guest_destination(access.as_context_mut()) + .unwrap(); + destination.remaining().copy_from_slice(&self.buffer); + destination.mark_written(self.buffer.len()); + }); + self.closed = true; + Ok(self.state()) + } + + async fn when_ready(&mut self, _: &Accessor) -> Result { + self.maybe_sleep().await; + Ok(self.state()) + } +} + +struct ReadinessConsumer { + expected: Vec, + slept: bool, + closed: bool, +} + +impl ReadinessConsumer { + async fn maybe_sleep(&mut self) { + if !self.slept { + self.slept = true; + component_async_tests::util::sleep(Duration::from_millis(delay_millis())).await; + } + } + + fn state(&self) -> StreamState { + if self.closed { + StreamState::Closed + } else { + StreamState::Open + } + } +} + +impl StreamConsumer for ReadinessConsumer { + async fn consume( + &mut self, + accessor: &Accessor, + source: &mut Source<'_, u8>, + ) -> Result { + self.maybe_sleep().await; + accessor.with(|mut access| { + let mut source = source.as_guest_source(access.as_context_mut()).unwrap(); + assert_eq!(&self.expected, source.remaining()); + source.mark_read(self.expected.len()); + }); + self.closed = true; + Ok(self.state()) + } + + async fn when_ready(&mut self, _: &Accessor) -> Result { + self.maybe_sleep().await; + Ok(self.state()) + } +} + +#[tokio::test] +pub async fn async_readiness() -> Result<()> { + let component = test_programs_artifacts::ASYNC_READINESS_COMPONENT; + + let engine = Engine::new(&config())?; + + let component = make_component(&engine, &[component]).await?; + + let mut linker = Linker::new(&engine); + + wasmtime_wasi::p2::add_to_linker_async(&mut linker)?; + + let mut store = Store::new( + &engine, + Ctx { + wasi: WasiCtxBuilder::new().inherit_stdio().build(), + table: ResourceTable::default(), + continue_: false, + wakers: Arc::new(Mutex::new(None)), + }, + ); + + let instance = linker.instantiate_async(&mut store, &component).await?; + let readiness_guest = 
readiness::ReadinessGuest::new(&mut store, &instance)?; + let expected = vec![2u8, 4, 6, 8, 9]; + let rx = StreamReader::new( + instance, + &mut store, + ReadinessProducer { + buffer: expected.clone(), + slept: false, + closed: false, + }, + ); + let result = instance + .run_concurrent(&mut store, async move |accessor| { + let (rx, expected) = readiness_guest + .local_local_readiness() + .call_start(accessor, rx, expected) + .await?; + + accessor.with(|access| { + rx.pipe( + access, + ReadinessConsumer { + expected, + slept: false, + closed: false, + }, + ) + }); + + future::pending::>().await + }) + .await; + + // As of this writing, passing a future which never resolves to + // `Instance::run_concurrent` and expecting a `Trap::AsyncDeadlock` is + // the only way to join all tasks for the `Instance`, so that's what we + // do: + assert!(matches!( + result.unwrap_err().downcast::(), + Ok(Trap::AsyncDeadlock) + )); + + Ok(()) +} + #[tokio::test] pub async fn async_poll_synchronous() -> Result<()> { test_run(&[test_programs_artifacts::ASYNC_POLL_SYNCHRONOUS_COMPONENT]).await @@ -29,7 +198,7 @@ pub async fn async_poll_stackless() -> Result<()> { test_run(&[test_programs_artifacts::ASYNC_POLL_STACKLESS_COMPONENT]).await } -pub mod cancel { +mod cancel { wasmtime::component::bindgen!({ path: "wit", world: "cancel-host", @@ -79,7 +248,7 @@ pub async fn async_trap_cancel_host_after_return() -> Result<()> { test_cancel_trap(Mode::TrapCancelHostAfterReturn).await } -fn cancel_delay() -> u64 { +fn delay_millis() -> u64 { // Miri-based builds are much slower to run, so we delay longer in that case // to ensure that async calls which the test expects to return `BLOCKED` // actually do so. 
@@ -132,7 +301,7 @@ async fn test_cancel(mode: Mode) -> Result<()> { .run_concurrent(&mut store, async move |accessor| { cancel_host .local_local_cancel() - .call_run(accessor, mode, cancel_delay()) + .call_run(accessor, mode, delay_millis()) .await }) .await??; diff --git a/crates/misc/component-async-tests/tests/test_all.rs b/crates/misc/component-async-tests/tests/test_all.rs index 7b4ac4d0e475..8ff157c4be94 100644 --- a/crates/misc/component-async-tests/tests/test_all.rs +++ b/crates/misc/component-async-tests/tests/test_all.rs @@ -30,7 +30,7 @@ use scenario::round_trip_many::{ use scenario::streams::async_closed_streams; use scenario::transmit::{ async_cancel_callee, async_cancel_caller, async_intertask_communication, async_poll_stackless, - async_poll_synchronous, async_transmit_callee, async_transmit_caller, + async_poll_synchronous, async_readiness, async_transmit_callee, async_transmit_caller, }; use scenario::unit_stream::{async_unit_stream_callee, async_unit_stream_caller}; use scenario::yield_::{ diff --git a/crates/misc/component-async-tests/wit/test.wit b/crates/misc/component-async-tests/wit/test.wit index f074e9bc3167..9a602834ec8a 100644 --- a/crates/misc/component-async-tests/wit/test.wit +++ b/crates/misc/component-async-tests/wit/test.wit @@ -162,6 +162,10 @@ interface intertask { foo: func(fut: future); } +interface readiness { + start: async func(s: stream, expected: list) -> tuple, list>; +} + world yield-caller { import continue; import ready; @@ -307,3 +311,7 @@ world intertask-communication { import intertask; export run; } + +world readiness-guest { + export readiness; +} diff --git a/crates/test-programs/src/bin/async_readiness.rs b/crates/test-programs/src/bin/async_readiness.rs new file mode 100644 index 000000000000..23a74e858016 --- /dev/null +++ b/crates/test-programs/src/bin/async_readiness.rs @@ -0,0 +1,244 @@ +mod bindings { + wit_bindgen::generate!({ + path: "../misc/component-async-tests/wit", + world: "readiness-guest", + }); 
+} + +use { + std::{mem, ptr}, + test_programs::async_::{ + BLOCKED, CALLBACK_CODE_EXIT, CALLBACK_CODE_WAIT, DROPPED, EVENT_NONE, EVENT_STREAM_READ, + EVENT_STREAM_WRITE, context_get, context_set, waitable_join, waitable_set_drop, + waitable_set_new, + }, +}; + +#[cfg(target_arch = "wasm32")] +#[link(wasm_import_module = "[export]local:local/readiness")] +unsafe extern "C" { + #[link_name = "[task-return][async]start"] + fn task_return_start(_: u32, _: *const u8, _: usize); +} +#[cfg(not(target_arch = "wasm32"))] +unsafe extern "C" fn task_return_start(_: u32, _: *const u8, _: usize) { + unreachable!() +} + +#[cfg(target_arch = "wasm32")] +#[link(wasm_import_module = "[export]local:local/readiness")] +unsafe extern "C" { + #[link_name = "[stream-new-0][async]start"] + fn stream_new() -> u64; +} +#[cfg(not(target_arch = "wasm32"))] +unsafe extern "C" fn stream_new() -> u64 { + unreachable!() +} + +#[cfg(target_arch = "wasm32")] +#[link(wasm_import_module = "[export]local:local/readiness")] +unsafe extern "C" { + #[link_name = "[async-lower][stream-write-0][async]start"] + fn stream_write(_: u32, _: *const u8, _: usize) -> u32; +} +#[cfg(not(target_arch = "wasm32"))] +unsafe extern "C" fn stream_write(_: u32, _: *const u8, _: usize) -> u32 { + unreachable!() +} + +#[cfg(target_arch = "wasm32")] +#[link(wasm_import_module = "[export]local:local/readiness")] +unsafe extern "C" { + #[link_name = "[async-lower][stream-read-0][async]start"] + fn stream_read(_: u32, _: *mut u8, _: usize) -> u32; +} +#[cfg(not(target_arch = "wasm32"))] +unsafe extern "C" fn stream_read(_: u32, _: *mut u8, _: usize) -> u32 { + unreachable!() +} + +#[cfg(target_arch = "wasm32")] +#[link(wasm_import_module = "[export]local:local/readiness")] +unsafe extern "C" { + #[link_name = "[stream-drop-readable-0][async]start"] + fn stream_drop_readable(_: u32); +} +#[cfg(not(target_arch = "wasm32"))] +unsafe extern "C" fn stream_drop_readable(_: u32) { + unreachable!() +} + +#[cfg(target_arch = 
"wasm32")] +#[link(wasm_import_module = "[export]local:local/readiness")] +unsafe extern "C" { + #[link_name = "[stream-drop-writable-0][async]start"] + fn stream_drop_writable(_: u32); +} +#[cfg(not(target_arch = "wasm32"))] +unsafe extern "C" fn stream_drop_writable(_: u32) { + unreachable!() +} + +static BYTES_TO_WRITE: &[u8] = &[1, 3, 5, 7, 11]; + +enum State { + S0 { + rx: u32, + expected: Vec, + }, + S1 { + set: u32, + tx: Option, + rx: Option, + expected: Vec, + }, +} + +#[unsafe(export_name = "[async-lift]local:local/readiness#[async]start")] +unsafe extern "C" fn export_start(rx: u32, expected: u32, expected_len: u32) -> u32 { + let expected_len = usize::try_from(expected_len).unwrap(); + + unsafe { + context_set( + u32::try_from(Box::into_raw(Box::new(State::S0 { + rx, + expected: Vec::from_raw_parts( + expected as usize as *mut u8, + expected_len, + expected_len, + ), + })) as usize) + .unwrap(), + ); + + callback_start(EVENT_NONE, 0, 0) + } +} + +#[unsafe(export_name = "[callback][async-lift]local:local/readiness#[async]start")] +unsafe extern "C" fn callback_start(event0: u32, event1: u32, event2: u32) -> u32 { + unsafe { + let state = &mut *(usize::try_from(context_get()).unwrap() as *mut State); + match state { + State::S0 { rx, expected } => { + assert_eq!(event0, EVENT_NONE); + + // Do a zero-length read to wait until the writer is ready. + // + // Here we assume specific behavior from the writer, namely: + // + // - It is not immediately ready to send us anything. + // + // - When it _is_ ready, it will send us all the bytes it told us to + // expect at once. + let status = stream_read(*rx, ptr::null_mut(), 0); + assert_eq!(status, BLOCKED); + + let set = waitable_set_new(); + + waitable_join(*rx, set); + + let tx = { + let pair = stream_new(); + let tx = u32::try_from(pair >> 32).unwrap(); + let rx = u32::try_from(pair & 0xFFFFFFFF_u64).unwrap(); + + // Do a zero-length write to wait until the reader is ready. 
+                    //
+                    // Here we assume specific behavior from the reader, namely:
+                    //
+                    // - It is not immediately ready to receive anything (indeed, it
+                    //   can't possibly be ready given that we haven't returned the
+                    //   read handle to it yet).
+                    //
+                    // - When it _is_ ready, it will accept all the bytes we told it
+                    //   to expect at once.
+                    let status = stream_write(tx, ptr::null(), 0);
+                    assert_eq!(status, BLOCKED);
+
+                    waitable_join(tx, set);
+
+                    task_return_start(rx, BYTES_TO_WRITE.as_ptr(), BYTES_TO_WRITE.len());
+
+                    tx
+                };
+
+                *state = State::S1 {
+                    set,
+                    tx: Some(tx),
+                    rx: Some(*rx),
+                    expected: mem::take(expected),
+                };
+
+                CALLBACK_CODE_WAIT | (set << 4)
+            }
+
+            State::S1 {
+                set,
+                tx,
+                rx,
+                expected,
+            } => {
+                if event0 == EVENT_STREAM_READ {
+                    let rx = rx.take().unwrap();
+                    assert_eq!(event1, rx);
+                    assert_eq!(event2, 0);
+
+                    // The writer is ready now, so this read should not block.
+                    //
+                    // As noted above, we rely on the writer sending us all the
+                    // expected bytes at once.
+                    let received = &mut vec![0_u8; expected.len()];
+                    let status = stream_read(rx, received.as_mut_ptr(), received.len());
+                    assert_eq!(
+                        status,
+                        DROPPED | u32::try_from(received.len() << 4).unwrap()
+                    );
+                    assert_eq!(received, expected);
+
+                    waitable_join(rx, 0);
+                    stream_drop_readable(rx);
+
+                    if tx.is_none() {
+                        waitable_set_drop(*set);
+
+                        CALLBACK_CODE_EXIT
+                    } else {
+                        CALLBACK_CODE_WAIT | (*set << 4)
+                    }
+                } else if event0 == EVENT_STREAM_WRITE {
+                    let tx = tx.take().unwrap();
+                    assert_eq!(event1, tx);
+                    assert_eq!(event2, 0);
+
+                    // The reader is ready now, so this write should not block.
+                    //
+                    // As noted above, we rely on the reader accepting all the
+                    // expected bytes at once.
+ let status = stream_write(tx, BYTES_TO_WRITE.as_ptr(), BYTES_TO_WRITE.len()); + assert_eq!( + status, + DROPPED | u32::try_from(BYTES_TO_WRITE.len() << 4).unwrap() + ); + + waitable_join(tx, 0); + stream_drop_writable(tx); + + if rx.is_none() { + waitable_set_drop(*set); + + CALLBACK_CODE_EXIT + } else { + CALLBACK_CODE_WAIT | (*set << 4) + } + } else { + unreachable!() + } + } + } + } +} + +// Unused function; required since this file is built as a `bin`: +fn main() {} diff --git a/crates/wasmtime/src/runtime/component/concurrent/futures_and_streams.rs b/crates/wasmtime/src/runtime/component/concurrent/futures_and_streams.rs index b679ba3c8a8b..21f6570212a9 100644 --- a/crates/wasmtime/src/runtime/component/concurrent/futures_and_streams.rs +++ b/crates/wasmtime/src/runtime/component/concurrent/futures_and_streams.rs @@ -591,17 +591,17 @@ impl GuestSource<'_, D> { .get_mut(self.id) .unwrap(); - let &ReadState::GuestReady { + let &WriteState::GuestReady { address, count, options, .. - } = &transmit.read + } = &transmit.write else { unreachable!() }; - let &WriteState::HostReady { guest_offset, .. } = &transmit.write else { + let &ReadState::HostReady { guest_offset, .. } = &transmit.read else { unreachable!() }; @@ -1569,30 +1569,39 @@ impl Instance { let state = self.concurrent_state_mut(store.0); let (_, read) = state.new_transmit().unwrap(); let producer = Arc::new(Mutex::new(Some(producer))); - let transmit_id = state.get(read).unwrap().state; + let id = state.get(read).unwrap().state; let produce = Box::new(move || { let producer = producer.clone(); async move { + let zero_length_read = tls::get(|store| { + anyhow::Ok(matches!( + self.concurrent_state_mut(store).get(id)?.read, + ReadState::GuestReady { count: 0, .. } + )) + })?; + let mut mine = producer.lock().unwrap().take().unwrap(); - // TODO: call `StreamProducer::when_ready` instead of `consume` - // for zero-length reads. 
- let result = mine - .produce( - &Accessor::new(token, Some(self)), + let accessor = &Accessor::new(token, Some(self)); + let result = if zero_length_read { + mine.when_ready(accessor).await + } else { + mine.produce( + accessor, &mut Destination { instance: self, - id: transmit_id, + id, kind, _phantom: PhantomData, }, ) - .await; + .await + }; *producer.lock().unwrap() = Some(mine); result } .boxed() }); - state.get_mut(transmit_id).unwrap().write = WriteState::HostReady { + state.get_mut(id).unwrap().write = WriteState::HostReady { produce, guest_offset: 0, join: None, @@ -1618,19 +1627,28 @@ impl Instance { Box::new(move || { let consumer = consumer.clone(); async move { + let zero_length_write = tls::get(|store| { + anyhow::Ok(matches!( + self.concurrent_state_mut(store).get(id)?.write, + WriteState::GuestReady { count: 0, .. } + )) + })?; + let mut mine = consumer.lock().unwrap().take().unwrap(); - // TODO: call `StreamConsumer::when_ready` instead of - // `consume` for zero-length writes. - let result = mine - .consume( - &Accessor::new(token, Some(self)), + let accessor = &Accessor::new(token, Some(self)); + let result = if zero_length_write { + mine.when_ready(accessor).await + } else { + mine.consume( + accessor, &mut Source { instance: self, id, host_buffer: None, }, ) - .await; + .await + }; *consumer.lock().unwrap() = Some(mine); result } @@ -1638,7 +1656,7 @@ impl Instance { }) }; - match mem::replace(&mut transmit.write, WriteState::Open) { + match &transmit.write { WriteState::Open => { transmit.read = ReadState::HostReady { consume, @@ -1655,7 +1673,13 @@ impl Instance { }; self.pipe_from_guest(store, kind, id, future).unwrap(); } - WriteState::HostReady { produce, .. } => { + WriteState::HostReady { .. } => { + let WriteState::HostReady { produce, .. 
} = + mem::replace(&mut transmit.write, WriteState::Open) + else { + unreachable!(); + }; + transmit.read = ReadState::HostToHost { accept: Box::new(move |input| { let consumer = consumer.clone(); From 056f064bf874ff9915be503ac5ed23418f71a3fd Mon Sep 17 00:00:00 2001 From: Joel Dice Date: Tue, 26 Aug 2025 09:00:08 -0600 Subject: [PATCH 05/32] add `{Destination,Source}::remaining` methods This can help `Stream{Producer,Consumer}` implementations determine how many items to write or read, respectively. Signed-off-by: Joel Dice --- .../tests/scenario/transmit.rs | 8 ++++ .../concurrent/futures_and_streams.rs | 48 +++++++++++++++++++ 2 files changed, 56 insertions(+) diff --git a/crates/misc/component-async-tests/tests/scenario/transmit.rs b/crates/misc/component-async-tests/tests/scenario/transmit.rs index 2b8fd88cdc89..07a9799810d4 100644 --- a/crates/misc/component-async-tests/tests/scenario/transmit.rs +++ b/crates/misc/component-async-tests/tests/scenario/transmit.rs @@ -59,6 +59,10 @@ impl StreamProducer for ReadinessProducer { ) -> Result { self.maybe_sleep().await; accessor.with(|mut access| { + assert_eq!( + destination.remaining(access.as_context_mut()), + Some(self.buffer.len()) + ); let mut destination = destination .as_guest_destination(access.as_context_mut()) .unwrap(); @@ -106,6 +110,10 @@ impl StreamConsumer for ReadinessConsumer { ) -> Result { self.maybe_sleep().await; accessor.with(|mut access| { + assert_eq!( + source.remaining(access.as_context_mut()), + self.expected.len() + ); let mut source = source.as_guest_source(access.as_context_mut()).unwrap(); assert_eq!(&self.expected, source.remaining()); source.mark_read(self.expected.len()); diff --git a/crates/wasmtime/src/runtime/component/concurrent/futures_and_streams.rs b/crates/wasmtime/src/runtime/component/concurrent/futures_and_streams.rs index 21f6570212a9..54cdc610961a 100644 --- a/crates/wasmtime/src/runtime/component/concurrent/futures_and_streams.rs +++ 
b/crates/wasmtime/src/runtime/component/concurrent/futures_and_streams.rs @@ -356,6 +356,29 @@ impl Destination { _ => unreachable!(), } } + + /// Return the remaining number of items the current read has capacity to + /// accept, if known. + /// + /// This will return `Some(_)` if the reader is a guest; it will return + /// `None` if the reader is the host. + pub fn remaining(&self, mut store: impl AsContextMut) -> Option { + let transmit = self + .instance + .concurrent_state_mut(store.as_context_mut().0) + .get(self.id) + .unwrap(); + + if let &ReadState::GuestReady { count, .. } = &transmit.read { + let &WriteState::HostReady { guest_offset, .. } = &transmit.write else { + unreachable!() + }; + + Some(count - guest_offset) + } else { + None + } + } } impl Destination { @@ -547,6 +570,31 @@ impl Source<'_, T> { Ok(()) } + + /// Return the number of items remaining to be read from the current write + /// operation. + pub fn remaining(&self, mut store: impl AsContextMut) -> usize + where + T: 'static, + { + let transmit = self + .instance + .concurrent_state_mut(store.as_context_mut().0) + .get(self.id) + .unwrap(); + + if let &WriteState::GuestReady { count, .. } = &transmit.write { + let &ReadState::HostReady { guest_offset, .. 
} = &transmit.read else { + unreachable!() + }; + + count - guest_offset + } else if let Some(host_buffer) = &self.host_buffer { + host_buffer.remaining().len() + } else { + unreachable!() + } + } } impl Source<'_, u8> { From ddb3036a31115dab61badbe419d9e3f8a62d8407 Mon Sep 17 00:00:00 2001 From: Roman Volosatovs Date: Wed, 27 Aug 2025 15:34:26 +0200 Subject: [PATCH 06/32] wasi: migrate sockets to new API Signed-off-by: Roman Volosatovs --- .../bin/p3_sockets_tcp_sample_application.rs | 2 + crates/wasi/src/p3/mod.rs | 44 +- crates/wasi/src/p3/sockets/host/types/tcp.rs | 429 +++++++++++------- 3 files changed, 316 insertions(+), 159 deletions(-) diff --git a/crates/test-programs/src/bin/p3_sockets_tcp_sample_application.rs b/crates/test-programs/src/bin/p3_sockets_tcp_sample_application.rs index ab1ff48f1d9a..83d51fbcf8ff 100644 --- a/crates/test-programs/src/bin/p3_sockets_tcp_sample_application.rs +++ b/crates/test-programs/src/bin/p3_sockets_tcp_sample_application.rs @@ -47,6 +47,7 @@ async fn test_tcp_sample_application(family: IpAddressFamily, bind_address: IpSo // Check that we sent and received our message! assert_eq!(data, first_message); // Not guaranteed to work but should work in practice. + drop(data_rx); fut.await.unwrap() }, ); @@ -76,6 +77,7 @@ async fn test_tcp_sample_application(family: IpAddressFamily, bind_address: IpSo // Check that we sent and received our message! assert_eq!(data, second_message); // Not guaranteed to work but should work in practice. 
+ drop(data_rx); fut.await.unwrap() } ); diff --git a/crates/wasi/src/p3/mod.rs b/crates/wasi/src/p3/mod.rs index 029cf66f5653..e24641a86dd3 100644 --- a/crates/wasi/src/p3/mod.rs +++ b/crates/wasi/src/p3/mod.rs @@ -17,11 +17,53 @@ pub mod sockets; use crate::WasiView; use crate::p3::bindings::LinkOptions; -use wasmtime::component::Linker; +use anyhow::Context as _; +use tokio::sync::oneshot; +use wasmtime::component::{ + Accessor, Destination, FutureProducer, Linker, StreamProducer, StreamState, +}; // Default buffer capacity to use for reads of byte-sized values. const DEFAULT_BUFFER_CAPACITY: usize = 8192; +struct StreamEmptyProducer; + +impl StreamProducer for StreamEmptyProducer { + async fn produce( + &mut self, + _: &Accessor, + _: &mut Destination, + ) -> wasmtime::Result { + Ok(StreamState::Closed) + } + + async fn when_ready(&mut self, _: &Accessor) -> wasmtime::Result { + Ok(StreamState::Closed) + } +} + +struct FutureReadyProducer(T); + +impl FutureProducer for FutureReadyProducer +where + T: Send + 'static, +{ + async fn produce(self, _: &Accessor) -> wasmtime::Result { + Ok(self.0) + } +} + +struct FutureOneshotProducer(oneshot::Receiver); + +impl FutureProducer for FutureOneshotProducer +where + T: Send + 'static, +{ + async fn produce(self, _: &Accessor) -> wasmtime::Result { + self.0.await.context("oneshot sender dropped") + } +} + /// Add all WASI interfaces from this module into the `linker` provided. 
/// /// This function will add all interfaces implemented by this module to the diff --git a/crates/wasi/src/p3/sockets/host/types/tcp.rs b/crates/wasi/src/p3/sockets/host/types/tcp.rs index 532424cf0963..0a4f3c803dca 100644 --- a/crates/wasi/src/p3/sockets/host/types/tcp.rs +++ b/crates/wasi/src/p3/sockets/host/types/tcp.rs @@ -1,25 +1,27 @@ use super::is_addr_allowed; -use crate::TrappableError; -use crate::p3::DEFAULT_BUFFER_CAPACITY; use crate::p3::bindings::sockets::types::{ Duration, ErrorCode, HostTcpSocket, HostTcpSocketWithStore, IpAddressFamily, IpSocketAddress, TcpSocket, }; +use crate::p3::sockets::SocketError; use crate::p3::sockets::{SocketResult, WasiSockets}; +use crate::p3::{ + DEFAULT_BUFFER_CAPACITY, FutureOneshotProducer, FutureReadyProducer, StreamEmptyProducer, +}; use crate::sockets::{NonInheritedOptions, SocketAddrUse, SocketAddressFamily, WasiSocketsCtxView}; use anyhow::Context; use bytes::BytesMut; +use core::mem; use io_lifetimes::AsSocketlike as _; -use std::future::poll_fn; use std::io::Cursor; use std::net::{Shutdown, SocketAddr}; -use std::pin::pin; use std::sync::Arc; -use std::task::Poll; use tokio::net::{TcpListener, TcpStream}; +use tokio::sync::oneshot; +use wasmtime::AsContextMut as _; use wasmtime::component::{ - Accessor, AccessorTask, FutureReader, FutureWriter, GuardedFutureWriter, GuardedStreamWriter, - Resource, ResourceTable, StreamReader, StreamWriter, + Accessor, Destination, FutureReader, Resource, ResourceTable, Source, StreamConsumer, + StreamProducer, StreamReader, StreamState, }; fn get_socket<'a>( @@ -29,7 +31,7 @@ fn get_socket<'a>( table .get(socket) .context("failed to get socket resource from table") - .map_err(TrappableError::trap) + .map_err(SocketError::trap) } fn get_socket_mut<'a>( @@ -39,106 +41,226 @@ fn get_socket_mut<'a>( table .get_mut(socket) .context("failed to get socket resource from table") - .map_err(TrappableError::trap) + .map_err(SocketError::trap) } -struct ListenTask { +struct 
ListenStreamProducer { + accepted: Option>, listener: Arc, family: SocketAddressFamily, - tx: StreamWriter>, options: NonInheritedOptions, + getter: for<'a> fn(&'a mut T) -> WasiSocketsCtxView<'a>, +} + +impl ListenStreamProducer { + async fn accept(&mut self) -> std::io::Result { + if let Some(res) = self.accepted.take() { + return res; + } + let (stream, _) = self.listener.accept().await?; + Ok(stream) + } } -impl AccessorTask> for ListenTask { - async fn run(self, store: &Accessor) -> wasmtime::Result<()> { - let mut tx = GuardedStreamWriter::new(store, self.tx); - while !tx.is_closed() { - let Some(res) = ({ - let mut accept = pin!(self.listener.accept()); - let mut tx = pin!(tx.watch_reader()); - poll_fn(|cx| match tx.as_mut().poll(cx) { - Poll::Ready(()) => return Poll::Ready(None), - Poll::Pending => accept.as_mut().poll(cx).map(Some), - }) - .await - }) else { - return Ok(()); - }; - let socket = TcpSocket::new_accept(res.map(|p| p.0), &self.options, self.family) - .unwrap_or_else(|err| TcpSocket::new_error(err, self.family)); - let socket = store.with(|mut view| { - view.get() +impl StreamProducer> for ListenStreamProducer +where + D: 'static, +{ + async fn produce( + &mut self, + store: &Accessor, + dst: &mut Destination>, + ) -> wasmtime::Result { + let res = self.accept().await; + let socket = TcpSocket::new_accept(res, &self.options, self.family) + .unwrap_or_else(|err| TcpSocket::new_error(err, self.family)); + let store = store.with_getter::(self.getter); + let socket = store.with(|mut store| { + store + .get() + .table + .push(socket) + .context("failed to push socket resource to table") + })?; + if let Some(socket) = dst.write(&store, Some(socket)).await? 
{ + store.with(|mut store| { + store + .get() .table - .push(socket) - .context("failed to push socket resource to table") + .delete(socket) + .context("failed to delete socket resource from table") })?; - if let Some(socket) = tx.write(Some(socket)).await { - debug_assert!(tx.is_closed()); - store.with(|mut view| { - view.get() - .table - .delete(socket) - .context("failed to delete socket resource from table") - })?; - return Ok(()); - } + return Ok(StreamState::Closed); } - Ok(()) + Ok(StreamState::Open) + } + + async fn when_ready(&mut self, _: &Accessor) -> wasmtime::Result { + if self.accepted.is_none() { + let res = self.accept().await; + self.accepted = Some(res); + } + Ok(StreamState::Open) } } -struct ReceiveTask { +struct ReceiveStreamProducer { stream: Arc, - data_tx: StreamWriter, - result_tx: FutureWriter>, + result: Option>>, + buffer: BytesMut, +} + +impl Drop for ReceiveStreamProducer { + fn drop(&mut self) { + self.close(Ok(())) + } } -impl AccessorTask> for ReceiveTask { - async fn run(self, store: &Accessor) -> wasmtime::Result<()> { - let mut buf = BytesMut::with_capacity(DEFAULT_BUFFER_CAPACITY); - let mut data_tx = GuardedStreamWriter::new(store, self.data_tx); - let result_tx = GuardedFutureWriter::new(store, self.result_tx); +impl ReceiveStreamProducer { + fn close(&mut self, res: Result<(), ErrorCode>) { + if let Some(tx) = self.result.take() { + _ = self + .stream + .as_socketlike_view::() + .shutdown(Shutdown::Read); + _ = tx.send(res); + } + } +} + +impl StreamProducer for ReceiveStreamProducer { + async fn produce( + &mut self, + store: &Accessor, + dst: &mut Destination, + ) -> wasmtime::Result { let res = loop { - match self.stream.try_read_buf(&mut buf) { - Ok(0) => { - break Ok(()); + match store.with(|mut store| { + if let Some(mut dst) = dst.as_guest_destination(store.as_context_mut()) { + let n = self.stream.try_read(dst.remaining())?; + if n > 0 { + dst.mark_written(n); + } + Ok(n) + } else { + 
self.buffer.reserve(DEFAULT_BUFFER_CAPACITY); + self.stream.try_read_buf(&mut self.buffer) } - Ok(..) => { - buf = data_tx.write_all(Cursor::new(buf)).await.into_inner(); - if data_tx.is_closed() { - break Ok(()); + }) { + Ok(0) => break Ok(()), + Ok(..) if self.buffer.is_empty() => return Ok(StreamState::Open), + Ok(n) => { + let mut buf = Cursor::new(mem::take(&mut self.buffer)); + while buf.position() as usize != n { + buf = dst.write(store, buf).await? } - buf.clear(); + self.buffer = buf.into_inner(); + self.buffer.clear(); } Err(err) if err.kind() == std::io::ErrorKind::WouldBlock => { - let Some(res) = ({ - let mut readable = pin!(self.stream.readable()); - let mut tx = pin!(data_tx.watch_reader()); - poll_fn(|cx| match tx.as_mut().poll(cx) { - Poll::Ready(()) => return Poll::Ready(None), - Poll::Pending => readable.as_mut().poll(cx).map(Some), - }) - .await - }) else { - break Ok(()); - }; - if let Err(err) = res { + if let Err(err) = self.stream.readable().await { break Err(err.into()); } } - Err(err) => { - break Err(err.into()); + Err(err) => break Err(err.into()), + } + }; + self.close(res); + Ok(StreamState::Closed) + } + + async fn when_ready(&mut self, _: &Accessor) -> wasmtime::Result { + if let Err(err) = self.stream.readable().await { + self.close(Err(err.into())); + return Ok(StreamState::Closed); + } + Ok(StreamState::Open) + } +} + +struct SendStreamConsumer { + stream: Arc, + result: Option>>, + buffer: BytesMut, +} + +impl Drop for SendStreamConsumer { + fn drop(&mut self) { + self.close(Ok(())) + } +} + +impl SendStreamConsumer { + fn close(&mut self, res: Result<(), ErrorCode>) { + if let Some(tx) = self.result.take() { + _ = self + .stream + .as_socketlike_view::() + .shutdown(Shutdown::Write); + _ = tx.send(res); + } + } +} + +impl StreamConsumer for SendStreamConsumer { + async fn consume( + &mut self, + store: &Accessor, + src: &mut Source<'_, u8>, + ) -> wasmtime::Result { + let res = 'outer: loop { + match store.with(|mut store| { + 
let n = if let Some(mut src) = src.as_guest_source(store.as_context_mut()) { + let n = self.stream.try_write(src.remaining())?; + src.mark_read(n); + n + } else { + // NOTE: The implementation might want to use Linux SIOCOUTQ ioctl or similar construct + // on other platforms to only read `min(socket_capacity, src.remaining())` and prevent + // short writes + self.buffer.reserve(src.remaining(&mut store)); + if let Err(err) = src.read(&mut store, &mut self.buffer) { + return Ok(Err(err)); + } + self.stream.try_write(&self.buffer)? + }; + debug_assert!(n > 0); + std::io::Result::Ok(Ok(n)) + }) { + Ok(Ok(..)) if self.buffer.is_empty() => return Ok(StreamState::Open), + Ok(Ok(n)) => { + let mut buf = &self.buffer[n..]; + while !buf.is_empty() { + if let Err(err) = self.stream.writable().await { + break 'outer Err(err.into()); + } + match self.stream.try_write(buf) { + Ok(n) => buf = &buf[n..], + Err(err) if err.kind() == std::io::ErrorKind::WouldBlock => continue, + Err(err) => break 'outer Err(err.into()), + } + } + self.buffer.clear(); } + Ok(Err(err)) => return Err(err), + Err(err) if err.kind() == std::io::ErrorKind::WouldBlock => { + if let Err(err) = self.stream.writable().await { + break 'outer Err(err.into()); + } + } + Err(err) => break 'outer Err(err.into()), } }; - _ = self - .stream - .as_socketlike_view::() - .shutdown(Shutdown::Read); - drop(self.stream); - drop(data_tx); - result_tx.write(res).await; - Ok(()) + self.close(res); + Ok(StreamState::Closed) + } + + async fn when_ready(&mut self, _: &Accessor) -> wasmtime::Result { + if let Err(err) = self.stream.writable().await { + self.close(Err(err.into())); + return Ok(StreamState::Closed); + } + Ok(StreamState::Open) } } @@ -152,8 +274,8 @@ impl HostTcpSocketWithStore for WasiSockets { if !is_addr_allowed(store, local_address, SocketAddrUse::TcpBind).await { return Err(ErrorCode::AccessDenied.into()); } - store.with(|mut view| { - let socket = get_socket_mut(view.get().table, &socket)?; + 
store.with(|mut store| { + let socket = get_socket_mut(store.get().table, &socket)?; socket.start_bind(local_address)?; socket.finish_bind()?; Ok(()) @@ -169,16 +291,17 @@ impl HostTcpSocketWithStore for WasiSockets { if !is_addr_allowed(store, remote_address, SocketAddrUse::TcpConnect).await { return Err(ErrorCode::AccessDenied.into()); } - let sock = store.with(|mut view| -> SocketResult<_> { - let socket = get_socket_mut(view.get().table, &socket)?; - Ok(socket.start_connect(&remote_address)?) + let sock = store.with(|mut store| { + let socket = get_socket_mut(store.get().table, &socket)?; + let socket = socket.start_connect(&remote_address)?; + SocketResult::Ok(socket) })?; // FIXME: handle possible cancellation of the outer `connect` // https://github.com/bytecodealliance/wasmtime/pull/11291#discussion_r2223917986 let res = sock.connect(remote_address).await; - store.with(|mut view| -> SocketResult<_> { - let socket = get_socket_mut(view.get().table, &socket)?; + store.with(|mut store| { + let socket = get_socket_mut(store.get().table, &socket)?; socket.finish_connect(res)?; Ok(()) }) @@ -188,98 +311,88 @@ impl HostTcpSocketWithStore for WasiSockets { store: &Accessor, socket: Resource, ) -> SocketResult>> { - store.with(|mut view| { - let socket = get_socket_mut(view.get().table, &socket)?; + let instance = store.instance(); + let getter = store.getter(); + store.with(|mut store| { + let socket = get_socket_mut(store.get().table, &socket)?; socket.start_listen()?; socket.finish_listen()?; let listener = socket.tcp_listener_arc().unwrap().clone(); let family = socket.address_family(); let options = socket.non_inherited_options().clone(); - let (tx, rx) = view - .instance() - .stream(&mut view) - .context("failed to create stream") - .map_err(TrappableError::trap)?; - let task = ListenTask { - listener, - family, - tx, - options, - }; - view.spawn(task); - Ok(rx) + Ok(StreamReader::new( + instance, + &mut store, + ListenStreamProducer { + accepted: None, + 
listener, + family, + options, + getter, + }, + )) }) } async fn send( store: &Accessor, socket: Resource, - mut data: StreamReader, + data: StreamReader, ) -> SocketResult<()> { - let stream = store.with(|mut view| -> SocketResult<_> { - let sock = get_socket(view.get().table, &socket)?; + let (result_tx, result_rx) = oneshot::channel(); + store.with(|mut store| { + let sock = get_socket(store.get().table, &socket)?; let stream = sock.tcp_stream_arc()?; - Ok(Arc::clone(stream)) + let stream = Arc::clone(stream); + data.pipe( + store, + SendStreamConsumer { + stream, + result: Some(result_tx), + buffer: BytesMut::default(), + }, + ); + SocketResult::Ok(()) })?; - let mut buf = Vec::with_capacity(DEFAULT_BUFFER_CAPACITY); - let mut result = Ok(()); - while !data.is_closed() { - buf = data.read(store, buf).await; - let mut slice = buf.as_slice(); - while !slice.is_empty() { - match stream.try_write(&slice) { - Ok(n) => slice = &slice[n..], - Err(err) if err.kind() == std::io::ErrorKind::WouldBlock => { - if let Err(err) = stream.writable().await { - result = Err(ErrorCode::from(err).into()); - break; - } - } - Err(err) => { - result = Err(ErrorCode::from(err).into()); - break; - } - } - } - buf.clear(); - } - _ = stream - .as_socketlike_view::() - .shutdown(Shutdown::Write); - result + result_rx + .await + .context("oneshot sender dropped") + .map_err(SocketError::trap)??; + Ok(()) } async fn receive( store: &Accessor, socket: Resource, ) -> wasmtime::Result<(StreamReader, FutureReader>)> { - store.with(|mut view| { - let instance = view.instance(); - let (mut data_tx, data_rx) = instance - .stream(&mut view) - .context("failed to create stream")?; - let socket = get_socket_mut(view.get().table, &socket)?; + let instance = store.instance(); + store.with(|mut store| { + let socket = get_socket_mut(store.get().table, &socket)?; match socket.start_receive() { Some(stream) => { - let stream = stream.clone(); - let (result_tx, result_rx) = instance - .future(&mut view, || 
unreachable!()) - .context("failed to create future")?; - view.spawn(ReceiveTask { - stream, - data_tx, - result_tx, - }); - Ok((data_rx, result_rx)) - } - None => { - let (mut result_tx, result_rx) = instance - .future(&mut view, || Err(ErrorCode::InvalidState)) - .context("failed to create future")?; - result_tx.close(&mut view); - data_tx.close(&mut view); - Ok((data_rx, result_rx)) + let stream = Arc::clone(stream); + let (result_tx, result_rx) = oneshot::channel(); + Ok(( + StreamReader::new( + instance, + &mut store, + ReceiveStreamProducer { + stream, + result: Some(result_tx), + buffer: BytesMut::default(), + }, + ), + FutureReader::new(instance, &mut store, FutureOneshotProducer(result_rx)), + )) } + None => Ok(( + StreamReader::new(instance, &mut store, StreamEmptyProducer), + FutureReader::new( + instance, + &mut store, + FutureReadyProducer(Err(ErrorCode::InvalidState)), + ), + )), } }) } @@ -293,7 +406,7 @@ impl HostTcpSocket for WasiSocketsCtxView<'_> { .table .push(socket) .context("failed to push socket resource to table") - .map_err(TrappableError::trap)?; + .map_err(SocketError::trap)?; Ok(resource) } From cae75ffd25afc44c35b545ce01e92beeb29de184 Mon Sep 17 00:00:00 2001 From: Roman Volosatovs Date: Wed, 27 Aug 2025 16:27:30 +0200 Subject: [PATCH 07/32] tests: read the socket stream until EOF Signed-off-by: Roman Volosatovs --- .../bin/p3_sockets_tcp_sample_application.rs | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/crates/test-programs/src/bin/p3_sockets_tcp_sample_application.rs b/crates/test-programs/src/bin/p3_sockets_tcp_sample_application.rs index 83d51fbcf8ff..c2a3602144fe 100644 --- a/crates/test-programs/src/bin/p3_sockets_tcp_sample_application.rs +++ b/crates/test-programs/src/bin/p3_sockets_tcp_sample_application.rs @@ -44,11 +44,14 @@ async fn test_tcp_sample_application(family: IpAddressFamily, bind_address: IpSo let (mut data_rx, fut) = sock.receive(); let (result, data) = 
data_rx.read(Vec::with_capacity(100)).await; assert_eq!(result, StreamResult::Complete(first_message.len())); - // Check that we sent and received our message! assert_eq!(data, first_message); // Not guaranteed to work but should work in practice. - drop(data_rx); - fut.await.unwrap() + + let (result, data) = data_rx.read(Vec::with_capacity(1)).await; + assert_eq!(result, StreamResult::Dropped); + assert_eq!(data, []); + + fut.await.unwrap(); }, ); @@ -74,11 +77,14 @@ async fn test_tcp_sample_application(family: IpAddressFamily, bind_address: IpSo let (mut data_rx, fut) = sock.receive(); let (result, data) = data_rx.read(Vec::with_capacity(100)).await; assert_eq!(result, StreamResult::Complete(second_message.len())); - // Check that we sent and received our message! assert_eq!(data, second_message); // Not guaranteed to work but should work in practice. - drop(data_rx); - fut.await.unwrap() + + let (result, data) = data_rx.read(Vec::with_capacity(1)).await; + assert_eq!(result, StreamResult::Dropped); + assert_eq!(data, []); + + fut.await.unwrap(); } ); } From 99397e7a9d9213fea91c67432b3198154217fb93 Mon Sep 17 00:00:00 2001 From: Roman Volosatovs Date: Thu, 28 Aug 2025 11:43:40 +0200 Subject: [PATCH 08/32] p3-sockets: account for cancellation Signed-off-by: Roman Volosatovs --- crates/wasi/src/p3/sockets/host/types/tcp.rs | 61 +++++++++++++++----- 1 file changed, 45 insertions(+), 16 deletions(-) diff --git a/crates/wasi/src/p3/sockets/host/types/tcp.rs b/crates/wasi/src/p3/sockets/host/types/tcp.rs index 0a4f3c803dca..a1ca12865bf7 100644 --- a/crates/wasi/src/p3/sockets/host/types/tcp.rs +++ b/crates/wasi/src/p3/sockets/host/types/tcp.rs @@ -3,8 +3,7 @@ use crate::p3::bindings::sockets::types::{ Duration, ErrorCode, HostTcpSocket, HostTcpSocketWithStore, IpAddressFamily, IpSocketAddress, TcpSocket, }; -use crate::p3::sockets::SocketError; -use crate::p3::sockets::{SocketResult, WasiSockets}; +use crate::p3::sockets::{SocketError, SocketResult, WasiSockets}; use 
crate::p3::{ DEFAULT_BUFFER_CAPACITY, FutureOneshotProducer, FutureReadyProducer, StreamEmptyProducer, }; @@ -82,6 +81,7 @@ where .push(socket) .context("failed to push socket resource to table") })?; + // FIXME: Handle cancellation if let Some(socket) = dst.write(&store, Some(socket)).await? { store.with(|mut store| { store @@ -107,7 +107,7 @@ where struct ReceiveStreamProducer { stream: Arc, result: Option>>, - buffer: BytesMut, + buffer: Cursor, } impl Drop for ReceiveStreamProducer { @@ -134,6 +134,30 @@ impl StreamProducer for ReceiveStreamProducer { store: &Accessor, dst: &mut Destination, ) -> wasmtime::Result { + if !self.buffer.get_ref().is_empty() { + if !store.with(|mut store| { + dst.as_guest_destination(store.as_context_mut()) + .map(|mut dst| { + let start = self.buffer.position() as _; + let buffered = self.buffer.get_ref().len().saturating_sub(start); + let n = dst.remaining().len().min(buffered); + debug_assert!(n > 0); + let end = start.saturating_add(n); + dst.remaining()[..n].copy_from_slice(&self.buffer.get_ref()[start..end]); + dst.mark_written(n); + self.buffer.set_position(end as _); + }) + .is_some() + }) { + // FIXME: Handle cancellation + self.buffer = dst.write(store, mem::take(&mut self.buffer)).await?; + } + if self.buffer.position() as usize == self.buffer.get_ref().len() { + self.buffer.get_mut().clear(); + self.buffer.set_position(0); + } + return Ok(StreamState::Open); + } let res = loop { match store.with(|mut store| { if let Some(mut dst) = dst.as_guest_destination(store.as_context_mut()) { @@ -143,19 +167,21 @@ impl StreamProducer for ReceiveStreamProducer { } Ok(n) } else { - self.buffer.reserve(DEFAULT_BUFFER_CAPACITY); - self.stream.try_read_buf(&mut self.buffer) + self.buffer.get_mut().reserve(DEFAULT_BUFFER_CAPACITY); + self.stream.try_read_buf(self.buffer.get_mut()) } }) { Ok(0) => break Ok(()), - Ok(..) 
if self.buffer.is_empty() => return Ok(StreamState::Open), - Ok(n) => { - let mut buf = Cursor::new(mem::take(&mut self.buffer)); - while buf.position() as usize != n { - buf = dst.write(store, buf).await? + Ok(..) => { + if !self.buffer.get_ref().is_empty() { + // FIXME: Handle cancellation + self.buffer = dst.write(store, mem::take(&mut self.buffer)).await?; + if self.buffer.position() as usize == self.buffer.get_ref().len() { + self.buffer.get_mut().clear(); + self.buffer.set_position(0); + } } - self.buffer = buf.into_inner(); - self.buffer.clear(); + return Ok(StreamState::Open); } Err(err) if err.kind() == std::io::ErrorKind::WouldBlock => { if let Err(err) = self.stream.readable().await { @@ -170,9 +196,11 @@ impl StreamProducer for ReceiveStreamProducer { } async fn when_ready(&mut self, _: &Accessor) -> wasmtime::Result { - if let Err(err) = self.stream.readable().await { - self.close(Err(err.into())); - return Ok(StreamState::Closed); + if self.buffer.get_ref().is_empty() { + if let Err(err) = self.stream.readable().await { + self.close(Err(err.into())); + return Ok(StreamState::Closed); + } } Ok(StreamState::Open) } @@ -231,6 +259,7 @@ impl StreamConsumer for SendStreamConsumer { Ok(Ok(n)) => { let mut buf = &self.buffer[n..]; while !buf.is_empty() { + // FIXME: Handle cancellation if let Err(err) = self.stream.writable().await { break 'outer Err(err.into()); } @@ -379,7 +408,7 @@ impl HostTcpSocketWithStore for WasiSockets { ReceiveStreamProducer { stream, result: Some(result_tx), - buffer: BytesMut::default(), + buffer: Cursor::default(), }, ), FutureReader::new(instance, &mut store, FutureOneshotProducer(result_rx)), From 7a0b96fc8cb6c2f1dddc19e2cda3b546fc5d03e6 Mon Sep 17 00:00:00 2001 From: Roman Volosatovs Date: Thu, 28 Aug 2025 13:48:15 +0200 Subject: [PATCH 09/32] p3-sockets: mostly ensure byte buffer cancellation-safety Signed-off-by: Roman Volosatovs --- crates/wasi/src/p3/mod.rs | 34 ++++++++++++++ crates/wasi/src/p3/sockets/host/types/tcp.rs 
| 48 +++++++------------- 2 files changed, 50 insertions(+), 32 deletions(-) diff --git a/crates/wasi/src/p3/mod.rs b/crates/wasi/src/p3/mod.rs index e24641a86dd3..3013ea08fe21 100644 --- a/crates/wasi/src/p3/mod.rs +++ b/crates/wasi/src/p3/mod.rs @@ -18,7 +18,10 @@ pub mod sockets; use crate::WasiView; use crate::p3::bindings::LinkOptions; use anyhow::Context as _; +use bytes::BytesMut; +use std::io::Cursor; use tokio::sync::oneshot; +use wasmtime::AsContextMut as _; use wasmtime::component::{ Accessor, Destination, FutureProducer, Linker, StreamProducer, StreamState, }; @@ -64,6 +67,37 @@ where } } +async fn write_buffered_bytes( + store: &Accessor, + src: &mut Cursor, + dst: &mut Destination, +) -> wasmtime::Result<()> { + if !store.with(|mut store| { + dst.as_guest_destination(store.as_context_mut()) + .map(|mut dst| { + let start = src.position() as _; + let buffered = src.get_ref().len().saturating_sub(start); + let n = dst.remaining().len().min(buffered); + debug_assert!(n > 0); + let end = start.saturating_add(n); + dst.remaining()[..n].copy_from_slice(&src.get_ref()[start..end]); + dst.mark_written(n); + src.set_position(end as _); + }) + .is_some() + }) { + // FIXME: `mem::take` rather than `clone` when we can ensure cancellation-safety + //let buf = mem::take(src); + let buf = src.clone(); + *src = dst.write(store, buf).await?; + } + if src.position() as usize == src.get_ref().len() { + src.get_mut().clear(); + src.set_position(0); + } + Ok(()) +} + /// Add all WASI interfaces from this module into the `linker` provided. 
/// /// This function will add all interfaces implemented by this module to the diff --git a/crates/wasi/src/p3/sockets/host/types/tcp.rs b/crates/wasi/src/p3/sockets/host/types/tcp.rs index a1ca12865bf7..3cc9fe5db1d9 100644 --- a/crates/wasi/src/p3/sockets/host/types/tcp.rs +++ b/crates/wasi/src/p3/sockets/host/types/tcp.rs @@ -6,6 +6,7 @@ use crate::p3::bindings::sockets::types::{ use crate::p3::sockets::{SocketError, SocketResult, WasiSockets}; use crate::p3::{ DEFAULT_BUFFER_CAPACITY, FutureOneshotProducer, FutureReadyProducer, StreamEmptyProducer, + write_buffered_bytes, }; use crate::sockets::{NonInheritedOptions, SocketAddrUse, SocketAddressFamily, WasiSocketsCtxView}; use anyhow::Context; @@ -135,30 +136,11 @@ impl StreamProducer for ReceiveStreamProducer { dst: &mut Destination, ) -> wasmtime::Result { if !self.buffer.get_ref().is_empty() { - if !store.with(|mut store| { - dst.as_guest_destination(store.as_context_mut()) - .map(|mut dst| { - let start = self.buffer.position() as _; - let buffered = self.buffer.get_ref().len().saturating_sub(start); - let n = dst.remaining().len().min(buffered); - debug_assert!(n > 0); - let end = start.saturating_add(n); - dst.remaining()[..n].copy_from_slice(&self.buffer.get_ref()[start..end]); - dst.mark_written(n); - self.buffer.set_position(end as _); - }) - .is_some() - }) { - // FIXME: Handle cancellation - self.buffer = dst.write(store, mem::take(&mut self.buffer)).await?; - } - if self.buffer.position() as usize == self.buffer.get_ref().len() { - self.buffer.get_mut().clear(); - self.buffer.set_position(0); - } + write_buffered_bytes(store, &mut self.buffer, dst).await?; return Ok(StreamState::Open); } - let res = loop { + + let res = 'result: loop { match store.with(|mut store| { if let Some(mut dst) = dst.as_guest_destination(store.as_context_mut()) { let n = self.stream.try_read(dst.remaining())?; @@ -171,11 +153,13 @@ impl StreamProducer for ReceiveStreamProducer { 
self.stream.try_read_buf(self.buffer.get_mut()) } }) { - Ok(0) => break Ok(()), + Ok(0) => break 'result Ok(()), Ok(..) => { if !self.buffer.get_ref().is_empty() { - // FIXME: Handle cancellation - self.buffer = dst.write(store, mem::take(&mut self.buffer)).await?; + // FIXME: `mem::take` rather than `clone` when we can ensure cancellation-safety + //let buf = mem::take(&mut self.buffer); + let buf = self.buffer.clone(); + self.buffer = dst.write(store, buf).await?; if self.buffer.position() as usize == self.buffer.get_ref().len() { self.buffer.get_mut().clear(); self.buffer.set_position(0); @@ -185,10 +169,10 @@ impl StreamProducer for ReceiveStreamProducer { } Err(err) if err.kind() == std::io::ErrorKind::WouldBlock => { if let Err(err) = self.stream.readable().await { - break Err(err.into()); + break 'result Err(err.into()); } } - Err(err) => break Err(err.into()), + Err(err) => break 'result Err(err.into()), } }; self.close(res); @@ -236,7 +220,7 @@ impl StreamConsumer for SendStreamConsumer { store: &Accessor, src: &mut Source<'_, u8>, ) -> wasmtime::Result { - let res = 'outer: loop { + let res = 'result: loop { match store.with(|mut store| { let n = if let Some(mut src) = src.as_guest_source(store.as_context_mut()) { let n = self.stream.try_write(src.remaining())?; @@ -261,12 +245,12 @@ impl StreamConsumer for SendStreamConsumer { while !buf.is_empty() { // FIXME: Handle cancellation if let Err(err) = self.stream.writable().await { - break 'outer Err(err.into()); + break 'result Err(err.into()); } match self.stream.try_write(buf) { Ok(n) => buf = &buf[n..], Err(err) if err.kind() == std::io::ErrorKind::WouldBlock => continue, - Err(err) => break 'outer Err(err.into()), + Err(err) => break 'result Err(err.into()), } } self.buffer.clear(); @@ -274,10 +258,10 @@ impl StreamConsumer for SendStreamConsumer { Ok(Err(err)) => return Err(err), Err(err) if err.kind() == std::io::ErrorKind::WouldBlock => { if let Err(err) = self.stream.writable().await { - break 
'outer Err(err.into()); + break 'result Err(err.into()); } } - Err(err) => break 'outer Err(err.into()), + Err(err) => break 'result Err(err.into()), } }; self.close(res); From efdf6d354736567a399f614f46eb7e5eeb8530b3 Mon Sep 17 00:00:00 2001 From: Roman Volosatovs Date: Thu, 28 Aug 2025 13:48:51 +0200 Subject: [PATCH 10/32] p3-filesystem: switch to new API Signed-off-by: Roman Volosatovs --- crates/wasi/src/p3/filesystem/host.rs | 550 +++++++++++++++++--------- 1 file changed, 369 insertions(+), 181 deletions(-) diff --git a/crates/wasi/src/p3/filesystem/host.rs b/crates/wasi/src/p3/filesystem/host.rs index 34e8304ec0d6..9d48a3026594 100644 --- a/crates/wasi/src/p3/filesystem/host.rs +++ b/crates/wasi/src/p3/filesystem/host.rs @@ -1,4 +1,3 @@ -use crate::DirPerms; use crate::filesystem::{Descriptor, Dir, File, WasiFilesystem, WasiFilesystemCtxView}; use crate::p3::DEFAULT_BUFFER_CAPACITY; use crate::p3::bindings::clocks::wall_clock; @@ -7,14 +6,19 @@ use crate::p3::bindings::filesystem::types::{ Filesize, MetadataHashValue, NewTimestamp, OpenFlags, PathFlags, }; use crate::p3::filesystem::{FilesystemError, FilesystemResult, preopens}; -use crate::{FilePerms, TrappableError}; -use anyhow::Context as _; +use crate::p3::{ + FutureOneshotProducer, FutureReadyProducer, StreamEmptyProducer, write_buffered_bytes, +}; +use crate::{DirPerms, FilePerms}; +use anyhow::{Context as _, bail}; use bytes::BytesMut; +use core::mem; use std::io::Cursor; use system_interface::fs::FileIoExt as _; +use tokio::sync::oneshot; use wasmtime::component::{ - Accessor, AccessorTask, FutureReader, FutureWriter, GuardedFutureWriter, GuardedStreamWriter, - Resource, ResourceTable, StreamReader, StreamWriter, + Accessor, Destination, FutureReader, Resource, ResourceTable, Source, StreamConsumer, + StreamProducer, StreamReader, StreamState, }; fn get_descriptor<'a>( @@ -24,7 +28,7 @@ fn get_descriptor<'a>( table .get(fd) .context("failed to get descriptor resource from table") - 
.map_err(TrappableError::trap) + .map_err(FilesystemError::trap) } fn get_file<'a>( @@ -108,20 +112,45 @@ fn systemtimespec_from(t: NewTimestamp) -> Result, - result_tx: FutureWriter>, + result: Option>>, + buffer: Cursor, } -impl AccessorTask> for ReadFileTask { - async fn run(self, store: &Accessor) -> wasmtime::Result<()> { - let mut buf = BytesMut::zeroed(DEFAULT_BUFFER_CAPACITY); - let mut offset = self.offset; - let mut data_tx = GuardedStreamWriter::new(store, self.data_tx); - let result_tx = GuardedFutureWriter::new(store, self.result_tx); - let res = loop { +impl Drop for ReadStreamProducer { + fn drop(&mut self) { + self.close(Ok(())) + } +} + +impl ReadStreamProducer { + fn close(&mut self, res: Result<(), ErrorCode>) { + if let Some(tx) = self.result.take() { + _ = tx.send(res); + } + } +} + +impl StreamProducer for ReadStreamProducer { + async fn produce( + &mut self, + store: &Accessor, + dst: &mut Destination, + ) -> wasmtime::Result { + if !self.buffer.get_ref().is_empty() { + write_buffered_bytes(store, &mut self.buffer, dst).await?; + return Ok(StreamState::Open); + } + + let n = store + .with(|store| dst.remaining(store)) + .unwrap_or(DEFAULT_BUFFER_CAPACITY); + let mut buf = mem::take(&mut self.buffer).into_inner(); + buf.resize(n, 0); + let offset = self.offset; + let res = 'result: { match self .file .run_blocking(move |file| { @@ -131,102 +160,247 @@ impl AccessorTask> for ReadFileTask { }) .await { - Ok(chunk) if chunk.is_empty() => { - break Ok(()); - } - Ok(chunk) => { - let Ok(n) = chunk.len().try_into() else { - break Err(ErrorCode::Overflow); + Ok(buf) if buf.is_empty() => break 'result Ok(()), + Ok(buf) => { + let Ok(n) = buf.len().try_into() else { + break 'result Err(ErrorCode::Overflow); }; let Some(n) = offset.checked_add(n) else { - break Err(ErrorCode::Overflow); + break 'result Err(ErrorCode::Overflow); }; - offset = n; - buf = data_tx.write_all(Cursor::new(chunk)).await.into_inner(); - if data_tx.is_closed() { - break Ok(()); - 
} - buf.resize(DEFAULT_BUFFER_CAPACITY, 0); - } - Err(err) => { - break Err(err.into()); + self.offset = n; + self.buffer = Cursor::new(buf); + write_buffered_bytes(store, &mut self.buffer, dst).await?; + return Ok(StreamState::Open); } + Err(err) => break 'result Err(err.into()), } }; - drop(self.file); - drop(data_tx); - result_tx.write(res).await; - Ok(()) + self.close(res); + Ok(StreamState::Closed) + } + + async fn when_ready(&mut self, _: &Accessor) -> wasmtime::Result { + Ok(StreamState::Open) } } -struct ReadDirectoryTask { +struct DirectoryStreamProducer { dir: Dir, - data_tx: StreamWriter, - result_tx: FutureWriter>, + entries: Option, + result: Option>>, } -impl AccessorTask> for ReadDirectoryTask { - async fn run(self, store: &Accessor) -> wasmtime::Result<()> { - let mut data_tx = GuardedStreamWriter::new(store, self.data_tx); - let result_tx = GuardedFutureWriter::new(store, self.result_tx); - let res = loop { - let mut entries = match self.dir.run_blocking(cap_std::fs::Dir::entries).await { - Ok(entries) => entries, - Err(err) => break Err(err.into()), +impl DirectoryStreamProducer { + fn close(&mut self, res: Result<(), ErrorCode>) { + if let Some(tx) = self.result.take() { + _ = tx.send(res); + } + } +} + +impl StreamProducer for DirectoryStreamProducer { + async fn produce( + &mut self, + store: &Accessor, + dst: &mut Destination, + ) -> wasmtime::Result { + let res = 'result: loop { + let mut entries = if let Some(entries) = self.entries.take() { + entries + } else { + // FIXME: Handle cancellation + match self.dir.run_blocking(cap_std::fs::Dir::entries).await { + Ok(entries) => entries, + Err(err) => break 'result Err(err.into()), + } + }; + // FIXME: Handle cancellation + let Some((res, tail)) = self + .dir + .run_blocking(move |_| entries.next().map(|entry| (entry, entries))) + .await + else { + break 'result Ok(()); }; - if let Err(err) = loop { - let Some((res, tail)) = self - .dir - .run_blocking(move |_| entries.next().map(|entry| (entry, 
entries))) - .await - else { - break Ok(()); - }; - entries = tail; - let entry = match res { - Ok(entry) => entry, - Err(err) => { - // On windows, filter out files like `C:\DumpStack.log.tmp` which we - // can't get full metadata for. - #[cfg(windows)] + self.entries = Some(tail); + let entry = match res { + Ok(entry) => entry, + Err(err) => { + // On windows, filter out files like `C:\DumpStack.log.tmp` which we + // can't get full metadata for. + #[cfg(windows)] + { + use windows_sys::Win32::Foundation::{ + ERROR_ACCESS_DENIED, ERROR_SHARING_VIOLATION, + }; + if err.raw_os_error() == Some(ERROR_SHARING_VIOLATION as i32) + || err.raw_os_error() == Some(ERROR_ACCESS_DENIED as i32) { - use windows_sys::Win32::Foundation::{ - ERROR_ACCESS_DENIED, ERROR_SHARING_VIOLATION, - }; - if err.raw_os_error() == Some(ERROR_SHARING_VIOLATION as i32) - || err.raw_os_error() == Some(ERROR_ACCESS_DENIED as i32) - { - continue; - } + continue; } - break Err(err.into()); } - }; - let meta = match entry.metadata() { - Ok(meta) => meta, - Err(err) => break Err(err.into()), - }; - let Ok(name) = entry.file_name().into_string() else { - break Err(ErrorCode::IllegalByteSequence); - }; - data_tx - .write(Some(DirectoryEntry { + break 'result Err(err.into()); + } + }; + let meta = match entry.metadata() { + Ok(meta) => meta, + Err(err) => break 'result Err(err.into()), + }; + let Ok(name) = entry.file_name().into_string() else { + break 'result Err(ErrorCode::IllegalByteSequence); + }; + // FIXME: Handle cancellation + if let Some(_) = dst + .write( + store, + Some(DirectoryEntry { type_: meta.file_type().into(), name, - })) - .await; - if data_tx.is_closed() { - break Ok(()); + }), + ) + .await? 
+ { + bail!("failed to write entry") + } + return Ok(StreamState::Open); + }; + self.close(res); + Ok(StreamState::Closed) + } + + async fn when_ready(&mut self, _: &Accessor) -> wasmtime::Result { + Ok(StreamState::Open) + } +} + +struct WriteStreamConsumer { + file: File, + offset: u64, + result: Option>>, + buffer: BytesMut, +} + +impl Drop for WriteStreamConsumer { + fn drop(&mut self) { + self.close(Ok(())) + } +} + +impl WriteStreamConsumer { + fn close(&mut self, res: Result<(), ErrorCode>) { + if let Some(tx) = self.result.take() { + _ = tx.send(res); + } + } +} + +impl StreamConsumer for WriteStreamConsumer { + async fn consume( + &mut self, + store: &Accessor, + src: &mut Source<'_, u8>, + ) -> wasmtime::Result { + let res = 'result: loop { + store.with(|mut store| { + self.buffer.reserve(src.remaining(&mut store)); + src.read(&mut store, &mut self.buffer) + })?; + // FIXME: `mem::take` rather than `clone` when we can ensure cancellation-safety + //let buf = mem::take(&mut self.buffer); + let buf = self.buffer.clone(); + let mut offset = self.offset; + match self + .file + .spawn_blocking(move |file| { + let mut pos = 0; + while pos != buf.len() { + let n = file.write_at(&buf[pos..], offset)?; + pos = pos.saturating_add(n); + let n = n.try_into().or(Err(ErrorCode::Overflow))?; + offset = offset.checked_add(n).ok_or(ErrorCode::Overflow)?; + } + Ok((buf, offset)) + }) + .await + { + Ok((buf, offset)) => { + self.buffer = buf; + self.buffer.clear(); + self.offset = offset; + return Ok(StreamState::Open); } - } { - break Err(err); - }; + Err(err) => break 'result Err(err), + } }; - drop(self.dir); - drop(data_tx); - result_tx.write(res).await; - Ok(()) + self.close(res); + Ok(StreamState::Closed) + } + + async fn when_ready(&mut self, _: &Accessor) -> wasmtime::Result { + Ok(StreamState::Open) + } +} + +struct AppendStreamConsumer { + file: File, + result: Option>>, + buffer: BytesMut, +} + +impl Drop for AppendStreamConsumer { + fn drop(&mut self) { + 
self.close(Ok(())) + } +} + +impl AppendStreamConsumer { + fn close(&mut self, res: Result<(), ErrorCode>) { + if let Some(tx) = self.result.take() { + _ = tx.send(res); + } + } +} + +impl StreamConsumer for AppendStreamConsumer { + async fn consume( + &mut self, + store: &Accessor, + src: &mut Source<'_, u8>, + ) -> wasmtime::Result { + let res = 'result: loop { + store.with(|mut store| { + self.buffer.reserve(src.remaining(&mut store)); + src.read(&mut store, &mut self.buffer) + })?; + let buf = mem::take(&mut self.buffer); + // FIXME: Handle cancellation + match self + .file + .spawn_blocking(move |file| { + let mut pos = 0; + while pos != buf.len() { + let n = file.append(&buf[pos..])?; + pos = pos.saturating_add(n); + } + Ok(buf) + }) + .await + { + Ok(buf) => { + self.buffer = buf; + self.buffer.clear(); + return Ok(StreamState::Open); + } + Err(err) => break 'result Err(err), + } + }; + self.close(res); + Ok(StreamState::Closed) + } + + async fn when_ready(&mut self, _: &Accessor) -> wasmtime::Result { + Ok(StreamState::Open) } } @@ -242,87 +416,95 @@ impl types::HostDescriptorWithStore for WasiFilesystem { fd: Resource, offset: Filesize, ) -> wasmtime::Result<(StreamReader, FutureReader>)> { - let (file, (data_tx, data_rx), (result_tx, result_rx)) = store.with(|mut store| { - let file = get_file(store.get().table, &fd).cloned()?; - let instance = store.instance(); - let data = instance - .stream(&mut store) - .context("failed to create stream")?; - let result = if !file.perms.contains(FilePerms::READ) { - instance.future(&mut store, || Err(types::ErrorCode::NotPermitted)) - } else { - instance.future(&mut store, || unreachable!()) + let instance = store.instance(); + store.with(|mut store| { + let file = get_file(store.get().table, &fd)?; + if !file.perms.contains(FilePerms::READ) { + return Ok(( + StreamReader::new(instance, &mut store, StreamEmptyProducer), + FutureReader::new( + instance, + &mut store, + FutureReadyProducer(Err(ErrorCode::NotPermitted)), 
+ ), + )); } - .context("failed to create future")?; - anyhow::Ok((file, data, result)) - })?; - if !file.perms.contains(FilePerms::READ) { - return Ok((data_rx, result_rx)); - } - store.spawn(ReadFileTask { - file, - offset, - data_tx, - result_tx, - }); - Ok((data_rx, result_rx)) + + let file = file.clone(); + let (result_tx, result_rx) = oneshot::channel(); + Ok(( + StreamReader::new( + instance, + &mut store, + ReadStreamProducer { + file, + offset, + result: Some(result_tx), + buffer: Cursor::default(), + }, + ), + FutureReader::new(instance, &mut store, FutureOneshotProducer(result_rx)), + )) + }) } async fn write_via_stream( store: &Accessor, fd: Resource, - mut data: StreamReader, - mut offset: Filesize, + data: StreamReader, + offset: Filesize, ) -> FilesystemResult<()> { - let file = store.get_file(&fd)?; - if !file.perms.contains(FilePerms::WRITE) { - return Err(types::ErrorCode::NotPermitted.into()); - } - let mut buf = Vec::with_capacity(DEFAULT_BUFFER_CAPACITY); - while !data.is_closed() { - buf = data.read(store, buf).await; - buf = file - .spawn_blocking(move |file| { - let mut pos = 0; - while pos != buf.len() { - let n = file.write_at(&buf[pos..], offset)?; - pos = pos.saturating_add(n); - let n = n.try_into().or(Err(ErrorCode::Overflow))?; - offset = offset.checked_add(n).ok_or(ErrorCode::Overflow)?; - } - FilesystemResult::Ok(buf) - }) - .await?; - offset = offset.saturating_add(buf.len() as _); - buf.clear(); - } + let (result_tx, result_rx) = oneshot::channel(); + store.with(|mut store| { + let file = get_file(store.get().table, &fd)?; + if !file.perms.contains(FilePerms::WRITE) { + return Err(ErrorCode::NotPermitted.into()); + } + let file = file.clone(); + data.pipe( + store, + WriteStreamConsumer { + file, + offset, + result: Some(result_tx), + buffer: BytesMut::default(), + }, + ); + FilesystemResult::Ok(()) + })?; + result_rx + .await + .context("oneshot sender dropped") + .map_err(FilesystemError::trap)??; Ok(()) } async fn 
append_via_stream( store: &Accessor, fd: Resource, - mut data: StreamReader, + data: StreamReader, ) -> FilesystemResult<()> { - let file = store.get_file(&fd)?; - if !file.perms.contains(FilePerms::WRITE) { - return Err(types::ErrorCode::NotPermitted.into()); - } - let mut buf = Vec::with_capacity(DEFAULT_BUFFER_CAPACITY); - while !data.is_closed() { - buf = data.read(store, buf).await; - buf = file - .spawn_blocking(move |file| { - let mut pos = 0; - while pos != buf.len() { - let n = file.append(&buf[pos..])?; - pos = pos.saturating_add(n); - } - FilesystemResult::Ok(buf) - }) - .await?; - buf.clear(); - } + let (result_tx, result_rx) = oneshot::channel(); + store.with(|mut store| { + let file = get_file(store.get().table, &fd)?; + if !file.perms.contains(FilePerms::WRITE) { + return Err(ErrorCode::NotPermitted.into()); + } + let file = file.clone(); + data.pipe( + store, + AppendStreamConsumer { + file, + result: Some(result_tx), + buffer: BytesMut::default(), + }, + ); + FilesystemResult::Ok(()) + })?; + result_rx + .await + .context("oneshot sender dropped") + .map_err(FilesystemError::trap)??; Ok(()) } @@ -395,29 +577,35 @@ impl types::HostDescriptorWithStore for WasiFilesystem { StreamReader, FutureReader>, )> { - let (dir, (data_tx, data_rx), (result_tx, result_rx)) = store.with(|mut store| { - let dir = get_dir(store.get().table, &fd).cloned()?; - let instance = store.instance(); - let data = instance - .stream(&mut store) - .context("failed to create stream")?; - let result = if !dir.perms.contains(DirPerms::READ) { - instance.future(&mut store, || Err(types::ErrorCode::NotPermitted)) - } else { - instance.future(&mut store, || unreachable!()) + let instance = store.instance(); + store.with(|mut store| { + let dir = get_dir(store.get().table, &fd)?; + if !dir.perms.contains(DirPerms::READ) { + return Ok(( + StreamReader::new(instance, &mut store, StreamEmptyProducer), + FutureReader::new( + instance, + &mut store, + 
FutureReadyProducer(Err(ErrorCode::NotPermitted)), + ), + )); } - .context("failed to create future")?; - anyhow::Ok((dir, data, result)) - })?; - if !dir.perms.contains(DirPerms::READ) { - return Ok((data_rx, result_rx)); - } - store.spawn(ReadDirectoryTask { - dir, - data_tx, - result_tx, - }); - Ok((data_rx, result_rx)) + + let dir = dir.clone(); + let (result_tx, result_rx) = oneshot::channel(); + Ok(( + StreamReader::new( + instance, + &mut store, + DirectoryStreamProducer { + dir, + entries: None, + result: Some(result_tx), + }, + ), + FutureReader::new(instance, &mut store, FutureOneshotProducer(result_rx)), + )) + }) } async fn sync(store: &Accessor, fd: Resource) -> FilesystemResult<()> { From 014adb11dece4d55087dd7c1ea9f7b7c902b74a2 Mon Sep 17 00:00:00 2001 From: Roman Volosatovs Date: Thu, 28 Aug 2025 13:53:33 +0200 Subject: [PATCH 11/32] fixup! p3-sockets: mostly ensure byte buffer cancellation-safety --- crates/wasi/src/p3/sockets/host/types/tcp.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/crates/wasi/src/p3/sockets/host/types/tcp.rs b/crates/wasi/src/p3/sockets/host/types/tcp.rs index 3cc9fe5db1d9..4387faf2fcdf 100644 --- a/crates/wasi/src/p3/sockets/host/types/tcp.rs +++ b/crates/wasi/src/p3/sockets/host/types/tcp.rs @@ -11,7 +11,6 @@ use crate::p3::{ use crate::sockets::{NonInheritedOptions, SocketAddrUse, SocketAddressFamily, WasiSocketsCtxView}; use anyhow::Context; use bytes::BytesMut; -use core::mem; use io_lifetimes::AsSocketlike as _; use std::io::Cursor; use std::net::{Shutdown, SocketAddr}; From 368a1e4f409c250ed6996770ed6e02f760ce31d7 Mon Sep 17 00:00:00 2001 From: Roman Volosatovs Date: Thu, 28 Aug 2025 14:17:39 +0200 Subject: [PATCH 12/32] p3-cli: switch to new API Signed-off-by: Roman Volosatovs --- crates/wasi/src/p3/cli/host.rs | 157 +++++++++++++++++++-------------- crates/wasi/src/p3/mod.rs | 3 + 2 files changed, 94 insertions(+), 66 deletions(-) diff --git a/crates/wasi/src/p3/cli/host.rs b/crates/wasi/src/p3/cli/host.rs 
index c86ef58ad615..cd3d49b5c4ce 100644 --- a/crates/wasi/src/p3/cli/host.rs +++ b/crates/wasi/src/p3/cli/host.rs @@ -1,77 +1,96 @@ use crate::I32Exit; use crate::cli::{IsTerminal, WasiCli, WasiCliCtxView}; -use crate::p3::DEFAULT_BUFFER_CAPACITY; use crate::p3::bindings::cli::{ environment, exit, stderr, stdin, stdout, terminal_input, terminal_output, terminal_stderr, terminal_stdin, terminal_stdout, }; use crate::p3::cli::{TerminalInput, TerminalOutput}; +use crate::p3::write_buffered_bytes; +use crate::p3::{DEFAULT_BUFFER_CAPACITY, MAX_BUFFER_CAPACITY}; use anyhow::{Context as _, anyhow}; use bytes::BytesMut; use std::io::Cursor; use tokio::io::{AsyncRead, AsyncReadExt as _, AsyncWrite, AsyncWriteExt as _}; use wasmtime::component::{ - Accessor, AccessorTask, GuardedStreamReader, GuardedStreamWriter, HasData, Resource, - StreamReader, StreamWriter, + Accessor, Destination, Resource, Source, StreamConsumer, StreamProducer, StreamReader, + StreamState, }; -struct InputTask { +struct InputStreamProducer { rx: T, - tx: StreamWriter, + buffer: Cursor, } -impl AccessorTask> for InputTask +impl StreamProducer for InputStreamProducer where - U: HasData, - V: AsyncRead + Send + Sync + Unpin + 'static, + T: AsyncRead + Send + Unpin + 'static, { - async fn run(mut self, store: &Accessor) -> wasmtime::Result<()> { - let mut buf = BytesMut::with_capacity(DEFAULT_BUFFER_CAPACITY); - let mut tx = GuardedStreamWriter::new(store, self.tx); - while !tx.is_closed() { - match self.rx.read_buf(&mut buf).await { - Ok(0) => return Ok(()), - Ok(_) => { - buf = tx.write_all(Cursor::new(buf)).await.into_inner(); - buf.clear(); - } - Err(_err) => { - // TODO: Report the error to the guest - return Ok(()); - } + async fn produce( + &mut self, + store: &Accessor, + dst: &mut Destination, + ) -> wasmtime::Result { + if !self.buffer.get_ref().is_empty() { + write_buffered_bytes(store, &mut self.buffer, dst).await?; + return Ok(StreamState::Open); + } + + let n = store + .with(|store| 
dst.remaining(store)) + .unwrap_or(DEFAULT_BUFFER_CAPACITY) + .min(MAX_BUFFER_CAPACITY); + self.buffer.get_mut().reserve(n); + match self.rx.read_buf(self.buffer.get_mut()).await { + Ok(0) => Ok(StreamState::Closed), + Ok(_) => { + write_buffered_bytes(store, &mut self.buffer, dst).await?; + Ok(StreamState::Open) + } + Err(_err) => { + // TODO: Report the error to the guest + Ok(StreamState::Closed) } } - Ok(()) + } + + async fn when_ready(&mut self, _: &Accessor) -> wasmtime::Result { + Ok(StreamState::Open) } } -struct OutputTask { - rx: StreamReader, +struct OutputStreamConsumer { tx: T, + buffer: BytesMut, } -impl AccessorTask> for OutputTask +impl StreamConsumer for OutputStreamConsumer where - U: HasData, - V: AsyncWrite + Send + Sync + Unpin + 'static, + T: AsyncWrite + Send + Unpin + 'static, { - async fn run(mut self, store: &Accessor) -> wasmtime::Result<()> { - let mut buf = BytesMut::with_capacity(DEFAULT_BUFFER_CAPACITY); - let mut rx = GuardedStreamReader::new(store, self.rx); - while !rx.is_closed() { - buf = rx.read(buf).await; - match self.tx.write_all(&buf).await { - Ok(()) => { - buf.clear(); - continue; - } - Err(_err) => { - // TODO: Report the error to the guest - return Ok(()); - } + async fn consume( + &mut self, + store: &Accessor, + src: &mut Source<'_, u8>, + ) -> wasmtime::Result { + store.with(|mut store| { + let n = src.remaining(&mut store).min(MAX_BUFFER_CAPACITY); + self.buffer.reserve(n); + src.read(&mut store, &mut self.buffer) + })?; + match self.tx.write_all(&self.buffer).await { + Ok(()) => { + self.buffer.clear(); + Ok(StreamState::Open) + } + Err(_err) => { + // TODO: Report the error to the guest + Ok(StreamState::Closed) } } - Ok(()) + } + + async fn when_ready(&mut self, _: &Accessor) -> wasmtime::Result { + Ok(StreamState::Open) } } @@ -140,17 +159,17 @@ impl terminal_stderr::Host for WasiCliCtxView<'_> { impl stdin::HostWithStore for WasiCli { async fn get_stdin(store: &Accessor) -> wasmtime::Result> { - store.with(|mut 
view| { - let instance = view.instance(); - let (tx, rx) = instance - .stream(&mut view) - .context("failed to create stream")?; - let stdin = view.get().ctx.stdin.async_stream(); - view.spawn(InputTask { - rx: Box::into_pin(stdin), - tx, - }); - Ok(rx) + let instance = store.instance(); + store.with(|mut store| { + let rx = store.get().ctx.stdin.async_stream(); + Ok(StreamReader::new( + instance, + &mut store, + InputStreamProducer { + rx: Box::into_pin(rx), + buffer: Cursor::default(), + }, + )) }) } } @@ -162,12 +181,15 @@ impl stdout::HostWithStore for WasiCli { store: &Accessor, data: StreamReader, ) -> wasmtime::Result<()> { - store.with(|mut view| { - let tx = view.get().ctx.stdout.async_stream(); - view.spawn(OutputTask { - rx: data, - tx: Box::into_pin(tx), - }); + store.with(|mut store| { + let tx = store.get().ctx.stdout.async_stream(); + data.pipe( + store, + OutputStreamConsumer { + tx: Box::into_pin(tx), + buffer: BytesMut::default(), + }, + ); Ok(()) }) } @@ -180,12 +202,15 @@ impl stderr::HostWithStore for WasiCli { store: &Accessor, data: StreamReader, ) -> wasmtime::Result<()> { - store.with(|mut view| { - let tx = view.get().ctx.stderr.async_stream(); - view.spawn(OutputTask { - rx: data, - tx: Box::into_pin(tx), - }); + store.with(|mut store| { + let tx = store.get().ctx.stderr.async_stream(); + data.pipe( + store, + OutputStreamConsumer { + tx: Box::into_pin(tx), + buffer: BytesMut::default(), + }, + ); Ok(()) }) } diff --git a/crates/wasi/src/p3/mod.rs b/crates/wasi/src/p3/mod.rs index 3013ea08fe21..a92d418085b4 100644 --- a/crates/wasi/src/p3/mod.rs +++ b/crates/wasi/src/p3/mod.rs @@ -29,6 +29,9 @@ use wasmtime::component::{ // Default buffer capacity to use for reads of byte-sized values. const DEFAULT_BUFFER_CAPACITY: usize = 8192; +// Maximum buffer capacity to use for reads of byte-sized values. 
+const MAX_BUFFER_CAPACITY: usize = 4 * DEFAULT_BUFFER_CAPACITY; + struct StreamEmptyProducer; impl StreamProducer for StreamEmptyProducer { From 58ef641668a5d48e377d8b31244efc777cae8877 Mon Sep 17 00:00:00 2001 From: Roman Volosatovs Date: Thu, 28 Aug 2025 14:23:32 +0200 Subject: [PATCH 13/32] p3: limit maximum buffer size Signed-off-by: Roman Volosatovs --- crates/wasi/src/p3/filesystem/host.rs | 13 ++++++++----- crates/wasi/src/p3/sockets/host/types/tcp.rs | 7 ++++--- 2 files changed, 12 insertions(+), 8 deletions(-) diff --git a/crates/wasi/src/p3/filesystem/host.rs b/crates/wasi/src/p3/filesystem/host.rs index 9d48a3026594..c901d04cf6f5 100644 --- a/crates/wasi/src/p3/filesystem/host.rs +++ b/crates/wasi/src/p3/filesystem/host.rs @@ -1,5 +1,4 @@ use crate::filesystem::{Descriptor, Dir, File, WasiFilesystem, WasiFilesystemCtxView}; -use crate::p3::DEFAULT_BUFFER_CAPACITY; use crate::p3::bindings::clocks::wall_clock; use crate::p3::bindings::filesystem::types::{ self, Advice, DescriptorFlags, DescriptorStat, DescriptorType, DirectoryEntry, ErrorCode, @@ -7,7 +6,8 @@ use crate::p3::bindings::filesystem::types::{ }; use crate::p3::filesystem::{FilesystemError, FilesystemResult, preopens}; use crate::p3::{ - FutureOneshotProducer, FutureReadyProducer, StreamEmptyProducer, write_buffered_bytes, + DEFAULT_BUFFER_CAPACITY, FutureOneshotProducer, FutureReadyProducer, MAX_BUFFER_CAPACITY, + StreamEmptyProducer, write_buffered_bytes, }; use crate::{DirPerms, FilePerms}; use anyhow::{Context as _, bail}; @@ -146,7 +146,8 @@ impl StreamProducer for ReadStreamProducer { let n = store .with(|store| dst.remaining(store)) - .unwrap_or(DEFAULT_BUFFER_CAPACITY); + .unwrap_or(DEFAULT_BUFFER_CAPACITY) + .min(MAX_BUFFER_CAPACITY); let mut buf = mem::take(&mut self.buffer).into_inner(); buf.resize(n, 0); let offset = self.offset; @@ -303,7 +304,8 @@ impl StreamConsumer for WriteStreamConsumer { ) -> wasmtime::Result { let res = 'result: loop { store.with(|mut store| { - 
self.buffer.reserve(src.remaining(&mut store)); + let n = src.remaining(&mut store).min(MAX_BUFFER_CAPACITY); + self.buffer.reserve(n); src.read(&mut store, &mut self.buffer) })?; // FIXME: `mem::take` rather than `clone` when we can ensure cancellation-safety @@ -370,7 +372,8 @@ impl StreamConsumer for AppendStreamConsumer { ) -> wasmtime::Result { let res = 'result: loop { store.with(|mut store| { - self.buffer.reserve(src.remaining(&mut store)); + let n = src.remaining(&mut store).min(MAX_BUFFER_CAPACITY); + self.buffer.reserve(n); src.read(&mut store, &mut self.buffer) })?; let buf = mem::take(&mut self.buffer); diff --git a/crates/wasi/src/p3/sockets/host/types/tcp.rs b/crates/wasi/src/p3/sockets/host/types/tcp.rs index 4387faf2fcdf..8a471074a8f0 100644 --- a/crates/wasi/src/p3/sockets/host/types/tcp.rs +++ b/crates/wasi/src/p3/sockets/host/types/tcp.rs @@ -5,8 +5,8 @@ use crate::p3::bindings::sockets::types::{ }; use crate::p3::sockets::{SocketError, SocketResult, WasiSockets}; use crate::p3::{ - DEFAULT_BUFFER_CAPACITY, FutureOneshotProducer, FutureReadyProducer, StreamEmptyProducer, - write_buffered_bytes, + DEFAULT_BUFFER_CAPACITY, FutureOneshotProducer, FutureReadyProducer, MAX_BUFFER_CAPACITY, + StreamEmptyProducer, write_buffered_bytes, }; use crate::sockets::{NonInheritedOptions, SocketAddrUse, SocketAddressFamily, WasiSocketsCtxView}; use anyhow::Context; @@ -229,7 +229,8 @@ impl StreamConsumer for SendStreamConsumer { // NOTE: The implementation might want to use Linux SIOCOUTQ ioctl or similar construct // on other platforms to only read `min(socket_capacity, src.remaining())` and prevent // short writes - self.buffer.reserve(src.remaining(&mut store)); + let n = src.remaining(&mut store).min(MAX_BUFFER_CAPACITY); + self.buffer.reserve(n); if let Err(err) = src.read(&mut store, &mut self.buffer) { return Ok(Err(err)); } From ec41ec537f345ddeafc272e41725834609ec6e02 Mon Sep 17 00:00:00 2001 From: Roman Volosatovs Date: Thu, 28 Aug 2025 14:27:41 +0200 
Subject: [PATCH 14/32] p3-sockets: remove reuseaddr test loop workaround Signed-off-by: Roman Volosatovs --- .../src/bin/p3_sockets_tcp_bind.rs | 23 ++++--------------- 1 file changed, 4 insertions(+), 19 deletions(-) diff --git a/crates/test-programs/src/bin/p3_sockets_tcp_bind.rs b/crates/test-programs/src/bin/p3_sockets_tcp_bind.rs index 05628f58a174..ca14301264a5 100644 --- a/crates/test-programs/src/bin/p3_sockets_tcp_bind.rs +++ b/crates/test-programs/src/bin/p3_sockets_tcp_bind.rs @@ -87,25 +87,10 @@ async fn test_tcp_bind_reuseaddr(ip: IpAddress) { // If SO_REUSEADDR was configured correctly, the following lines // shouldn't be affected by the TIME_WAIT state of the just closed - // `listener1` socket. - // - // Note though that the way things are modeled in Wasmtime right now is that - // the TCP socket is kept alive by a spawned task created in `listen` - // meaning that to fully close the socket it requires the spawned task to - // shut down. That may require yielding to the host or similar so try a few - // times to let the host get around to closing the task while testing each - // time to see if we can reuse the address. This loop is bounded because it - // should complete "quickly". - for _ in 0..10 { - let listener2 = TcpSocket::create(ip.family()).unwrap(); - if listener2.bind(bind_addr).is_ok() { - listener2.listen().unwrap(); - return; - } - yield_blocking(); - } - - panic!("looks like REUSEADDR isn't in use?"); + // `listener1` socket: + let listener2 = TcpSocket::create(ip.family()).unwrap(); + listener2.bind(bind_addr).unwrap(); + listener2.listen().unwrap(); } // Try binding to an address that is not configured on the system. 
From 2b6f216ab0856ec51f65fb3ca9faca6bc6491158 Mon Sep 17 00:00:00 2001 From: Roman Volosatovs Date: Thu, 28 Aug 2025 15:44:41 +0200 Subject: [PATCH 15/32] p3: drive I/O in `when_ready` Signed-off-by: Roman Volosatovs --- crates/wasi/src/p3/cli/host.rs | 66 +++++-- crates/wasi/src/p3/filesystem/host.rs | 181 +++++++++++-------- crates/wasi/src/p3/sockets/host/types/tcp.rs | 17 +- 3 files changed, 165 insertions(+), 99 deletions(-) diff --git a/crates/wasi/src/p3/cli/host.rs b/crates/wasi/src/p3/cli/host.rs index cd3d49b5c4ce..fc8956464769 100644 --- a/crates/wasi/src/p3/cli/host.rs +++ b/crates/wasi/src/p3/cli/host.rs @@ -21,6 +21,23 @@ struct InputStreamProducer { buffer: Cursor, } +impl InputStreamProducer +where + T: AsyncRead + Send + Unpin, +{ + async fn read(&mut self, n: usize) -> StreamState { + self.buffer.get_mut().reserve(n); + match self.rx.read_buf(self.buffer.get_mut()).await { + Ok(0) => StreamState::Closed, + Ok(_) => StreamState::Open, + Err(_err) => { + // TODO: Report the error to the guest + StreamState::Closed + } + } + } +} + impl StreamProducer for InputStreamProducer where T: AsyncRead + Send + Unpin + 'static, @@ -34,27 +51,24 @@ where write_buffered_bytes(store, &mut self.buffer, dst).await?; return Ok(StreamState::Open); } - let n = store .with(|store| dst.remaining(store)) .unwrap_or(DEFAULT_BUFFER_CAPACITY) .min(MAX_BUFFER_CAPACITY); - self.buffer.get_mut().reserve(n); - match self.rx.read_buf(self.buffer.get_mut()).await { - Ok(0) => Ok(StreamState::Closed), - Ok(_) => { + match self.read(n).await { + StreamState::Open => { write_buffered_bytes(store, &mut self.buffer, dst).await?; Ok(StreamState::Open) } - Err(_err) => { - // TODO: Report the error to the guest - Ok(StreamState::Closed) - } + StreamState::Closed => Ok(StreamState::Closed), } } async fn when_ready(&mut self, _: &Accessor) -> wasmtime::Result { - Ok(StreamState::Open) + if !self.buffer.get_ref().is_empty() { + return Ok(StreamState::Open); + } + 
Ok(self.read(DEFAULT_BUFFER_CAPACITY).await) } } @@ -63,6 +77,24 @@ struct OutputStreamConsumer { buffer: BytesMut, } +impl OutputStreamConsumer +where + T: AsyncWrite + Send + Unpin + 'static, +{ + async fn flush(&mut self) -> StreamState { + match self.tx.write_all(&self.buffer).await { + Ok(()) => { + self.buffer.clear(); + StreamState::Open + } + Err(_err) => { + // TODO: Report the error to the guest + StreamState::Closed + } + } + } +} + impl StreamConsumer for OutputStreamConsumer where T: AsyncWrite + Send + Unpin + 'static, @@ -77,19 +109,13 @@ where self.buffer.reserve(n); src.read(&mut store, &mut self.buffer) })?; - match self.tx.write_all(&self.buffer).await { - Ok(()) => { - self.buffer.clear(); - Ok(StreamState::Open) - } - Err(_err) => { - // TODO: Report the error to the guest - Ok(StreamState::Closed) - } - } + Ok(self.flush().await) } async fn when_ready(&mut self, _: &Accessor) -> wasmtime::Result { + if !self.buffer.is_empty() { + return Ok(self.flush().await); + } Ok(StreamState::Open) } } diff --git a/crates/wasi/src/p3/filesystem/host.rs b/crates/wasi/src/p3/filesystem/host.rs index c901d04cf6f5..f4478871fdfe 100644 --- a/crates/wasi/src/p3/filesystem/host.rs +++ b/crates/wasi/src/p3/filesystem/host.rs @@ -131,23 +131,8 @@ impl ReadStreamProducer { _ = tx.send(res); } } -} -impl StreamProducer for ReadStreamProducer { - async fn produce( - &mut self, - store: &Accessor, - dst: &mut Destination, - ) -> wasmtime::Result { - if !self.buffer.get_ref().is_empty() { - write_buffered_bytes(store, &mut self.buffer, dst).await?; - return Ok(StreamState::Open); - } - - let n = store - .with(|store| dst.remaining(store)) - .unwrap_or(DEFAULT_BUFFER_CAPACITY) - .min(MAX_BUFFER_CAPACITY); + async fn read(&mut self, n: usize) -> StreamState { let mut buf = mem::take(&mut self.buffer).into_inner(); buf.resize(n, 0); let offset = self.offset; @@ -171,24 +156,51 @@ impl StreamProducer for ReadStreamProducer { }; self.offset = n; self.buffer = 
Cursor::new(buf); - write_buffered_bytes(store, &mut self.buffer, dst).await?; - return Ok(StreamState::Open); + return StreamState::Open; } Err(err) => break 'result Err(err.into()), } }; self.close(res); - Ok(StreamState::Closed) + StreamState::Closed + } +} + +impl StreamProducer for ReadStreamProducer { + async fn produce( + &mut self, + store: &Accessor, + dst: &mut Destination, + ) -> wasmtime::Result { + if !self.buffer.get_ref().is_empty() { + write_buffered_bytes(store, &mut self.buffer, dst).await?; + return Ok(StreamState::Open); + } + let n = store + .with(|store| dst.remaining(store)) + .unwrap_or(DEFAULT_BUFFER_CAPACITY) + .min(MAX_BUFFER_CAPACITY); + match self.read(n).await { + StreamState::Open => { + write_buffered_bytes(store, &mut self.buffer, dst).await?; + Ok(StreamState::Open) + } + StreamState::Closed => Ok(StreamState::Closed), + } } async fn when_ready(&mut self, _: &Accessor) -> wasmtime::Result { - Ok(StreamState::Open) + if !self.buffer.get_ref().is_empty() { + return Ok(StreamState::Open); + } + Ok(self.read(DEFAULT_BUFFER_CAPACITY).await) } } struct DirectoryStreamProducer { dir: Dir, entries: Option, + buffered: Option, result: Option>>, } @@ -198,14 +210,8 @@ impl DirectoryStreamProducer { _ = tx.send(res); } } -} -impl StreamProducer for DirectoryStreamProducer { - async fn produce( - &mut self, - store: &Accessor, - dst: &mut Destination, - ) -> wasmtime::Result { + async fn next(&mut self) -> Option { let res = 'result: loop { let mut entries = if let Some(entries) = self.entries.take() { entries @@ -252,25 +258,43 @@ impl StreamProducer for DirectoryStreamProducer { break 'result Err(ErrorCode::IllegalByteSequence); }; // FIXME: Handle cancellation - if let Some(_) = dst - .write( - store, - Some(DirectoryEntry { - type_: meta.file_type().into(), - name, - }), - ) - .await? 
- { - bail!("failed to write entry") - } - return Ok(StreamState::Open); + return Some(DirectoryEntry { + type_: meta.file_type().into(), + name, + }); }; self.close(res); - Ok(StreamState::Closed) + None + } +} + +impl StreamProducer for DirectoryStreamProducer { + async fn produce( + &mut self, + store: &Accessor, + dst: &mut Destination, + ) -> wasmtime::Result { + let entry = if let Some(entry) = self.buffered.take() { + entry + } else if let Some(entry) = self.next().await { + entry + } else { + return Ok(StreamState::Closed); + }; + // FIXME: Handle cancellation + if let Some(_) = dst.write(store, Some(entry)).await? { + bail!("failed to write entry") + } + return Ok(StreamState::Open); } async fn when_ready(&mut self, _: &Accessor) -> wasmtime::Result { + if self.buffered.is_none() { + let Some(entry) = self.next().await else { + return Ok(StreamState::Closed); + }; + self.buffered = Some(entry); + } Ok(StreamState::Open) } } @@ -294,20 +318,9 @@ impl WriteStreamConsumer { _ = tx.send(res); } } -} -impl StreamConsumer for WriteStreamConsumer { - async fn consume( - &mut self, - store: &Accessor, - src: &mut Source<'_, u8>, - ) -> wasmtime::Result { - let res = 'result: loop { - store.with(|mut store| { - let n = src.remaining(&mut store).min(MAX_BUFFER_CAPACITY); - self.buffer.reserve(n); - src.read(&mut store, &mut self.buffer) - })?; + async fn flush(&mut self) -> StreamState { + let res = 'result: { // FIXME: `mem::take` rather than `clone` when we can ensure cancellation-safety //let buf = mem::take(&mut self.buffer); let buf = self.buffer.clone(); @@ -330,16 +343,34 @@ impl StreamConsumer for WriteStreamConsumer { self.buffer = buf; self.buffer.clear(); self.offset = offset; - return Ok(StreamState::Open); + return StreamState::Open; } Err(err) => break 'result Err(err), } }; self.close(res); - Ok(StreamState::Closed) + StreamState::Closed + } +} + +impl StreamConsumer for WriteStreamConsumer { + async fn consume( + &mut self, + store: &Accessor, + src: 
&mut Source<'_, u8>, + ) -> wasmtime::Result { + store.with(|mut store| { + let n = src.remaining(&mut store).min(MAX_BUFFER_CAPACITY); + self.buffer.reserve(n); + src.read(&mut store, &mut self.buffer) + })?; + Ok(self.flush().await) } async fn when_ready(&mut self, _: &Accessor) -> wasmtime::Result { + if !self.buffer.is_empty() { + return Ok(self.flush().await); + } Ok(StreamState::Open) } } @@ -362,20 +393,9 @@ impl AppendStreamConsumer { _ = tx.send(res); } } -} -impl StreamConsumer for AppendStreamConsumer { - async fn consume( - &mut self, - store: &Accessor, - src: &mut Source<'_, u8>, - ) -> wasmtime::Result { - let res = 'result: loop { - store.with(|mut store| { - let n = src.remaining(&mut store).min(MAX_BUFFER_CAPACITY); - self.buffer.reserve(n); - src.read(&mut store, &mut self.buffer) - })?; + async fn flush(&mut self) -> StreamState { + let res = 'result: { let buf = mem::take(&mut self.buffer); // FIXME: Handle cancellation match self @@ -393,16 +413,34 @@ impl StreamConsumer for AppendStreamConsumer { Ok(buf) => { self.buffer = buf; self.buffer.clear(); - return Ok(StreamState::Open); + return StreamState::Open; } Err(err) => break 'result Err(err), } }; self.close(res); - Ok(StreamState::Closed) + StreamState::Closed + } +} + +impl StreamConsumer for AppendStreamConsumer { + async fn consume( + &mut self, + store: &Accessor, + src: &mut Source<'_, u8>, + ) -> wasmtime::Result { + store.with(|mut store| { + let n = src.remaining(&mut store).min(MAX_BUFFER_CAPACITY); + self.buffer.reserve(n); + src.read(&mut store, &mut self.buffer) + })?; + Ok(self.flush().await) } async fn when_ready(&mut self, _: &Accessor) -> wasmtime::Result { + if !self.buffer.is_empty() { + return Ok(self.flush().await); + } Ok(StreamState::Open) } } @@ -603,6 +641,7 @@ impl types::HostDescriptorWithStore for WasiFilesystem { DirectoryStreamProducer { dir, entries: None, + buffered: None, result: Some(result_tx), }, ), diff --git 
a/crates/wasi/src/p3/sockets/host/types/tcp.rs b/crates/wasi/src/p3/sockets/host/types/tcp.rs index 8a471074a8f0..54525df0c0b8 100644 --- a/crates/wasi/src/p3/sockets/host/types/tcp.rs +++ b/crates/wasi/src/p3/sockets/host/types/tcp.rs @@ -44,18 +44,15 @@ fn get_socket_mut<'a>( } struct ListenStreamProducer { - accepted: Option>, listener: Arc, family: SocketAddressFamily, options: NonInheritedOptions, + accepted: Option>, getter: for<'a> fn(&'a mut T) -> WasiSocketsCtxView<'a>, } impl ListenStreamProducer { - async fn accept(&mut self) -> std::io::Result { - if let Some(res) = self.accepted.take() { - return res; - } + async fn next(&mut self) -> std::io::Result { let (stream, _) = self.listener.accept().await?; Ok(stream) } @@ -70,7 +67,11 @@ where store: &Accessor, dst: &mut Destination>, ) -> wasmtime::Result { - let res = self.accept().await; + let res = if let Some(res) = self.accepted.take() { + res + } else { + self.next().await + }; let socket = TcpSocket::new_accept(res, &self.options, self.family) .unwrap_or_else(|err| TcpSocket::new_error(err, self.family)); let store = store.with_getter::(self.getter); @@ -97,7 +98,7 @@ where async fn when_ready(&mut self, _: &Accessor) -> wasmtime::Result { if self.accepted.is_none() { - let res = self.accept().await; + let res = self.next().await; self.accepted = Some(res); } Ok(StreamState::Open) @@ -337,10 +338,10 @@ impl HostTcpSocketWithStore for WasiSockets { instance, &mut store, ListenStreamProducer { - accepted: None, listener, family, options, + accepted: None, getter, }, )) From f7ff95747a6e2515c1a5b7a697794e5cc002e6f5 Mon Sep 17 00:00:00 2001 From: Roman Volosatovs Date: Thu, 28 Aug 2025 17:45:50 +0200 Subject: [PATCH 16/32] fixup! 
p3: drive I/O in `when_ready` --- crates/wasi/src/p3/filesystem/host.rs | 100 +++++++++++++------------- 1 file changed, 49 insertions(+), 51 deletions(-) diff --git a/crates/wasi/src/p3/filesystem/host.rs b/crates/wasi/src/p3/filesystem/host.rs index f4478871fdfe..30b58242fb3a 100644 --- a/crates/wasi/src/p3/filesystem/host.rs +++ b/crates/wasi/src/p3/filesystem/host.rs @@ -320,36 +320,35 @@ impl WriteStreamConsumer { } async fn flush(&mut self) -> StreamState { - let res = 'result: { - // FIXME: `mem::take` rather than `clone` when we can ensure cancellation-safety - //let buf = mem::take(&mut self.buffer); - let buf = self.buffer.clone(); - let mut offset = self.offset; - match self - .file - .spawn_blocking(move |file| { - let mut pos = 0; - while pos != buf.len() { - let n = file.write_at(&buf[pos..], offset)?; - pos = pos.saturating_add(n); - let n = n.try_into().or(Err(ErrorCode::Overflow))?; - offset = offset.checked_add(n).ok_or(ErrorCode::Overflow)?; - } - Ok((buf, offset)) - }) - .await - { - Ok((buf, offset)) => { - self.buffer = buf; - self.buffer.clear(); - self.offset = offset; - return StreamState::Open; + // FIXME: `mem::take` rather than `clone` when we can ensure cancellation-safety + //let buf = mem::take(&mut self.buffer); + let buf = self.buffer.clone(); + let mut offset = self.offset; + match self + .file + .spawn_blocking(move |file| { + let mut pos = 0; + while pos != buf.len() { + let n = file.write_at(&buf[pos..], offset)?; + pos = pos.saturating_add(n); + let n = n.try_into().or(Err(ErrorCode::Overflow))?; + offset = offset.checked_add(n).ok_or(ErrorCode::Overflow)?; } - Err(err) => break 'result Err(err), + Ok((buf, offset)) + }) + .await + { + Ok((buf, offset)) => { + self.buffer = buf; + self.buffer.clear(); + self.offset = offset; + StreamState::Open } - }; - self.close(res); - StreamState::Closed + Err(err) => { + self.close(Err(err)); + StreamState::Closed + } + } } } @@ -395,31 +394,30 @@ impl AppendStreamConsumer { } async fn 
flush(&mut self) -> StreamState { - let res = 'result: { - let buf = mem::take(&mut self.buffer); - // FIXME: Handle cancellation - match self - .file - .spawn_blocking(move |file| { - let mut pos = 0; - while pos != buf.len() { - let n = file.append(&buf[pos..])?; - pos = pos.saturating_add(n); - } - Ok(buf) - }) - .await - { - Ok(buf) => { - self.buffer = buf; - self.buffer.clear(); - return StreamState::Open; + let buf = mem::take(&mut self.buffer); + // FIXME: Handle cancellation + match self + .file + .spawn_blocking(move |file| { + let mut pos = 0; + while pos != buf.len() { + let n = file.append(&buf[pos..])?; + pos = pos.saturating_add(n); } - Err(err) => break 'result Err(err), + Ok(buf) + }) + .await + { + Ok(buf) => { + self.buffer = buf; + self.buffer.clear(); + StreamState::Open } - }; - self.close(res); - StreamState::Closed + Err(err) => { + self.close(Err(err)); + StreamState::Closed + } + } } } From 879be7dc74f9fa70c5380ff47f69fd4fc51809f7 Mon Sep 17 00:00:00 2001 From: Joel Dice Date: Fri, 29 Aug 2025 16:11:36 -0600 Subject: [PATCH 17/32] Refine `Stream{Producer,Consumer}` APIs Per conversations last week with Roman, Alex, and Lann, I've updated these traits to present a lower-level API based on `poll_{consume,produce}` functions and have documented the implementation requirements for various scenarios which have come up in `wasmtime-wasi`, particularly around graceful cancellation. See the doc comments for those functions for details. 
Signed-off-by: Joel Dice --- .../src/resource_stream.rs | 4 +- crates/misc/component-async-tests/src/util.rs | 200 +-- .../tests/scenario/streams.rs | 8 +- .../tests/scenario/transmit.rs | 178 ++- .../src/runtime/component/concurrent.rs | 6 +- .../concurrent/futures_and_streams.rs | 1158 +++++++++++------ crates/wasmtime/src/runtime/component/mod.rs | 8 +- 7 files changed, 960 insertions(+), 602 deletions(-) diff --git a/crates/misc/component-async-tests/src/resource_stream.rs b/crates/misc/component-async-tests/src/resource_stream.rs index cb2c1e405e70..baf3bdcecfe1 100644 --- a/crates/misc/component-async-tests/src/resource_stream.rs +++ b/crates/misc/component-async-tests/src/resource_stream.rs @@ -1,4 +1,4 @@ -use crate::util::MpscProducer; +use crate::util::PipeProducer; use anyhow::Result; use futures::channel::mpsc; use wasmtime::component::{Accessor, Resource, StreamReader}; @@ -45,7 +45,7 @@ impl bindings::local::local::resource_stream::HostWithStore for Ctx { .unwrap() } let instance = access.instance(); - Ok(StreamReader::new(instance, access, MpscProducer::new(rx))) + Ok(StreamReader::new(instance, access, PipeProducer::new(rx))) }) } } diff --git a/crates/misc/component-async-tests/src/util.rs b/crates/misc/component-async-tests/src/util.rs index 558434c1f12a..d435bdc3717b 100644 --- a/crates/misc/component-async-tests/src/util.rs +++ b/crates/misc/component-async-tests/src/util.rs @@ -1,12 +1,17 @@ use anyhow::Result; -use futures::{ - SinkExt, StreamExt, - channel::{mpsc, oneshot}, +use futures::{Sink, Stream, channel::oneshot}; +use std::{ + marker::PhantomData, + pin::Pin, + task::{Context, Poll}, + thread, }; -use std::{future, thread}; -use wasmtime::component::{ - Accessor, Destination, FutureConsumer, FutureProducer, Lift, Lower, Source, StreamConsumer, - StreamProducer, StreamState, +use wasmtime::{ + StoreContextMut, + component::{ + Accessor, Destination, FutureConsumer, FutureProducer, Lift, Lower, Source, StreamConsumer, + StreamProducer, 
StreamResult, + }, }; pub async fn sleep(duration: std::time::Duration) { @@ -30,133 +35,128 @@ pub async fn sleep(duration: std::time::Duration) { } } -pub struct MpscProducer { - rx: mpsc::Receiver, - next: Option, - closed: bool, -} - -impl MpscProducer { - pub fn new(rx: mpsc::Receiver) -> Self { - Self { - rx, - next: None, - closed: false, - } - } +pub struct PipeProducer(S); - fn state(&self) -> StreamState { - if self.closed { - StreamState::Closed - } else { - StreamState::Open - } +impl PipeProducer { + pub fn new(rx: S) -> Self { + Self(rx) } } -impl StreamProducer for MpscProducer { - async fn produce( - &mut self, - accessor: &Accessor, - destination: &mut Destination, - ) -> Result { - let item = if let Some(item) = self.next.take() { - Some(item) - } else if let Some(item) = self.rx.next().await { - Some(item) - } else { - None - }; - - if let Some(item) = item { - let item = destination.write(accessor, Some(item)).await?; - assert!(item.is_none()); - } else { - self.closed = true; - } - - Ok(self.state()) - } - - async fn when_ready(&mut self, _: &Accessor) -> Result { - if !self.closed && self.next.is_none() { - if let Some(item) = self.rx.next().await { - self.next = Some(item); - } else { - self.closed = true; +impl + Send + 'static> StreamProducer + for PipeProducer +{ + type Item = T; + type Buffer = Option; + + fn poll_produce<'a>( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + _: StoreContextMut, + destination: &'a mut Destination<'a, Self::Item, Self::Buffer>, + finish: bool, + ) -> Poll> { + // SAFETY: This is a standard pin-projection, and we never move + // out of `self`. 
+ let stream = unsafe { self.map_unchecked_mut(|v| &mut v.0) }; + + match stream.poll_next(cx) { + Poll::Pending => { + if finish { + Poll::Ready(Ok(StreamResult::Cancelled)) + } else { + Poll::Pending + } } + Poll::Ready(Some(item)) => { + *destination.buffer() = Some(item); + Poll::Ready(Ok(StreamResult::Completed)) + } + Poll::Ready(None) => Poll::Ready(Ok(StreamResult::Dropped)), } - - Ok(self.state()) } } -pub struct MpscConsumer { - tx: mpsc::Sender, -} +pub struct PipeConsumer(S, PhantomData T>); -impl MpscConsumer { - pub fn new(tx: mpsc::Sender) -> Self { - Self { tx } - } - - fn state(&self) -> StreamState { - if self.tx.is_closed() { - StreamState::Closed - } else { - StreamState::Open - } +impl PipeConsumer { + pub fn new(tx: S) -> Self { + Self(tx, PhantomData) } } -impl StreamConsumer for MpscConsumer { - async fn consume( - &mut self, - accessor: &Accessor, - source: &mut Source<'_, T>, - ) -> Result { - let item = &mut None; - accessor.with(|access| source.read(access, item))?; - _ = self.tx.send(item.take().unwrap()).await; - Ok(self.state()) - } - - async fn when_ready(&mut self, _: &Accessor) -> Result { - future::poll_fn(|cx| self.tx.poll_ready(cx)).await?; +impl + Send + 'static> + StreamConsumer for PipeConsumer +{ + type Item = T; + + fn poll_consume( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + store: StoreContextMut, + source: &mut Source, + finish: bool, + ) -> Poll> { + // SAFETY: This is a standard pin-projection, and we never move + // out of `self`. 
+ let mut sink = unsafe { self.map_unchecked_mut(|v| &mut v.0) }; + + let on_pending = || { + if finish { + Poll::Ready(Ok(StreamResult::Cancelled)) + } else { + Poll::Pending + } + }; - Ok(self.state()) + match sink.as_mut().poll_flush(cx) { + Poll::Pending => on_pending(), + Poll::Ready(result) => { + result?; + match sink.as_mut().poll_ready(cx) { + Poll::Pending => on_pending(), + Poll::Ready(result) => { + result?; + let item = &mut None; + source.read(store, item)?; + sink.start_send(item.take().unwrap())?; + Poll::Ready(Ok(StreamResult::Completed)) + } + } + } + } } } -pub struct OneshotProducer { - rx: oneshot::Receiver, -} +pub struct OneshotProducer(oneshot::Receiver); impl OneshotProducer { pub fn new(rx: oneshot::Receiver) -> Self { - Self { rx } + Self(rx) } } -impl FutureProducer for OneshotProducer { +impl FutureProducer for OneshotProducer { + type Item = T; + async fn produce(self, _: &Accessor) -> Result { - Ok(self.rx.await?) + Ok(self.0.await?) } } -pub struct OneshotConsumer { - tx: oneshot::Sender, -} +pub struct OneshotConsumer(oneshot::Sender); impl OneshotConsumer { pub fn new(tx: oneshot::Sender) -> Self { - Self { tx } + Self(tx) } } -impl FutureConsumer for OneshotConsumer { +impl FutureConsumer for OneshotConsumer { + type Item = T; + async fn consume(self, _: &Accessor, value: T) -> Result<()> { - _ = self.tx.send(value); + _ = self.0.send(value); Ok(()) } } diff --git a/crates/misc/component-async-tests/tests/scenario/streams.rs b/crates/misc/component-async-tests/tests/scenario/streams.rs index 122153ee44af..e63f2d303182 100644 --- a/crates/misc/component-async-tests/tests/scenario/streams.rs +++ b/crates/misc/component-async-tests/tests/scenario/streams.rs @@ -3,7 +3,7 @@ use { anyhow::Result, component_async_tests::{ Ctx, closed_streams, - util::{MpscConsumer, MpscProducer, OneshotConsumer, OneshotProducer}, + util::{OneshotConsumer, OneshotProducer, PipeConsumer, PipeProducer}, }, futures::{ SinkExt, StreamExt, @@ -52,8 +52,8 @@ 
pub async fn async_closed_streams() -> Result<()> { { let (mut input_tx, input_rx) = mpsc::channel(1); let (output_tx, mut output_rx) = mpsc::channel(1); - StreamReader::new(instance, &mut store, MpscProducer::new(input_rx)) - .pipe(&mut store, MpscConsumer::new(output_tx)); + StreamReader::new(instance, &mut store, PipeProducer::new(input_rx)) + .pipe(&mut store, PipeConsumer::new(output_tx)); instance .run_concurrent(&mut store, async |_| { @@ -99,7 +99,7 @@ pub async fn async_closed_streams() -> Result<()> { // Next, test stream host->guest { let (mut tx, rx) = mpsc::channel(1); - let rx = StreamReader::new(instance, &mut store, MpscProducer::new(rx)); + let rx = StreamReader::new(instance, &mut store, PipeProducer::new(rx)); let closed_streams = closed_streams::bindings::ClosedStreams::new(&mut store, &instance)?; diff --git a/crates/misc/component-async-tests/tests/scenario/transmit.rs b/crates/misc/component-async-tests/tests/scenario/transmit.rs index 07a9799810d4..80de705e731d 100644 --- a/crates/misc/component-async-tests/tests/scenario/transmit.rs +++ b/crates/misc/component-async-tests/tests/scenario/transmit.rs @@ -1,13 +1,14 @@ use std::future::{self, Future}; use std::pin::Pin; use std::sync::{Arc, Mutex}; +use std::task::{Context, Poll}; use std::time::Duration; use super::util::{config, make_component, test_run, test_run_with_count}; use anyhow::{Result, anyhow}; use cancel::exports::local::local::cancel::Mode; use component_async_tests::transmit::bindings::exports::local::local::transmit::Control; -use component_async_tests::util::{MpscConsumer, MpscProducer, OneshotConsumer, OneshotProducer}; +use component_async_tests::util::{OneshotConsumer, OneshotProducer, PipeConsumer, PipeProducer}; use component_async_tests::{Ctx, sleep, transmit}; use futures::{ FutureExt, SinkExt, StreamExt, TryStreamExt, @@ -16,9 +17,9 @@ use futures::{ }; use wasmtime::component::{ Accessor, Component, Destination, FutureReader, HasSelf, Instance, Linker, ResourceTable, 
- Source, StreamConsumer, StreamProducer, StreamReader, StreamState, Val, + Source, StreamConsumer, StreamProducer, StreamReader, StreamResult, Val, }; -use wasmtime::{AsContextMut, Engine, Store, Trap}; +use wasmtime::{AsContextMut, Engine, Store, StoreContextMut, Trap}; use wasmtime_wasi::WasiCtxBuilder; mod readiness { @@ -30,104 +31,91 @@ mod readiness { struct ReadinessProducer { buffer: Vec, - slept: bool, - closed: bool, + sleep: Pin + Send>>, } -impl ReadinessProducer { - async fn maybe_sleep(&mut self) { - if !self.slept { - self.slept = true; - component_async_tests::util::sleep(Duration::from_millis(delay_millis())).await; - } - } - - fn state(&self) -> StreamState { - if self.closed { - StreamState::Closed - } else { - StreamState::Open +impl StreamProducer for ReadinessProducer { + type Item = u8; + type Buffer = Option; + + fn poll_produce<'a>( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + mut store: StoreContextMut<'a, D>, + destination: &'a mut Destination<'a, Self::Item, Self::Buffer>, + finish: bool, + ) -> Poll> { + let me = self.get_mut(); + + match me.sleep.as_mut().poll(cx) { + Poll::Pending => { + if finish { + Poll::Ready(Ok(StreamResult::Cancelled)) + } else { + Poll::Pending + } + } + Poll::Ready(()) => { + me.sleep = async {}.boxed(); + let capacity = destination.remaining(store.as_context_mut()); + if capacity == Some(0) { + Poll::Ready(Ok(StreamResult::Completed)) + } else { + assert_eq!(capacity, Some(me.buffer.len())); + let mut destination = destination.as_direct_destination(store).unwrap(); + destination.remaining().copy_from_slice(&me.buffer); + destination.mark_written(me.buffer.len()); + + Poll::Ready(Ok(StreamResult::Dropped)) + } + } } } } -impl StreamProducer for ReadinessProducer { - async fn produce( - &mut self, - accessor: &Accessor, - destination: &mut Destination, - ) -> Result { - self.maybe_sleep().await; - accessor.with(|mut access| { - assert_eq!( - destination.remaining(access.as_context_mut()), - 
Some(self.buffer.len()) - ); - let mut destination = destination - .as_guest_destination(access.as_context_mut()) - .unwrap(); - destination.remaining().copy_from_slice(&self.buffer); - destination.mark_written(self.buffer.len()); - }); - self.closed = true; - Ok(self.state()) - } - - async fn when_ready(&mut self, _: &Accessor) -> Result { - self.maybe_sleep().await; - Ok(self.state()) - } -} - struct ReadinessConsumer { expected: Vec, - slept: bool, - closed: bool, + sleep: Pin + Send>>, } -impl ReadinessConsumer { - async fn maybe_sleep(&mut self) { - if !self.slept { - self.slept = true; - component_async_tests::util::sleep(Duration::from_millis(delay_millis())).await; - } - } - - fn state(&self) -> StreamState { - if self.closed { - StreamState::Closed - } else { - StreamState::Open +impl StreamConsumer for ReadinessConsumer { + type Item = u8; + + fn poll_consume( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + mut store: StoreContextMut, + source: &mut Source, + finish: bool, + ) -> Poll> { + let me = self.get_mut(); + + match me.sleep.as_mut().poll(cx) { + Poll::Pending => { + if finish { + Poll::Ready(Ok(StreamResult::Cancelled)) + } else { + Poll::Pending + } + } + Poll::Ready(()) => { + me.sleep = async {}.boxed(); + let available = source.remaining(store.as_context_mut()); + if available == 0 { + Poll::Ready(Ok(StreamResult::Completed)) + } else { + assert_eq!(available, me.expected.len()); + let mut source = source.as_direct_source(store); + assert_eq!(&me.expected, source.remaining()); + source.mark_read(me.expected.len()); + + Poll::Ready(Ok(StreamResult::Dropped)) + } + } } } } -impl StreamConsumer for ReadinessConsumer { - async fn consume( - &mut self, - accessor: &Accessor, - source: &mut Source<'_, u8>, - ) -> Result { - self.maybe_sleep().await; - accessor.with(|mut access| { - assert_eq!( - source.remaining(access.as_context_mut()), - self.expected.len() - ); - let mut source = source.as_guest_source(access.as_context_mut()).unwrap(); - 
assert_eq!(&self.expected, source.remaining()); - source.mark_read(self.expected.len()); - }); - self.closed = true; - Ok(self.state()) - } - - async fn when_ready(&mut self, _: &Accessor) -> Result { - self.maybe_sleep().await; - Ok(self.state()) - } -} - #[tokio::test] pub async fn async_readiness() -> Result<()> { let component = test_programs_artifacts::ASYNC_READINESS_COMPONENT; @@ -158,8 +146,8 @@ pub async fn async_readiness() -> Result<()> { &mut store, ReadinessProducer { buffer: expected.clone(), - slept: false, - closed: false, + sleep: component_async_tests::util::sleep(Duration::from_millis(delay_millis())) + .boxed(), }, ); let result = instance @@ -174,8 +162,10 @@ pub async fn async_readiness() -> Result<()> { access, ReadinessConsumer { expected, - slept: false, - closed: false, + sleep: component_async_tests::util::sleep(Duration::from_millis( + delay_millis(), + )) + .boxed(), }, ) }); @@ -553,10 +543,10 @@ async fn test_transmit_with(component: &str) -> Re } let (mut control_tx, control_rx) = mpsc::channel(1); - let control_rx = StreamReader::new(instance, &mut store, MpscProducer::new(control_rx)); + let control_rx = StreamReader::new(instance, &mut store, PipeProducer::new(control_rx)); let (mut caller_stream_tx, caller_stream_rx) = mpsc::channel(1); let caller_stream_rx = - StreamReader::new(instance, &mut store, MpscProducer::new(caller_stream_rx)); + StreamReader::new(instance, &mut store, PipeProducer::new(caller_stream_rx)); let (caller_future1_tx, caller_future1_rx) = oneshot::channel(); let caller_future1_rx = FutureReader::new( instance, @@ -622,7 +612,7 @@ async fn test_transmit_with(component: &str) -> Re Test::from_result(&mut store, instance, result)?; callee_stream_rx.pipe( &mut store, - MpscConsumer::new(callee_stream_tx.take().unwrap()), + PipeConsumer::new(callee_stream_tx.take().unwrap()), ); callee_future1_rx.pipe( &mut store, diff --git a/crates/wasmtime/src/runtime/component/concurrent.rs 
b/crates/wasmtime/src/runtime/component/concurrent.rs index 2e09d1204949..e0432a40eeca 100644 --- a/crates/wasmtime/src/runtime/component/concurrent.rs +++ b/crates/wasmtime/src/runtime/component/concurrent.rs @@ -89,9 +89,9 @@ use wasmtime_environ::component::{ pub use abort::JoinHandle; pub use futures_and_streams::{ - Destination, ErrorContext, FutureConsumer, FutureProducer, FutureReader, GuardedFutureReader, - GuardedStreamReader, GuestDestination, GuestSource, ReadBuffer, Source, StreamConsumer, - StreamProducer, StreamReader, StreamState, VecBuffer, WriteBuffer, + Destination, DirectDestination, DirectSource, ErrorContext, FutureConsumer, FutureProducer, + FutureReader, GuardedFutureReader, GuardedStreamReader, ReadBuffer, Source, StreamConsumer, + StreamProducer, StreamReader, StreamResult, VecBuffer, WriteBuffer, }; pub(crate) use futures_and_streams::{ ResourcePair, lower_error_context_to_index, lower_future_to_index, lower_stream_to_index, diff --git a/crates/wasmtime/src/runtime/component/concurrent/futures_and_streams.rs b/crates/wasmtime/src/runtime/component/concurrent/futures_and_streams.rs index 54cdc610961a..f41d4a00264f 100644 --- a/crates/wasmtime/src/runtime/component/concurrent/futures_and_streams.rs +++ b/crates/wasmtime/src/runtime/component/concurrent/futures_and_streams.rs @@ -1,6 +1,6 @@ use super::table::{TableDebug, TableId}; use super::{Event, GlobalErrorContextRefCount, Waitable, WaitableCommon}; -use crate::component::concurrent::{Accessor, ConcurrentState, JoinHandle, WorkItem, tls}; +use crate::component::concurrent::{Accessor, ConcurrentState, WorkItem, tls}; use crate::component::func::{self, LiftContext, LowerContext, Options}; use crate::component::matching::InstanceType; use crate::component::values::{ErrorContextAny, FutureAny, StreamAny}; @@ -16,6 +16,7 @@ use futures::FutureExt; use futures::channel::oneshot; use std::boxed::Box; use std::fmt; +use std::future; use std::iter; use std::marker::PhantomData; use 
std::mem::{self, MaybeUninit}; @@ -213,148 +214,25 @@ pub(super) struct FlatAbi { } /// Represents the buffer for a host- or guest-initiated stream read. -pub struct Destination { +pub struct Destination<'a, T, B> { instance: Instance, - kind: TransmitKind, id: TableId, + buffer: &'a mut B, _phantom: PhantomData T>, } -impl Destination { - /// Deliver zero or more items to the reader. - pub async fn write(&mut self, accessor: A, mut buffer: B) -> Result - where - T: func::Lower + 'static, - B: WriteBuffer, - { - let accessor = accessor.as_accessor(); - let (read, guest_offset) = accessor.with(|mut access| { - let transmit = self - .instance - .concurrent_state_mut(access.as_context_mut().0) - .get_mut(self.id)?; - - let guest_offset = if let &WriteState::HostReady { guest_offset, .. } = &transmit.write - { - Some(guest_offset) - } else { - None - }; - - anyhow::Ok(( - mem::replace(&mut transmit.read, ReadState::Open), - guest_offset, - )) - })?; - - match read { - ReadState::GuestReady { - ty, - flat_abi, - options, - address, - count, - handle, - } => { - let guest_offset = guest_offset.unwrap(); - - if let TransmitKind::Future = self.kind { - accessor.with(|mut access| { - self.instance - .concurrent_state_mut(access.as_context_mut().0) - .get_mut(self.id)? - .done = true; - anyhow::Ok(()) - })?; - } - - let old_remaining = buffer.remaining().len(); - let instance = self.instance; - let accept = move |mut store: StoreContextMut| { - lower::( - store.as_context_mut(), - instance, - &options, - ty, - address + (T::SIZE32 * guest_offset), - count - guest_offset, - &mut buffer, - )?; - anyhow::Ok(buffer) - }; - - let buffer = if T::MAY_REQUIRE_REALLOC { - // For payloads which may require a realloc call, use a - // oneshot::channel and background task. This is necessary - // because calling the guest while there are host embedder - // frames on the stack is unsound. 
- let (tx, rx) = oneshot::channel(); - accessor.with(move |mut access| { - let mut store = access.as_context_mut(); - let token = StoreToken::new(store.as_context_mut()); - instance.concurrent_state_mut(store.0).push_high_priority( - WorkItem::WorkerFunction(Mutex::new(Box::new(move |store, _| { - _ = tx.send(accept(token.as_context_mut(store))?); - Ok(()) - }))), - ) - }); - rx.await? - } else { - // Optimize flat payloads (i.e. those which do not require - // calling the guest's realloc function) by lowering - // directly instead of using a oneshot::channel and - // background task. - accessor.with(|mut access| accept(access.as_context_mut()))? - }; - - accessor.with(|mut access| { - let count = old_remaining - buffer.remaining().len(); - - let transmit = self - .instance - .concurrent_state_mut(access.as_context_mut().0) - .get_mut(self.id)?; - - let WriteState::HostReady { guest_offset, .. } = &mut transmit.write else { - unreachable!(); - }; - - *guest_offset += count; - - transmit.read = ReadState::GuestReady { - ty, - flat_abi, - options, - address, - count, - handle, - }; - - anyhow::Ok(()) - })?; - - Ok(buffer) - } - - ReadState::HostToHost { accept } => { - let state = accept(&mut UntypedWriteBuffer::new(&mut buffer)).await?; - accessor.with(|mut access| { - self.instance - .concurrent_state_mut(access.as_context_mut().0) - .get_mut(self.id)? - .read = match state { - StreamState::Closed => ReadState::Dropped, - StreamState::Open => ReadState::HostToHost { accept }, - }; - - anyhow::Ok(()) - })?; - Ok(buffer) - } - - _ => unreachable!(), - } +impl<'a, T, B> Destination<'a, T, B> { + /// Return a unique reference to the buffer in which items may be stored. + /// + /// Any items added to this buffer will be delivered to the reader after the + /// `StreamProducer::poll_produce` call to which this `Destination` was + /// passed returns. 
+ /// + /// If items are added to this buffer _and_ written via a `DirectDestination` + /// view of `self`, then the items in the buffer will be delivered after the + /// ones written using `DirectDestination`. + pub fn buffer(&'a mut self) -> &'a mut B { + self.buffer } /// Return the remaining number of items the current read has capacity to @@ -362,6 +240,13 @@ impl Destination { /// /// This will return `Some(_)` if the reader is a guest; it will return /// `None` if the reader is the host. + /// + /// Note that, if this returns `Some(0)`, the producer must still attempt to + /// produce at least one item if the value of `finish` passed to + /// `StreamProducer::poll_produce` is false. In that case, the reader is + /// effectively asking when the producer will be able to produce items + /// without blocking (or reach a terminal state such as end-of-stream), + /// meaning the next non-zero read must complete without blocking. pub fn remaining(&self, mut store: impl AsContextMut) -> Option { let transmit = self .instance @@ -381,12 +266,12 @@ impl Destination { } } -impl Destination { - /// Return a `GuestDestination` view of `self` if the guest is reading. - pub fn as_guest_destination<'a, D>( +impl<'a, B> Destination<'a, u8, B> { + /// Return a `DirectDestination` view of `self` if the guest is reading. + pub fn as_direct_destination( &'a mut self, store: StoreContextMut<'a, D>, - ) -> Option> { + ) -> Option> { if let ReadState::GuestReady { .. } = self .instance .concurrent_state_mut(store.0) @@ -394,7 +279,7 @@ impl Destination { .unwrap() .read { - Some(GuestDestination { + Some(DirectDestination { instance: self.instance, id: self.id, store, @@ -405,16 +290,16 @@ impl Destination { } } -/// Represents a guest read from a `stream`, providing direct access to the -/// guest's buffer. -pub struct GuestDestination<'a, D: 'static> { +/// Represents a read from a `stream`, providing direct access to the +/// writer's buffer. 
+pub struct DirectDestination<'a, D: 'static> { instance: Instance, id: TableId, store: StoreContextMut<'a, D>, } -impl GuestDestination<'_, D> { - /// Provide direct access to the guest's buffer. +impl DirectDestination<'_, D> { + /// Provide direct access to the writer's buffer. pub fn remaining(&mut self) -> &mut [u8] { let transmit = self .instance @@ -443,7 +328,7 @@ impl GuestDestination<'_, D> { .unwrap() } - /// Mark the specified number of bytes as written to the guest's buffer. + /// Mark the specified number of bytes as written to the writer's buffer. /// /// This will panic if the count is larger than the size of the /// buffer returned by `Self::remaining`. @@ -475,35 +360,93 @@ impl GuestDestination<'_, D> { /// Represents the state of a `Stream{Producer,Consumer}`. #[derive(Copy, Clone, Debug)] -pub enum StreamState { - /// The producer or consumer may be able to produce or consume more items, - /// respectively. - Open, - /// The producer or consumer is _not_ able to produce or consume more items, - /// respectively. - Closed, +pub enum StreamResult { + /// The operation completed normally, and the producer or consumer may be + /// able to produce or consume more items, respectively. + Completed, + /// The operation was interrupted (i.e. it wrapped up early after receiving + /// a `finish` parameter value of true in a call to `poll_produce` or + /// `poll_consume`), and the producer or consumer may be able to produce or + /// consume more items, respectively. + Cancelled, + /// The operation completed normally, but the producer or consumer will + /// _not_ be able to produce or consume more items, respectively. + Dropped, } - /// Represents the host-owned write end of a stream. -pub trait StreamProducer: Send + 'static { +pub trait StreamProducer: Send + 'static { + /// The payload type of this stream. + type Item; + + /// The `WriteBuffer` type to use when delivering items. 
+ type Buffer: WriteBuffer + Default; + /// Handle a host- or guest-initiated read by delivering zero or more items /// to the specified destination. /// - /// The returned future will resolve to `Ok(StreamState::Closed)` if and - /// when this producer cannot produce any more items. - fn produce( - &mut self, - accessor: &Accessor, - destination: &mut Destination, - ) -> impl Future> + Send; - - /// Handle a guest-initiated zero-length read by returning a future which - /// resolves once this producer is either ready to produce more items or is - /// closed. - fn when_ready( - &mut self, - accessor: &Accessor, - ) -> impl Future> + Send; + /// This will be called whenever the reader starts a read. + /// + /// If the implementation is able to produce one or more items immediately, + /// it should write them to `destination` and return either + /// `Poll::Ready(Ok(StreamResult::Completed))` if it expects to produce more + /// items, or `Poll::Ready(Ok(StreamResult::Dropped))` if it cannot produce + /// any more items. + /// + /// If the implementation is unable to produce any items immediately, but + /// expects to do so later, and `finish` is _false_, it should store the + /// waker from `cx` for later and return `Poll::Pending` without writing + /// anything to `destination`. Later, it should alert the waker when either + /// the items arrive, the stream has ended, or an error occurs. + /// + /// If the implementation is unable to produce any items immediately, but + /// expects to do so later, and `finish` is _true_, it should, if possible, + /// return `Poll::Ready(Ok(StreamResult::Cancelled))` immediately without + /// writing anything to `destination`. However, that might not be possible + /// if an earlier call to `poll_produce` kicked off an asynchronous + /// operation which needs to be completed (and possibly interrupted) + /// gracefully, in which case the implementation may return `Poll::Pending` + /// and later alert the waker as described above. 
In other words, when + /// `finish` is true, the implementation should prioritize returning a + /// result to the reader (even if no items can be produced) rather than wait + /// indefinitely for at least one item to arrive. + /// + /// In all of the above cases, the implementation may alternatively choose + /// to return `Err(_)` to indicate an unrecoverable error. This will cause + /// the guest (if any) to trap and render the component instance (if any) + /// unusable. The implementation should report errors that _are_ + /// recoverable by other means (e.g. by writing to a `future`) and return + /// `Poll::Ready(Ok(StreamResult::Dropped))`. + /// + /// Note that the implementation should never return `Poll::Pending` after + /// writing one or more items to `destination`; if it does, the caller will + /// trap as if `Err(_)` was returned. Conversely, it should only return + /// `Poll::Ready(Ok(StreamResult::Cancelled))` without writing any items to + /// `destination` if called with `finish` set to true. If it does so when + /// `finish` is false, the caller will trap. Additionally, it should only + /// return `Poll::Ready(Ok(StreamResult::Completed))` after writing at least + /// one item to `destination` if it has capacity to accept that item; + /// otherwise, the caller will trap. + /// + /// If more items are written to `destination` than the reader has immediate + /// capacity to accept, they will be retained in memory by the caller and + /// used to satisfy future reads, in which case `poll_produce` will only be + /// called again once all those items have been delivered. This is + /// particularly important for zero-length reads, in which case the + /// implementation is expected to either: + /// + /// 1. Produce at least one item (if possible, and if `finish` is false) so + /// that it is ready to be delivered immediately upon the next + /// non-zero-length read. + /// + /// 2. 
Produce at least one item the next time `poll_produce` is called with + /// non-zero capacity and `finish` set to false. + fn poll_produce<'a>( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + store: StoreContextMut<'a, D>, + destination: &'a mut Destination<'a, Self::Item, Self::Buffer>, + finish: bool, + ) -> Poll>; } /// Represents the buffer for a host- or guest-initiated stream write. @@ -598,40 +541,29 @@ impl Source<'_, T> { } impl Source<'_, u8> { - /// Return a `GuestSource` view of `self` if the guest is writing. - pub fn as_guest_source<'a, D>( + /// Return a `DirectSource` view of `self`. + pub fn as_direct_source<'a, D>( &'a mut self, store: StoreContextMut<'a, D>, - ) -> Option> { - if let WriteState::GuestReady { .. } = self - .instance - .concurrent_state_mut(store.0) - .get_mut(self.id) - .unwrap() - .write - { - assert!(self.host_buffer.is_none()); - Some(GuestSource { - instance: self.instance, - id: self.id, - store, - }) - } else { - None + ) -> DirectSource<'a, D> { + DirectSource { + instance: self.instance, + id: self.id, + store, } } } -/// Represents a guest write to a `stream`, providing direct access to the -/// guest's buffer. -pub struct GuestSource<'a, D: 'static> { +/// Represents a write to a `stream`, providing direct access to the +/// writer's buffer. +pub struct DirectSource<'a, D: 'static> { instance: Instance, id: TableId, store: StoreContextMut<'a, D>, } -impl GuestSource<'_, D> { - /// Provide direct access to the guest's buffer. +impl DirectSource<'_, D> { + /// Provide direct access to the writer's buffer. pub fn remaining(&mut self) -> &[u8] { let transmit = self .instance @@ -660,7 +592,7 @@ impl GuestSource<'_, D> { .unwrap() } - /// Mark the specified number of bytes as read from the guest's buffer. + /// Mark the specified number of bytes as read from the writer's buffer. /// /// This will panic if the count is larger than the size of the buffer /// returned by `Self::remaining`. 
@@ -691,37 +623,121 @@ impl GuestSource<'_, D> { } /// Represents the host-owned read end of a stream. -pub trait StreamConsumer: Send + 'static { +pub trait StreamConsumer: Send + 'static { + /// The payload type of this stream. + type Item; + /// Handle a host- or guest-initiated write by accepting zero or more items /// from the specified source. /// - /// The returned future will resolve to `Ok(StreamState::Closed)` if and - /// when this consumer cannot accept any more items. - fn consume( - &mut self, - accessor: &Accessor, - source: &mut Source, - ) -> impl Future> + Send; - - /// Handle a guest-initiated zero-length write by returning a future which - /// resolves once this consumer is either ready to consume more items or is - /// closed. - fn when_ready( - &mut self, - accessor: &Accessor, - ) -> impl Future> + Send; + /// This will be called whenever the writer starts a write. + /// + /// If the implementation is able to consume one or more items immediately, + /// it should take them from `source` and return either + /// `Poll::Ready(Ok(StreamResult::Completed))` if it expects to be able to consume + /// more items, or `Poll::Ready(Ok(StreamResult::Dropped))` if it cannot + /// accept any more items. Alternatively, it may return `Poll::Pending` to + /// indicate that the caller should delay sending a `COMPLETED` event to the + /// writer until a later call to this function returns `Poll::Ready(_)`. + /// For more about that, see the `Backpressure` section below. + /// + /// If the implementation cannot consume any items immediately and `finish` + /// is _false_, it should store the waker from `cx` for later and return + /// `Poll::Pending` without taking anything from `source`. Later, it + /// should alert the waker when either (1) the items arrive, (2) the stream + /// has ended, or (3) an error occurs. 
+ /// + /// If the implementation cannot consume any items immediately and `finish` + /// is _true_, it should, if possible, return + /// `Poll::Ready(Ok(StreamResult::Cancelled))` immediately without taking + /// anything from `source`. However, that might not be possible if an + /// earlier call to `poll_consume` kicked off an asynchronous operation + /// which needs to be completed (and possibly interrupted) gracefully, in + /// which case the implementation may return `Poll::Pending` and later alert + /// the waker as described above. In other words, when `finish` is true, + /// the implementation should prioritize returning a result to the writer + /// (even if no items can be consumed) rather than wait indefinitely for + /// capacity to free up. + /// + /// In all of the above cases, the implementation may alternatively choose + /// to return `Err(_)` to indicate an unrecoverable error. This will cause + /// the guest (if any) to trap and render the component instance (if any) + /// unusable. The implementation should report errors that _are_ + /// recoverable by other means (e.g. by writing to a `future`) and return + /// `Poll::Ready(Ok(StreamResult::Dropped))`. + /// + /// Note that the implementation should only return + /// `Poll::Ready(Ok(StreamResult::Cancelled))` without having taken any + /// items from `source` if called with `finish` set to true. If it does so + /// when `finish` is false, the caller will trap. Additionally, it should + /// only return `Poll::Ready(Ok(StreamResult::Completed))` after taking at + /// least one item from `source` if there is an item available; otherwise, + /// the caller will trap. If `poll_consume` is called with no items in + /// `source`, it should only return `Poll::Ready(_)` once it is able to + /// accept at least one item during the next call to `poll_consume`. + /// + /// Note that any items which the implementation of this trait takes from + /// `source` become the responsibility of that implementation. 
For that + /// reason, an implementation which forwards items to an upstream sink + /// should reserve capacity in that sink before taking items out of + /// `source`, if possible. Alternatively, it might buffer items which can't + /// be forwarded immediately and send them once capacity is freed up. + /// + /// ## Backpressure + /// + /// As mentioned above, an implementation might choose to return + /// `Poll::Pending` after taking items from `source`, which tells the caller + /// to delay sending a `COMPLETED` event to the writer. This can be used as + /// a form of backpressure when the items are forwarded to an upstream sink + /// asynchronously. Note, however, that it's not possible to "put back" + /// items into `source` once they've been taken out, so if the upstream sink + /// is unable to accept all the items, that cannot be communicated to the + /// writer at this level of abstraction. Just as with application-specific, + /// recoverable errors, information about which items could be forwarded and + /// which could not must be communicated out-of-band, e.g. by writing to an + /// application-specific `future`. + /// + /// Similarly, if the writer cancels the write after items have been taken + /// from `source` but before the items have all been forwarded to an + /// upstream sink, `poll_consume` will be called with `finish` set to true, + /// and the implementation may either: + /// + /// - Interrupt the forwarding process gracefully. This may be preferable + /// if there is an out-of-band channel for communicating to the writer how + /// many items were forwarded before being interrupted. + /// + /// - Allow the forwarding to complete without interrupting it. This is + /// usually preferable if there's no out-of-band channel for reporting back + /// to the writer how many items were forwarded. 
+ fn poll_consume( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + store: StoreContextMut, + source: &mut Source, + finish: bool, + ) -> Poll>; } /// Represents a host-owned write end of a future. -pub trait FutureProducer: Send + 'static { +pub trait FutureProducer: Send + 'static { + /// The payload type of this future. + type Item; + /// Handle a host- or guest-initiated read by producing a value. - fn produce(self, accessor: &Accessor) -> impl Future> + Send; + fn produce(self, accessor: &Accessor) -> impl Future> + Send; } /// Represents a host-owned read end of a future. -pub trait FutureConsumer: Send + 'static { +pub trait FutureConsumer: Send + 'static { + /// The payload type of this future. + type Item; + /// Handle a host- or guest-initiated write by consuming a value. - fn consume(self, accessor: &Accessor, value: T) -> impl Future> + Send; + fn consume( + self, + accessor: &Accessor, + value: Self::Item, + ) -> impl Future> + Send; } /// Represents the readable end of a Component Model `future`. @@ -740,33 +756,66 @@ impl FutureReader { /// Create a new future with the specified producer. pub fn new( instance: Instance, - store: S, - producer: impl FutureProducer, + mut store: S, + producer: impl FutureProducer, ) -> Self where T: func::Lower + func::Lift + Send + Sync + 'static, { - struct Producer

(Option

); - - impl> StreamProducer for Producer

{ - async fn produce( - &mut self, - accessor: &Accessor, - destination: &mut Destination, - ) -> Result { - let value = self.0.take().unwrap().produce(accessor).await?; - let value = destination.write(accessor, Some(value)).await?; - assert!(value.is_none()); - Ok(StreamState::Open) - } + struct Producer(F); - async fn when_ready(&mut self, _: &Accessor) -> Result { - Ok(StreamState::Open) + impl> + Send + 'static> + StreamProducer for Producer + { + type Item = T; + type Buffer = Option; + + fn poll_produce<'a>( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + store: StoreContextMut, + destination: &'a mut Destination<'a, Self::Item, Self::Buffer>, + finish: bool, + ) -> Poll> { + // SAFETY: This is a standard pin-projection, and we never move + // out of `self`. + let future = unsafe { self.map_unchecked_mut(|v| &mut v.0) }; + + match tls::set(store.0, || future.poll(cx)) { + Poll::Pending => { + if finish { + Poll::Ready(Ok(StreamResult::Cancelled)) + } else { + Poll::Pending + } + } + Poll::Ready(value) => { + *destination.buffer() = Some(value?); + + // Here we return `StreamResult::Completed` even though + // we've produced the last item we'll ever produce. + // That's because the ABI expects + // `ReturnCode::Completed(1)` rather than + // `ReturnCode::Dropped(1)`. In any case, we won't be + // called again since the future will have resolved. + Poll::Ready(Ok(StreamResult::Completed)) + } + } } } + let mut store = store.as_context_mut(); + let token = StoreToken::new(store.as_context_mut()); Self::new_( - instance.new_transmit(store, TransmitKind::Future, Producer(Some(producer))), + instance.new_transmit( + store, + TransmitKind::Future, + Producer(async move { + producer + .produce(&Accessor::new(token, Some(instance))) + .await + }), + ), instance, ) } @@ -780,32 +829,69 @@ impl FutureReader { } /// Set the consumer that accepts the result of this future. 
- pub fn pipe(self, store: S, consumer: impl FutureConsumer) - where + pub fn pipe( + self, + store: S, + consumer: impl FutureConsumer + Unpin, + ) where T: func::Lift + 'static, { - struct Consumer(Option); + enum Consumer { + Start(C, Instance), + Poll(Pin> + Send>>), + Invalid, + } - impl> StreamConsumer - for Consumer + impl + Unpin> + StreamConsumer for Consumer { - async fn consume( - &mut self, - accessor: &Accessor, - source: &mut Source<'_, T>, - ) -> Result { - let value = &mut None; - accessor.with(|access| source.read(access, value))?; - self.0 - .take() - .unwrap() - .consume(accessor, value.take().unwrap()) - .await?; - Ok(StreamState::Open) - } + type Item = T; + + fn poll_consume( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + mut store: StoreContextMut, + source: &mut Source, + finish: bool, + ) -> Poll> { + let me = self.get_mut(); + + if let Consumer::Start(consumer, instance) = mem::replace(me, Consumer::Invalid) { + let token = StoreToken::new(store.as_context_mut()); + let value = &mut None; + source.read(store.as_context_mut(), value)?; + let value = value.take().unwrap(); + *me = Consumer::Poll(Box::pin(async move { + consumer + .consume(&Accessor::new(token, Some(instance)), value) + .await + })); + } - async fn when_ready(&mut self, _: &Accessor) -> Result { - Ok(StreamState::Open) + let Consumer::Poll(future) = me else { + unreachable!(); + }; + + match tls::set(store.0, || future.as_mut().poll(cx)) { + Poll::Pending => { + if finish { + Poll::Ready(Ok(StreamResult::Cancelled)) + } else { + Poll::Pending + } + } + Poll::Ready(result) => { + result?; + + // Here we return `StreamResult::Completed` even though + // we've consumed the last item we'll ever consume. + // That's because the ABI expects + // `ReturnCode::Completed(1)` rather than + // `ReturnCode::Dropped(1)`. In any case, we won't be + // called again since the future will have resolved. 
+ Poll::Ready(Ok(StreamResult::Completed)) + } + } } } @@ -813,7 +899,7 @@ impl FutureReader { store, self.id, TransmitKind::Future, - Consumer(Some(consumer)), + Consumer::Start(consumer, self.instance), ); } @@ -1074,7 +1160,7 @@ impl StreamReader { pub fn new( instance: Instance, store: S, - producer: impl StreamProducer, + producer: impl StreamProducer, ) -> Self where T: func::Lower + func::Lift + Send + Sync + 'static, @@ -1094,7 +1180,7 @@ impl StreamReader { } /// Set the consumer that accepts the items delivered to this stream. - pub fn pipe(self, store: S, consumer: impl StreamConsumer) + pub fn pipe(self, store: S, consumer: impl StreamConsumer) where T: 'static, { @@ -1525,12 +1611,13 @@ enum WriteState { /// The write end is owned by the host, which is ready to produce items. HostReady { produce: Box< - dyn Fn() -> Pin> + Send + 'static>> + dyn Fn() -> Pin> + Send + 'static>> + Send + Sync, >, guest_offset: usize, - join: Option, + cancel: bool, + cancel_waker: Option, }, /// The write end has been dropped. Dropped, @@ -1563,12 +1650,13 @@ enum ReadState { /// The read end is owned by a host task, and it is ready to consume items. HostReady { consume: Box< - dyn Fn() -> Pin> + Send + 'static>> + dyn Fn() -> Pin> + Send + 'static>> + Send + Sync, >, guest_offset: usize, - join: Option, + cancel: bool, + cancel_waker: Option, }, /// Both the read and write ends are owned by the host. 
HostToHost { @@ -1576,7 +1664,7 @@ enum ReadState { dyn for<'a> Fn( &'a mut UntypedWriteBuffer<'a>, ) - -> Pin> + Send + 'a>> + -> Pin> + Send + 'a>> + Send + Sync, >, @@ -1597,110 +1685,263 @@ impl fmt::Debug for ReadState { } } -fn return_code(kind: TransmitKind, state: StreamState, guest_offset: usize) -> ReturnCode { +fn return_code(kind: TransmitKind, state: StreamResult, guest_offset: usize) -> ReturnCode { let count = guest_offset.try_into().unwrap(); match state { - StreamState::Closed => ReturnCode::Dropped(count), - StreamState::Open => ReturnCode::completed(kind, count), + StreamResult::Dropped => ReturnCode::Dropped(count), + StreamResult::Completed => ReturnCode::completed(kind, count), + StreamResult::Cancelled => ReturnCode::Cancelled(count), } } impl Instance { - fn new_transmit( + fn new_transmit>( self, mut store: S, kind: TransmitKind, - producer: impl StreamProducer, - ) -> TableId { + producer: P, + ) -> TableId + where + P::Item: func::Lower, + { let mut store = store.as_context_mut(); let token = StoreToken::new(store.as_context_mut()); let state = self.concurrent_state_mut(store.0); let (_, read) = state.new_transmit().unwrap(); - let producer = Arc::new(Mutex::new(Some(producer))); + let producer = Arc::new(Mutex::new(Some((Box::pin(producer), P::Buffer::default())))); let id = state.get(read).unwrap().state; let produce = Box::new(move || { let producer = producer.clone(); async move { - let zero_length_read = tls::get(|store| { - anyhow::Ok(matches!( - self.concurrent_state_mut(store).get(id)?.read, - ReadState::GuestReady { count: 0, .. 
} - )) - })?; - - let mut mine = producer.lock().unwrap().take().unwrap(); - let accessor = &Accessor::new(token, Some(self)); - let result = if zero_length_read { - mine.when_ready(accessor).await + let (mut mine, mut buffer) = producer.lock().unwrap().take().unwrap(); + + let (result, cancelled) = if buffer.remaining().is_empty() { + future::poll_fn(|cx| { + tls::get(|store| { + let &WriteState::HostReady { cancel, .. } = + &self.concurrent_state_mut(store).get_mut(id).unwrap().write + else { + unreachable!(); + }; + + let poll = mine.as_mut().poll_produce( + cx, + token.as_context_mut(store), + &mut Destination { + instance: self, + id, + buffer: &mut buffer, + _phantom: PhantomData, + }, + cancel, + ); + + { + let WriteState::HostReady { + guest_offset, + cancel, + cancel_waker, + .. + } = &mut self + .concurrent_state_mut(store) + .get_mut(id) + .unwrap() + .write + else { + unreachable!(); + }; + + if let Poll::Pending = &poll { + if !buffer.remaining().is_empty() || *guest_offset > 0 { + return Poll::Ready(Err(anyhow!( + "StreamProducer::poll_produce returned Poll::Pending \ + after producing at least one item" + ))); + } + + *cancel_waker = Some(cx.waker().clone()); + } else { + *cancel_waker = None; + *cancel = false; + } + } + + poll.map(|v| v.map(|result| (result, cancel))) + }) + }) + .await? } else { - mine.produce( - accessor, - &mut Destination { - instance: self, - id, - kind, - _phantom: PhantomData, + (StreamResult::Completed, false) + }; + + let (guest_offset, count) = tls::get(|store| { + let transmit = self.concurrent_state_mut(store).get_mut(id).unwrap(); + ( + match &transmit.write { + &WriteState::HostReady { guest_offset, .. } => guest_offset, + _ => unreachable!(), + }, + match &transmit.read { + &ReadState::GuestReady { count, .. } => count, + ReadState::HostToHost { .. 
} => 1, + _ => unreachable!(), }, ) - .await - }; - *producer.lock().unwrap() = Some(mine); - result + }); + + match result { + StreamResult::Completed => { + if count > 1 && buffer.remaining().is_empty() && guest_offset == 0 { + bail!( + "StreamProducer::poll_produce returned StreamResult::Completed \ + without producing any items" + ); + } + } + StreamResult::Cancelled => { + if !cancelled { + bail!( + "StreamProducer::poll_produce returned StreamResult::Cancelled \ + without being given a `finish` parameter value of true" + ); + } + } + StreamResult::Dropped => {} + } + + let write = !buffer.remaining().is_empty(); + + *producer.lock().unwrap() = Some((mine, buffer)); + + if write { + self.write(token, id, producer, kind).await?; + } + + Ok(result) } .boxed() }); state.get_mut(id).unwrap().write = WriteState::HostReady { produce, guest_offset: 0, - join: None, + cancel: false, + cancel_waker: None, }; read } - fn set_consumer( + fn set_consumer>( self, mut store: S, id: TableId, kind: TransmitKind, - consumer: impl StreamConsumer, + consumer: C, ) { let mut store = store.as_context_mut(); let token = StoreToken::new(store.as_context_mut()); let state = self.concurrent_state_mut(store.0); let id = state.get(id).unwrap().state; let transmit = state.get_mut(id).unwrap(); - let consumer = Arc::new(Mutex::new(Some(consumer))); - let consume = { + let consumer = Arc::new(Mutex::new(Some(Box::pin(consumer)))); + let consume_with_buffer = { let consumer = consumer.clone(); - Box::new(move || { - let consumer = consumer.clone(); - async move { - let zero_length_write = tls::get(|store| { - anyhow::Ok(matches!( - self.concurrent_state_mut(store).get(id)?.write, - WriteState::GuestReady { count: 0, .. 
} - )) - })?; - - let mut mine = consumer.lock().unwrap().take().unwrap(); - let accessor = &Accessor::new(token, Some(self)); - let result = if zero_length_write { - mine.when_ready(accessor).await - } else { - mine.consume( - accessor, + async move |mut host_buffer: Option<&mut dyn WriteBuffer>| { + let mut mine = consumer.lock().unwrap().take().unwrap(); + + let host_buffer_remaining_before = + host_buffer.as_deref_mut().map(|v| v.remaining().len()); + + let (result, cancelled) = future::poll_fn(|cx| { + tls::get(|store| { + let cancel = + match &self.concurrent_state_mut(store).get_mut(id).unwrap().read { + &ReadState::HostReady { cancel, .. } => cancel, + ReadState::Open => false, + _ => unreachable!(), + }; + + let poll = mine.as_mut().poll_consume( + cx, + token.as_context_mut(store), &mut Source { instance: self, id, - host_buffer: None, + host_buffer: host_buffer.as_deref_mut(), }, - ) - .await - }; - *consumer.lock().unwrap() = Some(mine); - result + cancel, + ); + + if let ReadState::HostReady { + cancel_waker, + cancel, + .. + } = &mut self.concurrent_state_mut(store).get_mut(id).unwrap().read + { + if let Poll::Pending = &poll { + *cancel_waker = Some(cx.waker().clone()); + } else { + *cancel_waker = None; + *cancel = false; + } + } + + poll.map(|v| v.map(|result| (result, cancel))) + }) + }) + .await?; + + let (guest_offset, count) = tls::get(|store| { + let transmit = self.concurrent_state_mut(store).get_mut(id).unwrap(); + ( + match &transmit.read { + &ReadState::HostReady { guest_offset, .. } => guest_offset, + ReadState::Open => 0, + _ => unreachable!(), + }, + match &transmit.write { + &WriteState::GuestReady { count, .. } => count, + WriteState::HostReady { .. 
} => host_buffer_remaining_before.unwrap(), + _ => unreachable!(), + }, + ) + }); + + match result { + StreamResult::Completed => { + if count > 0 + && guest_offset == 0 + && host_buffer_remaining_before + .zip(host_buffer.map(|v| v.remaining().len())) + .map(|(before, after)| before == after) + .unwrap_or(false) + { + bail!( + "StreamConsumer::poll_consume returned StreamResult::Completed \ + without consuming any items" + ); + } + } + StreamResult::Cancelled => { + if !cancelled { + bail!( + "StreamConsumer::poll_consume returned StreamResult::Cancelled \ + without being given a `finish` parameter value of true" + ); + } + } + StreamResult::Dropped => {} } - .boxed() + + *consumer.lock().unwrap() = Some(mine); + + Ok(result) + } + }; + let consume = { + let consume = consume_with_buffer.clone(); + Box::new(move || { + let consume = consume.clone(); + async move { consume(None).await }.boxed() }) }; @@ -1709,7 +1950,8 @@ impl Instance { transmit.read = ReadState::HostReady { consume, guest_offset: 0, - join: None, + cancel: false, + cancel_waker: None, }; } WriteState::GuestReady { .. } => { @@ -1717,36 +1959,28 @@ impl Instance { transmit.read = ReadState::HostReady { consume, guest_offset: 0, - join: None, + cancel: false, + cancel_waker: None, }; - self.pipe_from_guest(store, kind, id, future).unwrap(); + self.pipe_from_guest(store, kind, id, future); } WriteState::HostReady { .. } => { - let WriteState::HostReady { produce, .. } = - mem::replace(&mut transmit.write, WriteState::Open) - else { + let WriteState::HostReady { produce, .. 
} = mem::replace( + &mut transmit.write, + WriteState::HostReady { + produce: Box::new(|| unreachable!()), + guest_offset: 0, + cancel: false, + cancel_waker: None, + }, + ) else { unreachable!(); }; transmit.read = ReadState::HostToHost { accept: Box::new(move |input| { - let consumer = consumer.clone(); - async move { - let mut mine = consumer.lock().unwrap().take().unwrap(); - let result = mine - .consume( - &Accessor::new(token, Some(self)), - &mut Source { - instance: self, - id, - host_buffer: Some(input.get_mut::()), - }, - ) - .await; - *consumer.lock().unwrap() = Some(mine); - result - } - .boxed() + let consume = consume_with_buffer.clone(); + async move { consume(Some(input.get_mut::())).await }.boxed() }), }; @@ -1762,8 +1996,8 @@ impl Instance { } match produce().await? { - StreamState::Open => {} - StreamState::Closed => break Ok(()), + StreamResult::Completed | StreamResult::Cancelled => {} + StreamResult::Dropped => break Ok(()), } if let TransmitKind::Future = kind { @@ -1782,14 +2016,148 @@ impl Instance { } } + async fn write>( + self, + token: StoreToken, + id: TableId, + pair: Arc>>, + kind: TransmitKind, + ) -> Result<()> { + let (read, guest_offset) = tls::get(|store| { + let transmit = self.concurrent_state_mut(store).get_mut(id)?; + + let guest_offset = if let &WriteState::HostReady { guest_offset, .. 
} = &transmit.write + { + Some(guest_offset) + } else { + None + }; + + anyhow::Ok(( + mem::replace(&mut transmit.read, ReadState::Open), + guest_offset, + )) + })?; + + match read { + ReadState::GuestReady { + ty, + flat_abi, + options, + address, + count, + handle, + } => { + let guest_offset = guest_offset.unwrap(); + + if let TransmitKind::Future = kind { + tls::get(|store| { + self.concurrent_state_mut(store).get_mut(id)?.done = true; + anyhow::Ok(()) + })?; + } + + let old_remaining = pair.lock().unwrap().as_mut().unwrap().1.remaining().len(); + let accept = { + let pair = pair.clone(); + move |mut store: StoreContextMut| { + lower::( + store.as_context_mut(), + self, + &options, + ty, + address + (T::SIZE32 * guest_offset), + count - guest_offset, + &mut pair.lock().unwrap().as_mut().unwrap().1, + )?; + anyhow::Ok(()) + } + }; + + if guest_offset < count { + if T::MAY_REQUIRE_REALLOC { + // For payloads which may require a realloc call, use a + // oneshot::channel and background task. This is + // necessary because calling the guest while there are + // host embedder frames on the stack is unsound. + let (tx, rx) = oneshot::channel(); + tls::get(move |store| { + self.concurrent_state_mut(store).push_high_priority( + WorkItem::WorkerFunction(Mutex::new(Box::new(move |store, _| { + _ = tx.send(accept(token.as_context_mut(store))?); + Ok(()) + }))), + ) + }); + rx.await? + } else { + // Optimize flat payloads (i.e. those which do not + // require calling the guest's realloc function) by + // lowering directly instead of using a oneshot::channel + // and background task. + tls::get(|store| accept(token.as_context_mut(store)))? + }; + } + + tls::get(|store| { + let count = + old_remaining - pair.lock().unwrap().as_mut().unwrap().1.remaining().len(); + + let transmit = self.concurrent_state_mut(store).get_mut(id)?; + + let WriteState::HostReady { guest_offset, .. 
} = &mut transmit.write else { + unreachable!(); + }; + + *guest_offset += count; + + transmit.read = ReadState::GuestReady { + ty, + flat_abi, + options, + address, + count, + handle, + }; + + anyhow::Ok(()) + })?; + + Ok(()) + } + + ReadState::HostToHost { accept } => { + let (mine, mut buffer) = pair.lock().unwrap().take().unwrap(); + + let state = accept(&mut UntypedWriteBuffer::new(&mut buffer)).await?; + + *pair.lock().unwrap() = Some((mine, buffer)); + + tls::get(|store| { + self.concurrent_state_mut(store).get_mut(id)?.read = match state { + StreamResult::Dropped => ReadState::Dropped, + StreamResult::Completed | StreamResult::Cancelled => { + ReadState::HostToHost { accept } + } + }; + + anyhow::Ok(()) + })?; + Ok(()) + } + + _ => unreachable!(), + } + } + fn pipe_from_guest( self, mut store: impl AsContextMut, kind: TransmitKind, id: TableId, - future: Pin> + Send + 'static>>, - ) -> Result<()> { - let (join, future) = JoinHandle::run(async move { + future: Pin> + Send + 'static>>, + ) { + let future = async move { let stream_state = future.await?; tls::get(|store| { let state = self.concurrent_state_mut(store); @@ -1804,11 +2172,12 @@ impl Instance { }; let code = return_code(kind, stream_state, guest_offset); transmit.read = match stream_state { - StreamState::Closed => ReadState::Dropped, - StreamState::Open => ReadState::HostReady { + StreamResult::Dropped => ReadState::Dropped, + StreamResult::Completed | StreamResult::Cancelled => ReadState::HostReady { consume, guest_offset: 0, - join: None, + cancel: false, + cancel_waker: None, }, }; let WriteState::GuestReady { ty, handle, .. } = @@ -1819,17 +2188,10 @@ impl Instance { state.send_write_result(ty, id, handle, code)?; Ok(()) }) - }); - let state = self.concurrent_state_mut(store.as_context_mut().0); - state.push_future(future.map(|result| result.unwrap_or(Ok(()))).boxed()); - let ReadState::HostReady { - join: state_join, .. 
- } = &mut state.get_mut(id)?.read - else { - unreachable!() }; - *state_join = Some(join); - Ok(()) + + self.concurrent_state_mut(store.as_context_mut().0) + .push_future(future.boxed()); } fn pipe_to_guest( @@ -1837,9 +2199,9 @@ impl Instance { mut store: impl AsContextMut, kind: TransmitKind, id: TableId, - future: Pin> + Send + 'static>>, - ) -> Result<()> { - let (join, future) = JoinHandle::run(async move { + future: Pin> + Send + 'static>>, + ) { + let future = async move { let stream_state = future.await?; tls::get(|store| { let state = self.concurrent_state_mut(store); @@ -1854,11 +2216,12 @@ impl Instance { }; let code = return_code(kind, stream_state, guest_offset); transmit.write = match stream_state { - StreamState::Closed => WriteState::Dropped, - StreamState::Open => WriteState::HostReady { + StreamResult::Dropped => WriteState::Dropped, + StreamResult::Completed | StreamResult::Cancelled => WriteState::HostReady { produce, guest_offset: 0, - join: None, + cancel: false, + cancel_waker: None, }, }; let ReadState::GuestReady { ty, handle, .. } = @@ -1869,17 +2232,10 @@ impl Instance { state.send_read_result(ty, id, handle, code)?; Ok(()) }) - }); - let state = self.concurrent_state_mut(store.as_context_mut().0); - state.push_future(future.map(|result| result.unwrap_or(Ok(()))).boxed()); - let WriteState::HostReady { - join: state_join, .. - } = &mut state.get_mut(id)?.write - else { - unreachable!() }; - *state_join = Some(join); - Ok(()) + + self.concurrent_state_mut(store.as_context_mut().0) + .push_future(future.boxed()); } /// Drop the read end of a stream or future read from the host. 
@@ -2430,9 +2786,11 @@ impl Instance { ReadState::HostReady { consume, guest_offset, - join, + cancel, + cancel_waker, } => { - assert!(join.is_none()); + assert!(cancel_waker.is_none()); + assert!(!cancel); assert_eq!(0, guest_offset); if let TransmitIndex::Future(_) = ty { @@ -2443,7 +2801,8 @@ impl Instance { transmit.read = ReadState::HostReady { consume, guest_offset: 0, - join: None, + cancel: false, + cancel_waker: None, }; set_guest_ready(concurrent_state)?; let poll = self.set_tls(store.0, || { @@ -2468,7 +2827,7 @@ impl Instance { ty.kind(), transmit_id, future, - )?; + ); ReturnCode::Blocked } } @@ -2656,9 +3015,11 @@ impl Instance { WriteState::HostReady { produce, guest_offset, - join, + cancel, + cancel_waker, } => { - assert!(join.is_none()); + assert!(cancel_waker.is_none()); + assert!(!cancel); assert_eq!(0, guest_offset); if let TransmitIndex::Future(_) = ty { @@ -2669,7 +3030,8 @@ impl Instance { transmit.write = WriteState::HostReady { produce, guest_offset: 0, - join: None, + cancel: false, + cancel_waker: None, }; set_guest_ready(concurrent_state)?; let poll = self.set_tls(store.0, || { @@ -2689,7 +3051,7 @@ impl Instance { code } Poll::Pending => { - self.pipe_to_guest(store.as_context_mut(), ty.kind(), transmit_id, future)?; + self.pipe_to_guest(store.as_context_mut(), ty.kind(), transmit_id, future); ReturnCode::Blocked } } @@ -3312,13 +3674,16 @@ impl ConcurrentState { _ => unreachable!(), } } else if let ReadState::HostReady { - join, guest_offset, .. + cancel, + cancel_waker, + .. } = &mut self.get_mut(transmit_id)?.read { - if let Some(join) = join.take() { - join.abort(); + *cancel = true; + if let Some(waker) = cancel_waker.take() { + waker.wake() } - ReturnCode::Cancelled(u32::try_from(mem::replace(guest_offset, 0)).unwrap()) + ReturnCode::Blocked } else { ReturnCode::Cancelled(0) }; @@ -3364,13 +3729,16 @@ impl ConcurrentState { _ => unreachable!(), } } else if let WriteState::HostReady { - join, guest_offset, .. 
+ cancel, + cancel_waker, + .. } = &mut self.get_mut(transmit_id)?.write { - if let Some(join) = join.take() { - join.abort(); + *cancel = true; + if let Some(waker) = cancel_waker.take() { + waker.wake() } - ReturnCode::Cancelled(u32::try_from(mem::replace(guest_offset, 0)).unwrap()) + ReturnCode::Blocked } else { ReturnCode::Cancelled(0) }; diff --git a/crates/wasmtime/src/runtime/component/mod.rs b/crates/wasmtime/src/runtime/component/mod.rs index fd5afa11767a..35f1a27880f6 100644 --- a/crates/wasmtime/src/runtime/component/mod.rs +++ b/crates/wasmtime/src/runtime/component/mod.rs @@ -119,10 +119,10 @@ mod values; pub use self::component::{Component, ComponentExportIndex}; #[cfg(feature = "component-model-async")] pub use self::concurrent::{ - Access, Accessor, AccessorTask, AsAccessor, Destination, ErrorContext, FutureConsumer, - FutureProducer, FutureReader, GuardedFutureReader, GuardedStreamReader, GuestDestination, - GuestSource, JoinHandle, ReadBuffer, Source, StreamConsumer, StreamProducer, StreamReader, - StreamState, VMComponentAsyncStore, VecBuffer, WriteBuffer, + Access, Accessor, AccessorTask, AsAccessor, Destination, DirectDestination, DirectSource, + ErrorContext, FutureConsumer, FutureProducer, FutureReader, GuardedFutureReader, + GuardedStreamReader, JoinHandle, ReadBuffer, Source, StreamConsumer, StreamProducer, + StreamReader, StreamResult, VMComponentAsyncStore, VecBuffer, WriteBuffer, }; pub use self::func::{ ComponentNamedList, ComponentType, Func, Lift, Lower, TypedFunc, WasmList, WasmStr, From ca8a435b3c8f63652ba39fb9c86afad0cc1b6bcb Mon Sep 17 00:00:00 2001 From: Roman Volosatovs Date: Wed, 3 Sep 2025 18:50:53 +0200 Subject: [PATCH 18/32] begin integration of new API Signed-off-by: Roman Volosatovs --- crates/wasi/src/p3/mod.rs | 31 ++++--- crates/wasi/src/p3/sockets/host/types/tcp.rs | 97 ++++++++------------ 2 files changed, 56 insertions(+), 72 deletions(-) diff --git a/crates/wasi/src/p3/mod.rs b/crates/wasi/src/p3/mod.rs index 
a92d418085b4..a15df48e1360 100644 --- a/crates/wasi/src/p3/mod.rs +++ b/crates/wasi/src/p3/mod.rs @@ -19,11 +19,13 @@ use crate::WasiView; use crate::p3::bindings::LinkOptions; use anyhow::Context as _; use bytes::BytesMut; +use core::pin::Pin; +use core::task::{Context, Poll}; use std::io::Cursor; use tokio::sync::oneshot; use wasmtime::AsContextMut as _; use wasmtime::component::{ - Accessor, Destination, FutureProducer, Linker, StreamProducer, StreamState, + Accessor, Destination, FutureProducer, Linker, StreamProducer, StreamResult, StreamState, }; // Default buffer capacity to use for reads of byte-sized values. @@ -34,26 +36,29 @@ const MAX_BUFFER_CAPACITY: usize = 4 * DEFAULT_BUFFER_CAPACITY; struct StreamEmptyProducer; -impl StreamProducer for StreamEmptyProducer { - async fn produce( - &mut self, - _: &Accessor, - _: &mut Destination, - ) -> wasmtime::Result { - Ok(StreamState::Closed) - } +impl StreamProducer for StreamEmptyProducer { + type Item = T; + type Buffer = Option; - async fn when_ready(&mut self, _: &Accessor) -> wasmtime::Result { + fn poll_produce<'a>( + self: Pin<&mut Self>, + _: &mut Context<'_>, + _: StoreContextMut<'a, D>, + _: &'a mut Destination<'a, Self::Item, Self::Buffer>, + _: bool, + ) -> Poll> { Ok(StreamState::Closed) } } struct FutureReadyProducer(T); -impl FutureProducer for FutureReadyProducer +impl FutureProducer for FutureReadyProducer where T: Send + 'static, { + type Item = T; + async fn produce(self, _: &Accessor) -> wasmtime::Result { Ok(self.0) } @@ -61,10 +66,12 @@ where struct FutureOneshotProducer(oneshot::Receiver); -impl FutureProducer for FutureOneshotProducer +impl FutureProducer for FutureOneshotProducer where T: Send + 'static, { + type Item = T; + async fn produce(self, _: &Accessor) -> wasmtime::Result { self.0.await.context("oneshot sender dropped") } diff --git a/crates/wasi/src/p3/sockets/host/types/tcp.rs b/crates/wasi/src/p3/sockets/host/types/tcp.rs index 54525df0c0b8..e06e4fb64c96 100644 --- 
a/crates/wasi/src/p3/sockets/host/types/tcp.rs +++ b/crates/wasi/src/p3/sockets/host/types/tcp.rs @@ -9,19 +9,21 @@ use crate::p3::{ StreamEmptyProducer, write_buffered_bytes, }; use crate::sockets::{NonInheritedOptions, SocketAddrUse, SocketAddressFamily, WasiSocketsCtxView}; -use anyhow::Context; +use anyhow::Context as _; use bytes::BytesMut; +use core::pin::Pin; +use core::task::{Context, Poll, ready}; use io_lifetimes::AsSocketlike as _; use std::io::Cursor; use std::net::{Shutdown, SocketAddr}; use std::sync::Arc; use tokio::net::{TcpListener, TcpStream}; use tokio::sync::oneshot; -use wasmtime::AsContextMut as _; use wasmtime::component::{ Accessor, Destination, FutureReader, Resource, ResourceTable, Source, StreamConsumer, - StreamProducer, StreamReader, StreamState, + StreamProducer, StreamReader, StreamResult, StreamState, }; +use wasmtime::{AsContextMut as _, StoreContextMut}; fn get_socket<'a>( table: &'a ResourceTable, @@ -58,11 +60,14 @@ impl ListenStreamProducer { } } -impl StreamProducer> for ListenStreamProducer +impl StreamProducer for ListenStreamProducer where D: 'static, { - async fn produce( + type Item = Resource; + type Buffer = Option; + + fn poll_produce( &mut self, store: &Accessor, dst: &mut Destination>, @@ -129,11 +134,16 @@ impl ReceiveStreamProducer { } } -impl StreamProducer for ReceiveStreamProducer { - async fn produce( +impl StreamProducer for ReceiveStreamProducer { + type Item = u8; + type Buffer = Cursor; + + fn poll_produce( &mut self, - store: &Accessor, - dst: &mut Destination, + cx: &mut Context<'_>, + store: StoreContextMut<'a, D>, + dst: &'a mut Destination<'a, Self::Item, Self::Buffer>, + finish: bool, ) -> wasmtime::Result { if !self.buffer.get_ref().is_empty() { write_buffered_bytes(store, &mut self.buffer, dst).await?; @@ -142,7 +152,7 @@ impl StreamProducer for ReceiveStreamProducer { let res = 'result: loop { match store.with(|mut store| { - if let Some(mut dst) = dst.as_guest_destination(store.as_context_mut()) { 
+ if let Some(mut dst) = dst.as_direct_destination(store.as_context_mut()) { let n = self.stream.try_read(dst.remaining())?; if n > 0 { dst.mark_written(n); @@ -214,51 +224,26 @@ impl SendStreamConsumer { } } -impl StreamConsumer for SendStreamConsumer { - async fn consume( - &mut self, - store: &Accessor, - src: &mut Source<'_, u8>, - ) -> wasmtime::Result { +impl StreamConsumer for SendStreamConsumer { + type Item = u8; + + fn poll_consume( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + store: StoreContextMut, + src: &mut Source, + finish: bool, + ) -> Poll> { + let mut src = src.as_direct_source(&mut store); let res = 'result: loop { - match store.with(|mut store| { - let n = if let Some(mut src) = src.as_guest_source(store.as_context_mut()) { - let n = self.stream.try_write(src.remaining())?; + match self.stream.try_write(src.remaining()) { + Ok(n) => { + debug_assert!(n > 0); src.mark_read(n); - n - } else { - // NOTE: The implementation might want to use Linux SIOCOUTQ ioctl or similar construct - // on other platforms to only read `min(socket_capacity, src.remaining())` and prevent - // short writes - let n = src.remaining(&mut store).min(MAX_BUFFER_CAPACITY); - self.buffer.reserve(n); - if let Err(err) = src.read(&mut store, &mut self.buffer) { - return Ok(Err(err)); - } - self.stream.try_write(&self.buffer)? 
- }; - debug_assert!(n > 0); - std::io::Result::Ok(Ok(n)) - }) { - Ok(Ok(..)) if self.buffer.is_empty() => return Ok(StreamState::Open), - Ok(Ok(n)) => { - let mut buf = &self.buffer[n..]; - while !buf.is_empty() { - // FIXME: Handle cancellation - if let Err(err) = self.stream.writable().await { - break 'result Err(err.into()); - } - match self.stream.try_write(buf) { - Ok(n) => buf = &buf[n..], - Err(err) if err.kind() == std::io::ErrorKind::WouldBlock => continue, - Err(err) => break 'result Err(err.into()), - } - } - self.buffer.clear(); + Poll::Ready(Ok(StreamResult::Completed)) } - Ok(Err(err)) => return Err(err), Err(err) if err.kind() == std::io::ErrorKind::WouldBlock => { - if let Err(err) = self.stream.writable().await { + if let Err(err) = ready!(Pin::new(&mut self.stream.writable()).poll(cx)) { break 'result Err(err.into()); } } @@ -266,15 +251,7 @@ impl StreamConsumer for SendStreamConsumer { } }; self.close(res); - Ok(StreamState::Closed) - } - - async fn when_ready(&mut self, _: &Accessor) -> wasmtime::Result { - if let Err(err) = self.stream.writable().await { - self.close(Err(err.into())); - return Ok(StreamState::Closed); - } - Ok(StreamState::Open) + Poll::Ready(Ok(StreamResult::Dropped)) } } From 00047c3caa6668775f535523ba6141236ce1edd8 Mon Sep 17 00:00:00 2001 From: Joel Dice Date: Wed, 3 Sep 2025 16:11:08 -0600 Subject: [PATCH 19/32] update wasi/src/p3/filesystem to use new stream API This is totally untested so far; I'll run the tests once we have everything else compiling.
Signed-off-by: Joel Dice --- crates/misc/component-async-tests/src/util.rs | 2 +- .../src/bin/p3_sockets_tcp_bind.rs | 1 - crates/wasi/src/p3/filesystem/host.rs | 525 +++++++++--------- crates/wasi/src/p3/mod.rs | 42 +- .../concurrent/futures_and_streams.rs | 29 +- 5 files changed, 279 insertions(+), 320 deletions(-) diff --git a/crates/misc/component-async-tests/src/util.rs b/crates/misc/component-async-tests/src/util.rs index d435bdc3717b..40dc572d5e39 100644 --- a/crates/misc/component-async-tests/src/util.rs +++ b/crates/misc/component-async-tests/src/util.rs @@ -69,7 +69,7 @@ impl + Send + 'static> } } Poll::Ready(Some(item)) => { - *destination.buffer() = Some(item); + destination.set_buffer(Some(item)); Poll::Ready(Ok(StreamResult::Completed)) } Poll::Ready(None) => Poll::Ready(Ok(StreamResult::Dropped)), diff --git a/crates/test-programs/src/bin/p3_sockets_tcp_bind.rs b/crates/test-programs/src/bin/p3_sockets_tcp_bind.rs index ca14301264a5..8fec2e69c183 100644 --- a/crates/test-programs/src/bin/p3_sockets_tcp_bind.rs +++ b/crates/test-programs/src/bin/p3_sockets_tcp_bind.rs @@ -4,7 +4,6 @@ use test_programs::p3::wasi::sockets::types::{ ErrorCode, IpAddress, IpAddressFamily, IpSocketAddress, TcpSocket, }; use test_programs::p3::wit_stream; -use wit_bindgen::yield_blocking; struct Component; diff --git a/crates/wasi/src/p3/filesystem/host.rs b/crates/wasi/src/p3/filesystem/host.rs index 30b58242fb3a..21c4bd4ae3f0 100644 --- a/crates/wasi/src/p3/filesystem/host.rs +++ b/crates/wasi/src/p3/filesystem/host.rs @@ -7,19 +7,24 @@ use crate::p3::bindings::filesystem::types::{ use crate::p3::filesystem::{FilesystemError, FilesystemResult, preopens}; use crate::p3::{ DEFAULT_BUFFER_CAPACITY, FutureOneshotProducer, FutureReadyProducer, MAX_BUFFER_CAPACITY, - StreamEmptyProducer, write_buffered_bytes, + StreamEmptyProducer, }; use crate::{DirPerms, FilePerms}; use anyhow::{Context as _, bail}; use bytes::BytesMut; use core::mem; +use futures::FutureExt; use 
std::io::Cursor; +use std::marker::PhantomData; +use std::pin::Pin; +use std::task::{self, Context, Poll}; use system_interface::fs::FileIoExt as _; use tokio::sync::oneshot; use wasmtime::component::{ Accessor, Destination, FutureReader, Resource, ResourceTable, Source, StreamConsumer, - StreamProducer, StreamReader, StreamState, + StreamProducer, StreamReader, StreamResult, }; +use wasmtime::{AsContextMut as _, StoreContextMut}; fn get_descriptor<'a>( table: &'a ResourceTable, @@ -116,13 +121,7 @@ struct ReadStreamProducer { file: File, offset: u64, result: Option>>, - buffer: Cursor, -} - -impl Drop for ReadStreamProducer { - fn drop(&mut self) { - self.close(Ok(())) - } + future: Option, ErrorCode>> + Send>>>, } impl ReadStreamProducer { @@ -131,77 +130,98 @@ impl ReadStreamProducer { _ = tx.send(res); } } +} - async fn read(&mut self, n: usize) -> StreamState { - let mut buf = mem::take(&mut self.buffer).into_inner(); - buf.resize(n, 0); - let offset = self.offset; - let res = 'result: { - match self - .file - .run_blocking(move |file| { - let n = file.read_at(&mut buf, offset)?; - buf.truncate(n); - std::io::Result::Ok(buf) - }) - .await - { - Ok(buf) if buf.is_empty() => break 'result Ok(()), - Ok(buf) => { - let Ok(n) = buf.len().try_into() else { - break 'result Err(ErrorCode::Overflow); - }; - let Some(n) = offset.checked_add(n) else { - break 'result Err(ErrorCode::Overflow); - }; - self.offset = n; - self.buffer = Cursor::new(buf); - return StreamState::Open; - } - Err(err) => break 'result Err(err.into()), +impl StreamProducer for ReadStreamProducer { + type Item = u8; + type Buffer = Cursor; + + fn poll_produce<'a>( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + store: StoreContextMut<'a, D>, + destination: &'a mut Destination<'a, Self::Item, Self::Buffer>, + finish: bool, + ) -> Poll> { + let me = self.get_mut(); + if me.future.is_none() { + if finish { + return Poll::Ready(Ok(StreamResult::Cancelled)); } - }; - self.close(res); - 
StreamState::Closed - } -} -impl StreamProducer for ReadStreamProducer { - async fn produce( - &mut self, - store: &Accessor, - dst: &mut Destination, - ) -> wasmtime::Result { - if !self.buffer.get_ref().is_empty() { - write_buffered_bytes(store, &mut self.buffer, dst).await?; - return Ok(StreamState::Open); + let capacity = destination + .remaining(store) + .unwrap_or(DEFAULT_BUFFER_CAPACITY) + // In the case of small or zero-length reads, we read more than + // was asked for; this will save the runtime from having to + // block or call `poll_produce` on subsequent reads. See the + // documentation for `StreamProducer::poll_produce` for details. + .max(DEFAULT_BUFFER_CAPACITY) + .min(MAX_BUFFER_CAPACITY); + let mut buffer = destination.take_buffer().into_inner(); + buffer.resize(capacity, 0); + let offset = me.offset; + let file = me.file.clone(); + me.future = Some( + async move { + match file + .run_blocking(move |file| { + let n = file.read_at(&mut buffer, offset)?; + buffer.truncate(n); + std::io::Result::Ok(buffer) + }) + .await + { + Ok(buffer) if buffer.is_empty() => Ok(None), + Ok(buffer) => { + let n_u64 = buffer.len().try_into().or(Err(ErrorCode::Overflow))?; + offset.checked_add(n_u64).ok_or(ErrorCode::Overflow)?; + Ok(Some(buffer)) + } + Err(err) => Err(err.into()), + } + } + .boxed(), + ); } - let n = store - .with(|store| dst.remaining(store)) - .unwrap_or(DEFAULT_BUFFER_CAPACITY) - .min(MAX_BUFFER_CAPACITY); - match self.read(n).await { - StreamState::Open => { - write_buffered_bytes(store, &mut self.buffer, dst).await?; - Ok(StreamState::Open) + + let result = match task::ready!(me.future.as_mut().unwrap().as_mut().poll(cx)) { + Ok(Some(buffer)) => { + // We've already checked for overflow inside the future above, + // so no need to do it again here: + me.offset += u64::try_from(buffer.len()).unwrap(); + destination.set_buffer(Cursor::new(buffer)); + StreamResult::Completed } - StreamState::Closed => Ok(StreamState::Closed), - } - } + Ok(None) => 
{ + me.close(Ok(())); + StreamResult::Dropped + } + Err(error) => { + me.close(Err(error)); + StreamResult::Dropped + } + }; - async fn when_ready(&mut self, _: &Accessor) -> wasmtime::Result { - if !self.buffer.get_ref().is_empty() { - return Ok(StreamState::Open); - } - Ok(self.read(DEFAULT_BUFFER_CAPACITY).await) + me.future = None; + + Poll::Ready(Ok(result)) } } struct DirectoryStreamProducer { dir: Dir, entries: Option, - buffered: Option, result: Option>>, + future: Option< + Pin< + Box< + dyn Future< + Output = Result, ErrorCode>, + > + Send, + >, + >, + >, } impl DirectoryStreamProducer { @@ -210,106 +230,113 @@ impl DirectoryStreamProducer { _ = tx.send(res); } } +} - async fn next(&mut self) -> Option { - let res = 'result: loop { - let mut entries = if let Some(entries) = self.entries.take() { - entries - } else { - // FIXME: Handle cancellation - match self.dir.run_blocking(cap_std::fs::Dir::entries).await { - Ok(entries) => entries, - Err(err) => break 'result Err(err.into()), - } - }; - // FIXME: Handle cancellation - let Some((res, tail)) = self - .dir - .run_blocking(move |_| entries.next().map(|entry| (entry, entries))) - .await - else { - break 'result Ok(()); - }; - self.entries = Some(tail); - let entry = match res { - Ok(entry) => entry, - Err(err) => { - // On windows, filter out files like `C:\DumpStack.log.tmp` which we - // can't get full metadata for. 
- #[cfg(windows)] - { - use windows_sys::Win32::Foundation::{ - ERROR_ACCESS_DENIED, ERROR_SHARING_VIOLATION, +impl StreamProducer for DirectoryStreamProducer { + type Item = DirectoryEntry; + type Buffer = Option; + + fn poll_produce<'a>( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + store: StoreContextMut<'a, D>, + destination: &'a mut Destination<'a, Self::Item, Self::Buffer>, + finish: bool, + ) -> Poll> { + let me = self.get_mut(); + if me.future.is_none() { + if finish { + return Poll::Ready(Ok(StreamResult::Cancelled)); + } + + let dir = me.dir.clone(); + let mut entries = me.entries.take(); + me.future = Some( + async move { + loop { + let mut entries = if let Some(entries) = entries.take() { + entries + } else { + // FIXME: Handle cancellation + match dir.run_blocking(cap_std::fs::Dir::entries).await { + Ok(entries) => entries, + Err(err) => break Err(err.into()), + } }; - if err.raw_os_error() == Some(ERROR_SHARING_VIOLATION as i32) - || err.raw_os_error() == Some(ERROR_ACCESS_DENIED as i32) - { - continue; - } + // FIXME: Handle cancellation + let Some((res, tail)) = dir + .run_blocking(move |_| entries.next().map(|entry| (entry, entries))) + .await + else { + break Ok(None); + }; + let entry = match res { + Ok(entry) => entry, + Err(err) => { + // On windows, filter out files like `C:\DumpStack.log.tmp` which we + // can't get full metadata for. 
+ #[cfg(windows)] + { + use windows_sys::Win32::Foundation::{ + ERROR_ACCESS_DENIED, ERROR_SHARING_VIOLATION, + }; + if err.raw_os_error() == Some(ERROR_SHARING_VIOLATION as i32) + || err.raw_os_error() == Some(ERROR_ACCESS_DENIED as i32) + { + continue; + } + } + break Err(err.into()); + } + }; + let meta = match entry.metadata() { + Ok(meta) => meta, + Err(err) => break Err(err.into()), + }; + let Ok(name) = entry.file_name().into_string() else { + break Err(ErrorCode::IllegalByteSequence); + }; + break Ok(Some(( + DirectoryEntry { + type_: meta.file_type().into(), + name, + }, + tail, + ))); } - break 'result Err(err.into()); } - }; - let meta = match entry.metadata() { - Ok(meta) => meta, - Err(err) => break 'result Err(err.into()), - }; - let Ok(name) = entry.file_name().into_string() else { - break 'result Err(ErrorCode::IllegalByteSequence); - }; - // FIXME: Handle cancellation - return Some(DirectoryEntry { - type_: meta.file_type().into(), - name, - }); - }; - self.close(res); - None - } -} + .boxed(), + ); + } -impl StreamProducer for DirectoryStreamProducer { - async fn produce( - &mut self, - store: &Accessor, - dst: &mut Destination, - ) -> wasmtime::Result { - let entry = if let Some(entry) = self.buffered.take() { - entry - } else if let Some(entry) = self.next().await { - entry - } else { - return Ok(StreamState::Closed); + let result = match task::ready!(me.future.as_mut().unwrap().as_mut().poll(cx)) { + Ok(Some((entry, entries))) => { + destination.set_buffer(Some(entry)); + me.entries = Some(entries); + StreamResult::Completed + } + Ok(None) => { + me.close(Ok(())); + StreamResult::Dropped + } + Err(error) => { + me.close(Err(error)); + StreamResult::Dropped + } }; - // FIXME: Handle cancellation - if let Some(_) = dst.write(store, Some(entry)).await? 
{ - bail!("failed to write entry") - } - return Ok(StreamState::Open); - } - async fn when_ready(&mut self, _: &Accessor) -> wasmtime::Result { - if self.buffered.is_none() { - let Some(entry) = self.next().await else { - return Ok(StreamState::Closed); - }; - self.buffered = Some(entry); - } - Ok(StreamState::Open) + me.future = None; + + Poll::Ready(Ok(result)) } } struct WriteStreamConsumer { file: File, - offset: u64, - result: Option>>, + offset: Option, buffer: BytesMut, -} - -impl Drop for WriteStreamConsumer { - fn drop(&mut self) { - self.close(Ok(())) - } + result: Option>>, + future: Option> + Send>>>, } impl WriteStreamConsumer { @@ -318,128 +345,75 @@ impl WriteStreamConsumer { _ = tx.send(res); } } - - async fn flush(&mut self) -> StreamState { - // FIXME: `mem::take` rather than `clone` when we can ensure cancellation-safety - //let buf = mem::take(&mut self.buffer); - let buf = self.buffer.clone(); - let mut offset = self.offset; - match self - .file - .spawn_blocking(move |file| { - let mut pos = 0; - while pos != buf.len() { - let n = file.write_at(&buf[pos..], offset)?; - pos = pos.saturating_add(n); - let n = n.try_into().or(Err(ErrorCode::Overflow))?; - offset = offset.checked_add(n).ok_or(ErrorCode::Overflow)?; - } - Ok((buf, offset)) - }) - .await - { - Ok((buf, offset)) => { - self.buffer = buf; - self.buffer.clear(); - self.offset = offset; - StreamState::Open - } - Err(err) => { - self.close(Err(err)); - StreamState::Closed - } - } - } } -impl StreamConsumer for WriteStreamConsumer { - async fn consume( - &mut self, - store: &Accessor, - src: &mut Source<'_, u8>, - ) -> wasmtime::Result { - store.with(|mut store| { - let n = src.remaining(&mut store).min(MAX_BUFFER_CAPACITY); - self.buffer.reserve(n); - src.read(&mut store, &mut self.buffer) - })?; - Ok(self.flush().await) - } - - async fn when_ready(&mut self, _: &Accessor) -> wasmtime::Result { - if !self.buffer.is_empty() { - return Ok(self.flush().await); - } - Ok(StreamState::Open) - 
} -} - -struct AppendStreamConsumer { - file: File, - result: Option>>, - buffer: BytesMut, -} - -impl Drop for AppendStreamConsumer { - fn drop(&mut self) { - self.close(Ok(())) - } -} +impl StreamConsumer for WriteStreamConsumer { + type Item = u8; + + fn poll_consume( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + mut store: StoreContextMut, + source: &mut Source, + finish: bool, + ) -> Poll> { + let me = self.get_mut(); + if me.future.is_none() { + if finish { + return Poll::Ready(Ok(StreamResult::Cancelled)); + } -impl AppendStreamConsumer { - fn close(&mut self, res: Result<(), ErrorCode>) { - if let Some(tx) = self.result.take() { - _ = tx.send(res); + let offset = me.offset; + let file = me.file.clone(); + let mut buffer = mem::take(&mut me.buffer); + buffer.clear(); + buffer.extend_from_slice(source.as_direct_source(store.as_context_mut()).remaining()); + + me.future = Some( + async move { + file.spawn_blocking(move |file| { + let n = if let Some(offset) = offset { + let n = file.write_at(&buffer, offset)?; + let n_u64 = n.try_into().or(Err(ErrorCode::Overflow))?; + offset.checked_add(n_u64).ok_or(ErrorCode::Overflow)?; + n + } else { + file.append(&buffer)? 
+ }; + Ok((buffer, n)) + }) + .await + } + .boxed(), + ); } - } - async fn flush(&mut self) -> StreamState { - let buf = mem::take(&mut self.buffer); - // FIXME: Handle cancellation - match self - .file - .spawn_blocking(move |file| { - let mut pos = 0; - while pos != buf.len() { - let n = file.append(&buf[pos..])?; - pos = pos.saturating_add(n); + let result = match task::ready!(me.future.as_mut().unwrap().as_mut().poll(cx)) { + Ok((mut buffer, count)) => { + source.as_direct_source(store).mark_read(count); + let result = if count < buffer.len() && finish { + StreamResult::Cancelled + } else { + StreamResult::Completed + }; + if let Some(offset) = me.offset.as_mut() { + // We've already checked for overflow inside the future + // above, so no need to do it again here: + *offset += u64::try_from(count).unwrap(); } - Ok(buf) - }) - .await - { - Ok(buf) => { - self.buffer = buf; - self.buffer.clear(); - StreamState::Open + buffer.clear(); + me.buffer = buffer; + result } - Err(err) => { - self.close(Err(err)); - StreamState::Closed + Err(error) => { + me.close(Err(error)); + StreamResult::Dropped } - } - } -} + }; -impl StreamConsumer for AppendStreamConsumer { - async fn consume( - &mut self, - store: &Accessor, - src: &mut Source<'_, u8>, - ) -> wasmtime::Result { - store.with(|mut store| { - let n = src.remaining(&mut store).min(MAX_BUFFER_CAPACITY); - self.buffer.reserve(n); - src.read(&mut store, &mut self.buffer) - })?; - Ok(self.flush().await) - } + me.future = None; - async fn when_ready(&mut self, _: &Accessor) -> wasmtime::Result { - if !self.buffer.is_empty() { - return Ok(self.flush().await); - } - Ok(StreamState::Open) + Poll::Ready(Ok(result)) } } @@ -460,7 +434,7 @@ impl types::HostDescriptorWithStore for WasiFilesystem { let file = get_file(store.get().table, &fd)?; if !file.perms.contains(FilePerms::READ) { return Ok(( - StreamReader::new(instance, &mut store, StreamEmptyProducer), + StreamReader::new(instance, &mut store, 
StreamEmptyProducer(PhantomData)), FutureReader::new( instance, &mut store, @@ -479,7 +453,7 @@ impl types::HostDescriptorWithStore for WasiFilesystem { file, offset, result: Some(result_tx), - buffer: Cursor::default(), + future: None, }, ), FutureReader::new(instance, &mut store, FutureOneshotProducer(result_rx)), @@ -504,9 +478,10 @@ impl types::HostDescriptorWithStore for WasiFilesystem { store, WriteStreamConsumer { file, - offset, - result: Some(result_tx), + offset: Some(offset), buffer: BytesMut::default(), + result: Some(result_tx), + future: None, }, ); FilesystemResult::Ok(()) @@ -532,10 +507,12 @@ impl types::HostDescriptorWithStore for WasiFilesystem { let file = file.clone(); data.pipe( store, - AppendStreamConsumer { + WriteStreamConsumer { file, - result: Some(result_tx), + offset: None, buffer: BytesMut::default(), + result: Some(result_tx), + future: None, }, ); FilesystemResult::Ok(()) @@ -621,7 +598,7 @@ impl types::HostDescriptorWithStore for WasiFilesystem { let dir = get_dir(store.get().table, &fd)?; if !dir.perms.contains(DirPerms::READ) { return Ok(( - StreamReader::new(instance, &mut store, StreamEmptyProducer), + StreamReader::new(instance, &mut store, StreamEmptyProducer(PhantomData)), FutureReader::new( instance, &mut store, @@ -639,8 +616,8 @@ impl types::HostDescriptorWithStore for WasiFilesystem { DirectoryStreamProducer { dir, entries: None, - buffered: None, result: Some(result_tx), + future: None, }, ), FutureReader::new(instance, &mut store, FutureOneshotProducer(result_rx)), diff --git a/crates/wasi/src/p3/mod.rs b/crates/wasi/src/p3/mod.rs index a15df48e1360..0768d89fccbd 100644 --- a/crates/wasi/src/p3/mod.rs +++ b/crates/wasi/src/p3/mod.rs @@ -22,11 +22,12 @@ use bytes::BytesMut; use core::pin::Pin; use core::task::{Context, Poll}; use std::io::Cursor; +use std::marker::PhantomData; use tokio::sync::oneshot; -use wasmtime::AsContextMut as _; use wasmtime::component::{ - Accessor, Destination, FutureProducer, Linker, 
StreamProducer, StreamResult, StreamState, + Accessor, Destination, FutureProducer, Linker, StreamProducer, StreamResult, }; +use wasmtime::{AsContextMut as _, StoreContextMut}; // Default buffer capacity to use for reads of byte-sized values. const DEFAULT_BUFFER_CAPACITY: usize = 8192; @@ -34,9 +35,9 @@ const DEFAULT_BUFFER_CAPACITY: usize = 8192; // Maximum buffer capacity to use for reads of byte-sized values. const MAX_BUFFER_CAPACITY: usize = 4 * DEFAULT_BUFFER_CAPACITY; -struct StreamEmptyProducer; +struct StreamEmptyProducer(PhantomData T>); -impl StreamProducer for StreamEmptyProducer { +impl StreamProducer for StreamEmptyProducer { type Item = T; type Buffer = Option; @@ -47,7 +48,7 @@ impl StreamProducer for StreamEmptyProducer { _: &'a mut Destination<'a, Self::Item, Self::Buffer>, _: bool, ) -> Poll> { - Ok(StreamState::Closed) + Poll::Ready(Ok(StreamResult::Dropped)) } } @@ -77,37 +78,6 @@ where } } -async fn write_buffered_bytes( - store: &Accessor, - src: &mut Cursor, - dst: &mut Destination, -) -> wasmtime::Result<()> { - if !store.with(|mut store| { - dst.as_guest_destination(store.as_context_mut()) - .map(|mut dst| { - let start = src.position() as _; - let buffered = src.get_ref().len().saturating_sub(start); - let n = dst.remaining().len().min(buffered); - debug_assert!(n > 0); - let end = start.saturating_add(n); - dst.remaining()[..n].copy_from_slice(&src.get_ref()[start..end]); - dst.mark_written(n); - src.set_position(end as _); - }) - .is_some() - }) { - // FIXME: `mem::take` rather than `clone` when we can ensure cancellation-safety - //let buf = mem::take(src); - let buf = src.clone(); - *src = dst.write(store, buf).await?; - } - if src.position() as usize == src.get_ref().len() { - src.get_mut().clear(); - src.set_position(0); - } - Ok(()) -} - /// Add all WASI interfaces from this module into the `linker` provided. 
/// /// This function will add all interfaces implemented by this module to the diff --git a/crates/wasmtime/src/runtime/component/concurrent/futures_and_streams.rs b/crates/wasmtime/src/runtime/component/concurrent/futures_and_streams.rs index 01de5efc10a9..713f3bfc5277 100644 --- a/crates/wasmtime/src/runtime/component/concurrent/futures_and_streams.rs +++ b/crates/wasmtime/src/runtime/component/concurrent/futures_and_streams.rs @@ -222,17 +222,29 @@ pub struct Destination<'a, T, B> { } impl<'a, T, B> Destination<'a, T, B> { - /// Return a unique reference to the buffer in which items may be stored. + /// Take the buffer out of `self`, leaving a default-initialized one in its + /// place. /// - /// Any items added to this buffer will be delivered to the reader after the - /// `StreamProducer::poll_produce` call to which this `Destination` was - /// passed returns. + /// This can be useful for reusing the previously-stored buffer's capacity + /// instead of allocating a fresh one. + pub fn take_buffer(&mut self) -> B + where + B: Default, + { + mem::take(self.buffer) + } + + /// Store the specified buffer in `self`. + /// + /// Any items contained in the buffer will be delivered to the reader after + /// the `StreamProducer::poll_produce` call to which this `Destination` was + /// passed returns (unless overwritten by another call to `set_buffer`). /// - /// If items are added to this buffer _and_ written via a `DirectDestination` + /// If items are stored via a buffer _and_ written via a `DirectDestination` /// view of `self`, then the items in the buffer will be delivered after the /// ones written using `DirectDestination`. - pub fn buffer(&'a mut self) -> &'a mut B { - self.buffer + pub fn set_buffer(&mut self, buffer: B) { + *self.buffer = buffer; } /// Return the remaining number of items the current read has capacity to @@ -373,6 +385,7 @@ pub enum StreamResult { /// _not_ able to produce or consume more items, respectively. 
Dropped, } + /// Represents the host-owned write end of a stream. pub trait StreamProducer: Send + 'static { /// The payload type of this stream. @@ -790,7 +803,7 @@ impl FutureReader { } } Poll::Ready(value) => { - *destination.buffer() = Some(value?); + destination.set_buffer(Some(value?)); // Here we return `StreamResult::Completed` even though // we've produced the last item we'll ever produce. From 0f9f372381c66fbdd509d72435fb43a61398f3f2 Mon Sep 17 00:00:00 2001 From: Joel Dice Date: Wed, 3 Sep 2025 17:26:27 -0600 Subject: [PATCH 20/32] update wasi/src/p3/cli to use new stream API This is totally untested and doesn't even compile yet due to a lifetime issue I don't have time to address yet. I'll follow up later with a fix. Signed-off-by: Joel Dice --- crates/wasi/src/p3/cli/host.rs | 198 +++++++++++++++++++-------------- 1 file changed, 114 insertions(+), 84 deletions(-) diff --git a/crates/wasi/src/p3/cli/host.rs b/crates/wasi/src/p3/cli/host.rs index fc8956464769..2290c281b6d3 100644 --- a/crates/wasi/src/p3/cli/host.rs +++ b/crates/wasi/src/p3/cli/host.rs @@ -5,118 +5,151 @@ use crate::p3::bindings::cli::{ terminal_stdin, terminal_stdout, }; use crate::p3::cli::{TerminalInput, TerminalOutput}; -use crate::p3::write_buffered_bytes; use crate::p3::{DEFAULT_BUFFER_CAPACITY, MAX_BUFFER_CAPACITY}; use anyhow::{Context as _, anyhow}; use bytes::BytesMut; use std::io::Cursor; -use tokio::io::{AsyncRead, AsyncReadExt as _, AsyncWrite, AsyncWriteExt as _}; +use std::pin::Pin; +use std::task::{self, Context, Poll}; +use tokio::io::{AsyncRead, AsyncReadExt as _, AsyncWrite, AsyncWriteExt as _, ReadBuf}; +use wasmtime::StoreContextMut; use wasmtime::component::{ Accessor, Destination, Resource, Source, StreamConsumer, StreamProducer, StreamReader, - StreamState, + StreamResult, }; struct InputStreamProducer { rx: T, - buffer: Cursor, } -impl InputStreamProducer -where - T: AsyncRead + Send + Unpin, -{ - async fn read(&mut self, n: usize) -> StreamState { - 
self.buffer.get_mut().reserve(n); - match self.rx.read_buf(self.buffer.get_mut()).await { - Ok(0) => StreamState::Closed, - Ok(_) => StreamState::Open, - Err(_err) => { - // TODO: Report the error to the guest - StreamState::Closed - } - } - } -} - -impl StreamProducer for InputStreamProducer +impl StreamProducer for InputStreamProducer where T: AsyncRead + Send + Unpin + 'static, { - async fn produce( - &mut self, - store: &Accessor, - dst: &mut Destination, - ) -> wasmtime::Result { - if !self.buffer.get_ref().is_empty() { - write_buffered_bytes(store, &mut self.buffer, dst).await?; - return Ok(StreamState::Open); - } - let n = store - .with(|store| dst.remaining(store)) - .unwrap_or(DEFAULT_BUFFER_CAPACITY) - .min(MAX_BUFFER_CAPACITY); - match self.read(n).await { - StreamState::Open => { - write_buffered_bytes(store, &mut self.buffer, dst).await?; - Ok(StreamState::Open) - } - StreamState::Closed => Ok(StreamState::Closed), - } - } + type Item = u8; + type Buffer = Cursor; - async fn when_ready(&mut self, _: &Accessor) -> wasmtime::Result { - if !self.buffer.get_ref().is_empty() { - return Ok(StreamState::Open); + fn poll_produce<'a>( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + store: StoreContextMut<'a, D>, + destination: &'a mut Destination<'a, Self::Item, Self::Buffer>, + finish: bool, + ) -> Poll> { + if finish { + return Poll::Ready(Ok(StreamResult::Cancelled)); } - Ok(self.read(DEFAULT_BUFFER_CAPACITY).await) + + let me = self.get_mut(); + + Poll::Ready(Ok( + if let Some(mut destination) = destination.as_direct_destination(store) + && !destination.remaining().is_empty() + { + let mut buffer = ReadBuf::new(destination.remaining()); + match task::ready!(Pin::new(&mut me.rx).poll_read(cx, &mut buffer)) { + Ok(()) => { + if buffer.filled().is_empty() { + StreamResult::Dropped + } else { + let count = buffer.filled().len(); + destination.mark_written(count); + StreamResult::Completed + } + } + Err(_) => { + // TODO: Report the error to the guest + 
StreamResult::Dropped + } + } + } else { + let capacity = destination + .remaining(store) + .unwrap_or(DEFAULT_BUFFER_CAPACITY) + // In the case of small or zero-length reads, we read more than + // was asked for; this will save the runtime from having to + // block or call `poll_produce` on subsequent reads. See the + // documentation for `StreamProducer::poll_produce` for details. + .max(DEFAULT_BUFFER_CAPACITY) + .min(MAX_BUFFER_CAPACITY); + + let mut buffer = destination.take_buffer().into_inner(); + buffer.clear(); + buffer.reserve(capacity); + + let mut readbuf = ReadBuf::uninit(buffer.spare_capacity_mut()); + let result = Pin::new(&mut me.rx).poll_read(cx, &mut readbuf); + let count = readbuf.filled().len(); + // SAFETY: `ReadyBuf::filled` promised us `count` bytes have + // been initialized. + unsafe { + buffer.set_len(count); + } + + destination.set_buffer(Cursor::new(buffer)); + + match task::ready!(result) { + Ok(()) => { + if count == 0 { + StreamResult::Dropped + } else { + StreamResult::Completed + } + } + Err(_) => { + // TODO: Report the error to the guest + StreamResult::Dropped + } + } + }, + )) } } struct OutputStreamConsumer { tx: T, - buffer: BytesMut, } -impl OutputStreamConsumer +impl StreamConsumer for OutputStreamConsumer where T: AsyncWrite + Send + Unpin + 'static, { - async fn flush(&mut self) -> StreamState { - match self.tx.write_all(&self.buffer).await { - Ok(()) => { - self.buffer.clear(); - StreamState::Open - } - Err(_err) => { - // TODO: Report the error to the guest - StreamState::Closed + type Item = u8; + + fn poll_consume( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + mut store: StoreContextMut, + source: &mut Source, + finish: bool, + ) -> Poll> { + let me = self.get_mut(); + + let mut source = source.as_direct_source(store); + + let (mut count, mut result) = if !source.remaining().is_empty() { + match task::ready!(Pin::new(&mut me.tx).poll_write(cx, source.remaining())) { + Ok(count) => (count, StreamResult::Completed), 
+ Err(_) => { + // TODO: Report the error to the guest + (0, StreamResult::Dropped) + } } - } - } -} + } else { + (0, StreamResult::Completed) + }; -impl StreamConsumer for OutputStreamConsumer -where - T: AsyncWrite + Send + Unpin + 'static, -{ - async fn consume( - &mut self, - store: &Accessor, - src: &mut Source<'_, u8>, - ) -> wasmtime::Result { - store.with(|mut store| { - let n = src.remaining(&mut store).min(MAX_BUFFER_CAPACITY); - self.buffer.reserve(n); - src.read(&mut store, &mut self.buffer) - })?; - Ok(self.flush().await) - } + if task::ready!(Pin::new(&mut me.tx).poll_flush(cx)).is_err() { + // TODO: Report the error to the guest + count = 0; + result = StreamResult::Dropped; + } - async fn when_ready(&mut self, _: &Accessor) -> wasmtime::Result { - if !self.buffer.is_empty() { - return Ok(self.flush().await); + if count > 0 { + source.mark_read(count); } - Ok(StreamState::Open) + + Poll::Ready(Ok(result)) } } @@ -193,7 +226,6 @@ impl stdin::HostWithStore for WasiCli { &mut store, InputStreamProducer { rx: Box::into_pin(rx), - buffer: Cursor::default(), }, )) }) @@ -213,7 +245,6 @@ impl stdout::HostWithStore for WasiCli { store, OutputStreamConsumer { tx: Box::into_pin(tx), - buffer: BytesMut::default(), }, ); Ok(()) @@ -234,7 +265,6 @@ impl stderr::HostWithStore for WasiCli { store, OutputStreamConsumer { tx: Box::into_pin(tx), - buffer: BytesMut::default(), }, ); Ok(()) From 7d6edb17538c3d445d5b70ba695bb2c44197431d Mon Sep 17 00:00:00 2001 From: Roman Volosatovs Date: Thu, 4 Sep 2025 11:00:56 +0200 Subject: [PATCH 21/32] fix: remove `'a` bound on `&self` Signed-off-by: Roman Volosatovs --- .../src/runtime/component/concurrent/futures_and_streams.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/wasmtime/src/runtime/component/concurrent/futures_and_streams.rs b/crates/wasmtime/src/runtime/component/concurrent/futures_and_streams.rs index 713f3bfc5277..9ae9dfc5e55f 100644 --- 
a/crates/wasmtime/src/runtime/component/concurrent/futures_and_streams.rs +++ b/crates/wasmtime/src/runtime/component/concurrent/futures_and_streams.rs @@ -281,7 +281,7 @@ impl<'a, T, B> Destination<'a, T, B> { impl<'a, B> Destination<'a, u8, B> { /// Return a `DirectDestination` view of `self` if the guest is reading. pub fn as_direct_destination( - &'a mut self, + &mut self, store: StoreContextMut<'a, D>, ) -> Option> { if let ReadState::GuestReady { .. } = self @@ -556,7 +556,7 @@ impl Source<'_, T> { impl Source<'_, u8> { /// Return a `DirectSource` view of `self`. pub fn as_direct_source<'a, D>( - &'a mut self, + &mut self, store: StoreContextMut<'a, D>, ) -> DirectSource<'a, D> { DirectSource { From fce49cb91d88517f53fb6dc8d3dcdd711d9ee759 Mon Sep 17 00:00:00 2001 From: Roman Volosatovs Date: Thu, 4 Sep 2025 11:48:24 +0200 Subject: [PATCH 22/32] finish `wasi:sockets` adaptation Signed-off-by: Roman Volosatovs --- crates/wasi/src/p3/mod.rs | 14 +- crates/wasi/src/p3/sockets/host/types/tcp.rs | 214 +++++++++---------- 2 files changed, 110 insertions(+), 118 deletions(-) diff --git a/crates/wasi/src/p3/mod.rs b/crates/wasi/src/p3/mod.rs index 0768d89fccbd..c97a520dfb79 100644 --- a/crates/wasi/src/p3/mod.rs +++ b/crates/wasi/src/p3/mod.rs @@ -18,16 +18,14 @@ pub mod sockets; use crate::WasiView; use crate::p3::bindings::LinkOptions; use anyhow::Context as _; -use bytes::BytesMut; +use core::marker::PhantomData; use core::pin::Pin; use core::task::{Context, Poll}; -use std::io::Cursor; -use std::marker::PhantomData; use tokio::sync::oneshot; +use wasmtime::StoreContextMut; use wasmtime::component::{ Accessor, Destination, FutureProducer, Linker, StreamProducer, StreamResult, }; -use wasmtime::{AsContextMut as _, StoreContextMut}; // Default buffer capacity to use for reads of byte-sized values. 
const DEFAULT_BUFFER_CAPACITY: usize = 8192; @@ -35,7 +33,13 @@ const DEFAULT_BUFFER_CAPACITY: usize = 8192; // Maximum buffer capacity to use for reads of byte-sized values. const MAX_BUFFER_CAPACITY: usize = 4 * DEFAULT_BUFFER_CAPACITY; -struct StreamEmptyProducer(PhantomData T>); +struct StreamEmptyProducer(PhantomData T>); + +impl Default for StreamEmptyProducer { + fn default() -> Self { + Self(PhantomData) + } +} impl StreamProducer for StreamEmptyProducer { type Item = T; diff --git a/crates/wasi/src/p3/sockets/host/types/tcp.rs b/crates/wasi/src/p3/sockets/host/types/tcp.rs index e06e4fb64c96..0977c1f96942 100644 --- a/crates/wasi/src/p3/sockets/host/types/tcp.rs +++ b/crates/wasi/src/p3/sockets/host/types/tcp.rs @@ -5,25 +5,24 @@ use crate::p3::bindings::sockets::types::{ }; use crate::p3::sockets::{SocketError, SocketResult, WasiSockets}; use crate::p3::{ - DEFAULT_BUFFER_CAPACITY, FutureOneshotProducer, FutureReadyProducer, MAX_BUFFER_CAPACITY, - StreamEmptyProducer, write_buffered_bytes, + DEFAULT_BUFFER_CAPACITY, FutureOneshotProducer, FutureReadyProducer, StreamEmptyProducer, }; use crate::sockets::{NonInheritedOptions, SocketAddrUse, SocketAddressFamily, WasiSocketsCtxView}; use anyhow::Context as _; use bytes::BytesMut; use core::pin::Pin; -use core::task::{Context, Poll, ready}; +use core::task::{Context, Poll}; use io_lifetimes::AsSocketlike as _; use std::io::Cursor; use std::net::{Shutdown, SocketAddr}; use std::sync::Arc; use tokio::net::{TcpListener, TcpStream}; use tokio::sync::oneshot; +use wasmtime::StoreContextMut; use wasmtime::component::{ Accessor, Destination, FutureReader, Resource, ResourceTable, Source, StreamConsumer, - StreamProducer, StreamReader, StreamResult, StreamState, + StreamProducer, StreamReader, StreamResult, }; -use wasmtime::{AsContextMut as _, StoreContextMut}; fn get_socket<'a>( table: &'a ResourceTable, @@ -49,17 +48,9 @@ struct ListenStreamProducer { listener: Arc, family: SocketAddressFamily, options: 
NonInheritedOptions, - accepted: Option>, getter: for<'a> fn(&'a mut T) -> WasiSocketsCtxView<'a>, } -impl ListenStreamProducer { - async fn next(&mut self) -> std::io::Result { - let (stream, _) = self.listener.accept().await?; - Ok(stream) - } -} - impl StreamProducer for ListenStreamProducer where D: 'static, @@ -67,53 +58,32 @@ where type Item = Resource; type Buffer = Option; - fn poll_produce( - &mut self, - store: &Accessor, - dst: &mut Destination>, - ) -> wasmtime::Result { - let res = if let Some(res) = self.accepted.take() { - res - } else { - self.next().await + fn poll_produce<'a>( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + mut store: StoreContextMut<'a, D>, + dst: &'a mut Destination<'a, Self::Item, Self::Buffer>, + finish: bool, + ) -> Poll> { + let res = match self.listener.poll_accept(cx) { + Poll::Ready(res) => res.map(|(stream, _)| stream), + Poll::Pending if finish => return Poll::Ready(Ok(StreamResult::Cancelled)), + Poll::Pending => return Poll::Pending, }; let socket = TcpSocket::new_accept(res, &self.options, self.family) .unwrap_or_else(|err| TcpSocket::new_error(err, self.family)); - let store = store.with_getter::(self.getter); - let socket = store.with(|mut store| { - store - .get() - .table - .push(socket) - .context("failed to push socket resource to table") - })?; - // FIXME: Handle cancellation - if let Some(socket) = dst.write(&store, Some(socket)).await? { - store.with(|mut store| { - store - .get() - .table - .delete(socket) - .context("failed to delete socket resource from table") - })?; - return Ok(StreamState::Closed); - } - Ok(StreamState::Open) - } - - async fn when_ready(&mut self, _: &Accessor) -> wasmtime::Result { - if self.accepted.is_none() { - let res = self.next().await; - self.accepted = Some(res); - } - Ok(StreamState::Open) + let WasiSocketsCtxView { table, .. 
} = (self.getter)(store.data_mut()); + let socket = table + .push(socket) + .context("failed to push socket resource to table")?; + dst.set_buffer(Some(socket)); + Poll::Ready(Ok(StreamResult::Completed)) } } struct ReceiveStreamProducer { stream: Arc, result: Option>>, - buffer: Cursor, } impl Drop for ReceiveStreamProducer { @@ -138,72 +108,78 @@ impl StreamProducer for ReceiveStreamProducer { type Item = u8; type Buffer = Cursor; - fn poll_produce( - &mut self, + fn poll_produce<'a>( + mut self: Pin<&mut Self>, cx: &mut Context<'_>, store: StoreContextMut<'a, D>, dst: &'a mut Destination<'a, Self::Item, Self::Buffer>, finish: bool, - ) -> wasmtime::Result { - if !self.buffer.get_ref().is_empty() { - write_buffered_bytes(store, &mut self.buffer, dst).await?; - return Ok(StreamState::Open); - } - + ) -> Poll> { let res = 'result: loop { - match store.with(|mut store| { - if let Some(mut dst) = dst.as_direct_destination(store.as_context_mut()) { - let n = self.stream.try_read(dst.remaining())?; - if n > 0 { - dst.mark_written(n); + if let Some(mut dst) = dst.as_direct_destination(store) { + let buf = dst.remaining(); + if buf.is_empty() { + match self.stream.poll_read_ready(cx) { + Poll::Ready(Ok(())) => return Poll::Ready(Ok(StreamResult::Completed)), + Poll::Ready(Err(err)) => break 'result Err(err.into()), + Poll::Pending if finish => return Poll::Ready(Ok(StreamResult::Cancelled)), + Poll::Pending => return Poll::Pending, } - Ok(n) - } else { - self.buffer.get_mut().reserve(DEFAULT_BUFFER_CAPACITY); - self.stream.try_read_buf(self.buffer.get_mut()) } - }) { - Ok(0) => break 'result Ok(()), - Ok(..) 
=> { - if !self.buffer.get_ref().is_empty() { - // FIXME: `mem::take` rather than `clone` when we can ensure cancellation-safety - //let buf = mem::take(&mut self.buffer); - let buf = self.buffer.clone(); - self.buffer = dst.write(store, buf).await?; - if self.buffer.position() as usize == self.buffer.get_ref().len() { - self.buffer.get_mut().clear(); - self.buffer.set_position(0); + loop { + match self.stream.try_read(buf) { + Ok(0) => break 'result Ok(()), + Ok(n) => { + dst.mark_written(n); + return Poll::Ready(Ok(StreamResult::Completed)); } + Err(err) if err.kind() == std::io::ErrorKind::WouldBlock => { + match self.stream.poll_read_ready(cx) { + Poll::Ready(Ok(())) => continue, + Poll::Ready(Err(err)) => break 'result Err(err.into()), + Poll::Pending if finish => { + return Poll::Ready(Ok(StreamResult::Cancelled)); + } + Poll::Pending => return Poll::Pending, + } + } + Err(err) => break 'result Err(err.into()), } - return Ok(StreamState::Open); } - Err(err) if err.kind() == std::io::ErrorKind::WouldBlock => { - if let Err(err) = self.stream.readable().await { - break 'result Err(err.into()); + } + + let mut buf = dst.take_buffer(); + debug_assert!(buf.get_ref().is_empty()); + buf.get_mut().reserve(DEFAULT_BUFFER_CAPACITY); + loop { + match self.stream.try_read_buf(buf.get_mut()) { + Ok(0) => break 'result Ok(()), + Ok(..) 
=> { + dst.set_buffer(buf); + return Poll::Ready(Ok(StreamResult::Completed)); + } + Err(err) if err.kind() == std::io::ErrorKind::WouldBlock => { + match self.stream.poll_read_ready(cx) { + Poll::Ready(Ok(())) => continue, + Poll::Ready(Err(err)) => break 'result Err(err.into()), + Poll::Pending if finish => { + return Poll::Ready(Ok(StreamResult::Cancelled)); + } + Poll::Pending => return Poll::Pending, + } } + Err(err) => break 'result Err(err.into()), } - Err(err) => break 'result Err(err.into()), } }; self.close(res); - Ok(StreamState::Closed) - } - - async fn when_ready(&mut self, _: &Accessor) -> wasmtime::Result { - if self.buffer.get_ref().is_empty() { - if let Err(err) = self.stream.readable().await { - self.close(Err(err.into())); - return Ok(StreamState::Closed); - } - } - Ok(StreamState::Open) + Poll::Ready(Ok(StreamResult::Dropped)) } } struct SendStreamConsumer { stream: Arc, result: Option>>, - buffer: BytesMut, } impl Drop for SendStreamConsumer { @@ -228,30 +204,45 @@ impl StreamConsumer for SendStreamConsumer { type Item = u8; fn poll_consume( - self: Pin<&mut Self>, + mut self: Pin<&mut Self>, cx: &mut Context<'_>, store: StoreContextMut, src: &mut Source, finish: bool, ) -> Poll> { - let mut src = src.as_direct_source(&mut store); - let res = 'result: loop { - match self.stream.try_write(src.remaining()) { - Ok(n) => { - debug_assert!(n > 0); - src.mark_read(n); - Poll::Ready(Ok(StreamResult::Open)) + let mut src = src.as_direct_source(store); + let res = 'result: { + if src.remaining().is_empty() { + match self.stream.poll_write_ready(cx) { + Poll::Ready(Ok(())) => return Poll::Ready(Ok(StreamResult::Completed)), + Poll::Ready(Err(err)) => break 'result Err(err.into()), + Poll::Pending if finish => return Poll::Ready(Ok(StreamResult::Cancelled)), + Poll::Pending => return Poll::Pending, } - Err(err) if err.kind() == std::io::ErrorKind::WouldBlock => { - if Err(err) = ready!(Pin::new(&mut self.stream.writable()).poll(cx)) { - break 'result 
Err(err.into()); + } + loop { + match self.stream.try_write(src.remaining()) { + Ok(n) => { + debug_assert!(n > 0); + src.mark_read(n); + return Poll::Ready(Ok(StreamResult::Completed)); + } + Err(err) if err.kind() == std::io::ErrorKind::WouldBlock => { + match self.stream.poll_write_ready(cx) { + Poll::Ready(Ok(())) => continue, + Poll::Ready(Err(err)) => break 'result Err(err.into()), + Poll::Pending if finish => { + return Poll::Ready(Ok(StreamResult::Cancelled)); + } + Poll::Pending => return Poll::Pending, + } } + Err(err) => break 'result Err(err.into()), } - Err(err) => break 'result Err(err.into()), } }; self.close(res); - Poll::Ready(Ok(StreamResult::Closed)) + Poll::Ready(Ok(StreamResult::Dropped)) } } @@ -318,7 +309,6 @@ impl HostTcpSocketWithStore for WasiSockets { listener, family, options, - accepted: None, getter, }, )) @@ -340,7 +330,6 @@ impl HostTcpSocketWithStore for WasiSockets { SendStreamConsumer { stream, result: Some(result_tx), - buffer: BytesMut::default(), }, ); SocketResult::Ok(()) @@ -370,14 +359,13 @@ impl HostTcpSocketWithStore for WasiSockets { ReceiveStreamProducer { stream, result: Some(result_tx), - buffer: Cursor::default(), }, ), FutureReader::new(instance, &mut store, FutureOneshotProducer(result_rx)), )) } None => Ok(( - StreamReader::new(instance, &mut store, StreamEmptyProducer), + StreamReader::new(instance, &mut store, StreamEmptyProducer::default()), FutureReader::new( instance, &mut store, From cdd1ce5c93bbd7727844991092d86a9bbde6fe79 Mon Sep 17 00:00:00 2001 From: Roman Volosatovs Date: Thu, 4 Sep 2025 12:34:41 +0200 Subject: [PATCH 23/32] finish `wasi:cli` adaptation Note, that this removes the read optimization - let's get the implementation complete first and optimize later Signed-off-by: Roman Volosatovs --- crates/wasi/src/p3/cli/host.rs | 184 +++++++++++++-------------------- 1 file changed, 74 insertions(+), 110 deletions(-) diff --git a/crates/wasi/src/p3/cli/host.rs b/crates/wasi/src/p3/cli/host.rs index 
2290c281b6d3..0763598c43c9 100644 --- a/crates/wasi/src/p3/cli/host.rs +++ b/crates/wasi/src/p3/cli/host.rs @@ -1,155 +1,119 @@ use crate::I32Exit; use crate::cli::{IsTerminal, WasiCli, WasiCliCtxView}; +use crate::p3::DEFAULT_BUFFER_CAPACITY; use crate::p3::bindings::cli::{ environment, exit, stderr, stdin, stdout, terminal_input, terminal_output, terminal_stderr, terminal_stdin, terminal_stdout, }; use crate::p3::cli::{TerminalInput, TerminalOutput}; -use crate::p3::{DEFAULT_BUFFER_CAPACITY, MAX_BUFFER_CAPACITY}; use anyhow::{Context as _, anyhow}; use bytes::BytesMut; +use core::pin::Pin; +use core::task::{Context, Poll}; use std::io::Cursor; -use std::pin::Pin; -use std::task::{self, Context, Poll}; -use tokio::io::{AsyncRead, AsyncReadExt as _, AsyncWrite, AsyncWriteExt as _, ReadBuf}; +use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; use wasmtime::StoreContextMut; use wasmtime::component::{ Accessor, Destination, Resource, Source, StreamConsumer, StreamProducer, StreamReader, StreamResult, }; -struct InputStreamProducer { - rx: T, +struct InputStreamProducer { + rx: Pin>, } -impl StreamProducer for InputStreamProducer -where - T: AsyncRead + Send + Unpin + 'static, -{ +impl StreamProducer for InputStreamProducer { type Item = u8; type Buffer = Cursor; fn poll_produce<'a>( - self: Pin<&mut Self>, + mut self: Pin<&mut Self>, cx: &mut Context<'_>, store: StoreContextMut<'a, D>, - destination: &'a mut Destination<'a, Self::Item, Self::Buffer>, + dst: &'a mut Destination<'a, Self::Item, Self::Buffer>, finish: bool, ) -> Poll> { - if finish { - return Poll::Ready(Ok(StreamResult::Cancelled)); - } - - let me = self.get_mut(); - - Poll::Ready(Ok( - if let Some(mut destination) = destination.as_direct_destination(store) - && !destination.remaining().is_empty() - { - let mut buffer = ReadBuf::new(destination.remaining()); - match task::ready!(Pin::new(&mut me.rx).poll_read(cx, &mut buffer)) { - Ok(()) => { - if buffer.filled().is_empty() { - StreamResult::Dropped - } 
else { - let count = buffer.filled().len(); - destination.mark_written(count); - StreamResult::Completed - } - } - Err(_) => { - // TODO: Report the error to the guest - StreamResult::Dropped - } + if let Some(mut dst) = dst.as_direct_destination(store) { + let mut buf = ReadBuf::new(dst.remaining()); + match self.rx.as_mut().poll_read(cx, &mut buf) { + Poll::Ready(Ok(())) if buf.capacity() == 0 => { + Poll::Ready(Ok(StreamResult::Completed)) } - } else { - let capacity = destination - .remaining(store) - .unwrap_or(DEFAULT_BUFFER_CAPACITY) - // In the case of small or zero-length reads, we read more than - // was asked for; this will save the runtime from having to - // block or call `poll_produce` on subsequent reads. See the - // documentation for `StreamProducer::poll_produce` for details. - .max(DEFAULT_BUFFER_CAPACITY) - .min(MAX_BUFFER_CAPACITY); - - let mut buffer = destination.take_buffer().into_inner(); - buffer.clear(); - buffer.reserve(capacity); - - let mut readbuf = ReadBuf::uninit(buffer.spare_capacity_mut()); - let result = Pin::new(&mut me.rx).poll_read(cx, &mut readbuf); - let count = readbuf.filled().len(); - // SAFETY: `ReadyBuf::filled` promised us `count` bytes have - // been initialized. 
- unsafe { - buffer.set_len(count); + Poll::Ready(Ok(())) if buf.filled().is_empty() => { + Poll::Ready(Ok(StreamResult::Dropped)) } - - destination.set_buffer(Cursor::new(buffer)); - - match task::ready!(result) { - Ok(()) => { - if count == 0 { - StreamResult::Dropped - } else { - StreamResult::Completed - } - } - Err(_) => { - // TODO: Report the error to the guest - StreamResult::Dropped - } + Poll::Ready(Ok(())) => { + let n = buf.filled().len(); + dst.mark_written(n); + Poll::Ready(Ok(StreamResult::Completed)) + } + Poll::Ready(Err(..)) => { + // TODO: Report the error to the guest + Poll::Ready(Ok(StreamResult::Dropped)) + } + Poll::Pending if finish => Poll::Ready(Ok(StreamResult::Cancelled)), + Poll::Pending => Poll::Pending, + } + } else { + let mut buf = dst.take_buffer(); + debug_assert!(buf.get_ref().is_empty()); + buf.get_mut().reserve(DEFAULT_BUFFER_CAPACITY); + let mut rbuf = ReadBuf::uninit(buf.get_mut().spare_capacity_mut()); + match self.rx.as_mut().poll_read(cx, &mut rbuf) { + Poll::Ready(Ok(())) if rbuf.filled().is_empty() => { + Poll::Ready(Ok(StreamResult::Dropped)) + } + Poll::Ready(Ok(())) => { + let n = rbuf.filled().len(); + // SAFETY: `ReadyBuf::filled` promised us `count` bytes have + // been initialized. 
+ unsafe { buf.get_mut().set_len(n) }; + dst.set_buffer(buf); + Poll::Ready(Ok(StreamResult::Completed)) + } + Poll::Ready(Err(..)) => { + // TODO: Report the error to the guest + Poll::Ready(Ok(StreamResult::Dropped)) } - }, - )) + Poll::Pending if finish => Poll::Ready(Ok(StreamResult::Cancelled)), + Poll::Pending => Poll::Pending, + } + } } } -struct OutputStreamConsumer { - tx: T, +struct OutputStreamConsumer { + tx: Pin>, } -impl StreamConsumer for OutputStreamConsumer -where - T: AsyncWrite + Send + Unpin + 'static, -{ +impl StreamConsumer for OutputStreamConsumer { type Item = u8; fn poll_consume( - self: Pin<&mut Self>, + mut self: Pin<&mut Self>, cx: &mut Context<'_>, - mut store: StoreContextMut, - source: &mut Source, + store: StoreContextMut, + src: &mut Source, finish: bool, ) -> Poll> { - let me = self.get_mut(); - - let mut source = source.as_direct_source(store); - - let (mut count, mut result) = if !source.remaining().is_empty() { - match task::ready!(Pin::new(&mut me.tx).poll_write(cx, source.remaining())) { - Ok(count) => (count, StreamResult::Completed), - Err(_) => { - // TODO: Report the error to the guest - (0, StreamResult::Dropped) - } + let mut src = src.as_direct_source(store); + let buf = src.remaining(); + match self.tx.as_mut().poll_write(cx, buf) { + Poll::Ready(Ok(n)) if buf.is_empty() => { + debug_assert_eq!(n, 0); + Poll::Ready(Ok(StreamResult::Completed)) } - } else { - (0, StreamResult::Completed) - }; - - if task::ready!(Pin::new(&mut me.tx).poll_flush(cx)).is_err() { - // TODO: Report the error to the guest - count = 0; - result = StreamResult::Dropped; - } - - if count > 0 { - source.mark_read(count); + Poll::Ready(Ok(n)) => { + src.mark_read(n); + Poll::Ready(Ok(StreamResult::Completed)) + } + Poll::Ready(Err(..)) => { + // TODO: Report the error to the guest + Poll::Ready(Ok(StreamResult::Dropped)) + } + Poll::Pending if finish => Poll::Ready(Ok(StreamResult::Cancelled)), + Poll::Pending => Poll::Pending, } - - 
Poll::Ready(Ok(result)) } } From 2554c174155d7a520b602819ea5f398538f54476 Mon Sep 17 00:00:00 2001 From: Roman Volosatovs Date: Thu, 4 Sep 2025 12:35:43 +0200 Subject: [PATCH 24/32] remove redundant loop in sockets Signed-off-by: Roman Volosatovs --- crates/wasi/src/p3/sockets/host/types/tcp.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/wasi/src/p3/sockets/host/types/tcp.rs b/crates/wasi/src/p3/sockets/host/types/tcp.rs index 0977c1f96942..4f39ac299fa7 100644 --- a/crates/wasi/src/p3/sockets/host/types/tcp.rs +++ b/crates/wasi/src/p3/sockets/host/types/tcp.rs @@ -115,7 +115,7 @@ impl StreamProducer for ReceiveStreamProducer { dst: &'a mut Destination<'a, Self::Item, Self::Buffer>, finish: bool, ) -> Poll> { - let res = 'result: loop { + let res = 'result: { if let Some(mut dst) = dst.as_direct_destination(store) { let buf = dst.remaining(); if buf.is_empty() { From 97842543647051a471449e3618580123a9eebbd4 Mon Sep 17 00:00:00 2001 From: Roman Volosatovs Date: Thu, 4 Sep 2025 12:44:24 +0200 Subject: [PATCH 25/32] wasi: buffer on 0-length reads Signed-off-by: Roman Volosatovs --- crates/wasi/src/p3/cli/host.rs | 80 ++++++++++---------- crates/wasi/src/p3/sockets/host/types/tcp.rs | 40 +++++----- 2 files changed, 56 insertions(+), 64 deletions(-) diff --git a/crates/wasi/src/p3/cli/host.rs b/crates/wasi/src/p3/cli/host.rs index 0763598c43c9..117fe6be0165 100644 --- a/crates/wasi/src/p3/cli/host.rs +++ b/crates/wasi/src/p3/cli/host.rs @@ -34,50 +34,48 @@ impl StreamProducer for InputStreamProducer { finish: bool, ) -> Poll> { if let Some(mut dst) = dst.as_direct_destination(store) { - let mut buf = ReadBuf::new(dst.remaining()); - match self.rx.as_mut().poll_read(cx, &mut buf) { - Poll::Ready(Ok(())) if buf.capacity() == 0 => { - Poll::Ready(Ok(StreamResult::Completed)) + if !dst.remaining().is_empty() { + let mut buf = ReadBuf::new(dst.remaining()); + match self.rx.as_mut().poll_read(cx, &mut buf) { + Poll::Ready(Ok(())) if 
buf.filled().is_empty() => { + return Poll::Ready(Ok(StreamResult::Dropped)); + } + Poll::Ready(Ok(())) => { + let n = buf.filled().len(); + dst.mark_written(n); + return Poll::Ready(Ok(StreamResult::Completed)); + } + Poll::Ready(Err(..)) => { + // TODO: Report the error to the guest + return Poll::Ready(Ok(StreamResult::Dropped)); + } + Poll::Pending if finish => return Poll::Ready(Ok(StreamResult::Cancelled)), + Poll::Pending => return Poll::Pending, } - Poll::Ready(Ok(())) if buf.filled().is_empty() => { - Poll::Ready(Ok(StreamResult::Dropped)) - } - Poll::Ready(Ok(())) => { - let n = buf.filled().len(); - dst.mark_written(n); - Poll::Ready(Ok(StreamResult::Completed)) - } - Poll::Ready(Err(..)) => { - // TODO: Report the error to the guest - Poll::Ready(Ok(StreamResult::Dropped)) - } - Poll::Pending if finish => Poll::Ready(Ok(StreamResult::Cancelled)), - Poll::Pending => Poll::Pending, } - } else { - let mut buf = dst.take_buffer(); - debug_assert!(buf.get_ref().is_empty()); - buf.get_mut().reserve(DEFAULT_BUFFER_CAPACITY); - let mut rbuf = ReadBuf::uninit(buf.get_mut().spare_capacity_mut()); - match self.rx.as_mut().poll_read(cx, &mut rbuf) { - Poll::Ready(Ok(())) if rbuf.filled().is_empty() => { - Poll::Ready(Ok(StreamResult::Dropped)) - } - Poll::Ready(Ok(())) => { - let n = rbuf.filled().len(); - // SAFETY: `ReadyBuf::filled` promised us `count` bytes have - // been initialized. 
- unsafe { buf.get_mut().set_len(n) }; - dst.set_buffer(buf); - Poll::Ready(Ok(StreamResult::Completed)) - } - Poll::Ready(Err(..)) => { - // TODO: Report the error to the guest - Poll::Ready(Ok(StreamResult::Dropped)) - } - Poll::Pending if finish => Poll::Ready(Ok(StreamResult::Cancelled)), - Poll::Pending => Poll::Pending, + } + let mut buf = dst.take_buffer(); + debug_assert!(buf.get_ref().is_empty()); + buf.get_mut().reserve(DEFAULT_BUFFER_CAPACITY); + let mut rbuf = ReadBuf::uninit(buf.get_mut().spare_capacity_mut()); + match self.rx.as_mut().poll_read(cx, &mut rbuf) { + Poll::Ready(Ok(())) if rbuf.filled().is_empty() => { + Poll::Ready(Ok(StreamResult::Dropped)) } + Poll::Ready(Ok(())) => { + let n = rbuf.filled().len(); + // SAFETY: `ReadyBuf::filled` promised us `count` bytes have + // been initialized. + unsafe { buf.get_mut().set_len(n) }; + dst.set_buffer(buf); + Poll::Ready(Ok(StreamResult::Completed)) + } + Poll::Ready(Err(..)) => { + // TODO: Report the error to the guest + Poll::Ready(Ok(StreamResult::Dropped)) + } + Poll::Pending if finish => Poll::Ready(Ok(StreamResult::Cancelled)), + Poll::Pending => Poll::Pending, } } } diff --git a/crates/wasi/src/p3/sockets/host/types/tcp.rs b/crates/wasi/src/p3/sockets/host/types/tcp.rs index 4f39ac299fa7..6abafedf5df9 100644 --- a/crates/wasi/src/p3/sockets/host/types/tcp.rs +++ b/crates/wasi/src/p3/sockets/host/types/tcp.rs @@ -118,32 +118,26 @@ impl StreamProducer for ReceiveStreamProducer { let res = 'result: { if let Some(mut dst) = dst.as_direct_destination(store) { let buf = dst.remaining(); - if buf.is_empty() { - match self.stream.poll_read_ready(cx) { - Poll::Ready(Ok(())) => return Poll::Ready(Ok(StreamResult::Completed)), - Poll::Ready(Err(err)) => break 'result Err(err.into()), - Poll::Pending if finish => return Poll::Ready(Ok(StreamResult::Cancelled)), - Poll::Pending => return Poll::Pending, - } - } - loop { - match self.stream.try_read(buf) { - Ok(0) => break 'result Ok(()), - Ok(n) => { - 
dst.mark_written(n); - return Poll::Ready(Ok(StreamResult::Completed)); - } - Err(err) if err.kind() == std::io::ErrorKind::WouldBlock => { - match self.stream.poll_read_ready(cx) { - Poll::Ready(Ok(())) => continue, - Poll::Ready(Err(err)) => break 'result Err(err.into()), - Poll::Pending if finish => { - return Poll::Ready(Ok(StreamResult::Cancelled)); + if !buf.is_empty() { + loop { + match self.stream.try_read(buf) { + Ok(0) => break 'result Ok(()), + Ok(n) => { + dst.mark_written(n); + return Poll::Ready(Ok(StreamResult::Completed)); + } + Err(err) if err.kind() == std::io::ErrorKind::WouldBlock => { + match self.stream.poll_read_ready(cx) { + Poll::Ready(Ok(())) => continue, + Poll::Ready(Err(err)) => break 'result Err(err.into()), + Poll::Pending if finish => { + return Poll::Ready(Ok(StreamResult::Cancelled)); + } + Poll::Pending => return Poll::Pending, } - Poll::Pending => return Poll::Pending, } + Err(err) => break 'result Err(err.into()), } - Err(err) => break 'result Err(err.into()), } } } From 55ad074a17a21bb2f2a55aeba2a7ee806eb3626f Mon Sep 17 00:00:00 2001 From: Roman Volosatovs Date: Thu, 4 Sep 2025 19:57:18 +0200 Subject: [PATCH 26/32] finish `wasi:filesystem` adaptation Signed-off-by: Roman Volosatovs --- crates/wasi/src/filesystem.rs | 12 +- crates/wasi/src/p3/filesystem/host.rs | 747 ++++++++++++++++++-------- 2 files changed, 521 insertions(+), 238 deletions(-) diff --git a/crates/wasi/src/filesystem.rs b/crates/wasi/src/filesystem.rs index cf2dc7b24da9..ee43c2b19527 100644 --- a/crates/wasi/src/filesystem.rs +++ b/crates/wasi/src/filesystem.rs @@ -701,6 +701,11 @@ impl File { } } + /// Returns reference to the underlying [`cap_std::fs::File`] + pub(crate) fn as_file(&self) -> &Arc { + &self.file + } + pub(crate) async fn advise( &self, offset: u64, @@ -746,7 +751,7 @@ pub struct Dir { /// oflags back out using fcntl. 
pub open_mode: OpenMode, - allow_blocking_current_thread: bool, + pub(crate) allow_blocking_current_thread: bool, } impl Dir { @@ -793,6 +798,11 @@ impl Dir { } } + /// Returns reference to the underlying [`cap_std::fs::Dir`] + pub(crate) fn as_dir(&self) -> &Arc { + &self.dir + } + pub(crate) async fn create_directory_at(&self, path: String) -> Result<(), ErrorCode> { if !self.perms.contains(DirPerms::MUTATE) { return Err(ErrorCode::NotPermitted); diff --git a/crates/wasi/src/p3/filesystem/host.rs b/crates/wasi/src/p3/filesystem/host.rs index 21c4bd4ae3f0..e92d3b15d2b6 100644 --- a/crates/wasi/src/p3/filesystem/host.rs +++ b/crates/wasi/src/p3/filesystem/host.rs @@ -6,25 +6,24 @@ use crate::p3::bindings::filesystem::types::{ }; use crate::p3::filesystem::{FilesystemError, FilesystemResult, preopens}; use crate::p3::{ - DEFAULT_BUFFER_CAPACITY, FutureOneshotProducer, FutureReadyProducer, MAX_BUFFER_CAPACITY, - StreamEmptyProducer, + DEFAULT_BUFFER_CAPACITY, FutureOneshotProducer, FutureReadyProducer, StreamEmptyProducer, }; use crate::{DirPerms, FilePerms}; -use anyhow::{Context as _, bail}; +use anyhow::{Context as _, anyhow}; use bytes::BytesMut; use core::mem; -use futures::FutureExt; +use core::pin::Pin; +use core::task::{Context, Poll, ready}; use std::io::Cursor; -use std::marker::PhantomData; -use std::pin::Pin; -use std::task::{self, Context, Poll}; +use std::sync::Arc; use system_interface::fs::FileIoExt as _; -use tokio::sync::oneshot; +use tokio::sync::{mpsc, oneshot}; +use tokio::task::{JoinHandle, spawn_blocking}; use wasmtime::component::{ Accessor, Destination, FutureReader, Resource, ResourceTable, Source, StreamConsumer, StreamProducer, StreamReader, StreamResult, }; -use wasmtime::{AsContextMut as _, StoreContextMut}; +use wasmtime::{StoreContextMut, component::VecBuffer}; fn get_descriptor<'a>( table: &'a ResourceTable, @@ -121,7 +120,13 @@ struct ReadStreamProducer { file: File, offset: u64, result: Option>>, - future: Option, ErrorCode>> + 
Send>>>, + task: Option>>, +} + +impl Drop for ReadStreamProducer { + fn drop(&mut self) { + self.close(Ok(())) + } } impl ReadStreamProducer { @@ -137,94 +142,192 @@ impl StreamProducer for ReadStreamProducer { type Buffer = Cursor; fn poll_produce<'a>( - self: Pin<&mut Self>, + mut self: Pin<&mut Self>, cx: &mut Context<'_>, store: StoreContextMut<'a, D>, - destination: &'a mut Destination<'a, Self::Item, Self::Buffer>, + dst: &'a mut Destination<'a, Self::Item, Self::Buffer>, finish: bool, ) -> Poll> { - let me = self.get_mut(); - if me.future.is_none() { - if finish { - return Poll::Ready(Ok(StreamResult::Cancelled)); + if let Some(task) = self.task.as_mut() { + let res = ready!(Pin::new(task).poll(cx)); + self.task = None; + match res { + Ok(Ok(buf)) if buf.is_empty() => { + self.close(Ok(())); + return Poll::Ready(Ok(StreamResult::Dropped)); + } + Ok(Ok(buf)) => { + let n = buf.len(); + dst.set_buffer(Cursor::new(buf)); + let Ok(n) = n.try_into() else { + self.close(Err(ErrorCode::Overflow)); + return Poll::Ready(Ok(StreamResult::Dropped)); + }; + let Some(n) = self.offset.checked_add(n) else { + self.close(Err(ErrorCode::Overflow)); + return Poll::Ready(Ok(StreamResult::Dropped)); + }; + self.offset = n; + return Poll::Ready(Ok(StreamResult::Completed)); + } + Ok(Err(err)) => { + self.close(Err(err.into())); + return Poll::Ready(Ok(StreamResult::Dropped)); + } + Err(err) => { + return Poll::Ready(Err(anyhow!(err).context("failed to join I/O task"))); + } } - - let capacity = destination - .remaining(store) - .unwrap_or(DEFAULT_BUFFER_CAPACITY) - // In the case of small or zero-length reads, we read more than - // was asked for; this will save the runtime from having to - // block or call `poll_produce` on subsequent reads. See the - // documentation for `StreamProducer::poll_produce` for details. 
- .max(DEFAULT_BUFFER_CAPACITY) - .min(MAX_BUFFER_CAPACITY); - let mut buffer = destination.take_buffer().into_inner(); - buffer.resize(capacity, 0); - let offset = me.offset; - let file = me.file.clone(); - me.future = Some( - async move { - match file - .run_blocking(move |file| { - let n = file.read_at(&mut buffer, offset)?; - buffer.truncate(n); - std::io::Result::Ok(buffer) - }) - .await - { - Ok(buffer) if buffer.is_empty() => Ok(None), - Ok(buffer) => { - let n_u64 = buffer.len().try_into().or(Err(ErrorCode::Overflow))?; - offset.checked_add(n_u64).ok_or(ErrorCode::Overflow)?; - Ok(Some(buffer)) + } + if finish { + return Poll::Ready(Ok(StreamResult::Cancelled)); + } + if let Some(file) = self.file.as_blocking_file() { + if let Some(mut dst) = dst.as_direct_destination(store) { + let buf = dst.remaining(); + if !buf.is_empty() { + match file.read_at(buf, self.offset) { + Ok(0) => { + self.close(Ok(())); + return Poll::Ready(Ok(StreamResult::Dropped)); + } + Ok(n) => { + dst.mark_written(n); + let Ok(n) = n.try_into() else { + self.close(Err(ErrorCode::Overflow)); + return Poll::Ready(Ok(StreamResult::Dropped)); + }; + let Some(n) = self.offset.checked_add(n) else { + self.close(Err(ErrorCode::Overflow)); + return Poll::Ready(Ok(StreamResult::Dropped)); + }; + self.offset = n; + return Poll::Ready(Ok(StreamResult::Completed)); + } + Err(err) => { + self.close(Err(err.into())); + return Poll::Ready(Ok(StreamResult::Dropped)); } - Err(err) => Err(err.into()), } } - .boxed(), - ); - } - - let result = match task::ready!(me.future.as_mut().unwrap().as_mut().poll(cx)) { - Ok(Some(buffer)) => { - // We've already checked for overflow inside the future above, - // so no need to do it again here: - me.offset += u64::try_from(buffer.len()).unwrap(); - destination.set_buffer(Cursor::new(buffer)); - StreamResult::Completed } - Ok(None) => { - me.close(Ok(())); - StreamResult::Dropped + let mut buf = dst.take_buffer().into_inner(); + buf.resize(DEFAULT_BUFFER_CAPACITY, 
0); + match file.read_at(&mut buf, self.offset) { + Ok(0) => { + self.close(Ok(())); + return Poll::Ready(Ok(StreamResult::Dropped)); + } + Ok(n) => { + buf.truncate(n); + dst.set_buffer(Cursor::new(buf)); + let Ok(n) = n.try_into() else { + self.close(Err(ErrorCode::Overflow)); + return Poll::Ready(Ok(StreamResult::Dropped)); + }; + let Some(n) = self.offset.checked_add(n) else { + self.close(Err(ErrorCode::Overflow)); + return Poll::Ready(Ok(StreamResult::Dropped)); + }; + self.offset = n; + return Poll::Ready(Ok(StreamResult::Completed)); + } + Err(err) => { + self.close(Err(err.into())); + return Poll::Ready(Ok(StreamResult::Dropped)); + } } - Err(error) => { - me.close(Err(error)); - StreamResult::Dropped + } + let mut buf = dst.take_buffer().into_inner(); + buf.resize(DEFAULT_BUFFER_CAPACITY, 0); + let file = Arc::clone(self.file.as_file()); + let offset = self.offset; + let mut task = spawn_blocking(move || { + file.read_at(&mut buf, offset).map(|n| { + buf.truncate(n); + buf + }) + }); + let res = match Pin::new(&mut task).poll(cx) { + Poll::Ready(res) => res, + Poll::Pending => { + self.task = Some(task); + return Poll::Pending; } }; + match res { + Ok(Ok(buf)) if buf.is_empty() => { + self.close(Ok(())); + Poll::Ready(Ok(StreamResult::Dropped)) + } + Ok(Ok(buf)) => { + let n = buf.len(); + dst.set_buffer(Cursor::new(buf)); + let Ok(n) = n.try_into() else { + self.close(Err(ErrorCode::Overflow)); + return Poll::Ready(Ok(StreamResult::Dropped)); + }; + let Some(n) = self.offset.checked_add(n) else { + self.close(Err(ErrorCode::Overflow)); + return Poll::Ready(Ok(StreamResult::Dropped)); + }; + self.offset = n; + Poll::Ready(Ok(StreamResult::Completed)) + } + Ok(Err(err)) => { + self.close(Err(err.into())); + Poll::Ready(Ok(StreamResult::Dropped)) + } + Err(err) => Poll::Ready(Err(anyhow!(err).context("failed to join I/O task"))), + } + } +} - me.future = None; - - Poll::Ready(Ok(result)) +fn map_dir_entry( + entry: std::io::Result, +) -> Result, ErrorCode> 
{ + match entry { + Ok(entry) => { + let meta = entry.metadata()?; + let Ok(name) = entry.file_name().into_string() else { + return Err(ErrorCode::IllegalByteSequence); + }; + Ok(Some(DirectoryEntry { + type_: meta.file_type().into(), + name, + })) + } + Err(err) => { + // On windows, filter out files like `C:\DumpStack.log.tmp` which we + // can't get full metadata for. + #[cfg(windows)] + { + use windows_sys::Win32::Foundation::{ + ERROR_ACCESS_DENIED, ERROR_SHARING_VIOLATION, + }; + if err.raw_os_error() == Some(ERROR_SHARING_VIOLATION as i32) + || err.raw_os_error() == Some(ERROR_ACCESS_DENIED as i32) + { + return Ok(None); + } + } + Err(err.into()) + } } } -struct DirectoryStreamProducer { - dir: Dir, - entries: Option, +struct BlockingDirectoryStreamProducer { + dir: Arc, result: Option>>, - future: Option< - Pin< - Box< - dyn Future< - Output = Result, ErrorCode>, - > + Send, - >, - >, - >, -} - -impl DirectoryStreamProducer { +} + +impl Drop for BlockingDirectoryStreamProducer { + fn drop(&mut self) { + self.close(Ok(())) + } +} + +impl BlockingDirectoryStreamProducer { fn close(&mut self, res: Result<(), ErrorCode>) { if let Some(tx) = self.result.take() { _ = tx.send(res); @@ -232,111 +335,147 @@ impl DirectoryStreamProducer { } } -impl StreamProducer for DirectoryStreamProducer { +impl StreamProducer for BlockingDirectoryStreamProducer { + type Item = DirectoryEntry; + type Buffer = VecBuffer; + + fn poll_produce<'a>( + mut self: Pin<&mut Self>, + _: &mut Context<'_>, + _: StoreContextMut<'a, D>, + dst: &'a mut Destination<'a, Self::Item, Self::Buffer>, + _finish: bool, + ) -> Poll> { + let entries = match self.dir.entries() { + Ok(entries) => entries, + Err(err) => { + self.close(Err(err.into())); + return Poll::Ready(Ok(StreamResult::Dropped)); + } + }; + let res = match entries + .filter_map(|entry| map_dir_entry(entry).transpose()) + .collect::, _>>() + { + Ok(entries) => { + dst.set_buffer(entries.into()); + Ok(()) + } + Err(err) => Err(err), + }; + 
self.close(res); + Poll::Ready(Ok(StreamResult::Dropped)) + } +} + +struct NonblockingDirectoryStreamProducer(DirStreamState); + +enum DirStreamState { + Init { + dir: Arc, + result: oneshot::Sender>, + }, + InProgress { + rx: mpsc::Receiver, + task: JoinHandle>, + result: oneshot::Sender>, + }, + Closed, +} + +impl Drop for NonblockingDirectoryStreamProducer { + fn drop(&mut self) { + self.close(Ok(())) + } +} + +impl NonblockingDirectoryStreamProducer { + fn close(&mut self, res: Result<(), ErrorCode>) { + if let DirStreamState::Init { result, .. } | DirStreamState::InProgress { result, .. } = + mem::replace(&mut self.0, DirStreamState::Closed) + { + _ = result.send(res); + } + } +} + +impl StreamProducer for NonblockingDirectoryStreamProducer { type Item = DirectoryEntry; type Buffer = Option; fn poll_produce<'a>( - self: Pin<&mut Self>, + mut self: Pin<&mut Self>, cx: &mut Context<'_>, store: StoreContextMut<'a, D>, - destination: &'a mut Destination<'a, Self::Item, Self::Buffer>, + dst: &'a mut Destination<'a, Self::Item, Self::Buffer>, finish: bool, ) -> Poll> { - let me = self.get_mut(); - if me.future.is_none() { - if finish { - return Poll::Ready(Ok(StreamResult::Cancelled)); - } - - let dir = me.dir.clone(); - let mut entries = me.entries.take(); - me.future = Some( - async move { - loop { - let mut entries = if let Some(entries) = entries.take() { - entries - } else { - // FIXME: Handle cancellation - match dir.run_blocking(cap_std::fs::Dir::entries).await { - Ok(entries) => entries, - Err(err) => break Err(err.into()), - } - }; - // FIXME: Handle cancellation - let Some((res, tail)) = dir - .run_blocking(move |_| entries.next().map(|entry| (entry, entries))) - .await - else { - break Ok(None); - }; - let entry = match res { - Ok(entry) => entry, - Err(err) => { - // On windows, filter out files like `C:\DumpStack.log.tmp` which we - // can't get full metadata for. 
- #[cfg(windows)] - { - use windows_sys::Win32::Foundation::{ - ERROR_ACCESS_DENIED, ERROR_SHARING_VIOLATION, - }; - if err.raw_os_error() == Some(ERROR_SHARING_VIOLATION as i32) - || err.raw_os_error() == Some(ERROR_ACCESS_DENIED as i32) - { - continue; - } - } - break Err(err.into()); + match mem::replace(&mut self.0, DirStreamState::Closed) { + DirStreamState::Init { .. } if finish => Poll::Ready(Ok(StreamResult::Cancelled)), + DirStreamState::Init { dir, result } => { + let (entry_tx, entry_rx) = mpsc::channel(1); + let task = spawn_blocking(move || { + let entries = dir.entries()?; + for entry in entries { + if let Some(entry) = map_dir_entry(entry)? { + if let Err(_) = entry_tx.blocking_send(entry) { + break; } - }; - let meta = match entry.metadata() { - Ok(meta) => meta, - Err(err) => break Err(err.into()), - }; - let Ok(name) = entry.file_name().into_string() else { - break Err(ErrorCode::IllegalByteSequence); - }; - break Ok(Some(( - DirectoryEntry { - type_: meta.file_type().into(), - name, - }, - tail, - ))); + } } - } - .boxed(), - ); - } - - let result = match task::ready!(me.future.as_mut().unwrap().as_mut().poll(cx)) { - Ok(Some((entry, entries))) => { - destination.set_buffer(Some(entry)); - me.entries = Some(entries); - StreamResult::Completed - } - Ok(None) => { - me.close(Ok(())); - StreamResult::Dropped + Ok(()) + }); + self.0 = DirStreamState::InProgress { + rx: entry_rx, + task, + result, + }; + self.poll_produce(cx, store, dst, finish) } - Err(error) => { - me.close(Err(error)); - StreamResult::Dropped + DirStreamState::InProgress { + mut rx, + mut task, + result, + } => { + let Poll::Ready(res) = rx.poll_recv(cx) else { + self.0 = DirStreamState::InProgress { rx, task, result }; + if finish { + return Poll::Ready(Ok(StreamResult::Cancelled)); + } + return Poll::Pending; + }; + match res { + Some(entry) => { + self.0 = DirStreamState::InProgress { rx, task, result }; + dst.set_buffer(Some(entry)); + Poll::Ready(Ok(StreamResult::Completed)) + 
} + None => { + let res = ready!(Pin::new(&mut task).poll(cx)) + .context("failed to join I/O task")?; + self.0 = DirStreamState::InProgress { rx, task, result }; + self.close(res); + Poll::Ready(Ok(StreamResult::Dropped)) + } + } } - }; - - me.future = None; - - Poll::Ready(Ok(result)) + DirStreamState::Closed => Poll::Ready(Ok(StreamResult::Dropped)), + } } } struct WriteStreamConsumer { file: File, - offset: Option, - buffer: BytesMut, + offset: u64, result: Option>>, - future: Option> + Send>>>, + buffer: BytesMut, + task: Option>>, +} + +impl Drop for WriteStreamConsumer { + fn drop(&mut self) { + self.close(Ok(())) + } } impl WriteStreamConsumer { @@ -351,69 +490,194 @@ impl StreamConsumer for WriteStreamConsumer { type Item = u8; fn poll_consume( - self: Pin<&mut Self>, + mut self: Pin<&mut Self>, cx: &mut Context<'_>, - mut store: StoreContextMut, - source: &mut Source, + store: StoreContextMut, + src: &mut Source, finish: bool, ) -> Poll> { - let me = self.get_mut(); - if me.future.is_none() { - if finish { - return Poll::Ready(Ok(StreamResult::Cancelled)); - } - - let offset = me.offset; - let file = me.file.clone(); - let mut buffer = mem::take(&mut me.buffer); - buffer.clear(); - buffer.extend_from_slice(source.as_direct_source(store.as_context_mut()).remaining()); - - me.future = Some( - async move { - file.spawn_blocking(move |file| { - let n = if let Some(offset) = offset { - let n = file.write_at(&buffer, offset)?; - let n_u64 = n.try_into().or(Err(ErrorCode::Overflow))?; - offset.checked_add(n_u64).ok_or(ErrorCode::Overflow)?; - n - } else { - file.append(&buffer)? 
- }; - Ok((buffer, n)) - }) - .await + let mut src = src.as_direct_source(store); + if let Some(task) = self.task.as_mut() { + let res = ready!(Pin::new(task).poll(cx)); + self.task = None; + match res { + Ok(Ok((buf, n))) => { + src.mark_read(n); + self.buffer = buf; + self.buffer.clear(); + let Ok(n) = n.try_into() else { + self.close(Err(ErrorCode::Overflow)); + return Poll::Ready(Ok(StreamResult::Dropped)); + }; + let Some(n) = self.offset.checked_add(n) else { + self.close(Err(ErrorCode::Overflow)); + return Poll::Ready(Ok(StreamResult::Dropped)); + }; + self.offset = n; + return Poll::Ready(Ok(StreamResult::Completed)); } - .boxed(), - ); + Ok(Err(err)) => { + self.close(Err(err.into())); + return Poll::Ready(Ok(StreamResult::Dropped)); + } + Err(err) => { + return Poll::Ready(Err(anyhow!(err).context("failed to join I/O task"))); + } + } } - - let result = match task::ready!(me.future.as_mut().unwrap().as_mut().poll(cx)) { - Ok((mut buffer, count)) => { - source.as_direct_source(store).mark_read(count); - let result = if count < buffer.len() && finish { - StreamResult::Cancelled - } else { - StreamResult::Completed - }; - if let Some(offset) = me.offset.as_mut() { - // We've already checked for overflow inside the future - // above, so no need to do it again here: - *offset += u64::try_from(count).unwrap(); + if finish { + return Poll::Ready(Ok(StreamResult::Cancelled)); + } + if let Some(file) = self.file.as_blocking_file() { + match file.write_at(src.remaining(), self.offset) { + Ok(n) => { + src.mark_read(n); + let Ok(n) = n.try_into() else { + self.close(Err(ErrorCode::Overflow)); + return Poll::Ready(Ok(StreamResult::Dropped)); + }; + let Some(n) = self.offset.checked_add(n) else { + self.close(Err(ErrorCode::Overflow)); + return Poll::Ready(Ok(StreamResult::Dropped)); + }; + self.offset = n; + return Poll::Ready(Ok(StreamResult::Completed)); + } + Err(err) => { + self.close(Err(err.into())); + return Poll::Ready(Ok(StreamResult::Dropped)); } - 
buffer.clear(); - me.buffer = buffer; - result } - Err(error) => { - me.close(Err(error)); - StreamResult::Dropped + } + debug_assert!(self.buffer.is_empty()); + self.buffer.extend_from_slice(src.remaining()); + let buf = mem::take(&mut self.buffer); + let file = Arc::clone(self.file.as_file()); + let offset = self.offset; + let mut task = spawn_blocking(move || file.write_at(&buf, offset).map(|n| (buf, n))); + let res = match Pin::new(&mut task).poll(cx) { + Poll::Ready(res) => res, + Poll::Pending => { + self.task = Some(task); + return Poll::Pending; } }; + match res { + Ok(Ok((buf, n))) => { + src.mark_read(n); + self.buffer = buf; + self.buffer.clear(); + let Ok(n) = n.try_into() else { + self.close(Err(ErrorCode::Overflow)); + return Poll::Ready(Ok(StreamResult::Dropped)); + }; + let Some(n) = self.offset.checked_add(n) else { + self.close(Err(ErrorCode::Overflow)); + return Poll::Ready(Ok(StreamResult::Dropped)); + }; + self.offset = n; + Poll::Ready(Ok(StreamResult::Completed)) + } + Ok(Err(err)) => { + self.close(Err(err.into())); + Poll::Ready(Ok(StreamResult::Dropped)) + } + Err(err) => Poll::Ready(Err(anyhow!(err).context("failed to join I/O task"))), + } + } +} - me.future = None; +struct AppendStreamConsumer { + file: File, + result: Option>>, + buffer: BytesMut, + task: Option>>, +} + +impl Drop for AppendStreamConsumer { + fn drop(&mut self) { + self.close(Ok(())) + } +} - Poll::Ready(Ok(result)) +impl AppendStreamConsumer { + fn close(&mut self, res: Result<(), ErrorCode>) { + if let Some(tx) = self.result.take() { + _ = tx.send(res); + } + } +} + +impl StreamConsumer for AppendStreamConsumer { + type Item = u8; + + fn poll_consume( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + store: StoreContextMut, + src: &mut Source, + finish: bool, + ) -> Poll> { + let mut src = src.as_direct_source(store); + if let Some(task) = self.task.as_mut() { + let res = ready!(Pin::new(task).poll(cx)); + self.task = None; + match res { + Ok(Ok((buf, n))) => { + 
src.mark_read(n); + self.buffer = buf; + self.buffer.clear(); + return Poll::Ready(Ok(StreamResult::Completed)); + } + Ok(Err(err)) => { + self.close(Err(err.into())); + return Poll::Ready(Ok(StreamResult::Dropped)); + } + Err(err) => { + return Poll::Ready(Err(anyhow!(err).context("failed to join I/O task"))); + } + } + } + if finish { + return Poll::Ready(Ok(StreamResult::Cancelled)); + } + if let Some(file) = self.file.as_blocking_file() { + match file.append(src.remaining()) { + Ok(n) => { + src.mark_read(n); + return Poll::Ready(Ok(StreamResult::Completed)); + } + Err(err) => { + self.close(Err(err.into())); + return Poll::Ready(Ok(StreamResult::Dropped)); + } + } + } + debug_assert!(self.buffer.is_empty()); + self.buffer.extend_from_slice(src.remaining()); + let buf = mem::take(&mut self.buffer); + let file = Arc::clone(self.file.as_file()); + let mut task = spawn_blocking(move || file.append(&buf).map(|n| (buf, n))); + let res = match Pin::new(&mut task).poll(cx) { + Poll::Ready(res) => res, + Poll::Pending => { + self.task = Some(task); + return Poll::Pending; + } + }; + match res { + Ok(Ok((buf, n))) => { + src.mark_read(n); + self.buffer = buf; + self.buffer.clear(); + Poll::Ready(Ok(StreamResult::Completed)) + } + Ok(Err(err)) => { + self.close(Err(err.into())); + Poll::Ready(Ok(StreamResult::Dropped)) + } + Err(err) => Poll::Ready(Err(anyhow!(err).context("failed to join I/O task"))), + } } } @@ -434,7 +698,7 @@ impl types::HostDescriptorWithStore for WasiFilesystem { let file = get_file(store.get().table, &fd)?; if !file.perms.contains(FilePerms::READ) { return Ok(( - StreamReader::new(instance, &mut store, StreamEmptyProducer(PhantomData)), + StreamReader::new(instance, &mut store, StreamEmptyProducer::default()), FutureReader::new( instance, &mut store, @@ -453,7 +717,7 @@ impl types::HostDescriptorWithStore for WasiFilesystem { file, offset, result: Some(result_tx), - future: None, + task: None, }, ), FutureReader::new(instance, &mut store, 
FutureOneshotProducer(result_rx)), @@ -478,10 +742,10 @@ impl types::HostDescriptorWithStore for WasiFilesystem { store, WriteStreamConsumer { file, - offset: Some(offset), - buffer: BytesMut::default(), + offset, result: Some(result_tx), - future: None, + buffer: BytesMut::default(), + task: None, }, ); FilesystemResult::Ok(()) @@ -507,12 +771,11 @@ impl types::HostDescriptorWithStore for WasiFilesystem { let file = file.clone(); data.pipe( store, - WriteStreamConsumer { + AppendStreamConsumer { file, - offset: None, - buffer: BytesMut::default(), result: Some(result_tx), - future: None, + buffer: BytesMut::default(), + task: None, }, ); FilesystemResult::Ok(()) @@ -598,7 +861,7 @@ impl types::HostDescriptorWithStore for WasiFilesystem { let dir = get_dir(store.get().table, &fd)?; if !dir.perms.contains(DirPerms::READ) { return Ok(( - StreamReader::new(instance, &mut store, StreamEmptyProducer(PhantomData)), + StreamReader::new(instance, &mut store, StreamEmptyProducer::default()), FutureReader::new( instance, &mut store, @@ -606,20 +869,30 @@ impl types::HostDescriptorWithStore for WasiFilesystem { ), )); } - - let dir = dir.clone(); + let allow_blocking_current_thread = dir.allow_blocking_current_thread; + let dir = Arc::clone(dir.as_dir()); let (result_tx, result_rx) = oneshot::channel(); - Ok(( + let stream = if allow_blocking_current_thread { StreamReader::new( instance, &mut store, - DirectoryStreamProducer { + BlockingDirectoryStreamProducer { dir, - entries: None, result: Some(result_tx), - future: None, }, - ), + ) + } else { + StreamReader::new( + instance, + &mut store, + NonblockingDirectoryStreamProducer(DirStreamState::Init { + dir, + result: result_tx, + }), + ) + }; + Ok(( + stream, FutureReader::new(instance, &mut store, FutureOneshotProducer(result_rx)), )) }) From 4bc782d3c2405cf2cb35c11cc4fdd55ea42642dd Mon Sep 17 00:00:00 2001 From: Roman Volosatovs Date: Thu, 4 Sep 2025 19:57:49 +0200 Subject: [PATCH 27/32] remove `MAX_BUFFER_CAPACITY` 
Signed-off-by: Roman Volosatovs --- crates/wasi/src/p3/mod.rs | 3 --- 1 file changed, 3 deletions(-) diff --git a/crates/wasi/src/p3/mod.rs b/crates/wasi/src/p3/mod.rs index c97a520dfb79..c3cdf63117b9 100644 --- a/crates/wasi/src/p3/mod.rs +++ b/crates/wasi/src/p3/mod.rs @@ -30,9 +30,6 @@ use wasmtime::component::{ // Default buffer capacity to use for reads of byte-sized values. const DEFAULT_BUFFER_CAPACITY: usize = 8192; -// Maximum buffer capacity to use for reads of byte-sized values. -const MAX_BUFFER_CAPACITY: usize = 4 * DEFAULT_BUFFER_CAPACITY; - struct StreamEmptyProducer(PhantomData T>); impl Default for StreamEmptyProducer { From 1197b64ea35bd89bc5ac2ef02b46874dd1f21019 Mon Sep 17 00:00:00 2001 From: Roman Volosatovs Date: Thu, 4 Sep 2025 19:58:08 +0200 Subject: [PATCH 28/32] refactor `Cursor` usage Signed-off-by: Roman Volosatovs --- crates/wasi/src/p3/cli/host.rs | 12 ++++++------ crates/wasi/src/p3/sockets/host/types/tcp.rs | 10 +++++----- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/crates/wasi/src/p3/cli/host.rs b/crates/wasi/src/p3/cli/host.rs index 117fe6be0165..3915f5754c4e 100644 --- a/crates/wasi/src/p3/cli/host.rs +++ b/crates/wasi/src/p3/cli/host.rs @@ -54,10 +54,10 @@ impl StreamProducer for InputStreamProducer { } } } - let mut buf = dst.take_buffer(); - debug_assert!(buf.get_ref().is_empty()); - buf.get_mut().reserve(DEFAULT_BUFFER_CAPACITY); - let mut rbuf = ReadBuf::uninit(buf.get_mut().spare_capacity_mut()); + let mut buf = dst.take_buffer().into_inner(); + buf.clear(); + buf.reserve(DEFAULT_BUFFER_CAPACITY); + let mut rbuf = ReadBuf::uninit(buf.spare_capacity_mut()); match self.rx.as_mut().poll_read(cx, &mut rbuf) { Poll::Ready(Ok(())) if rbuf.filled().is_empty() => { Poll::Ready(Ok(StreamResult::Dropped)) @@ -66,8 +66,8 @@ impl StreamProducer for InputStreamProducer { let n = rbuf.filled().len(); // SAFETY: `ReadyBuf::filled` promised us `count` bytes have // been initialized. 
- unsafe { buf.get_mut().set_len(n) }; - dst.set_buffer(buf); + unsafe { buf.set_len(n) }; + dst.set_buffer(Cursor::new(buf)); Poll::Ready(Ok(StreamResult::Completed)) } Poll::Ready(Err(..)) => { diff --git a/crates/wasi/src/p3/sockets/host/types/tcp.rs b/crates/wasi/src/p3/sockets/host/types/tcp.rs index 6abafedf5df9..0b755cde134a 100644 --- a/crates/wasi/src/p3/sockets/host/types/tcp.rs +++ b/crates/wasi/src/p3/sockets/host/types/tcp.rs @@ -142,14 +142,14 @@ impl StreamProducer for ReceiveStreamProducer { } } - let mut buf = dst.take_buffer(); - debug_assert!(buf.get_ref().is_empty()); - buf.get_mut().reserve(DEFAULT_BUFFER_CAPACITY); + let mut buf = dst.take_buffer().into_inner(); + buf.clear(); + buf.reserve(DEFAULT_BUFFER_CAPACITY); loop { - match self.stream.try_read_buf(buf.get_mut()) { + match self.stream.try_read_buf(&mut buf) { Ok(0) => break 'result Ok(()), Ok(..) => { - dst.set_buffer(buf); + dst.set_buffer(Cursor::new(buf)); return Poll::Ready(Ok(StreamResult::Completed)); } Err(err) if err.kind() == std::io::ErrorKind::WouldBlock => { From 4e99221a9040c4df5804bf22105c3d1aa80e4f8e Mon Sep 17 00:00:00 2001 From: Roman Volosatovs Date: Thu, 4 Sep 2025 19:58:28 +0200 Subject: [PATCH 29/32] impl Default for VecBuffer Signed-off-by: Roman Volosatovs --- .../component/concurrent/futures_and_streams/buffers.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/crates/wasmtime/src/runtime/component/concurrent/futures_and_streams/buffers.rs b/crates/wasmtime/src/runtime/component/concurrent/futures_and_streams/buffers.rs index c3f08b063095..4f9dea603fe5 100644 --- a/crates/wasmtime/src/runtime/component/concurrent/futures_and_streams/buffers.rs +++ b/crates/wasmtime/src/runtime/component/concurrent/futures_and_streams/buffers.rs @@ -220,6 +220,12 @@ pub struct VecBuffer { offset: usize, } +impl Default for VecBuffer { + fn default() -> Self { + Self::with_capacity(0) + } +} + impl VecBuffer { /// Create a new instance with the specified capacity. 
pub fn with_capacity(capacity: usize) -> Self { From afac7833683e847f807ff1aed3479a25f7d6ecd0 Mon Sep 17 00:00:00 2001 From: Roman Volosatovs Date: Thu, 4 Sep 2025 20:00:34 +0200 Subject: [PATCH 30/32] refactor: use consistent import styling Signed-off-by: Roman Volosatovs --- crates/wasi/src/p3/filesystem/host.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/wasi/src/p3/filesystem/host.rs b/crates/wasi/src/p3/filesystem/host.rs index e92d3b15d2b6..a0ffe530e61e 100644 --- a/crates/wasi/src/p3/filesystem/host.rs +++ b/crates/wasi/src/p3/filesystem/host.rs @@ -19,11 +19,11 @@ use std::sync::Arc; use system_interface::fs::FileIoExt as _; use tokio::sync::{mpsc, oneshot}; use tokio::task::{JoinHandle, spawn_blocking}; +use wasmtime::StoreContextMut; use wasmtime::component::{ Accessor, Destination, FutureReader, Resource, ResourceTable, Source, StreamConsumer, - StreamProducer, StreamReader, StreamResult, + StreamProducer, StreamReader, StreamResult, VecBuffer, }; -use wasmtime::{StoreContextMut, component::VecBuffer}; fn get_descriptor<'a>( table: &'a ResourceTable, From f0dfa55d58491bab212e0659be250e7e95605ffb Mon Sep 17 00:00:00 2001 From: Roman Volosatovs Date: Thu, 4 Sep 2025 20:03:36 +0200 Subject: [PATCH 31/32] feature-gate fs Arc accessors Signed-off-by: Roman Volosatovs --- crates/wasi/src/filesystem.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/crates/wasi/src/filesystem.rs b/crates/wasi/src/filesystem.rs index ee43c2b19527..8f8a281050eb 100644 --- a/crates/wasi/src/filesystem.rs +++ b/crates/wasi/src/filesystem.rs @@ -702,6 +702,7 @@ impl File { } /// Returns reference to the underlying [`cap_std::fs::File`] + #[cfg(feature = "p3")] pub(crate) fn as_file(&self) -> &Arc { &self.file } @@ -799,6 +800,7 @@ impl Dir { } /// Returns reference to the underlying [`cap_std::fs::Dir`] + #[cfg(feature = "p3")] pub(crate) fn as_dir(&self) -> &Arc { &self.dir } From 7db82fff1d8343334a09e0d0c5fbe54753d613c8 Mon Sep 17 00:00:00 2001 
From: Alex Crichton Date: Thu, 4 Sep 2025 13:58:15 -0700 Subject: [PATCH 32/32] Update test expectations --- .../tests/expanded/char_concurrent.rs | 4 +- .../tests/expanded/conventions_concurrent.rs | 24 +++---- .../tests/expanded/dead-code_concurrent.rs | 2 +- .../expanded/direct-import_concurrent.rs | 2 +- .../tests/expanded/flags_concurrent.rs | 14 ++-- .../tests/expanded/floats_concurrent.rs | 8 +-- .../tests/expanded/host-world_concurrent.rs | 2 +- .../tests/expanded/integers_concurrent.rs | 36 +++++----- .../tests/expanded/lists_concurrent.rs | 58 ++++++++-------- .../expanded/many-arguments_concurrent.rs | 4 +- .../tests/expanded/multiversion_concurrent.rs | 4 +- .../tests/expanded/records_concurrent.rs | 22 +++---- .../tests/expanded/rename_concurrent.rs | 2 +- .../expanded/resources-export_concurrent.rs | 2 +- .../expanded/resources-import_concurrent.rs | 66 +++++++++---------- .../tests/expanded/share-types_concurrent.rs | 2 +- .../expanded/simple-functions_concurrent.rs | 12 ++-- .../tests/expanded/simple-lists_concurrent.rs | 8 +-- .../tests/expanded/simple-wasi_concurrent.rs | 4 +- .../expanded/small-anonymous_concurrent.rs | 2 +- .../tests/expanded/smoke_concurrent.rs | 2 +- .../tests/expanded/strings_concurrent.rs | 6 +- .../expanded/unstable-features_concurrent.rs | 12 ++-- .../expanded/unversioned-foo_concurrent.rs | 2 +- .../tests/expanded/use-paths_concurrent.rs | 8 +-- .../tests/expanded/variants_concurrent.rs | 40 +++++------ 26 files changed, 174 insertions(+), 174 deletions(-) diff --git a/crates/component-macro/tests/expanded/char_concurrent.rs b/crates/component-macro/tests/expanded/char_concurrent.rs index 18b9d225a13d..17e218da4d1f 100644 --- a/crates/component-macro/tests/expanded/char_concurrent.rs +++ b/crates/component-macro/tests/expanded/char_concurrent.rs @@ -218,7 +218,7 @@ pub mod foo { "take-char", move |caller: &wasmtime::component::Accessor, (arg0,): (char,)| { wasmtime::component::__internal::Box::pin(async move { - let 
accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::take_char(accessor, arg0) .await; Ok(r) @@ -229,7 +229,7 @@ pub mod foo { "return-char", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::return_char(accessor).await; Ok((r,)) }) diff --git a/crates/component-macro/tests/expanded/conventions_concurrent.rs b/crates/component-macro/tests/expanded/conventions_concurrent.rs index 9c4e16e970d3..34ad59182a8c 100644 --- a/crates/component-macro/tests/expanded/conventions_concurrent.rs +++ b/crates/component-macro/tests/expanded/conventions_concurrent.rs @@ -286,7 +286,7 @@ pub mod foo { "kebab-case", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::kebab_case(accessor).await; Ok(r) }) @@ -299,7 +299,7 @@ pub mod foo { (arg0,): (LudicrousSpeed,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::foo(accessor, arg0).await; Ok(r) }) @@ -309,7 +309,7 @@ pub mod foo { "function-with-dashes", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::function_with_dashes(accessor) .await; Ok(r) @@ -320,7 +320,7 @@ pub mod foo { "function-with-no-weird-characters", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::function_with_no_weird_characters( 
accessor, ) @@ -333,7 +333,7 @@ pub mod foo { "apple", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::apple(accessor).await; Ok(r) }) @@ -343,7 +343,7 @@ pub mod foo { "apple-pear", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::apple_pear(accessor).await; Ok(r) }) @@ -353,7 +353,7 @@ pub mod foo { "apple-pear-grape", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::apple_pear_grape(accessor) .await; Ok(r) @@ -364,7 +364,7 @@ pub mod foo { "a0", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::a0(accessor).await; Ok(r) }) @@ -374,7 +374,7 @@ pub mod foo { "is-XML", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::is_xml(accessor).await; Ok(r) }) @@ -384,7 +384,7 @@ pub mod foo { "explicit", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::explicit(accessor).await; Ok(r) }) @@ -394,7 +394,7 @@ pub mod foo { "explicit-kebab", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = 
&caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::explicit_kebab(accessor).await; Ok(r) }) @@ -404,7 +404,7 @@ pub mod foo { "bool", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::bool(accessor).await; Ok(r) }) diff --git a/crates/component-macro/tests/expanded/dead-code_concurrent.rs b/crates/component-macro/tests/expanded/dead-code_concurrent.rs index 48a3e7be18e3..8e862699aa82 100644 --- a/crates/component-macro/tests/expanded/dead-code_concurrent.rs +++ b/crates/component-macro/tests/expanded/dead-code_concurrent.rs @@ -228,7 +228,7 @@ pub mod a { "f", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::f(accessor).await; Ok((r,)) }) diff --git a/crates/component-macro/tests/expanded/direct-import_concurrent.rs b/crates/component-macro/tests/expanded/direct-import_concurrent.rs index af13e65bc0b8..6cb2a3c72bdd 100644 --- a/crates/component-macro/tests/expanded/direct-import_concurrent.rs +++ b/crates/component-macro/tests/expanded/direct-import_concurrent.rs @@ -184,7 +184,7 @@ const _: () = { "foo", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::foo(accessor).await; Ok(r) }) diff --git a/crates/component-macro/tests/expanded/flags_concurrent.rs b/crates/component-macro/tests/expanded/flags_concurrent.rs index dc33af21457b..361019023b8a 100644 --- a/crates/component-macro/tests/expanded/flags_concurrent.rs +++ b/crates/component-macro/tests/expanded/flags_concurrent.rs @@ -351,7 +351,7 @@ pub mod foo { 
"roundtrip-flag1", move |caller: &wasmtime::component::Accessor, (arg0,): (Flag1,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::roundtrip_flag1(accessor, arg0) .await; Ok((r,)) @@ -362,7 +362,7 @@ pub mod foo { "roundtrip-flag2", move |caller: &wasmtime::component::Accessor, (arg0,): (Flag2,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::roundtrip_flag2(accessor, arg0) .await; Ok((r,)) @@ -373,7 +373,7 @@ pub mod foo { "roundtrip-flag4", move |caller: &wasmtime::component::Accessor, (arg0,): (Flag4,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::roundtrip_flag4(accessor, arg0) .await; Ok((r,)) @@ -384,7 +384,7 @@ pub mod foo { "roundtrip-flag8", move |caller: &wasmtime::component::Accessor, (arg0,): (Flag8,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::roundtrip_flag8(accessor, arg0) .await; Ok((r,)) @@ -395,7 +395,7 @@ pub mod foo { "roundtrip-flag16", move |caller: &wasmtime::component::Accessor, (arg0,): (Flag16,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::roundtrip_flag16( accessor, arg0, @@ -409,7 +409,7 @@ pub mod foo { "roundtrip-flag32", move |caller: &wasmtime::component::Accessor, (arg0,): (Flag32,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::roundtrip_flag32( accessor, arg0, @@ -423,7 +423,7 @@ pub mod foo { "roundtrip-flag64", 
move |caller: &wasmtime::component::Accessor, (arg0,): (Flag64,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::roundtrip_flag64( accessor, arg0, diff --git a/crates/component-macro/tests/expanded/floats_concurrent.rs b/crates/component-macro/tests/expanded/floats_concurrent.rs index 4074956a70b7..48b41f84fb8d 100644 --- a/crates/component-macro/tests/expanded/floats_concurrent.rs +++ b/crates/component-macro/tests/expanded/floats_concurrent.rs @@ -225,7 +225,7 @@ pub mod foo { "f32-param", move |caller: &wasmtime::component::Accessor, (arg0,): (f32,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::f32_param(accessor, arg0) .await; Ok(r) @@ -236,7 +236,7 @@ pub mod foo { "f64-param", move |caller: &wasmtime::component::Accessor, (arg0,): (f64,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::f64_param(accessor, arg0) .await; Ok(r) @@ -247,7 +247,7 @@ pub mod foo { "f32-result", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::f32_result(accessor).await; Ok((r,)) }) @@ -257,7 +257,7 @@ pub mod foo { "f64-result", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::f64_result(accessor).await; Ok((r,)) }) diff --git a/crates/component-macro/tests/expanded/host-world_concurrent.rs b/crates/component-macro/tests/expanded/host-world_concurrent.rs index 4e01b30ad8f6..2b6fcd675bfa 
100644 --- a/crates/component-macro/tests/expanded/host-world_concurrent.rs +++ b/crates/component-macro/tests/expanded/host-world_concurrent.rs @@ -184,7 +184,7 @@ const _: () = { "foo", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::foo(accessor).await; Ok(r) }) diff --git a/crates/component-macro/tests/expanded/integers_concurrent.rs b/crates/component-macro/tests/expanded/integers_concurrent.rs index b4013def35d3..22bcecf1377f 100644 --- a/crates/component-macro/tests/expanded/integers_concurrent.rs +++ b/crates/component-macro/tests/expanded/integers_concurrent.rs @@ -281,7 +281,7 @@ pub mod foo { "a1", move |caller: &wasmtime::component::Accessor, (arg0,): (u8,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::a1(accessor, arg0).await; Ok(r) }) @@ -291,7 +291,7 @@ pub mod foo { "a2", move |caller: &wasmtime::component::Accessor, (arg0,): (i8,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::a2(accessor, arg0).await; Ok(r) }) @@ -301,7 +301,7 @@ pub mod foo { "a3", move |caller: &wasmtime::component::Accessor, (arg0,): (u16,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::a3(accessor, arg0).await; Ok(r) }) @@ -311,7 +311,7 @@ pub mod foo { "a4", move |caller: &wasmtime::component::Accessor, (arg0,): (i16,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::a4(accessor, arg0).await; Ok(r) }) @@ -321,7 +321,7 @@ pub mod foo 
{ "a5", move |caller: &wasmtime::component::Accessor, (arg0,): (u32,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::a5(accessor, arg0).await; Ok(r) }) @@ -331,7 +331,7 @@ pub mod foo { "a6", move |caller: &wasmtime::component::Accessor, (arg0,): (i32,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::a6(accessor, arg0).await; Ok(r) }) @@ -341,7 +341,7 @@ pub mod foo { "a7", move |caller: &wasmtime::component::Accessor, (arg0,): (u64,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::a7(accessor, arg0).await; Ok(r) }) @@ -351,7 +351,7 @@ pub mod foo { "a8", move |caller: &wasmtime::component::Accessor, (arg0,): (i64,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::a8(accessor, arg0).await; Ok(r) }) @@ -373,7 +373,7 @@ pub mod foo { ): (u8, i8, u16, i16, u32, i32, u64, i64)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::a9( accessor, arg0, @@ -394,7 +394,7 @@ pub mod foo { "r1", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::r1(accessor).await; Ok((r,)) }) @@ -404,7 +404,7 @@ pub mod foo { "r2", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let 
r = ::r2(accessor).await; Ok((r,)) }) @@ -414,7 +414,7 @@ pub mod foo { "r3", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::r3(accessor).await; Ok((r,)) }) @@ -424,7 +424,7 @@ pub mod foo { "r4", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::r4(accessor).await; Ok((r,)) }) @@ -434,7 +434,7 @@ pub mod foo { "r5", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::r5(accessor).await; Ok((r,)) }) @@ -444,7 +444,7 @@ pub mod foo { "r6", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::r6(accessor).await; Ok((r,)) }) @@ -454,7 +454,7 @@ pub mod foo { "r7", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::r7(accessor).await; Ok((r,)) }) @@ -464,7 +464,7 @@ pub mod foo { "r8", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::r8(accessor).await; Ok((r,)) }) @@ -474,7 +474,7 @@ pub mod foo { "pair-ret", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = 
&caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::pair_ret(accessor).await; Ok((r,)) }) diff --git a/crates/component-macro/tests/expanded/lists_concurrent.rs b/crates/component-macro/tests/expanded/lists_concurrent.rs index 11324e1d83ad..d7b6d2ec5bc2 100644 --- a/crates/component-macro/tests/expanded/lists_concurrent.rs +++ b/crates/component-macro/tests/expanded/lists_concurrent.rs @@ -543,7 +543,7 @@ pub mod foo { (arg0,): (wasmtime::component::__internal::Vec,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::list_u8_param(accessor, arg0) .await; Ok(r) @@ -557,7 +557,7 @@ pub mod foo { (arg0,): (wasmtime::component::__internal::Vec,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::list_u16_param(accessor, arg0) .await; Ok(r) @@ -571,7 +571,7 @@ pub mod foo { (arg0,): (wasmtime::component::__internal::Vec,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::list_u32_param(accessor, arg0) .await; Ok(r) @@ -585,7 +585,7 @@ pub mod foo { (arg0,): (wasmtime::component::__internal::Vec,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::list_u64_param(accessor, arg0) .await; Ok(r) @@ -599,7 +599,7 @@ pub mod foo { (arg0,): (wasmtime::component::__internal::Vec,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::list_s8_param(accessor, arg0) .await; Ok(r) @@ -613,7 +613,7 @@ pub mod foo { (arg0,): (wasmtime::component::__internal::Vec,)| { 
wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::list_s16_param(accessor, arg0) .await; Ok(r) @@ -627,7 +627,7 @@ pub mod foo { (arg0,): (wasmtime::component::__internal::Vec,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::list_s32_param(accessor, arg0) .await; Ok(r) @@ -641,7 +641,7 @@ pub mod foo { (arg0,): (wasmtime::component::__internal::Vec,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::list_s64_param(accessor, arg0) .await; Ok(r) @@ -655,7 +655,7 @@ pub mod foo { (arg0,): (wasmtime::component::__internal::Vec,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::list_f32_param(accessor, arg0) .await; Ok(r) @@ -669,7 +669,7 @@ pub mod foo { (arg0,): (wasmtime::component::__internal::Vec,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::list_f64_param(accessor, arg0) .await; Ok(r) @@ -680,7 +680,7 @@ pub mod foo { "list-u8-ret", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::list_u8_ret(accessor).await; Ok((r,)) }) @@ -690,7 +690,7 @@ pub mod foo { "list-u16-ret", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = 
::list_u16_ret(accessor).await; Ok((r,)) }) @@ -700,7 +700,7 @@ pub mod foo { "list-u32-ret", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::list_u32_ret(accessor).await; Ok((r,)) }) @@ -710,7 +710,7 @@ pub mod foo { "list-u64-ret", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::list_u64_ret(accessor).await; Ok((r,)) }) @@ -720,7 +720,7 @@ pub mod foo { "list-s8-ret", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::list_s8_ret(accessor).await; Ok((r,)) }) @@ -730,7 +730,7 @@ pub mod foo { "list-s16-ret", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::list_s16_ret(accessor).await; Ok((r,)) }) @@ -740,7 +740,7 @@ pub mod foo { "list-s32-ret", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::list_s32_ret(accessor).await; Ok((r,)) }) @@ -750,7 +750,7 @@ pub mod foo { "list-s64-ret", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::list_s64_ret(accessor).await; Ok((r,)) }) @@ -760,7 +760,7 @@ pub mod foo { "list-f32-ret", move |caller: 
&wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::list_f32_ret(accessor).await; Ok((r,)) }) @@ -770,7 +770,7 @@ pub mod foo { "list-f64-ret", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::list_f64_ret(accessor).await; Ok((r,)) }) @@ -783,7 +783,7 @@ pub mod foo { (arg0,): (wasmtime::component::__internal::Vec<(u8, i8)>,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::tuple_list(accessor, arg0) .await; Ok((r,)) @@ -803,7 +803,7 @@ pub mod foo { )| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::string_list_arg(accessor, arg0) .await; Ok(r) @@ -814,7 +814,7 @@ pub mod foo { "string-list-ret", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::string_list_ret(accessor) .await; Ok((r,)) @@ -834,7 +834,7 @@ pub mod foo { )| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::tuple_string_list( accessor, arg0, @@ -857,7 +857,7 @@ pub mod foo { )| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::string_list(accessor, arg0) .await; Ok((r,)) @@ -871,7 +871,7 @@ pub mod foo { (arg0,): 
(wasmtime::component::__internal::Vec,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::record_list(accessor, arg0) .await; Ok((r,)) @@ -885,7 +885,7 @@ pub mod foo { (arg0,): (wasmtime::component::__internal::Vec,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::record_list_reverse( accessor, arg0, @@ -902,7 +902,7 @@ pub mod foo { (arg0,): (wasmtime::component::__internal::Vec,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::variant_list(accessor, arg0) .await; Ok((r,)) @@ -916,7 +916,7 @@ pub mod foo { (arg0,): (LoadStoreAllSizes,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::load_store_everything( accessor, arg0, diff --git a/crates/component-macro/tests/expanded/many-arguments_concurrent.rs b/crates/component-macro/tests/expanded/many-arguments_concurrent.rs index 46c44fb96a63..3822a8b5f15e 100644 --- a/crates/component-macro/tests/expanded/many-arguments_concurrent.rs +++ b/crates/component-macro/tests/expanded/many-arguments_concurrent.rs @@ -352,7 +352,7 @@ pub mod foo { )| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::many_args( accessor, arg0, @@ -384,7 +384,7 @@ pub mod foo { (arg0,): (BigStruct,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::big_argument(accessor, arg0) .await; Ok(r) diff --git 
a/crates/component-macro/tests/expanded/multiversion_concurrent.rs b/crates/component-macro/tests/expanded/multiversion_concurrent.rs index 866e8184a177..93792c2ed84b 100644 --- a/crates/component-macro/tests/expanded/multiversion_concurrent.rs +++ b/crates/component-macro/tests/expanded/multiversion_concurrent.rs @@ -223,7 +223,7 @@ pub mod my { "x", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::x(accessor).await; Ok(r) }) @@ -259,7 +259,7 @@ pub mod my { "x", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::x(accessor).await; Ok(r) }) diff --git a/crates/component-macro/tests/expanded/records_concurrent.rs b/crates/component-macro/tests/expanded/records_concurrent.rs index f372fca3f501..becdf1917bc1 100644 --- a/crates/component-macro/tests/expanded/records_concurrent.rs +++ b/crates/component-macro/tests/expanded/records_concurrent.rs @@ -408,7 +408,7 @@ pub mod foo { (arg0,): ((char, u32),)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::tuple_arg(accessor, arg0) .await; Ok(r) @@ -419,7 +419,7 @@ pub mod foo { "tuple-result", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::tuple_result(accessor).await; Ok((r,)) }) @@ -429,7 +429,7 @@ pub mod foo { "empty-arg", move |caller: &wasmtime::component::Accessor, (arg0,): (Empty,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let 
accessor = &caller.with_getter(host_getter); let r = ::empty_arg(accessor, arg0) .await; Ok(r) @@ -440,7 +440,7 @@ pub mod foo { "empty-result", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::empty_result(accessor).await; Ok((r,)) }) @@ -453,7 +453,7 @@ pub mod foo { (arg0,): (Scalars,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::scalar_arg(accessor, arg0) .await; Ok(r) @@ -464,7 +464,7 @@ pub mod foo { "scalar-result", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::scalar_result(accessor).await; Ok((r,)) }) @@ -477,7 +477,7 @@ pub mod foo { (arg0,): (ReallyFlags,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::flags_arg(accessor, arg0) .await; Ok(r) @@ -488,7 +488,7 @@ pub mod foo { "flags-result", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::flags_result(accessor).await; Ok((r,)) }) @@ -501,7 +501,7 @@ pub mod foo { (arg0,): (Aggregates,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::aggregate_arg(accessor, arg0) .await; Ok(r) @@ -512,7 +512,7 @@ pub mod foo { "aggregate-result", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { 
- let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::aggregate_result(accessor) .await; Ok((r,)) @@ -526,7 +526,7 @@ pub mod foo { (arg0,): (TupleTypedef2,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::typedef_inout(accessor, arg0) .await; Ok((r,)) diff --git a/crates/component-macro/tests/expanded/rename_concurrent.rs b/crates/component-macro/tests/expanded/rename_concurrent.rs index 976fb32856fe..37609b7ab4e8 100644 --- a/crates/component-macro/tests/expanded/rename_concurrent.rs +++ b/crates/component-macro/tests/expanded/rename_concurrent.rs @@ -238,7 +238,7 @@ pub mod foo { "foo", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::foo(accessor).await; Ok((r,)) }) diff --git a/crates/component-macro/tests/expanded/resources-export_concurrent.rs b/crates/component-macro/tests/expanded/resources-export_concurrent.rs index 4f1121af72d0..7060f72591d1 100644 --- a/crates/component-macro/tests/expanded/resources-export_concurrent.rs +++ b/crates/component-macro/tests/expanded/resources-export_concurrent.rs @@ -269,7 +269,7 @@ pub mod foo { wasmtime::component::ResourceType::host::(), move |caller: &wasmtime::component::Accessor, rep| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); HostYWithStore::drop( accessor, wasmtime::component::Resource::new_own(rep), diff --git a/crates/component-macro/tests/expanded/resources-import_concurrent.rs b/crates/component-macro/tests/expanded/resources-import_concurrent.rs index 684c268a32fd..0ce0bbb3768d 100644 --- 
a/crates/component-macro/tests/expanded/resources-import_concurrent.rs +++ b/crates/component-macro/tests/expanded/resources-import_concurrent.rs @@ -259,7 +259,7 @@ const _: () = { wasmtime::component::ResourceType::host::(), move |caller: &wasmtime::component::Accessor, rep| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); HostWorldResourceWithStore::drop( accessor, wasmtime::component::Resource::new_own(rep), @@ -273,7 +273,7 @@ const _: () = { "some-world-func", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::some_world_func( accessor, ) @@ -287,7 +287,7 @@ const _: () = { "[constructor]world-resource", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::new(accessor) .await; Ok((r,)) @@ -302,7 +302,7 @@ const _: () = { (arg0,): (wasmtime::component::Resource,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::foo( accessor, arg0, @@ -317,7 +317,7 @@ const _: () = { "[static]world-resource.static-foo", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::static_foo( accessor, ) @@ -593,7 +593,7 @@ pub mod foo { wasmtime::component::ResourceType::host::(), move |caller: &wasmtime::component::Accessor, rep| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = 
&caller.with_getter(host_getter); HostBarWithStore::drop( accessor, wasmtime::component::Resource::new_own(rep), @@ -607,7 +607,7 @@ pub mod foo { wasmtime::component::ResourceType::host::(), move |caller: &wasmtime::component::Accessor, rep| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); HostFallibleWithStore::drop( accessor, wasmtime::component::Resource::new_own(rep), @@ -620,7 +620,7 @@ pub mod foo { "[constructor]bar", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::new(accessor).await; Ok((r,)) }) @@ -630,7 +630,7 @@ pub mod foo { "[static]bar.static-a", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::static_a(accessor).await; Ok((r,)) }) @@ -643,7 +643,7 @@ pub mod foo { (arg0,): (wasmtime::component::Resource,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::method_a(accessor, arg0) .await; Ok((r,)) @@ -657,7 +657,7 @@ pub mod foo { (arg0,): (wasmtime::component::Resource,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::bar_own_arg(accessor, arg0) .await; Ok(r) @@ -671,7 +671,7 @@ pub mod foo { (arg0,): (wasmtime::component::Resource,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::bar_borrow_arg(accessor, arg0) .await; Ok(r) @@ -682,7 +682,7 @@ pub mod 
foo { "bar-result", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::bar_result(accessor).await; Ok((r,)) }) @@ -695,7 +695,7 @@ pub mod foo { (arg0,): ((wasmtime::component::Resource, u32),)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::tuple_own_arg(accessor, arg0) .await; Ok(r) @@ -709,7 +709,7 @@ pub mod foo { (arg0,): ((wasmtime::component::Resource, u32),)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::tuple_borrow_arg( accessor, arg0, @@ -723,7 +723,7 @@ pub mod foo { "tuple-result", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::tuple_result(accessor).await; Ok((r,)) }) @@ -736,7 +736,7 @@ pub mod foo { (arg0,): (Option>,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::option_own_arg(accessor, arg0) .await; Ok(r) @@ -750,7 +750,7 @@ pub mod foo { (arg0,): (Option>,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::option_borrow_arg( accessor, arg0, @@ -764,7 +764,7 @@ pub mod foo { "option-result", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = 
::option_result(accessor).await; Ok((r,)) }) @@ -777,7 +777,7 @@ pub mod foo { (arg0,): (Result, ()>,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::result_own_arg(accessor, arg0) .await; Ok(r) @@ -791,7 +791,7 @@ pub mod foo { (arg0,): (Result, ()>,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::result_borrow_arg( accessor, arg0, @@ -805,7 +805,7 @@ pub mod foo { "result-result", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::result_result(accessor).await; Ok((r,)) }) @@ -824,7 +824,7 @@ pub mod foo { )| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::list_own_arg(accessor, arg0) .await; Ok(r) @@ -844,7 +844,7 @@ pub mod foo { )| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::list_borrow_arg(accessor, arg0) .await; Ok(r) @@ -855,7 +855,7 @@ pub mod foo { "list-result", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::list_result(accessor).await; Ok((r,)) }) @@ -868,7 +868,7 @@ pub mod foo { (arg0,): (NestedOwn,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::record_own_arg(accessor, arg0) .await; Ok(r) @@ -882,7 +882,7 @@ pub mod 
foo { (arg0,): (NestedBorrow,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::record_borrow_arg( accessor, arg0, @@ -896,7 +896,7 @@ pub mod foo { "record-result", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::record_result(accessor).await; Ok((r,)) }) @@ -909,7 +909,7 @@ pub mod foo { (arg0,): (SomeHandle,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::func_with_handle_typedef( accessor, arg0, @@ -923,7 +923,7 @@ pub mod foo { "[constructor]fallible", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::new(accessor).await; Ok((r,)) }) @@ -969,7 +969,7 @@ pub mod foo { wasmtime::component::ResourceType::host::(), move |caller: &wasmtime::component::Accessor, rep| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); HostAWithStore::drop( accessor, wasmtime::component::Resource::new_own(rep), @@ -1059,7 +1059,7 @@ pub mod foo { "foo", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::foo(accessor).await; Ok((r,)) }) @@ -1106,7 +1106,7 @@ pub mod foo { wasmtime::component::ResourceType::host::(), move |caller: &wasmtime::component::Accessor, rep| { wasmtime::component::__internal::Box::pin(async move { - let accessor = 
&caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); HostFooWithStore::drop( accessor, wasmtime::component::Resource::new_own(rep), diff --git a/crates/component-macro/tests/expanded/share-types_concurrent.rs b/crates/component-macro/tests/expanded/share-types_concurrent.rs index 2591cdd87c00..9348e23fa839 100644 --- a/crates/component-macro/tests/expanded/share-types_concurrent.rs +++ b/crates/component-macro/tests/expanded/share-types_concurrent.rs @@ -288,7 +288,7 @@ pub mod http_fetch { "fetch-request", move |caller: &wasmtime::component::Accessor, (arg0,): (Request,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::fetch_request(accessor, arg0).await; Ok((r,)) }) diff --git a/crates/component-macro/tests/expanded/simple-functions_concurrent.rs b/crates/component-macro/tests/expanded/simple-functions_concurrent.rs index 843d04ae8e33..f4af4e6153cb 100644 --- a/crates/component-macro/tests/expanded/simple-functions_concurrent.rs +++ b/crates/component-macro/tests/expanded/simple-functions_concurrent.rs @@ -235,7 +235,7 @@ pub mod foo { "f1", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::f1(accessor).await; Ok(r) }) @@ -245,7 +245,7 @@ pub mod foo { "f2", move |caller: &wasmtime::component::Accessor, (arg0,): (u32,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::f2(accessor, arg0).await; Ok(r) }) @@ -258,7 +258,7 @@ pub mod foo { (arg0, arg1): (u32, u32)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = 
::f3(accessor, arg0, arg1).await; Ok(r) }) @@ -268,7 +268,7 @@ pub mod foo { "f4", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::f4(accessor).await; Ok((r,)) }) @@ -278,7 +278,7 @@ pub mod foo { "f5", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::f5(accessor).await; Ok((r,)) }) @@ -291,7 +291,7 @@ pub mod foo { (arg0, arg1, arg2): (u32, u32, u32)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::f6(accessor, arg0, arg1, arg2) .await; Ok((r,)) diff --git a/crates/component-macro/tests/expanded/simple-lists_concurrent.rs b/crates/component-macro/tests/expanded/simple-lists_concurrent.rs index 2dac5ac632c6..6e1e555171a5 100644 --- a/crates/component-macro/tests/expanded/simple-lists_concurrent.rs +++ b/crates/component-macro/tests/expanded/simple-lists_concurrent.rs @@ -243,7 +243,7 @@ pub mod foo { (arg0,): (wasmtime::component::__internal::Vec,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::simple_list1(accessor, arg0) .await; Ok(r) @@ -254,7 +254,7 @@ pub mod foo { "simple-list2", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::simple_list2(accessor).await; Ok((r,)) }) @@ -273,7 +273,7 @@ pub mod foo { )| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let 
accessor = &caller.with_getter(host_getter); let r = ::simple_list3( accessor, arg0, @@ -297,7 +297,7 @@ pub mod foo { )| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::simple_list4(accessor, arg0) .await; Ok((r,)) diff --git a/crates/component-macro/tests/expanded/simple-wasi_concurrent.rs b/crates/component-macro/tests/expanded/simple-wasi_concurrent.rs index 3e2a1f612e11..2f8bc3dcaedd 100644 --- a/crates/component-macro/tests/expanded/simple-wasi_concurrent.rs +++ b/crates/component-macro/tests/expanded/simple-wasi_concurrent.rs @@ -273,7 +273,7 @@ pub mod foo { "create-directory-at", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::create_directory_at(accessor) .await; Ok((r,)) @@ -284,7 +284,7 @@ pub mod foo { "stat", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::stat(accessor).await; Ok((r,)) }) diff --git a/crates/component-macro/tests/expanded/small-anonymous_concurrent.rs b/crates/component-macro/tests/expanded/small-anonymous_concurrent.rs index 6fe5843c0bf1..f709f7da3803 100644 --- a/crates/component-macro/tests/expanded/small-anonymous_concurrent.rs +++ b/crates/component-macro/tests/expanded/small-anonymous_concurrent.rs @@ -262,7 +262,7 @@ pub mod foo { "option-test", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::option_test(accessor).await; Ok((r,)) }) diff --git 
a/crates/component-macro/tests/expanded/smoke_concurrent.rs b/crates/component-macro/tests/expanded/smoke_concurrent.rs index 425d7783cd9f..44b764d5c75b 100644 --- a/crates/component-macro/tests/expanded/smoke_concurrent.rs +++ b/crates/component-macro/tests/expanded/smoke_concurrent.rs @@ -201,7 +201,7 @@ pub mod imports { "y", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::y(accessor).await; Ok(r) }) diff --git a/crates/component-macro/tests/expanded/strings_concurrent.rs b/crates/component-macro/tests/expanded/strings_concurrent.rs index 8c47ecb73616..5de24d6ea275 100644 --- a/crates/component-macro/tests/expanded/strings_concurrent.rs +++ b/crates/component-macro/tests/expanded/strings_concurrent.rs @@ -230,7 +230,7 @@ pub mod foo { (arg0,): (wasmtime::component::__internal::String,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::a(accessor, arg0).await; Ok(r) }) @@ -240,7 +240,7 @@ pub mod foo { "b", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::b(accessor).await; Ok((r,)) }) @@ -259,7 +259,7 @@ pub mod foo { )| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::c(accessor, arg0, arg1).await; Ok((r,)) }) diff --git a/crates/component-macro/tests/expanded/unstable-features_concurrent.rs b/crates/component-macro/tests/expanded/unstable-features_concurrent.rs index a45fe341cd0a..3882ab7626c7 100644 --- a/crates/component-macro/tests/expanded/unstable-features_concurrent.rs +++ 
b/crates/component-macro/tests/expanded/unstable-features_concurrent.rs @@ -283,7 +283,7 @@ const _: () = { wasmtime::component::ResourceType::host::(), move |caller: &wasmtime::component::Accessor, rep| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); HostBazWithStore::drop( accessor, wasmtime::component::Resource::new_own(rep), @@ -299,7 +299,7 @@ const _: () = { "foo", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::foo(accessor) .await; Ok(r) @@ -316,7 +316,7 @@ const _: () = { (arg0,): (wasmtime::component::Resource,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::foo(accessor, arg0).await; Ok(r) }) @@ -434,7 +434,7 @@ pub mod foo { wasmtime::component::ResourceType::host::(), move |caller: &wasmtime::component::Accessor, rep| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); HostBarWithStore::drop( accessor, wasmtime::component::Resource::new_own(rep), @@ -449,7 +449,7 @@ pub mod foo { "foo", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::foo(accessor).await; Ok(r) }) @@ -464,7 +464,7 @@ pub mod foo { (arg0,): (wasmtime::component::Resource,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::foo(accessor, arg0).await; Ok(r) }) diff --git 
a/crates/component-macro/tests/expanded/unversioned-foo_concurrent.rs b/crates/component-macro/tests/expanded/unversioned-foo_concurrent.rs index 096e97ccac46..31fa0ffec0fe 100644 --- a/crates/component-macro/tests/expanded/unversioned-foo_concurrent.rs +++ b/crates/component-macro/tests/expanded/unversioned-foo_concurrent.rs @@ -231,7 +231,7 @@ pub mod foo { "g", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::g(accessor).await; Ok((r,)) }) diff --git a/crates/component-macro/tests/expanded/use-paths_concurrent.rs b/crates/component-macro/tests/expanded/use-paths_concurrent.rs index ef8f1f4cacef..80771ea7384b 100644 --- a/crates/component-macro/tests/expanded/use-paths_concurrent.rs +++ b/crates/component-macro/tests/expanded/use-paths_concurrent.rs @@ -225,7 +225,7 @@ pub mod foo { "a", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::a(accessor).await; Ok((r,)) }) @@ -264,7 +264,7 @@ pub mod foo { "a", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::a(accessor).await; Ok((r,)) }) @@ -303,7 +303,7 @@ pub mod foo { "a", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::a(accessor).await; Ok((r,)) }) @@ -344,7 +344,7 @@ pub mod d { "b", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = 
&caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::b(accessor).await; Ok((r,)) }) diff --git a/crates/component-macro/tests/expanded/variants_concurrent.rs b/crates/component-macro/tests/expanded/variants_concurrent.rs index a63406844f36..3ab97ed3a962 100644 --- a/crates/component-macro/tests/expanded/variants_concurrent.rs +++ b/crates/component-macro/tests/expanded/variants_concurrent.rs @@ -595,7 +595,7 @@ pub mod foo { "e1-arg", move |caller: &wasmtime::component::Accessor, (arg0,): (E1,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::e1_arg(accessor, arg0).await; Ok(r) }) @@ -605,7 +605,7 @@ pub mod foo { "e1-result", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::e1_result(accessor).await; Ok((r,)) }) @@ -615,7 +615,7 @@ pub mod foo { "v1-arg", move |caller: &wasmtime::component::Accessor, (arg0,): (V1,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::v1_arg(accessor, arg0).await; Ok(r) }) @@ -625,7 +625,7 @@ pub mod foo { "v1-result", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::v1_result(accessor).await; Ok((r,)) }) @@ -635,7 +635,7 @@ pub mod foo { "bool-arg", move |caller: &wasmtime::component::Accessor, (arg0,): (bool,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::bool_arg(accessor, arg0).await; Ok(r) }) @@ 
-645,7 +645,7 @@ pub mod foo { "bool-result", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::bool_result(accessor).await; Ok((r,)) }) @@ -672,7 +672,7 @@ pub mod foo { )| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::option_arg( accessor, arg0, @@ -691,7 +691,7 @@ pub mod foo { "option-result", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::option_result(accessor).await; Ok((r,)) }) @@ -711,7 +711,7 @@ pub mod foo { ): (Casts1, Casts2, Casts3, Casts4, Casts5, Casts6)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::casts( accessor, arg0, @@ -750,7 +750,7 @@ pub mod foo { )| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::result_arg( accessor, arg0, @@ -769,7 +769,7 @@ pub mod foo { "result-result", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::result_result(accessor).await; Ok((r,)) }) @@ -779,7 +779,7 @@ pub mod foo { "return-result-sugar", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::return_result_sugar(accessor) .await; 
Ok((r,)) @@ -790,7 +790,7 @@ pub mod foo { "return-result-sugar2", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::return_result_sugar2(accessor) .await; Ok((r,)) @@ -801,7 +801,7 @@ pub mod foo { "return-result-sugar3", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::return_result_sugar3(accessor) .await; Ok((r,)) @@ -812,7 +812,7 @@ pub mod foo { "return-result-sugar4", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::return_result_sugar4(accessor) .await; Ok((r,)) @@ -823,7 +823,7 @@ pub mod foo { "return-option-sugar", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::return_option_sugar(accessor) .await; Ok((r,)) @@ -834,7 +834,7 @@ pub mod foo { "return-option-sugar2", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::return_option_sugar2(accessor) .await; Ok((r,)) @@ -845,7 +845,7 @@ pub mod foo { "result-simple", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::result_simple(accessor).await; Ok((r,)) }) @@ -858,7 +858,7 @@ pub mod foo { (arg0,): 
(IsClone,)| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::is_clone_arg(accessor, arg0) .await; Ok(r) @@ -869,7 +869,7 @@ pub mod foo { "is-clone-return", move |caller: &wasmtime::component::Accessor, (): ()| { wasmtime::component::__internal::Box::pin(async move { - let accessor = &caller.with_data(host_getter); + let accessor = &caller.with_getter(host_getter); let r = ::is_clone_return(accessor) .await; Ok((r,))