diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 4f2b6e02..769d04ca 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -73,22 +73,6 @@ jobs: shared: true build-type: "Release" - - compiler: "mingw" - version: "*" - cxxstd: "20" - latest-cxxstd: "20" - cxx: "g++" - cc: "gcc" - runs-on: "windows-2022" - b2-toolset: "gcc" - generator: "MinGW Makefiles" - is-latest: true - is-earliest: true - name: "MinGW: C++20" - shared: false - build-type: "Release" - build-cmake: true - # macOS (2 configurations) - compiler: "apple-clang" diff --git a/doc/modules/ROOT/nav.adoc b/doc/modules/ROOT/nav.adoc index 553ff9b0..96d20d02 100644 --- a/doc/modules/ROOT/nav.adoc +++ b/doc/modules/ROOT/nav.adoc @@ -45,4 +45,13 @@ ** xref:examples/custom-dynamic-buffer.adoc[Custom Dynamic Buffer] ** xref:examples/echo-server-corosio.adoc[Echo Server with Corosio] ** xref:examples/stream-pipeline.adoc[Stream Pipeline] +* Design +** xref:design/ReadStream.adoc[ReadStream] +** xref:design/ReadSource.adoc[ReadSource] +** xref:design/BufferSource.adoc[BufferSource] +** xref:design/WriteStream.adoc[WriteStream] +** xref:design/WriteSink.adoc[WriteSink] +** xref:design/BufferSink.adoc[BufferSink] +** xref:design/RunApi.adoc[Run API] +** xref:design/TypeEraseAwaitable.adoc[Type-Erasing Awaitables] * xref:reference:boost/capy.adoc[Reference] diff --git a/doc/modules/ROOT/pages/design/AnyBufferSink.adoc b/doc/modules/ROOT/pages/design/AnyBufferSink.adoc new file mode 100644 index 00000000..35b402e0 --- /dev/null +++ b/doc/modules/ROOT/pages/design/AnyBufferSink.adoc @@ -0,0 +1,409 @@ += any_buffer_sink Design + +== Overview + +This document describes the design of `any_buffer_sink`, a type-erased +wrapper that satisfies both `BufferSink` and `WriteSink`. The central +design goal is to serve two fundamentally different data-production +patterns through a single runtime interface, with no performance +compromise for either. + +Data producers fall into two categories: + +- **Generators** produce data on demand. They do not hold the data + in advance; they compute or serialize it into memory that someone + else provides. An HTTP header serializer, a JSON encoder, and a + compression engine are generators. + +- **Buffered sources** already have data sitting in buffers. A + memory-mapped file, a ring buffer that received data from a socket, + and a pre-serialized response body are buffered sources. + +These two patterns require different buffer ownership models. +Generators need writable memory from the sink (the `BufferSink` +pattern). Buffered sources need to hand their existing buffers to +the sink (the `WriteSink` pattern). Forcing either pattern through +the other's interface introduces an unnecessary copy. + +`any_buffer_sink` exposes both interfaces. The caller chooses the +one that matches how its data is produced. The wrapper dispatches +to the underlying concrete sink through the optimal path, achieving +zero-copy when the concrete type supports it and falling back to a +synthesized path when it does not. + +== The Two Interfaces + +=== BufferSink: Callee-Owned Buffers + +The `BufferSink` interface (`prepare`, `commit`, `commit_eof`) is +designed for generators. The sink owns the memory. 
The generator +asks for writable space, fills it, and commits: + +[source,cpp] +---- +any_buffer_sink abs(concrete_sink{}); + +mutable_buffer arr[16]; +auto bufs = abs.prepare(arr); +// serialize directly into bufs +auto [ec] = co_await abs.commit(bytes_written); +---- + +The data lands in the sink's internal storage with no intermediate +copy. If the concrete sink is backed by a kernel page, a DMA +descriptor, or a ring buffer, the bytes go directly to their final +destination. + +=== WriteSink: Caller-Owned Buffers + +The `WriteSink` interface (`write_some`, `write`, `write_eof`) is +designed for buffered sources. The caller already has the data in +buffers and passes them to the sink: + +[source,cpp] +---- +any_buffer_sink abs(concrete_sink{}); + +// Data already in buffers -- pass them directly +auto [ec, n] = co_await abs.write(existing_buffers); + +// Or atomically write and signal EOF +auto [ec2, n2] = co_await abs.write_eof(final_buffers); +---- + +When the concrete sink natively supports `WriteSink`, the caller's +buffers propagate directly through the type-erased boundary. The +sink receives the original buffer descriptors pointing to the +caller's memory. No data is copied into an intermediate staging +area. + +== Dispatch Strategy + +The vtable records whether the wrapped concrete type satisfies +`WriteSink` in addition to `BufferSink`. This determination is made +at compile time when the vtable is constructed. At runtime, each +`WriteSink` operation checks a single nullable function pointer to +select its path. + +=== Native Forwarding (BufferSink + WriteSink) + +When the concrete type satisfies both concepts, the `WriteSink` +vtable slots are populated with functions that construct the +concrete type's own `write_some`, `write`, `write_eof(buffers)`, +and `write_eof()` awaitables in the cached storage. The caller's +buffer descriptors pass straight through: + +---- +caller buffers → vtable → concrete write(buffers) → I/O +---- + +No `prepare`, no `buffer_copy`, no `commit`. The concrete type +receives the caller's buffers and can submit them directly to the +operating system, the compression library, or the next pipeline +stage. + +This is the zero-copy path for buffered sources writing to a sink +that natively accepts caller-owned buffers. + +=== Synthesized Path (BufferSink Only) + +When the concrete type satisfies only `BufferSink`, the `WriteSink` +vtable slots are null. The wrapper synthesizes the `WriteSink` +operations from the `BufferSink` primitives: + +---- +caller buffers → prepare → buffer_copy → commit → I/O +---- + +For `write_some`: + +1. Call `prepare` to get writable space from the sink. +2. Copy data from the caller's buffers into the prepared space + with `buffer_copy`. +3. Call `commit` to finalize. + +For `write` and `write_eof`: the same loop, repeated until all +data is consumed. `write_eof` finishes with `commit_eof` to signal +end-of-stream. + +This path incurs one buffer copy, which is unavoidable: the +concrete sink only knows how to accept data through its own +`prepare`/`commit` protocol, so the caller's buffers must be copied +into the sink's internal storage. + +== Why This Matters + +=== No Compromise + +A naive design would pick one interface and synthesize the other +unconditionally. If the wrapper only exposed `BufferSink`, every +buffered source would pay a copy to move its data into the sink's +prepared buffers. 
If the wrapper only exposed `WriteSink`, every
generator would need to allocate its own intermediate buffer, fill
it, then hand it to the sink -- paying a copy that the `BufferSink`
path avoids.

`any_buffer_sink` avoids both penalties. Each data-production
pattern uses the interface designed for it. The only copy that
occurs is the one that is structurally unavoidable: when a
`WriteSink` operation targets a concrete type that only speaks
`BufferSink`.

=== True Zero-Copy for Buffered Sources

Consider an HTTP server where the response body is a memory-mapped
file. The file's pages are already in memory. Through the
`WriteSink` interface, those pages can propagate directly to the
underlying transport:

[source,cpp]
----
// body_source is a BufferSource backed by mmap pages
// response_sink wraps a concrete type satisfying both concepts

any_buffer_sink response_sink(&concrete);

const_buffer arr[16];
for(;;)
{
    auto [ec, bufs] = co_await body_source.pull(arr);
    if(ec == cond::eof)
    {
        auto [ec2] = co_await response_sink.write_eof();
        break;
    }
    if(ec)
        break;
    // bufs point directly into mmap pages
    // write() propagates them through the vtable to the concrete sink
    auto [ec2, n] = co_await response_sink.write(bufs);
    if(ec2)
        break;
    body_source.consume(n);
}
----

The mapped pages flow from `body_source.pull` through
`response_sink.write` to the concrete transport with no
intermediate copy. If the concrete sink can scatter-gather those
buffers into a `writev` system call, the data moves from the
page cache to the network card without touching user-space memory
a second time.

=== Generators Write In-Place

An HTTP header serializer generates bytes on the fly. It does not
hold the output in advance. Through the `BufferSink` interface, it
writes directly into whatever memory the concrete sink provides:

[source,cpp]
----
task<> serialize_headers(
    any_buffer_sink& sink,
    response const& resp)
{
    mutable_buffer arr[16];

    for(auto const& field : resp.fields())
    {
        auto bufs = sink.prepare(arr);
        // serialize field directly into bufs
        std::size_t n = format_field(bufs, field);
        auto [ec] = co_await sink.commit(n);
        if(ec)
            co_return;
    }
    // headers done; body follows through the same sink
}
----

The serializer never allocates a scratch buffer for the formatted
output. The bytes land directly in the sink's internal storage,
which might be a chunked-encoding buffer, a TLS record buffer, or
a circular buffer feeding a socket.

== Awaitable Caching

`any_buffer_sink` uses the split vtable pattern described in
xref:TypeEraseAwaitable.adoc[Type-Erasing Awaitables]. Multiple
async operations (`commit`, `commit_eof`, plus the four `WriteSink`
operations when the concrete type supports them) share a single
cached awaitable storage region.

The constructor computes the maximum size and alignment across all
awaitable types that the concrete type can produce and allocates
that storage once. All allocation therefore happens at construction
time, so memory usage is known at server startup rather than
growing piecemeal as requests arrive.
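As an illustration of that computation, here is a minimal sketch,
assuming the concrete type exposes `commit` and `commit_eof` as shown.
The real implementation also folds in the `WriteSink` awaitables when
the concrete type provides them; `cache_geometry` is a hypothetical
name, not the library's internals:

[source,cpp]
----
#include <algorithm>
#include <cstddef>
#include <utility>

// Hypothetical sketch: size and alignment of the shared awaitable
// cache, taken as the maximum over every awaitable type T can produce.
template<class T>
struct cache_geometry
{
    using commit_aw     = decltype(std::declval<T&>().commit(std::size_t{}));
    using commit_eof_aw = decltype(std::declval<T&>().commit_eof(std::size_t{}));

    static constexpr std::size_t size =
        std::max(sizeof(commit_aw), sizeof(commit_eof_aw));
    static constexpr std::size_t align =
        std::max(alignof(commit_aw), alignof(commit_eof_aw));
};
----

One storage block of `size` bytes aligned to `align` then serves every
operation, since at most one awaitable is active at a time.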
Two separate `awaitable_ops` structs are used:

- `awaitable_ops` for operations yielding `io_result<>`
  (`commit`, `commit_eof`, `write_eof()`)
- `write_awaitable_ops` for operations yielding
  `io_result<std::size_t>` (`write_some`, `write`,
  `write_eof(buffers)`)

Each `construct_*` function in the vtable creates the concrete
awaitable in the cached storage and returns a pointer to the
matching `static constexpr` ops table. The wrapper stores this
pointer as `active_ops_` or `active_write_ops_` and uses it for
`await_ready`, `await_suspend`, `await_resume`, and destruction.

== Ownership Modes

=== Owning

[source,cpp]
----
any_buffer_sink abs(my_concrete_sink{args...});
----

The wrapper allocates storage for the concrete sink and moves it
in. The wrapper owns the sink and destroys it in its destructor.
The awaitable cache is allocated separately.

If either allocation fails, the constructor cleans up via an
internal guard and propagates the exception.

=== Non-Owning (Reference)

[source,cpp]
----
my_concrete_sink sink;
any_buffer_sink abs(&sink);
----

The wrapper stores a pointer without allocating storage for the
sink. The concrete sink must outlive the wrapper. Only the
awaitable cache is allocated.

This mode is useful when the concrete sink is managed by a
higher-level object (e.g., an HTTP connection that owns the
transport) and the wrapper is a short-lived handle passed to a
body-production function.

== Relationship to any_buffer_source

`any_buffer_source` is the read-side counterpart, satisfying both
`BufferSource` and `ReadSource`. The same dual-interface principle
applies in mirror image:

[cols="1,1,1"]
|===
| Direction | Primary concept | Secondary concept

| Writing (any_buffer_sink)
| `BufferSink` (callee-owned)
| `WriteSink` (caller-owned)

| Reading (any_buffer_source)
| `BufferSource` (callee-owned)
| `ReadSource` (caller-owned)
|===

Both wrappers enable the same design philosophy: the caller
chooses the interface that matches its data-production or
data-consumption pattern, and the wrapper dispatches optimally.

== Alternatives Considered

=== WriteSink-Only Wrapper

A design where the type-erased wrapper satisfied only `WriteSink`
was considered. Generators would allocate their own scratch buffer,
serialize into it, and call `write`. This was rejected because:

- Every generator pays a buffer copy that the `BufferSink` path
  avoids. For high-throughput paths (HTTP header serialization,
  compression output), this copy is measurable.
- Generators must manage scratch buffer lifetime and sizing.
  The `prepare`/`commit` protocol pushes this responsibility to
  the sink, which knows its own buffer topology.
- The `commit_eof(n)` optimization (coalescing final data with
  stream termination) is lost. A generator calling `write` cannot
  signal that its last write is the final one without a separate
  `write_eof()` call, preventing the sink from combining them.

=== BufferSink-Only Wrapper

A design where the wrapper satisfied only `BufferSink` was
considered. Buffered sources would copy their data into the
sink's prepared buffers via `prepare` + `buffer_copy` + `commit`.
This was rejected because:

- Every buffered source pays a copy that native `WriteSink`
  forwarding avoids. When the source is a memory-mapped file and
  the sink is a socket, this eliminates the zero-copy path
  entirely.
+- The `buffer_copy` step becomes the bottleneck for large + transfers, dominating what would otherwise be a pure I/O + operation. +- Buffered sources that produce scatter-gather buffer sequences + (multiple non-contiguous regions) must copy each region + individually into prepared buffers, losing the ability to pass + the entire scatter-gather list to a `writev` system call. + +=== Separate Wrapper Types + +A design with two distinct wrappers (`any_buffer_sink` satisfying +only `BufferSink` and `any_write_sink` satisfying only `WriteSink`) +was considered. The caller would choose which wrapper to construct +based on its data-production pattern. This was rejected because: + +- The caller and the sink are often decoupled. An HTTP server + framework provides the sink; the user provides the body + producer. The framework cannot know at compile time whether the + user will call `prepare`/`commit` or `write`/`write_eof`. +- Requiring two wrapper types forces the framework to either + pick one (losing the other pattern) or expose both (complicating + the API). +- A single wrapper that satisfies both concepts lets the + framework hand one object to the body producer, which uses + whichever interface is natural. No choice is imposed on the + framework or the user. + +=== Always Synthesizing WriteSink + +A design where the `WriteSink` operations were always synthesized +from `prepare` + `buffer_copy` + `commit`, even when the concrete +type natively supports `WriteSink`, was considered. This would +simplify the vtable by removing the nullable write-forwarding +slots. This was rejected because: + +- The buffer copy is measurable. For a concrete type that can + accept caller-owned buffers directly (e.g., a socket wrapper + with `writev` support), the synthesized path adds a copy that + native forwarding avoids. +- The `write_eof(buffers)` atomicity guarantee is lost. The + synthesized path must decompose it into `prepare` + + `buffer_copy` + `commit_eof`, which the concrete type cannot + distinguish from a non-final commit followed by an empty + `commit_eof`. This prevents optimizations like coalescing the + last data chunk with a chunked-encoding terminator. + +== Summary + +`any_buffer_sink` satisfies both `BufferSink` and `WriteSink` +behind a single type-erased interface. The dual API lets each +data-production pattern use the interface designed for it: + +[cols="1,2,2"] +|=== +| Producer type | Interface | Data path + +| Generator (produces on demand) +| `prepare` / `commit` / `commit_eof` +| Writes directly into sink's internal storage. Zero copy. + +| Buffered source (data already in memory) +| `write_some` / `write` / `write_eof` +| Buffers propagate through the vtable. Zero copy when the concrete + type natively supports `WriteSink`. One copy (synthesized) when + it does not. +|=== + +The dispatch is determined at construction time through nullable +vtable slots. At runtime, a single pointer check selects the native +or synthesized path. Neither pattern pays for the other's +existence. diff --git a/doc/modules/ROOT/pages/design/BufferSink.adoc b/doc/modules/ROOT/pages/design/BufferSink.adoc new file mode 100644 index 00000000..9a5cfb42 --- /dev/null +++ b/doc/modules/ROOT/pages/design/BufferSink.adoc @@ -0,0 +1,540 @@ += BufferSink Concept Design + +== Overview + +This document describes the design of the `BufferSink` concept, the rationale +behind each member function, and the relationship between `BufferSink`, +`WriteSink`, and the `pull_from` algorithm. 
`BufferSink` models the
"callee owns buffers" pattern: the sink provides writable memory and the
caller writes directly into it, enabling zero-copy data transfer.

Where `WriteSink` requires the caller to supply buffer sequences containing
the data to be written, `BufferSink` inverts the ownership: the sink
exposes its internal storage and the caller fills it in place. The two
concepts are independent -- neither refines the other -- but the
type-erased wrapper `any_buffer_sink` satisfies both, bridging the two
patterns behind a single runtime interface.

== Concept Definition

[source,cpp]
----
template<class T>
concept BufferSink =
    requires(T& sink, std::span<mutable_buffer> dest, std::size_t n)
    {
        // Synchronous: get writable buffers from sink's internal storage
        { sink.prepare(dest) } -> std::same_as<std::span<mutable_buffer>>;

        // Async: commit n bytes written
        { sink.commit(n) } -> IoAwaitable;
        requires awaitable_decomposes_to<
            decltype(sink.commit(n)),
            std::error_code>;

        // Async: commit n final bytes and signal end of data
        { sink.commit_eof(n) } -> IoAwaitable;
        requires awaitable_decomposes_to<
            decltype(sink.commit_eof(n)),
            std::error_code>;
    };
----

`BufferSink` is a standalone concept. It does not refine `WriteSink` or
`WriteStream`. The two concept families model different ownership
patterns and can coexist on the same concrete type.

== Caller vs Callee Buffer Ownership

The library provides two concept families for writing data:

[cols="1,2,2"]
|===
| Aspect | WriteSink (caller owns) | BufferSink (callee owns)

| Buffer origin
| Caller allocates and fills buffers, then passes them to the sink.
| Sink exposes its internal storage; caller writes into it.

| Copy cost
| One copy: caller's buffer -> sink's internal storage (or I/O submission).
| Zero copies when the sink's internal storage is the final destination
  (e.g., a ring buffer, kernel page, or DMA region).

| API shape
| `write_some(buffers)`, `write(buffers)`, `write_eof(buffers)`
| `prepare(dest)`, `commit(n)`, `commit_eof(n)`

| Natural for
| Protocol serializers that produce data into their own buffers, then hand
  it off.
| Sinks backed by pre-allocated memory (ring buffers, memory-mapped files,
  hardware DMA descriptors).
|===

Both patterns are necessary. A compression sink, for example, naturally
owns the output buffer where compressed data lands; the caller feeds
uncompressed data and the compressor writes results directly into the
ring buffer. Conversely, an HTTP serializer naturally produces header
bytes into its own scratch space and then hands the buffer sequence to a
`WriteSink`.

== Member Functions

=== `prepare(dest)` -- Expose Writable Buffers

Fills the provided span with mutable buffer descriptors pointing to the
sink's internal storage. This operation is synchronous.

==== Signature

[source,cpp]
----
std::span<mutable_buffer> prepare(std::span<mutable_buffer> dest);
----

==== Semantics

- Returns a (possibly empty) subspan of `dest` populated with buffer
  descriptors. Each descriptor points to a writable region of the sink's
  internal storage.
- If the returned span is empty, the sink has no available space. The
  caller should call `commit` (possibly with `n == 0`) to flush
  buffered data and then retry `prepare`.
- The returned buffers remain valid until the next call to `prepare`,
  `commit`, `commit_eof`, or until the sink is destroyed.
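The empty-span case implies a small retry protocol. A minimal sketch,
assuming `sink` satisfies `BufferSink`:

[source,cpp]
----
mutable_buffer arr[16];
auto bufs = sink.prepare(arr);
while(bufs.empty())
{
    // No space available: flush pending data, then ask again.
    auto [ec] = co_await sink.commit(0);
    if(ec)
        co_return;
    bufs = sink.prepare(arr);
}
// bufs now points into the sink's internal storage.
----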
==== Why Synchronous

`prepare` is synchronous because it is a bookkeeping operation: the sink
returns pointers into memory it already owns. No I/O or blocking is
involved. Making `prepare` asynchronous would force a coroutine
suspension on every iteration of the write loop, adding overhead with no
benefit.

When the sink has no available space, the correct response is to
`commit` the pending data (which _is_ asynchronous, as it may trigger
I/O), then call `prepare` again. This keeps the synchronous fast path
free of unnecessary suspensions.

==== Why a Span Parameter

The caller provides the output span rather than the sink returning a
fixed-size container. This lets the caller control the stack allocation
and avoids heap allocation for the buffer descriptor array:

[source,cpp]
----
mutable_buffer arr[16];
auto bufs = sink.prepare(arr);
----

The sink fills as many descriptors as it can (up to `dest.size()`) and
returns the populated subspan.

=== `commit(n)` -- Finalize Written Data

Commits `n` bytes that the caller wrote into the buffers returned by
the most recent `prepare`. Returns `(error_code)`.

==== Semantics

- On success: `!ec`.
- On error: `ec`.
- May trigger underlying I/O (flush to socket, compression pass, etc.).
- After `commit`, the buffers returned by the prior `prepare` are
  invalidated. The caller must call `prepare` again before writing
  more data.

==== When to Use

- After writing data into prepared buffers and needing to continue
  the transfer.
- To flush when `prepare` returns an empty span (call `commit(0)`
  to drain the sink's internal buffer and free space).

=== `commit_eof(n)` -- Commit Final Data and Signal EOF

Commits `n` bytes written to the most recent `prepare` buffers and
signals end-of-stream. Returns `(error_code)`.

After a successful call, no further `prepare`, `commit`, or
`commit_eof` operations are permitted.

==== Semantics

- On success: `!ec`. The sink is finalized.
- On error: `ec`. The sink state is unspecified.

==== Why `commit_eof` Takes a Byte Count

Combining the final commit with the EOF signal in a single operation
enables the same optimizations that motivate `write_eof(buffers)` on
the `WriteSink` side:

- **HTTP chunked encoding**: `commit_eof(n)` can emit the data chunk
  followed by the terminal `0\r\n\r\n` in a single system call.
- **Compression (deflate)**: `commit_eof(n)` can pass `Z_FINISH` to the
  final `deflate()` call, producing the compressed data and the stream
  trailer together.
- **TLS close-notify**: `commit_eof(n)` can coalesce the final
  application data with the TLS close-notify alert.

A separate `commit(n)` followed by `commit_eof(0)` would prevent these
optimizations because the sink cannot know during `commit` that no more
data will follow.

== Relationship to `pull_from`

`pull_from` is a composed algorithm that transfers data from a
`ReadSource` (or `ReadStream`) into a `BufferSink`. It is the
callee-owns-buffers counterpart to `push_to`, which transfers from a
`BufferSource` to a `WriteSink`.

[source,cpp]
----
template<ReadSource Src, BufferSink Sink>
io_task<std::size_t>
pull_from(Src& source, Sink& sink);

template<ReadStream Src, BufferSink Sink>
io_task<std::size_t>
pull_from(Src& source, Sink& sink);
----

The algorithm loops (a sketch follows the list):

1. Call `sink.prepare(arr)` to get writable buffers.
2. Call `source.read(bufs)` (or `source.read_some(bufs)`) to fill them.
3. Call `sink.commit(n)` to finalize the data.
4. When the source signals EOF, call `sink.commit_eof(0)` to finalize
   the sink.
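A condensed sketch of the `ReadSource` overload, assuming `io_task`
results can be returned as an `{error, count}` pair. Error handling is
abbreviated; this is not the library's actual implementation:

[source,cpp]
----
template<ReadSource Src, BufferSink Sink>
io_task<std::size_t> pull_from_sketch(Src& source, Sink& sink)
{
    std::size_t total = 0;
    for(;;)
    {
        mutable_buffer arr[16];
        auto bufs = sink.prepare(arr);
        if(bufs.empty())
        {
            // Sink is full: flush to free space, then retry.
            auto [ec] = co_await sink.commit(0);
            if(ec)
                co_return {ec, total};
            continue;
        }
        auto [ec, n] = co_await source.read(bufs);
        if(ec == cond::eof)
        {
            // Coalesce any final bytes with the EOF signal.
            auto [ec2] = co_await sink.commit_eof(n);
            co_return {ec2, total + n};
        }
        if(ec)
            co_return {ec, total};
        auto [ec2] = co_await sink.commit(n);
        if(ec2)
            co_return {ec2, total};
        total += n;
    }
}
----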
`pull_from` is the right tool when the data source satisfies
`ReadSource` or `ReadStream` and the destination satisfies `BufferSink`.
It avoids the intermediate caller-owned buffer that a `WriteSink`-based
transfer would require.

The two `pull_from` overloads differ in how they read from the source:

[cols="1,2"]
|===
| Overload | Behavior

| `pull_from(ReadSource, BufferSink)`
| Uses `source.read(bufs)` for complete reads. Each iteration fills the
  prepared buffers entirely (or returns EOF/error).

| `pull_from(ReadStream, BufferSink)`
| Uses `source.read_some(bufs)` for partial reads. Each iteration
  commits whatever data was available, providing lower latency.
|===

== Relationship to `WriteSink`

`BufferSink` and `WriteSink` are independent concepts serving different
ownership models. A concrete type may satisfy one, the other, or both.

The type-erased wrapper `any_buffer_sink` satisfies both concepts. When
the wrapped type satisfies only `BufferSink`, the `WriteSink` operations
(`write_some`, `write`, `write_eof`) are synthesized from `prepare` and
`commit` with a `buffer_copy` step. When the wrapped type satisfies both
`BufferSink` and `WriteSink`, the native write operations are forwarded
directly through the virtual boundary with no extra copy.

This dual-concept bridge lets algorithms constrained on `WriteSink` work
with any `BufferSink` through `any_buffer_sink`, and lets algorithms
constrained on `BufferSink` work natively with the callee-owns-buffers
pattern.

=== Transfer Algorithm Matrix

[cols="1,1,1"]
|===
| Source | Sink | Algorithm

| `BufferSource`
| `WriteSink`
| `push_to` -- pulls from source, writes to sink

| `BufferSource`
| `WriteStream`
| `push_to` -- pulls from source, writes partial to stream

| `ReadSource`
| `BufferSink`
| `pull_from` -- prepares sink buffers, reads into them

| `ReadStream`
| `BufferSink`
| `pull_from` -- prepares sink buffers, reads partial into them
|===

== Use Cases

=== Zero-Copy Transfer

When the sink's internal storage is the final destination (a ring
buffer, a kernel page, a DMA region), the caller writes directly
into it with no intermediate copy.

[source,cpp]
----
template<BufferSink Sink>
task<> fill_sink(Sink& sink, std::string_view data)
{
    std::size_t written = 0;
    while(written < data.size())
    {
        mutable_buffer arr[16];
        auto bufs = sink.prepare(arr);
        if(bufs.empty())
        {
            auto [ec] = co_await sink.commit(0);
            if(ec)
                co_return;
            continue;
        }

        std::size_t n = buffer_copy(
            bufs,
            const_buffer(
                data.data() + written,
                data.size() - written));
        written += n;

        if(written == data.size())
        {
            auto [ec] = co_await sink.commit_eof(n);
            if(ec)
                co_return;
        }
        else
        {
            auto [ec] = co_await sink.commit(n);
            if(ec)
                co_return;
        }
    }
}
----

=== Transferring from a ReadSource

The `pull_from` algorithm reads data directly into the sink's buffers,
avoiding a caller-owned intermediate buffer entirely.
[source,cpp]
----
template<ReadSource Source, BufferSink Sink>
task<> transfer(Source& source, Sink& sink)
{
    auto [ec, total] = co_await pull_from(source, sink);
    if(ec)
        co_return;
    // total bytes transferred with zero intermediate copies
}
----

Compare with the `WriteSink` approach, which requires an intermediate
buffer:

[source,cpp]
----
template<ReadStream Source, WriteSink Sink>
task<> transfer(Source& source, Sink& sink)
{
    char buf[8192]; // intermediate buffer
    for(;;)
    {
        auto [ec, n] = co_await source.read_some(
            mutable_buffer(buf));
        if(ec == cond::eof)
        {
            auto [wec] = co_await sink.write_eof();
            co_return;
        }
        if(ec)
            co_return;
        auto [wec, nw] = co_await sink.write(
            const_buffer(buf, n));
        if(wec)
            co_return;
    }
}
----

The `BufferSink` path eliminates the `buf[8192]` intermediate buffer.

=== HTTP Response Body Sink

An HTTP response body can be consumed through a `BufferSink` interface.
The concrete implementation handles transfer encoding behind the
abstraction.

[source,cpp]
----
task<> receive_body(
    any_buffer_sink& body,
    ReadSource auto& source)
{
    auto [ec, n] = co_await pull_from(source, body);
    if(ec)
        co_return;
    // Body fully received and committed
}
----

The caller does not know whether the body uses content-length, chunked
encoding, or compression. The `BufferSink` interface handles the
difference.

=== Compression Pipeline

A compression sink owns an output ring buffer where compressed data
lands. The caller writes uncompressed data into prepared buffers, and
`commit` triggers a compression pass.

[source,cpp]
----
template<BufferSink Sink>
task<> compress_input(Sink& sink, std::span<char const> input)
{
    std::size_t pos = 0;
    while(pos < input.size())
    {
        mutable_buffer arr[16];
        auto bufs = sink.prepare(arr);
        if(bufs.empty())
        {
            auto [ec] = co_await sink.commit(0);
            if(ec)
                co_return;
            continue;
        }

        std::size_t n = buffer_copy(
            bufs,
            const_buffer(input.data() + pos,
                input.size() - pos));
        pos += n;

        auto [ec] = co_await sink.commit(n);
        if(ec)
            co_return;
    }
    auto [ec] = co_await sink.commit_eof(0);
    if(ec)
        co_return;
}
----

The `commit_eof(0)` call lets the compression sink pass `Z_FINISH` to
the final deflate call, flushing the compressed stream trailer.

=== Bridging to WriteSink via `any_buffer_sink`

When a function is constrained on `WriteSink` but the concrete type
satisfies only `BufferSink`, `any_buffer_sink` bridges the gap.

[source,cpp]
----
template<WriteSink Sink>
task<> send_message(Sink& sink, std::string_view msg);

// Concrete type satisfies BufferSink only
my_ring_buffer ring;
any_buffer_sink abs(ring);

// Works: any_buffer_sink satisfies WriteSink
co_await send_message(abs, "hello");
----

When the wrapped type also satisfies `WriteSink`, `any_buffer_sink`
forwards the native write operations directly, avoiding the synthesized
`prepare` + `buffer_copy` + `commit` path.

== Alternatives Considered

=== Combined Prepare-and-Commit

An alternative design combined the prepare and commit steps into a
single asynchronous operation: `write(dest) -> (error_code, span)`,
where the sink returns writable buffers and the commit happens on the
next call. This was rejected because:

- The synchronous `prepare` is a pure bookkeeping operation. Making it
  asynchronous forces a coroutine suspension on every iteration, even
  when the sink has space available.
- Separating `prepare` from `commit` lets the caller fill multiple
  prepared buffers before incurring the cost of an asynchronous commit.
+- The two-step protocol makes the buffer lifetime explicit: buffers + from `prepare` are valid until `commit` or `commit_eof`. + +=== `prepare` Returning a Count Instead of a Span + +An earlier design had `prepare` fill a raw pointer array and return a +count (`std::size_t prepare(mutable_buffer* arr, std::size_t max)`). +This was replaced by the span-based interface because: + +- `std::span` is self-describing: it carries both the + pointer and the size, eliminating a class of off-by-one errors. +- Returning a subspan of the input span is idiomatic C++ and composes + well with range-based code. +- The raw-pointer interface required two parameters (pointer + count) + where the span interface requires one. + +=== Separate `flush` Operation + +A design with an explicit `flush` method (distinct from `commit`) was +considered, where `commit` would only buffer data and `flush` would +trigger I/O. This was rejected because: + +- It adds a fourth operation to the concept without clear benefit. The + `commit` operation already serves both roles: it finalizes the + caller's data and may trigger I/O at the sink's discretion. +- A sink that wants to defer I/O can do so internally by accumulating + committed data and flushing when its buffer is full. The caller does + not need to know when physical I/O occurs. +- Adding `flush` would complicate the `pull_from` algorithm, which + would need to decide when to call `flush` versus `commit`. + +=== `BufferSink` Refining `WriteSink` + +A design where `BufferSink` refined `WriteSink` (requiring all types to +implement both interfaces) was considered. This was rejected because: + +- Many natural `BufferSink` types (ring buffers, DMA descriptors) have + no meaningful `write_some` primitive. Their data path is + prepare-then-commit, not write-from-caller-buffer. +- Requiring `write_some`, `write`, and `write_eof` on every + `BufferSink` would force implementations to synthesize these + operations even when they are never called. +- The `any_buffer_sink` wrapper provides the bridge when needed, + without burdening every concrete type. + +== Summary + +[cols="1,2,2"] +|=== +| Function | Contract | Use Case + +| `prepare(dest)` +| Synchronous. Fills span with writable buffer descriptors from the + sink's internal storage. Returns empty span if no space is available. +| Every write iteration: obtain writable memory before filling it. + +| `commit(n)` +| Async. Commits `n` bytes to the sink. May trigger I/O. +| Interior iterations of a transfer loop. + +| `commit_eof(n)` +| Async. Commits `n` bytes and signals end-of-stream. Finalizes the sink. +| Final iteration: deliver last data and close the stream. +|=== + +`BufferSink` is the callee-owns-buffers counterpart to `WriteSink`. The +`pull_from` algorithm transfers data from a `ReadSource` or +`ReadStream` into a `BufferSink`, and `any_buffer_sink` bridges the two +patterns by satisfying both `BufferSink` and `WriteSink` behind a +single type-erased interface. diff --git a/doc/modules/ROOT/pages/design/BufferSource.adoc b/doc/modules/ROOT/pages/design/BufferSource.adoc new file mode 100644 index 00000000..10958480 --- /dev/null +++ b/doc/modules/ROOT/pages/design/BufferSource.adoc @@ -0,0 +1,513 @@ += BufferSource Concept Design + +== Overview + +This document describes the design of the `BufferSource` concept, the +rationale behind each member function, and the relationship between +`BufferSource`, `ReadSource`, and the `push_to` algorithm. 
`BufferSource` models the "callee owns buffers" pattern on the read
side: the source exposes its internal storage as read-only buffers and
the caller consumes data directly from them, enabling zero-copy data
transfer.

Where `ReadSource` requires the caller to supply mutable buffers for
the source to fill, `BufferSource` inverts the ownership: the source
provides read-only views into its own memory and the caller reads from
them in place. The two concepts are independent -- neither refines the
other -- but the type-erased wrapper `any_buffer_source` satisfies
both, bridging the two patterns behind a single runtime interface.

== Concept Definition

[source,cpp]
----
template<class T>
concept BufferSource =
    requires(T& src, std::span<const_buffer> dest, std::size_t n)
    {
        { src.pull(dest) } -> IoAwaitable;
        requires awaitable_decomposes_to<
            decltype(src.pull(dest)),
            std::error_code, std::span<const_buffer>>;
        src.consume(n);
    };
----

`BufferSource` is a standalone concept. It does not refine `ReadSource`
or `ReadStream`. The two concept families model different ownership
patterns and can coexist on the same concrete type.

== Caller vs Callee Buffer Ownership

The library provides two concept families for reading data:

[cols="1,2,2"]
|===
| Aspect | ReadSource (caller owns) | BufferSource (callee owns)

| Buffer origin
| Caller allocates mutable buffers; source fills them.
| Source exposes its internal storage as read-only buffers; caller reads
  from them.

| Copy cost
| One copy: source's internal storage -> caller's buffer.
| Zero copies when the caller can process data in place (e.g., scanning,
  hashing, forwarding to a `write_some` call).

| API shape
| `read_some(buffers)`, `read(buffers)`
| `pull(dest)`, `consume(n)`

| Natural for
| Callers that need to accumulate data into their own buffer (e.g.,
  parsing a fixed-size header into a struct).
| Sources backed by pre-existing memory (ring buffers, memory-mapped
  files, decompression output buffers, kernel receive buffers).
|===

Both patterns are necessary. A memory-mapped file source naturally owns
the mapped region; the caller reads directly from the mapped pages
without copying. Conversely, an application that needs to fill a
fixed-size header struct naturally provides its own mutable buffer for
the source to fill.

== Member Functions

=== `pull(dest)` -- Expose Readable Buffers

Fills the provided span with const buffer descriptors pointing to the
source's internal storage. This operation is asynchronous because the
source may need to perform I/O to produce data (e.g., reading from a
socket, decompressing a block).

==== Signature

[source,cpp]
----
IoAwaitable auto pull(std::span<const_buffer> dest);
----

Returns `(error_code, std::span<const_buffer>)`.

==== Semantics

- **Data available**: `!ec` and `bufs.size() > 0`. The returned span
  contains buffer descriptors pointing to readable data in the source's
  internal storage.
- **Source exhausted**: `ec == cond::eof` and `bufs.empty()`. No more
  data is available; the transfer is complete.
- **Error**: `ec` is `true` and `ec != cond::eof`. An error occurred.

Calling `pull` multiple times without an intervening `consume` returns
the same unconsumed data. This idempotency lets the caller inspect the
data, decide how much to process, and then advance the position with
`consume`.

==== Why Asynchronous

Unlike `BufferSink::prepare`, which is synchronous, `pull` is
asynchronous.
The asymmetry exists because the two operations have +fundamentally different costs: + +- `prepare` returns pointers to _empty_ memory the sink already owns. + No data movement is involved; it is pure bookkeeping. +- `pull` may need to _produce_ data before it can return buffer + descriptors. A file source reads from disk. A decompression source + feeds compressed input to the decompressor. A network source waits + for data to arrive on a socket. These operations require I/O. + +Making `pull` synchronous would force the source to pre-buffer all data +before the caller can begin consuming it, defeating the streaming model. + +==== Why a Span Parameter + +The caller provides the output span rather than the source returning a +fixed-size container. This lets the caller control the stack allocation +and avoids heap allocation for the buffer descriptor array: + +[source,cpp] +---- +const_buffer arr[16]; +auto [ec, bufs] = co_await source.pull(arr); +---- + +The source fills as many descriptors as it can (up to `dest.size()`) +and returns the populated subspan. + +=== `consume(n)` -- Advance the Read Position + +Advances the source's internal read position by `n` bytes. The next +call to `pull` returns data starting after the consumed bytes. This +operation is synchronous. + +==== Signature + +[source,cpp] +---- +void consume(std::size_t n) noexcept; +---- + +==== Semantics + +- Advances the read position by `n` bytes. +- `n` must not exceed the total size of the buffers returned by the + most recent `pull`. +- After `consume`, the buffers returned by the prior `pull` are + invalidated. The caller must call `pull` again to obtain new buffer + descriptors. + +==== Why Synchronous + +`consume` is synchronous because it is pure bookkeeping: advancing an +offset or releasing a reference. No I/O is involved. The asynchronous +work (producing data, performing I/O) happens in `pull`. + +==== Why Separate from `pull` + +Separating `consume` from `pull` gives the caller explicit control over +how much data to process before advancing: + +[source,cpp] +---- +const_buffer arr[16]; +auto [ec, bufs] = co_await source.pull(arr); +if(!ec) +{ + // Process some of the data + auto n = process(bufs); + source.consume(n); + // Remaining data returned by next pull +} +---- + +This is essential for partial processing. A parser may examine the +pulled data, find that it contains an incomplete message, and consume +only the complete portion. The next `pull` returns the remainder +prepended to any newly available data. + +If `pull` automatically consumed all returned data, the caller would +need to buffer unconsumed bytes itself, defeating the zero-copy benefit. + +== The Pull/Consume Protocol + +The `pull` and `consume` functions form a two-phase read protocol: + +1. **Pull**: the source provides data (async, may involve I/O). +2. **Inspect**: the caller examines the returned buffers. +3. **Consume**: the caller indicates how many bytes were used (sync). +4. **Repeat**: the next `pull` returns data starting after the consumed + bytes. + +This protocol enables several patterns that a single-call interface +cannot: + +- **Partial consumption**: consume less than what was pulled. The + remainder is returned by the next `pull`. +- **Peek**: call `pull` to inspect data without consuming it. Call + `pull` again (without `consume`) to get the same data. 
- **Scatter writes**: pull once, write the returned buffers to multiple
  destinations (e.g., `write_some` to a socket), and consume only the
  bytes that were successfully written.

== Relationship to `push_to`

`push_to` is a composed algorithm that transfers data from a
`BufferSource` to a `WriteSink` (or `WriteStream`). It is the
callee-owns-buffers counterpart to `pull_from`, which transfers from a
`ReadSource` (or `ReadStream`) to a `BufferSink`.

[source,cpp]
----
template<BufferSource Src, WriteSink Sink>
io_task<std::size_t>
push_to(Src& source, Sink& sink);

template<BufferSource Src, WriteStream Stream>
io_task<std::size_t>
push_to(Src& source, Stream& stream);
----

The algorithm loops:

1. Call `source.pull(arr)` to get readable buffers.
2. Write the data to the sink via `sink.write(bufs)` or
   `stream.write_some(bufs)`.
3. Call `source.consume(n)` to advance past the written bytes.
4. When `pull` signals EOF, call `sink.write_eof()` to finalize
   the sink (WriteSink overload only).

The two `push_to` overloads differ in how they write to the destination:

[cols="1,2"]
|===
| Overload | Behavior

| `push_to(BufferSource, WriteSink)`
| Uses `sink.write(bufs)` for complete writes. Each iteration delivers
  all pulled data. On EOF, calls `sink.write_eof()` to finalize.

| `push_to(BufferSource, WriteStream)`
| Uses `stream.write_some(bufs)` for partial writes. Consumes only the
  bytes that were actually written, providing backpressure. Does not
  signal EOF (WriteStream has no EOF mechanism).
|===

`push_to` is the right tool when the data source satisfies
`BufferSource` and the destination satisfies `WriteSink` or
`WriteStream`. The source's internal buffers are passed directly to the
write call, avoiding any intermediate caller-owned buffer.

== Relationship to `ReadSource`

`BufferSource` and `ReadSource` are independent concepts serving
different ownership models. A concrete type may satisfy one, the other,
or both.

The type-erased wrapper `any_buffer_source` satisfies both concepts.
When the wrapped type satisfies only `BufferSource`, the `ReadSource`
operations (`read_some`, `read`) are synthesized from `pull` and
`consume` with a `buffer_copy` step: the wrapper pulls data from the
underlying source, copies it into the caller's mutable buffers, and
consumes the copied bytes. A sketch of this synthesized path follows
below.

When the wrapped type satisfies both `BufferSource` and `ReadSource`,
the native `read_some` and `read` implementations are forwarded
directly across the type-erased boundary, avoiding the extra copy.
This dispatch is determined at compile time when the vtable is
constructed; at runtime the wrapper checks a single nullable function
pointer to select the forwarding path.

This dual-concept bridge lets algorithms constrained on `ReadSource`
work with any `BufferSource` through `any_buffer_source`, and lets
algorithms constrained on `BufferSource` work natively with the
callee-owns-buffers pattern.
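A minimal sketch of that synthesized path, written against a plain
`BufferSource` rather than the wrapper's vtable, and assuming results
can be returned as an `{error, count}` pair. The name
`synthesized_read_some` is illustrative:

[source,cpp]
----
template<BufferSource Src, class Buffers>
io_task<std::size_t> synthesized_read_some(Src& src, Buffers dest)
{
    const_buffer arr[16];
    auto [ec, bufs] = co_await src.pull(arr);
    if(ec)
        co_return {ec, 0};                    // includes cond::eof
    std::size_t n = buffer_copy(dest, bufs);  // the one unavoidable copy
    src.consume(n);                           // release only what was copied
    co_return {{}, n};
}
----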
=== Transfer Algorithm Matrix

[cols="1,1,1"]
|===
| Source | Sink | Algorithm

| `BufferSource`
| `WriteSink`
| `push_to` -- pulls from source, writes to sink, signals EOF

| `BufferSource`
| `WriteStream`
| `push_to` -- pulls from source, writes partial to stream

| `ReadSource`
| `BufferSink`
| `pull_from` -- prepares sink buffers, reads into them

| `ReadStream`
| `BufferSink`
| `pull_from` -- prepares sink buffers, reads partial into them
|===

== Use Cases

=== Zero-Copy Transfer to a Socket

When the source's internal storage already contains the data to send,
`push_to` passes the source's buffers directly to the socket's
`write_some`, avoiding any intermediate copy.

[source,cpp]
----
template<BufferSource Source, WriteStream Stream>
task<> send_all(Source& source, Stream& socket)
{
    auto [ec, total] = co_await push_to(source, socket);
    if(ec)
        co_return;
    // total bytes sent directly from source's internal buffers
}
----

=== Memory-Mapped File Source

A memory-mapped file is a natural `BufferSource`. The `pull` operation
returns buffer descriptors pointing directly into the mapped region. No
data is copied until the consumer explicitly copies it.

[source,cpp]
----
template<BufferSource Source, WriteSink Sink>
task<> serve_static_file(Source& mmap_source, Sink& response)
{
    auto [ec, total] = co_await push_to(mmap_source, response);
    if(ec)
        co_return;
    // File served via zero-copy from mapped pages
}
----

=== Partial Consumption with a Parser

A protocol parser pulls data, parses as much as it can, and consumes
only the parsed portion. The next `pull` returns the unparsed remainder
plus any newly arrived data.

[source,cpp]
----
template<BufferSource Source>
task<message> parse_message(Source& source)
{
    const_buffer arr[16];
    message msg;

    for(;;)
    {
        auto [ec, bufs] = co_await source.pull(arr);
        if(ec)
            co_return msg;

        auto [parsed, complete] = msg.parse(bufs);
        source.consume(parsed);

        if(complete)
            co_return msg;
    }
}
----

The parser consumes only the bytes it understood. If a message spans
two `pull` calls, the unconsumed tail from the first call is returned
at the start of the second.

=== HTTP Request Body Source

An HTTP request body can be exposed through a `BufferSource` interface.
The concrete implementation handles transfer encoding (content-length,
chunked, compressed) behind the abstraction.

[source,cpp]
----
task<> handle_request(
    any_buffer_source& body,
    WriteSink auto& response)
{
    auto [ec, total] = co_await push_to(body, response);
    if(ec)
        co_return;
    // Request body forwarded to response sink
}
----

The caller does not know whether the body uses content-length, chunked
encoding, or compression. The `BufferSource` interface handles the
difference.

=== Bridging to ReadSource via `any_buffer_source`

When a function is constrained on `ReadSource` but the concrete type
satisfies only `BufferSource`, `any_buffer_source` bridges the gap.

[source,cpp]
----
template<ReadSource Source>
task<std::string> read_all(Source& source);

// Concrete type satisfies BufferSource only
my_ring_buffer ring;
any_buffer_source abs(ring);

// Works: any_buffer_source satisfies ReadSource
auto data = co_await read_all(abs);
----

The `read_some` and `read` methods pull data internally, copy it into
the caller's mutable buffers, and consume the copied bytes. This incurs
one buffer copy compared to using `pull` and `consume` directly.
+ +== Alternatives Considered + +=== Single `pull` That Auto-Consumes + +An earlier design had `pull` automatically consume all returned data, +eliminating the separate `consume` call. This was rejected because: + +- Partial consumption becomes impossible. A parser that finds an + incomplete message at the end of a pull would need to buffer the + remainder itself, negating the zero-copy benefit. +- Peek semantics (inspecting data without consuming it) require the + source to maintain a separate undo mechanism. +- The `WriteStream::write_some` pattern naturally consumes only `n` + bytes, so the remaining pulled data must survive for the next + `write_some` call. Without `consume`, the source would need to track + how much of its own returned data was actually used. + +=== `pull` Returning an Owned Container + +A design where `pull` returned a `std::vector` or similar +owned container was considered. This was rejected because: + +- Heap allocation on every pull is unacceptable for high-throughput + I/O paths. +- The span-based interface lets the caller control storage: a + stack-allocated array for the common case, or a heap-allocated array + for unusual situations. +- Returning a subspan of the caller's span is zero-overhead and + composes naturally with existing buffer algorithm interfaces. + +=== Synchronous `pull` + +Making `pull` synchronous (like `BufferSink::prepare`) was considered. +This was rejected because: + +- A source may need to perform I/O to produce data. A file source reads + from disk. A decompression source feeds compressed input to the + decompressor. A network source waits for data to arrive. +- Forcing synchronous `pull` would require the source to pre-buffer all + data before the caller starts consuming, breaking the streaming model + and inflating memory usage. +- The asymmetry with `prepare` is intentional: `prepare` returns + pointers to empty memory (no I/O needed), while `pull` returns + pointers to data that may need to be produced first. + +=== `BufferSource` Refining `ReadSource` + +A design where `BufferSource` refined `ReadSource` (requiring all types +to implement `read_some` and `read`) was considered. This was rejected +because: + +- Many natural `BufferSource` types (memory-mapped files, ring buffers, + DMA receive descriptors) have no meaningful `read_some` primitive. + Their data path is pull-then-consume, not read-into-caller-buffer. +- Requiring `read_some` and `read` on every `BufferSource` would force + implementations to synthesize these operations even when they are + never called. +- The `any_buffer_source` wrapper provides the bridge when needed, + without burdening every concrete type. + +=== Combined Pull-and-Consume + +A design with a single `read(dest) -> (error_code, span)` that both +pulled and advanced the position was considered. This is equivalent to +the auto-consume alternative above and was rejected for the same +reasons: it prevents partial consumption and peek semantics. + +== Summary + +[cols="1,2,2"] +|=== +| Function | Contract | Use Case + +| `pull(dest)` +| Async. Fills span with readable buffer descriptors from the source's + internal storage. Returns EOF when exhausted. +| Every read iteration: obtain data to process or forward. + +| `consume(n)` +| Sync. Advances the read position by `n` bytes. Invalidates prior + buffers. +| After processing or forwarding data: indicate how much was used. +|=== + +`BufferSource` is the callee-owns-buffers counterpart to `ReadSource`. 
The `push_to` algorithm transfers data from a `BufferSource` to a
`WriteSink` or `WriteStream`, and `any_buffer_source` bridges the two
patterns by satisfying both `BufferSource` and `ReadSource` behind a
single type-erased interface.
diff --git a/doc/modules/ROOT/pages/design/ReadSource.adoc b/doc/modules/ROOT/pages/design/ReadSource.adoc
new file mode 100644
index 00000000..3b5f1df1
--- /dev/null
+++ b/doc/modules/ROOT/pages/design/ReadSource.adoc
@@ -0,0 +1,463 @@
+= ReadSource Concept Design

== Overview

This document describes the design of the `ReadSource` concept: a
refinement of `ReadStream` that adds a complete-read primitive. It
explains how `ReadSource` relates to `ReadStream`, why the refinement
hierarchy mirrors the write side, and the use cases each serves.

== Definition

[source,cpp]
----
template<class T>
concept ReadSource =
    ReadStream<T> &&
    requires(T& source, mutable_buffer_archetype buffers)
    {
        { source.read(buffers) } -> IoAwaitable;
        requires awaitable_decomposes_to<
            decltype(source.read(buffers)),
            std::error_code, std::size_t>;
    };
----

`ReadSource` refines `ReadStream`. Every `ReadSource` is a
`ReadStream`. A `ReadSource` provides two operations:

=== `read_some(buffers)` -- Partial Read (inherited from `ReadStream`)

Reads one or more bytes from the source into the buffer sequence.
Returns `(error_code, std::size_t)` where `n` is the number of bytes
read. May return fewer bytes than the buffer can hold.

==== Semantics

- On success: `!ec`, `n >= 1` and `n \<= buffer_size(buffers)`.
- On EOF: `ec == cond::eof`, `n == 0`.
- On error: `ec`, `n == 0`.
- If `buffer_empty(buffers)`: completes immediately, `!ec`, `n == 0`.

Once `read_some` returns an error (including EOF), the caller must
not call `read_some` again. The stream is done. Not all
implementations can reproduce a prior error on subsequent calls, so
the behavior after an error is undefined.

=== `read(buffers)` -- Complete Read

Reads data into the buffer sequence. Either fills the entire buffer
or returns an error. Returns `(error_code, std::size_t)` where `n`
is the number of bytes read.

==== Semantics

- On success: `!ec`, `n == buffer_size(buffers)`. The buffer is
  completely filled.
- On EOF: `ec == cond::eof`, `n` is the number of bytes read before
  EOF was reached (may be less than `buffer_size(buffers)`).
- On error: `ec`, `n` is the number of bytes read before the error.
- If `buffer_empty(buffers)`: completes immediately, `!ec`, `n == 0`.

Successful partial reads are not permitted. Either the entire buffer
is filled, or the operation returns with an error. This is the
defining property of a complete-read primitive.

Once `read` returns an error (including EOF), the caller must not
call `read` or `read_some` again. The source is done. Not all
implementations can reproduce a prior error on subsequent calls, so
the behavior after an error is undefined.

When the buffer sequence contains multiple buffers, each buffer is
filled completely before proceeding to the next.

==== Buffer Lifetime

The caller must ensure that the memory referenced by `buffers` remains
valid until the `co_await` expression returns.
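In a coroutine this usually means the buffer lives in the coroutine
frame, as in this short illustration (`read_header` is our name, not a
library function):

[source,cpp]
----
task<> read_header(ReadSource auto& src)
{
    char hdr[16];   // owned by the coroutine frame, not a temporary scope
    auto [ec, n] = co_await src.read(
        mutable_buffer(hdr, sizeof(hdr)));
    // hdr remains valid across the suspension because the frame
    // outlives the co_await expression.
    if(ec)
        co_return;
    // ... parse hdr ...
}
----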
==== Conforming Signatures

[source,cpp]
----
template<class Buffers>
IoAwaitable auto read_some(Buffers buffers);

template<class Buffers>
IoAwaitable auto read(Buffers buffers);
----

== Concept Hierarchy

----
ReadStream { read_some }
      |
      v
ReadSource { read_some, read }
----

This mirrors the write side:

----
WriteStream { write_some }
      |
      v
WriteSink { write_some, write, write_eof(buffers), write_eof() }
----

Algorithms constrained on `ReadStream` accept both raw streams and
sources. Algorithms that need the complete-read guarantee constrain on
`ReadSource`.

== Why ReadSource Refines ReadStream

Every concrete `ReadSource` type has a natural `read_some`:

- **HTTP content-length body**: `read_some` returns
  `min(available_from_network, remaining_content_length)` bytes. It
  is the underlying stream's `read_some` capped by the body's limit.
- **HTTP chunked body**: `read_some` delivers whatever unchunked
  data is available from the current chunk.
- **Decompression source** (inflate, zstd): `read_some` does one
  decompression pass -- feeds available compressed input to the
  decompressor and returns whatever output is produced. This is
  how `zlib::inflate()` naturally works.
- **File source**: `read_some` is a single `read()` syscall. It is
  the OS primitive.
- **Memory source**: `read_some` returns `min(requested, remaining)`.

No concrete source type lacks a meaningful `read_some`. The claim that
"many sources can't meaningfully offer `read_some`" does not hold up
under scrutiny.

=== The Relay Argument

If `ReadSource` were disjoint from `ReadStream`, generic relay code
would need two separate implementations:

[source,cpp]
----
// One for ReadStream sources
template<ReadStream Src, WriteSink Dest>
task<> relay(Src& src, Dest& dest);

// A different one for ReadSource sources
template<ReadSource Src, WriteSink Dest>
task<> relay(Src& src, Dest& dest);
----

With the refinement relationship, one function handles both:

[source,cpp]
----
// Works for TCP sockets, HTTP bodies, decompressors, files
template<ReadStream Src, WriteSink Dest>
task<> relay(Src& src, Dest& dest);
----

This is the same argument that justified `WriteSink` refining
`WriteStream`.

=== The Latency Argument

With only `read` (complete read), a relay must wait for the entire
buffer to fill before forwarding any data:

[source,cpp]
----
// Must fill 64KB before sending -- high latency
auto [ec, n] = co_await src.read(mutable_buffer(buf, 65536));
co_await dest.write_some(const_buffer(buf, n));
----

With `read_some`, data is forwarded as it becomes available:

[source,cpp]
----
// Returns with 1KB if that's what's available -- low latency
auto [ec, n] = co_await src.read_some(mutable_buffer(buf, 65536));
co_await dest.write_some(const_buffer(buf, n));
----

For a decompressor backed by a slow network connection, `read_some`
lets you decompress and forward whatever is available instead of
blocking until the entire buffer is filled.
== Member Function Comparison

[cols="1,1"]
|===
| `read_some` | `read`

| Returns whatever is available (at least 1 byte)
| Fills the entire buffer or errors

| Low latency: forward data immediately
| Higher latency: waits for full buffer

| Caller loops for complete reads
| Source guarantees completeness

| Natural for relays and streaming
| Natural for fixed-size records and structured data
|===

== Composed Algorithms

=== `read(source, dynamic_buffer)` -- Read Until EOF

[source,cpp]
----
auto read(ReadSource auto& source,
    DynamicBufferParam auto&& buffers,
    std::size_t initial_amount = 2048)
        -> io_task<std::size_t>;
----

Reads from the source into a dynamic buffer until EOF. The buffer
grows with a 1.5x factor when filled. On success (EOF), `ec` is clear
and `n` is total bytes read.

This is the `ReadSource` equivalent of the `ReadStream` overload. Both
use the same `read` free function name, distinguished by concept
constraints.

== Use Cases

=== Reading an HTTP Body

An HTTP body with a known content length is a `ReadSource`. The caller
reads into a buffer, and the source ensures exactly the right number
of bytes are delivered.

[source,cpp]
----
template<ReadSource Source>
task<std::string> read_body(Source& body, std::size_t content_length)
{
    std::string result(content_length, '\0');
    auto [ec, n] = co_await body.read(
        mutable_buffer(result.data(), result.size()));
    if(ec)
    {
        result.resize(n);
        co_return result;
    }
    co_return result;
}
----

=== Reading into a Dynamic Buffer

When the body size is unknown (e.g., chunked encoding), read until
EOF using the dynamic buffer overload.

[source,cpp]
----
template<ReadSource Source>
task<std::string> read_chunked_body(Source& body)
{
    std::string result;
    auto [ec, n] = co_await read(
        body, string_dynamic_buffer(&result));
    if(ec)
        co_return {};
    co_return result;
}
----

=== Reading Fixed-Size Records from a Source

When a source produces structured records of known size, `read`
guarantees each record is completely filled.

[source,cpp]
----
struct record
{
    uint32_t id;
    char data[256];
};

template<ReadSource Source>
task<> process_records(Source& source)
{
    for(;;)
    {
        record rec;
        auto [ec, n] = co_await source.read(
            mutable_buffer(&rec, sizeof(rec)));
        if(ec == cond::eof)
            co_return;
        if(ec)
            co_return;

        handle_record(rec);
    }
}
----

=== Decompression with Low-Latency Relay

A decompression source wraps a `ReadStream` and produces decompressed
data. Using `read_some` (inherited from `ReadStream`), a relay can
forward decompressed data as it becomes available instead of waiting
for a full buffer.

[source,cpp]
----
template<ReadSource Source, WriteSink Sink>
task<> relay_decompressed(Source& inflater, Sink& dest)
{
    char buf[8192];
    for(;;)
    {
        // read_some: decompress whatever is available
        auto [ec, n] = co_await inflater.read_some(
            mutable_buffer(buf));
        if(ec == cond::eof)
        {
            auto [wec] = co_await dest.write_eof();
            co_return;
        }
        if(ec)
            co_return;

        auto [wec, nw] = co_await dest.write(
            const_buffer(buf, n));
        if(wec)
            co_return;
    }
}
----

=== Relaying from ReadSource to WriteSink

When connecting a source to a sink, `read_some` provides low-latency
forwarding. When the source signals EOF, the relay finalizes the sink
with `write_eof`.
+

[source,cpp]
----
template<ReadStream Src, WriteSink Sink>
task<> relay(Src& src, Sink& dest)
{
    char buf[8192];
    for(;;)
    {
        auto [ec, n] = co_await src.read_some(
            mutable_buffer(buf));
        if(ec == cond::eof)
        {
            auto [wec] = co_await dest.write_eof();
            co_return;
        }
        if(ec)
            co_return;

        auto [wec, nw] = co_await dest.write(
            const_buffer(buf, n));
        if(wec)
            co_return;
    }
}
----

Because `ReadSource` refines `ReadStream`, this relay accepts
`ReadSource` types (HTTP bodies, decompressors, files) as well as
raw `ReadStream` types (TCP sockets, TLS streams).

=== Type-Erased Source

The `any_read_source` wrapper type-erases a `ReadSource` behind a
virtual interface. This is useful when the concrete source type is
not known at compile time.

[source,cpp]
----
task<> handle_request(any_read_source& body)
{
    // Works for content-length, chunked,
    // compressed, or any other source type
    std::string data;
    auto [ec, n] = co_await read(
        body, string_dynamic_buffer(&data));
    if(ec)
        co_return;

    process_request(data);
}
----

== Conforming Types

Examples of types that satisfy `ReadSource`:

- **HTTP content-length body**: `read_some` returns available bytes
  capped by remaining length. `read` fills the buffer, enforcing the
  content length limit.
- **HTTP chunked body**: `read_some` delivers available unchunked
  data. `read` decodes chunk framing and fills the buffer.
- **Decompression source** (inflate, zstd): `read_some` does one
  decompression pass. `read` loops decompression until the buffer
  is filled.
- **File source**: `read_some` is a single `read()` syscall. `read`
  loops until the buffer is filled or EOF.
- **Memory source**: `read_some` returns available bytes. `read`
  fills the buffer from the memory region.

== Why `read_some` Returns No Data on EOF

The `read_some` contract (inherited from `ReadStream`) requires that
when `ec == cond::eof`, `n` is always 0. Data and EOF are delivered
in separate calls. See xref:ReadStream.adoc#_design_foundations_why_errors_exclude_data[ReadStream: Why Errors Exclude Data]
for the full rationale. The key points:

- The clean trichotomy (success/EOF/error, where data implies success)
  eliminates an entire class of bugs where callers accidentally drop
  the final bytes of a stream.
- Write-side atomicity (`write_eof(buffers)`) serves correctness for
  protocol framing. Read-side piggybacking would be a minor
  optimization with significant API cost.
- Every concrete source type naturally separates its last data
  delivery from its EOF indication.
- POSIX `read()` follows the same model.

This contract carries over to `ReadSource` unchanged. The `read`
member function (complete read) _does_ allow `n > 0` on EOF, because
it is a composed loop that accumulates data across multiple internal
`read_some` calls. When the underlying stream signals EOF
mid-accumulation, discarding the bytes already gathered would be
wrong. The caller needs `n` to know how much valid data landed in the
buffer.

== Summary

`ReadSource` refines `ReadStream` by adding `read` for complete-read
semantics. The refinement relationship enables:

- Generic algorithms constrained on `ReadStream` work on both raw
  streams and sources.
- `read_some` provides low-latency forwarding in relays.
- `read` provides the complete-fill guarantee for structured data.

[cols="1,2,2"]
|===
| Function | Contract | Use Case

| `ReadSource::read_some`
| Returns one or more bytes. May fill less than the buffer.
+
| Relays, low-latency forwarding, incremental processing.

| `ReadSource::read`
| Fills the entire buffer or returns an error with partial count.
| HTTP bodies, decompression, file I/O, structured records.

| `read` composed (on `ReadStream`)
| Loops `read_some` until the buffer is filled.
| Fixed-size headers, known-length messages over raw streams.

| `read` composed (on `ReadSource`)
| Loops `read` into a dynamic buffer until EOF.
| Slurping an entire body of unknown size.
|===
diff --git a/doc/modules/ROOT/pages/design/ReadStream.adoc b/doc/modules/ROOT/pages/design/ReadStream.adoc
new file mode 100644
index 00000000..38b8e9f6
--- /dev/null
+++ b/doc/modules/ROOT/pages/design/ReadStream.adoc
@@ -0,0 +1,553 @@
= ReadStream Concept Design

== Overview

This document describes the design of the `ReadStream` concept: the
fundamental partial-read primitive in the concept hierarchy. It explains
why `read_some` is the correct building block, how composed algorithms
build on top of it, and the relationship to `ReadSource`.

== Definition

[source,cpp]
----
template<class T>
concept ReadStream =
    requires(T& stream, mutable_buffer_archetype buffers)
    {
        { stream.read_some(buffers) } -> IoAwaitable;
        requires awaitable_decomposes_to<
            decltype(stream.read_some(buffers)),
            std::error_code, std::size_t>;
    };
----

A `ReadStream` provides a single operation:

=== `read_some(buffers)` -- Partial Read

Reads one or more bytes from the stream into the buffer sequence.
Returns `(error_code, std::size_t)` where `n` is the number of bytes
read.

==== Semantics

- On success: `!ec`, `n >= 1` and `n \<= buffer_size(buffers)`.
- On EOF: `ec == cond::eof`, `n == 0`.
- On error: `ec`, `n == 0`.
- If `buffer_empty(buffers)`: completes immediately, `!ec`, `n == 0`.

The caller must not assume the buffer is filled. `read_some` may
return fewer bytes than the buffer can hold. This is the defining
property of a partial-read primitive.

Once `read_some` returns an error (including EOF), the caller must
not call `read_some` again. The stream is done. Not all
implementations can reproduce a prior error on subsequent calls, so
the behavior after an error is undefined.

Buffers in the sequence are filled completely, in order, before the
operation proceeds to the next buffer.

==== Buffer Lifetime

The caller must ensure that the memory referenced by `buffers` remains
valid until the `co_await` expression returns.

==== Conforming Signatures

[source,cpp]
----
template<MutableBufferSequence Buffers>
IoAwaitable auto read_some(Buffers buffers);
----

Buffer sequences should be accepted by value when the member function
is a coroutine, to ensure the sequence lives in the coroutine frame
across suspension points.

== Concept Hierarchy

`ReadStream` is the base of the read-side hierarchy:

----
ReadStream { read_some }
    |
    v
ReadSource { read_some, read }
----

`ReadSource` refines `ReadStream`. Every `ReadSource` is a
`ReadStream`. Algorithms constrained on `ReadStream` accept both raw
streams and sources. The `ReadSource` concept adds a complete-read
primitive on top of the partial-read primitive.
+

This mirrors the write side:

----
WriteStream { write_some }
    |
    v
WriteSink { write_some, write, write_eof(buffers), write_eof() }
----

== Composed Algorithms

Three composed algorithms build on `read_some`:

=== `read(stream, buffers)` -- Fill a Buffer Sequence

[source,cpp]
----
auto read(ReadStream auto& stream,
          MutableBufferSequence auto const& buffers)
    -> io_task<std::size_t>;
----

Loops `read_some` until the entire buffer sequence is filled or an
error (including EOF) occurs. On success, `n == buffer_size(buffers)`.

[source,cpp]
----
template<ReadStream Stream>
task<> read_header(Stream& stream)
{
    char header[16];
    auto [ec, n] = co_await read(
        stream, mutable_buffer(header));
    if(ec == cond::eof)
        co_return; // clean shutdown
    if(ec)
        co_return;
    // header contains exactly 16 bytes
}
----

=== `read(stream, dynamic_buffer)` -- Read Until EOF

[source,cpp]
----
auto read(ReadStream auto& stream,
          DynamicBufferParam auto&& buffers,
          std::size_t initial_amount = 2048)
    -> io_task<std::size_t>;
----

Reads from the stream into a dynamic buffer until EOF is reached. The
buffer grows with a 1.5x factor when filled. On success (EOF), `ec`
is clear and `n` is the total bytes read.

[source,cpp]
----
template<ReadStream Stream>
task<std::string> slurp(Stream& stream)
{
    std::string body;
    auto [ec, n] = co_await read(
        stream, string_dynamic_buffer(&body));
    if(ec)
        co_return {};
    co_return body;
}
----

=== `read_until(stream, dynamic_buffer, match)` -- Delimited Read

Reads from the stream into a dynamic buffer until a delimiter or match
condition is found. Used for line-oriented protocols and message
framing.

[source,cpp]
----
template<ReadStream Stream>
task<> read_line(Stream& stream)
{
    std::string line;
    auto [ec, n] = co_await read_until(
        stream, string_dynamic_buffer(&line), "\r\n");
    if(ec)
        co_return;
    // line contains data up to and including "\r\n"
}
----

== Use Cases

=== Incremental Processing with `read_some`

When processing data as it arrives without waiting for a full buffer,
`read_some` is the right choice. This is common for real-time data or
when the processing can handle partial input.

[source,cpp]
----
template<ReadStream Stream>
task<> echo(Stream& stream, WriteStream auto& dest)
{
    char buf[4096];
    for(;;)
    {
        auto [ec, n] = co_await stream.read_some(
            mutable_buffer(buf));
        if(ec == cond::eof)
            co_return;
        if(ec)
            co_return;

        // Forward whatever we received immediately
        auto [wec, nw] = co_await dest.write_some(
            const_buffer(buf, n));
        if(wec)
            co_return;
    }
}
----

=== Relaying from ReadStream to WriteStream

When relaying data from a reader to a writer, `read_some` feeds
`write_some` directly. This is the fundamental streaming pattern.

[source,cpp]
----
template<ReadStream Src, WriteStream Dest>
task<> relay(Src& src, Dest& dest)
{
    char storage[65536];
    circular_dynamic_buffer cb(storage, sizeof(storage));

    for(;;)
    {
        // Read into free space
        auto mb = cb.prepare(cb.capacity());
        auto [rec, nr] = co_await src.read_some(mb);
        cb.commit(nr);

        if(rec && rec != cond::eof)
            co_return;

        // Drain to destination
        while(cb.size() > 0)
        {
            auto [wec, nw] = co_await dest.write_some(
                cb.data());
            if(wec)
                co_return;
            cb.consume(nw);
        }

        if(rec == cond::eof)
            co_return;
    }
}
----

Because `ReadSource` refines `ReadStream`, this relay function also
accepts `ReadSource` types. An HTTP body source or a decompressor
can be relayed to a `WriteStream` using the same function.
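
For example, one call site serves both kinds of reader. The concrete
types `tcp_socket` and `file_writer` below are hypothetical stand-ins
for illustration; `any_read_source` is the library's type-erased
source:

[source,cpp]
----
// Hypothetical concrete types, for illustration only
task<> demo(tcp_socket& sock, any_read_source& http_body,
    file_writer& out)
{
    co_await relay(sock, out);      // Src is a raw ReadStream
    co_await relay(http_body, out); // Src is a ReadSource, via refinement
}
----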
+ +== Relationship to the Write Side + +[cols="1,1"] +|=== +| Read Side | Write Side + +| `ReadStream::read_some` +| `WriteStream::write_some` + +| `read` free function (composed) +| `write_now` (composed, eager) + +| `read_until` (composed, delimited) +| No write-side equivalent + +| `ReadSource::read` +| `WriteSink::write` +|=== + +== Design Foundations: Why Errors Exclude Data + +The `read_some` contract requires that `n` is 0 whenever `ec` is set. +Data and errors are mutually exclusive outcomes. This is the most +consequential design decision in the `ReadStream` concept, with +implications for every consumer of `read_some` in the library. The +rule follows Asio's established `AsyncReadStream` contract, is +reinforced by the behavior of POSIX and Windows I/O system calls, +and produces cleaner consumer code. This section explains the design +and its consequences. + +=== Reconstructing Kohlhoff's Reasoning + +Christopher Kohlhoff's Asio library defines an `AsyncReadStream` +concept with the identical requirement: on error, `bytes_transferred` +is 0. No design rationale document accompanies this rule. The +reasoning presented here was reconstructed from three sources: + +- *The Asio source code.* The function `non_blocking_recv1` in + `socket_ops.ipp` explicitly sets `bytes_transferred = 0` on every + error path. The function `complete_iocp_recv` maps Windows IOCP + errors to portable error codes, relying on the operating system's + guarantee that failed completions report zero bytes. These are + deliberate choices, not accidental pass-through of OS behavior. +- *A documentation note Kohlhoff left.* Titled "Why EOF is an error," + it gives two reasons: composed operations need EOF-as-error to + report contract violations, and EOF-as-error disambiguates the + end of a stream from a successful zero-byte read. The note is + terse but the implications are deep. +- *Analysis of the underlying system calls.* POSIX `recv()` and + Windows `WSARecv()` both enforce a binary outcome per call: data + or error, never both. This is not because the C++ abstraction + copied the OS, but because both levels face the same fundamental + constraint. + +The following sections examine each of these points and their +consequences. + +=== Alignment with Asio + +Asio's `AsyncReadStream` concept has enforced the same rule for over +two decades: on error, `bytes_transferred` is 0. This is a deliberate +design choice, not an accident. The Asio source code explicitly zeroes +`bytes_transferred` on every error path, and the underlying system +calls (POSIX `recv()`, Windows IOCP) enforce binary outcomes at the +OS level. The `read_some` contract follows this established practice. + +=== The Empty-Buffer Rule + +Every `ReadStream` must support the following: + +[quote] +`read_some(empty_buffer)` completes immediately with `{success, 0}`. + +This is a no-op. The caller passed no buffer space, so no I/O is +attempted. The operation does not inspect the stream's internal state +because that would require a probe capability -- a way to ask "is +there data? is the stream at EOF?" -- without actually reading. Not +every source supports probing. A TCP socket does not know that its +peer has closed until it calls `recv()` and gets 0 back. A pipe does +not know it is broken until a read fails. The empty-buffer rule is +therefore unconditional: return `{success, 0}` regardless of the +stream's state. + +This rule is a natural consequence of the contract, not a proof of +it. 
When no I/O is attempted, no state is discovered and no error +is reported. + +=== Why EOF Is an Error + +Kohlhoff's documentation note gives two reasons for making EOF an +error code rather than a success: + +*Composed operations need EOF-as-error to report contract violations.* +The composed `read(stream, buffer(buf, 100))` promises to fill +exactly 100 bytes. If the stream ends after 50, the operation did not +fulfill its contract. Reporting `{success, 50}` would be misleading -- +it suggests the operation completed normally. Reporting `{eof, 50}` +tells the caller both what happened (50 bytes landed in the buffer) +and why the operation stopped (the stream ended). EOF-as-error is the +mechanism by which composed operations explain early termination. + +*EOF-as-error disambiguates the empty-buffer no-op from the end of a +stream.* Without EOF-as-error, both `read_some(empty_buffer)` on a +live stream and `read_some(non_empty_buffer)` on an exhausted stream +would produce `{success, 0}`. The caller could not distinguish "I +passed no buffer" from "the stream is done." Making EOF an error code +separates these two cases cleanly. + +These two reasons reinforce each other. Composed operations need EOF +to be an error code so they can report early termination. The +empty-buffer rule needs EOF to be an error code so `{success, 0}` +is unambiguously a no-op. Together with the rule that errors exclude +data, `read_some` results form a clean trichotomy: success with +data, or an error (including EOF) without data. + +=== The Write-Side Asymmetry + +On the write side, `WriteSink` provides `write_eof(buffers)` to +atomically combine the final data with the EOF signal. A natural +question follows: if the write side fuses data with EOF, why does the +read side forbid it? + +The answer is that the two sides of the I/O boundary have different +roles. The writer _decides_ when to signal EOF. The reader +_discovers_ it. This asymmetry has three consequences: + +*`write_eof` exists for correctness, not convenience.* Protocol +framings require the final data and the EOF marker to be emitted +together so the peer observes a complete message. HTTP chunked +encoding needs the terminal `0\r\n\r\n` coalesced with the final +data chunk. A TLS session needs the close-notify alert coalesced +with the final application data. A compressor needs `Z_FINISH` +applied to the final input. These are correctness requirements, not +optimizations. On the read side, whether the last bytes arrive with +EOF or on a separate call does not change what the reader observes. +The data and the order are identical either way. + +*`write_eof` is a separate function the caller explicitly invokes.* +`write_some` never signals EOF. The writer opts into data-plus-EOF +by calling a different function. The call site reads `write_eof(data)` +and the intent is unambiguous. If `read_some` could return data with +EOF, every call to `read_some` would _sometimes_ be a data-only +operation and _sometimes_ a data-plus-EOF operation. The stream +decides which mode the caller gets, at runtime. Every call site must +handle both possibilities. The burden falls on every consumer in the +codebase, not on a single call site that opted into the combined +behavior. + +*A hypothetical `read_eof` makes no sense.* On the write side, +`write_eof` exists because the producer signals the end of data. On +the read side, the consumer does not tell the stream to end -- it +discovers that the stream has ended. EOF flows from producer to +consumer, not the reverse. 
There is no action the reader can take to +"read the EOF." The reader discovers EOF as a side effect of +attempting to read. + +=== A Clean Trichotomy + +With the current contract, every `read_some` result falls into +exactly one of three mutually exclusive cases: + +- **Success**: `!ec`, `n >= 1` -- data arrived, process it. +- **EOF**: `ec == cond::eof`, `n == 0` -- stream ended, no data. +- **Error**: `ec`, `n == 0` -- failure, no data. + +Data is present if and only if the operation succeeded. This +invariant -- _data implies success_ -- eliminates an entire category +of reasoning from every read loop. The common pattern is: + +[source,cpp] +---- +auto [ec, n] = co_await stream.read_some(buf); +if(ec) + break; // EOF or error -- no data to handle +process(buf, n); // only reached on success, n >= 1 +---- + +If `read_some` could return `n > 0` with EOF, the loop becomes: + +[source,cpp] +---- +auto [ec, n] = co_await stream.read_some(buf); +if(n > 0) + process(buf, n); // must handle data even on EOF +if(ec) + break; +---- + +Every consumer pays this tax: an extra branch to handle data +accompanying EOF. The branch is easy to forget. Forgetting it +silently drops the final bytes of the stream -- a bug that only +manifests when the source delivers EOF with its last data rather than +on a separate call. A TCP socket receiving data in one packet and FIN +in another will not trigger the bug. A memory source that knows its +remaining length will. The non-determinism makes the bug difficult to +reproduce and diagnose. + +The clean trichotomy eliminates this class of bugs entirely. + +=== Conforming Sources + +Every concrete `ReadStream` implementation naturally separates its +last data delivery from its EOF signal: + +- **TCP sockets**: `read_some` maps to a single `recv()` or + `WSARecv()` call, returning whatever the kernel has buffered. The + kernel delivers bytes on one call and returns 0 on the next. The + separation is inherent in the POSIX and Windows APIs. +- **TLS streams**: `read_some` decrypts and returns one TLS record's + worth of application data. The close-notify alert arrives as a + separate record. +- **HTTP content-length body**: the source delivers bytes up to the + content-length limit. Once the limit is reached, the next + `read_some` returns EOF. +- **HTTP chunked body**: the unchunker delivers decoded data from + chunks. The terminal `0\r\n\r\n` is parsed on a separate pass that + returns EOF. +- **Compression (inflate)**: the decompressor delivers output bytes. + When `Z_STREAM_END` is detected, the next read returns EOF. +- **Memory source**: returns `min(requested, remaining)` bytes. When + `remaining` reaches 0, the next call returns EOF. +- **QUIC streams**: `read_some` returns data from received QUIC + frames. Stream FIN is delivered as EOF on a subsequent call. +- **Buffered read streams**: `read_some` returns data from an + internal buffer, refilling from the underlying stream when empty. + EOF propagates from the underlying stream. +- **Test mock streams**: `read_some` returns configurable data and + error sequences for testing. + +No source is forced into an unnatural pattern. The `read_some` call +that discovers EOF is the natural result of attempting to read from +an exhausted stream -- not a separate probing step. Once the caller +receives EOF, it stops reading. 
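
To make the separation concrete, here is the caller-observable call
sequence for a source with five bytes left (`src` stands for any
conforming stream; the annotations are illustrative):

[source,cpp]
----
char buf[64];

// The last five bytes arrive as an ordinary success
auto [ec1, n1] = co_await src.read_some(mutable_buffer(buf)); // !ec1, n1 == 5

// EOF is discovered on the next attempt, with no data
auto [ec2, n2] = co_await src.read_some(mutable_buffer(buf)); // ec2 == cond::eof, n2 == 0
----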
+ +=== Composed Operations and Partial Results + +The composed `read` algorithm (and `ReadSource::read`) _does_ report +`n > 0` on EOF, because it accumulates data across multiple internal +`read_some` calls. When the underlying stream signals EOF +mid-accumulation, discarding the bytes already gathered would be +wrong. The caller needs `n` to know how much valid data landed in the +buffer. + +The design separates concerns cleanly: the single-shot primitive +(`read_some`) delivers unambiguous results with a clean trichotomy. +Composed operations that accumulate state (`read`) report what they +accumulated, including partial results on EOF. Callers who need +partial-on-EOF semantics get them through the composed layer, while +the primitive layer remains clean. + +=== Evidence from the Asio Implementation + +The Asio source code confirms this design at every level. + +On POSIX platforms, `non_blocking_recv1` in `socket_ops.ipp` calls +`recv()` and branches on the result. If `recv()` returns a positive +value, the bytes are reported as a successful transfer. If `recv()` +returns 0 on a stream socket, EOF is reported. If `recv()` returns +-1, the function explicitly sets `bytes_transferred = 0` before +returning the error. The POSIX `recv()` system call itself enforces +binary outcomes: it returns `N > 0` on success, `0` on EOF, or `-1` +on error. A single call never returns both data and an error. + +On Windows, `complete_iocp_recv` processes the results from +`GetQueuedCompletionStatus`. It maps `ERROR_NETNAME_DELETED` to +`connection_reset` and `ERROR_PORT_UNREACHABLE` to +`connection_refused`. Windows IOCP similarly reports zero +`bytes_transferred` on failed completions. The operating system +enforces the same binary outcome per I/O completion. + +The one edge case is POSIX signal interruption (`EINTR`). If a signal +arrives after `recv()` has already copied some bytes, the kernel +returns the partial byte count as success rather than `-1`/`EINTR`. +Asio handles this transparently by retrying on `EINTR`, so the +caller never observes it. Even the kernel does not combine data with +an error -- it chooses to report the partial data as success. + +=== Convergent Design with POSIX + +POSIX `recv()` independently enforces the same rule: `N > 0` on +success, `-1` on error, `0` on EOF. The kernel never returns "here +are your last 5 bytes, and also EOF." It delivers the available bytes +on one call and returns 0 on the next. This is not because the C++ +abstraction copied POSIX semantics. It is because the kernel faces +the same fundamental constraint: state is discovered through the act +of I/O. The alignment between `read_some` and `recv()` is convergent +design, not leaky abstraction. + +== Summary + +`ReadStream` provides `read_some` as the single partial-read +primitive. This is deliberately minimal: + +- Algorithms that need to fill a buffer completely use the `read` + composed algorithm. +- Algorithms that need delimited reads use `read_until`. +- Algorithms that need to process data as it arrives use `read_some` + directly. +- `ReadSource` refines `ReadStream` by adding `read` for + complete-read semantics. + +The contract that errors exclude data follows Asio's established +`AsyncReadStream` contract, aligns with POSIX and Windows system +call semantics, and produces a clean trichotomy that makes every +read loop safe by construction. 
diff --git a/doc/modules/ROOT/pages/design/RunApi.adoc b/doc/modules/ROOT/pages/design/RunApi.adoc
new file mode 100644
index 00000000..d04fbc42
--- /dev/null
+++ b/doc/modules/ROOT/pages/design/RunApi.adoc
@@ -0,0 +1,452 @@
= Run API Design

== Overview

This document explains the naming conventions and call syntax of the
two launcher functions: `run_async` (fire-and-forget from non-coroutine
code) and `run` (awaitable within a coroutine). Both accept any type
satisfying _IoLaunchableTask_ -- not just `task` -- and use a
deliberate **two-phase invocation** pattern -- `f(context)(task)` -- that
exists for a mechanical reason rooted in coroutine frame allocation
timing.

== Usage

=== `run_async` -- Fire-and-Forget Launch

`run_async` launches any _IoLaunchableTask_ from non-coroutine code:
`main()`, callback handlers, event loops. `task` is the most common
conforming type, but any user-defined type satisfying the concept works.
The function does not return a value to the caller. Handlers receive the
task's result or exception after completion.

[source,cpp]
----
// Executor only (uses default recycling allocator)
run_async(ex)(my_task());

// With a stop token for cooperative cancellation
std::stop_source source;
run_async(ex, source.get_token())(cancellable_task());

// With a custom memory resource
run_async(ex, my_pool)(my_task());

// With a result handler
run_async(ex, [](int result) {
    std::cout << "Got: " << result << "\n";
})(compute_value());

// With separate success and error handlers
run_async(ex,
    [](int result) { std::cout << "Got: " << result << "\n"; },
    [](std::exception_ptr ep) { /* handle error */ }
)(compute_value());

// Full: executor, stop_token, allocator, success handler, error handler
run_async(ex, st, alloc, h1, h2)(my_task());
----

=== `run` -- Awaitable Launch Within a Coroutine

`run` is the coroutine-side counterpart. It binds any
_IoLaunchableTask_ to a (possibly different) executor and returns
the result to the caller via `co_await`. It also supports overloads
that customize stop token or allocator while inheriting the caller's
executor.

[source,cpp]
----
// Switch to a different executor for CPU-bound work
task<void> parent()
{
    int result = co_await run(worker_ex)(compute_on_worker());
    // Completion returns to parent's executor
}

// Customize stop token, inherit caller's executor
task<void> with_timeout()
{
    std::stop_source source;
    co_await run(source.get_token())(subtask());
}

// Customize allocator, inherit caller's executor
task<void> with_custom_alloc()
{
    co_await run(my_alloc)(subtask());
}

// Switch executor AND customize allocator
task<void> full_control()
{
    co_await run(worker_ex, my_alloc)(cpu_bound_task());
}
----

=== `run_async` on a Strand

A common pattern for launching per-connection coroutines on a
strand, ensuring serialized access to connection state:

[source,cpp]
----
void on_accept(tcp::socket sock)
{
    strand my_strand(ioc.get_executor());
    run_async(my_strand)(handle_connection(std::move(sock)));
}
----

== Alternatives Considered

Several alternative naming and syntax proposals were evaluated and
discarded. The following table shows each rejected form alongside the
chosen form.
+

=== Builder Pattern: `on` / `with` / `spawn` / `call`

[cols="1,1"]
|===
| Rejected | Chosen

| `capy::on(ex).spawn(t)`
| `run_async(ex)(t)`

| `co_await capy::on(ex).call(t)`
| `co_await run(ex)(t)`

| `co_await capy::with(st).call(t)`
| `co_await run(st)(t)`

| `co_await capy::with(alloc).call(t)`
| `co_await run(alloc)(t)`

| `capy::on(ex).block(t)`
| `test::run_blocking(ex)(t)`
|===

What this looks like in practice:

[source,cpp]
----
// Rejected: builder pattern
capy::on(ex).spawn(my_task());
co_await capy::on(worker_ex).call(compute());
co_await capy::with(my_alloc).call(subtask());

// Chosen: two-phase invocation
run_async(ex)(my_task());
co_await run(worker_ex)(compute());
co_await run(my_alloc)(subtask());
----

The builder pattern reads well as English, but it creates problems
in C++ practice. See <<why-not-builder>> below for the full analysis.

=== Single-Call with Named Method

[source,cpp]
----
// Rejected: single-call
run_async(ex, my_task());
----

This fails the allocator timing constraint entirely. The task
argument `my_task()` is evaluated _before_ `run_async` can set
the thread-local allocator. The coroutine frame is allocated with
the wrong (or no) allocator. This is not a style preference -- it
is a correctness bug.

=== Named Method on Wrapper

[source,cpp]
----
// Rejected: named method instead of operator()
run_async(ex).spawn(my_task());
co_await run(ex).call(compute());
----

This preserves the two-phase timing guarantee and avoids the
namespace collision problems of `on`/`with`. The objection is minor:
`.spawn()` and `.call()` add vocabulary without adding clarity. The
wrapper already has exactly one purpose -- accepting a task. A named
method implies the wrapper has a richer interface than it does.
`operator()` is the conventional C++ way to express "this object
does exactly one thing." That said, this alternative has legs and
could be revisited if the `()()` syntax proves too confusing in
practice.

== The Names

=== Why `run`

The `run` prefix was chosen for several reasons:

- **Greppability.** Searching for `run_async(` or `run(` in a
  codebase produces unambiguous results. Short, common English words
  like `on` or `with` collide with local variable names, parameter
  names, and other libraries. A `using namespace capy;` combined with
  a local variable named `on` produces silent shadowing bugs.

- **Verb clarity.** `run` tells you what happens: something executes.
  `run_async` tells you it executes without waiting. `run` inside a
  coroutine tells you control transfers and returns. Prepositions like
  `on` and `with` say nothing about the action -- they are sentence
  fragments waiting for a verb.

- **Discoverability.** The `run_*` family groups together in
  documentation, autocompletion, and alphabetical listings. Users
  searching for "how do I launch a task" find `run_async` and `run`
  as a coherent pair.

- **Consistency.** The naming follows the established pattern from
  `io_context::run()`, `std::jthread`, and other C++ APIs where
  `run` means "begin executing work."

- **No false promises.** A builder-pattern syntax like
  `on(ex).spawn(t)` implies composability -- `on(ex).with(alloc).call(t)` --
  that the API does not deliver. The `f(x)(t)` pattern is honest about
  being exactly two steps, no more. It does not invite users to chain
  methods that do not exist.
+ +[[why-not-builder]] +=== Why Not a Builder Pattern + +An alternative proposal suggested replacing the two-call syntax with a +builder-style API: + +[source,cpp] +---- +// Rejected builder pattern +capy::on(ex).spawn(my_task()); +co_await capy::on(ex).call(compute()); +co_await capy::with(st).call(subtask()); +co_await capy::with(alloc).call(subtask()); +capy::on(ex).block(my_task()); +---- + +While the English readability of `on(ex).spawn(t)` is genuinely +appealing, the approach has practical problems in a Boost library: + +- **Namespace pollution.** `on` and `with` are among the most + common English words in programming. In a Boost library used + alongside dozens of other namespaces, these names invite collisions. + Consider what happens with `using namespace capy;`: ++ +[source,cpp] +---- +int on = 42; // local variable +on(ex).spawn(my_task()); // ambiguous: variable or function? + +void handle(auto with) { // parameter name + with(alloc).call(sub()); // won't compile +} +---- ++ +The names `run` and `run_async` do not have this problem. No one +names their variables `run_async`. + +- **Semantic ambiguity.** `with(st)` versus `with(alloc)` -- with + _what_, exactly? The current API uses `run(st)` and `run(alloc)` + where overload resolution disambiguates naturally because the verb + `run` provides context. A bare preposition provides none. ++ +[source,cpp] +---- +// What does "with" mean here? Stop token or allocator? +co_await capy::with(x).call(subtask()); + +// "run" provides a verb -- the argument type disambiguates +co_await run(x)(subtask()); +---- + +- **Builder illusion.** Dot-chaining suggests composability that does + not exist. Users will naturally try: ++ +[source,cpp] +---- +// These look reasonable but don't work +capy::on(ex).with(alloc).call(my_task()); +capy::on(ex).with(st).with(alloc).spawn(my_task(), h1, h2); +---- ++ +The current syntax makes the interface boundary explicit: the first +call captures _all_ context, the second call accepts the task. There +is no dot-chain to extend. + +- **Erases the test boundary.** `run_blocking` lives in + `capy::test` deliberately -- it is a test utility, not a + production API. The proposed `on(ex).block(t)` places it alongside + `.spawn()` and `.call()` as if it were a first-class production + method. That is a promotion this API has not earned. + +- **Hidden critical ordering.** The two-phase invocation exists for + a mechanical reason (allocator timing, described below). With + `on(ex).spawn(t)`, the critical sequencing guarantee is buried + behind what looks like a casual method call. The `()()` syntax is + pedagogically valuable -- it signals that something important + happens in two distinct steps. + +- **Overload count does not shrink.** `run_async` has 18 overloads + for good reason (executor x stop_token x allocator x handlers). + The builder pattern still needs all those combinations -- they + just move from free function overloads to constructor or method + overloads. The complexity does not vanish; it relocates. + +== The Two-Phase Invocation + +=== The Problem: Allocator Timing + +Coroutine frame allocation happens _before_ the coroutine body +executes. When the compiler encounters a coroutine call, it: + +1. Calls `operator new` to allocate the frame +2. Constructs the promise object +3. Begins execution of the coroutine body + +Any mechanism that injects the allocator _after_ the call -- receiver +queries, `await_transform`, explicit method calls -- arrives too late. +The frame is already allocated. 
+ +This is the fundamental tension identified in D4003 §3.3: + +[quote] +____ +The allocator must be present at invocation. Coroutine frame +allocation has a fundamental timing constraint: `operator new` +executes before the coroutine body. When a coroutine is called, +the compiler allocates the frame first, then begins execution. +Any mechanism that injects context later -- receiver connection, +`await_transform`, explicit method calls -- arrives too late. +____ + +=== The Solution: C++17 Postfix Evaluation Order + +C++17 guarantees that in a postfix-expression call, the +postfix-expression is sequenced before the argument expressions: + +[quote] +____ +The postfix-expression is sequenced before each expression in +the expression-list and any default argument. -- [expr.call] +____ + +In the expression `run_async(ex)(my_task())`: + +1. `run_async(ex)` evaluates first. This returns a wrapper object + (`run_async_wrapper`) whose constructor sets `current_frame_allocator()` + -- a thread-local pointer to the memory resource. +2. `my_task()` evaluates second. The coroutine's `operator new` reads + the thread-local pointer and allocates the frame from it. +3. `operator()` on the wrapper takes ownership of the task and + dispatches it to the executor. + +[source,cpp] +---- +// Step 1: wrapper constructor sets TLS allocator +// v~~~~~~~~~~~~~~v + run_async(ex, alloc) (my_task()); +// ^~~~~~~~~^ +// Step 2: task frame allocated using TLS allocator +---- + +This sequencing is not an implementation detail -- it is the +_only correct way_ to inject an allocator into a coroutine's frame +allocation when the allocator is not known at compile time. + +=== How It Works in the Code + +The `run_async_wrapper` constructor sets the thread-local allocator: + +[source,cpp] +---- +run_async_wrapper(Ex ex, std::stop_token st, Handlers h, Alloc a) + : tr_(detail::make_trampoline( + std::move(ex), std::move(h), std::move(a))) + , st_(std::move(st)) +{ + // Set TLS before task argument is evaluated + current_frame_allocator() = tr_.h_.promise().get_resource(); +} +---- + +The task's `operator new` reads it: + +[source,cpp] +---- +static void* operator new(std::size_t size) +{ + auto* mr = current_frame_allocator(); + if(!mr) + mr = std::pmr::get_default_resource(); + return mr->allocate(size, alignof(std::max_align_t)); +} +---- + +The wrapper is `[[nodiscard]]` and its `operator()` is +rvalue-ref-qualified, preventing misuse: + +[source,cpp] +---- +// Correct: wrapper is a temporary, used immediately +run_async(ex)(my_task()); + +// Compile error: cannot call operator() on an lvalue +auto w = run_async(ex); +w(my_task()); // Error: requires rvalue +---- + +=== The `run` Variant + +The `run` function uses the same two-phase pattern inside coroutines. +An additional subtlety arises: the wrapper is a temporary that dies +before `co_await` suspends the caller. The wrapper's +`frame_memory_resource` would be destroyed before the child task +executes. + +The solution is to store a _copy_ of the allocator in the awaitable +returned by `operator()`. Since standard allocator copies are +equivalent -- memory allocated with one copy can be deallocated with +another -- this preserves correctness while keeping the allocator +alive for the task's duration. 
+

=== Comparison with `std::execution`

In `std::execution` (P2300), context flows _backward_ from receiver
to sender via queries _after_ `connect()`:

----
task<void> async_work();
auto sndr = async_work();               // Frame allocated NOW
auto op = connect(sndr, receiver);      // Allocator available NOW -- too late
start(op);
----

In the _IoAwaitable_ model, context flows _forward_ from launcher to
task:

----
1. Set TLS allocator  -->  2. Call task()
                           3. operator new (uses TLS)
                           4. await_suspend
----

The allocator is ready before the frame is created. No query
machinery can retroactively fix an allocation that already happened.

== Summary

[cols="1,2"]
|===
| `run_async(ctx)(task)` | Fire-and-forget launch from non-coroutine code
| `co_await run(ctx)(task)` | Awaitable launch within a coroutine
|===

The `run` name is greppable, unambiguous, and won't collide with
local variables in a namespace-heavy Boost codebase. The `f(ctx)(task)`
syntax exists because coroutine frame allocation requires the
allocator to be set _before_ the task expression is evaluated, and
C++17 postfix sequencing guarantees exactly that ordering. The syntax
is intentionally explicit about its two steps -- it tells the reader
that something important happens between them.
diff --git a/doc/modules/ROOT/pages/design/TypeEraseAwaitable.adoc b/doc/modules/ROOT/pages/design/TypeEraseAwaitable.adoc
new file mode 100644
index 00000000..326b46ba
--- /dev/null
+++ b/doc/modules/ROOT/pages/design/TypeEraseAwaitable.adoc
@@ -0,0 +1,202 @@
= Type-Erasing Awaitables

== Overview

The `any_*` wrappers type-erase stream and source concepts so that
algorithms can operate on heterogeneous concrete types through a
uniform interface. Each wrapper preallocates storage for the
type-erased awaitable at construction time, achieving zero
steady-state allocation.

Two vtable layouts are used depending on how many operations the
wrapper exposes.

== Single-Operation: Flat Vtable

When a wrapper exposes exactly one async operation (e.g.
`any_read_stream` with `read_some`, or `any_write_stream` with
`write_some`), all function pointers live in a single flat vtable:

[source,cpp]
----
// Flat vtable -- 64 bytes, one cache line
struct vtable
{
    void (*construct_awaitable)(...);   // 8
    bool (*await_ready)(void*);         // 8
    coro (*await_suspend)(void*, ...);  // 8
    io_result (*await_resume)(void*);   // 8
    void (*destroy_awaitable)(void*);   // 8
    size_t awaitable_size;              // 8
    size_t awaitable_align;             // 8
    void (*destroy)(void*);             // 8
};
----

The inner awaitable can be constructed in either `await_ready` or
`await_suspend`, depending on whether the outer awaitable has a
short-circuit path.
+

=== Construct in await_ready (any_read_stream)

When there is no outer short-circuit, constructing in `await_ready`
lets immediate completions skip `await_suspend` entirely:

[source,cpp]
----
bool await_ready() {
    vt_->construct_awaitable(stream_, storage_, buffers_);
    awaitable_active_ = true;
    return vt_->await_ready(storage_);  // true → no suspend
}

coro await_suspend(coro h, executor_ref ex, stop_token tok) {
    return vt_->await_suspend(storage_, h, ex, tok);
}

io_result await_resume() {
    auto r = vt_->await_resume(storage_);
    vt_->destroy_awaitable(storage_);
    awaitable_active_ = false;
    return r;
}
----

=== Construct in await_suspend (any_write_stream)

When the outer awaitable has a short-circuit (empty buffers),
construction is deferred to `await_suspend` so the inner awaitable
is never created on the fast path:

[source,cpp]
----
bool await_ready() const noexcept {
    return buffers_.empty();  // short-circuit, no construct
}

coro await_suspend(coro h, executor_ref ex, stop_token tok) {
    vt_->construct_awaitable(stream_, storage_, buffers_);
    awaitable_active_ = true;
    if(vt_->await_ready(storage_))
        return h;  // immediate → resume caller
    return vt_->await_suspend(storage_, h, ex, tok);
}

io_result await_resume() {
    if(!awaitable_active_)
        return {{}, 0};  // short-circuited
    auto r = vt_->await_resume(storage_);
    vt_->destroy_awaitable(storage_);
    awaitable_active_ = false;
    return r;
}
----

Both variants touch the same two cache lines on the hot path.

== Multi-Operation: Split Vtable with awaitable_ops

When a wrapper exposes multiple operations that produce different
awaitable types (e.g. `any_read_source` with `read_some` and
`read`, or `any_write_sink` with `write_some`, `write`,
`write_eof(buffers)`, and `write_eof()`), a split layout is
required. Each `construct` call returns a pointer to a
`static constexpr awaitable_ops` matching the awaitable it
created.

[source,cpp]
----
// Per-awaitable dispatch -- 32 bytes
struct awaitable_ops
{
    bool (*await_ready)(void*);
    coro (*await_suspend)(void*, ...);
    io_result (*await_resume)(void*);
    void (*destroy)(void*);
};

// Vtable -- 32 bytes
struct vtable
{
    awaitable_ops const* (*construct_awaitable)(...);
    size_t awaitable_size;
    size_t awaitable_align;
    void (*destroy)(void*);
};
----

The inner awaitable is constructed in `await_suspend`. Outer
`await_ready` handles short-circuits (e.g. empty buffers) before
the inner type is ever created:

[source,cpp]
----
bool await_ready() const noexcept {
    return buffers_.empty();  // short-circuit
}

coro await_suspend(coro h, executor_ref ex, stop_token tok) {
    active_ops_ = vt_->construct_awaitable(stream_, storage_, buffers_);
    if(active_ops_->await_ready(storage_))
        return h;  // immediate → resume caller
    return active_ops_->await_suspend(storage_, h, ex, tok);
}

io_result await_resume() {
    if(!active_ops_)
        return {{}, 0};  // short-circuited
    auto r = active_ops_->await_resume(storage_);
    active_ops_->destroy(storage_);
    active_ops_ = nullptr;
    return r;
}
----

== Cache Line Analysis

Immediate completion path -- inner `await_ready` returns true:

----
Flat (any_read_stream, any_write_stream): 2 cache lines
  LINE 1  object   stream_, vt_, cached_awaitable_, ...
  LINE 2  vtable   construct → await_ready → await_resume → destroy
          (contiguous, sequential access, prefetch-friendly)

Split (any_read_source, any_write_sink): 3 cache lines
  LINE 1  object         source_, vt_, cached_awaitable_, active_ops_, ...
  LINE 2  vtable         construct_awaitable
  LINE 3  awaitable_ops  await_ready → await_suspend → await_resume → destroy
          (separate .rodata address, defeats spatial prefetch)
----

The flat layout keeps all per-awaitable function pointers adjacent
to `construct_awaitable` in a single 64-byte structure. The split
layout places `vtable` and `awaitable_ops` at unrelated addresses
in `.rodata`, adding one cache miss on the hot path.

== When to Use Which

[cols="1,1"]
|===
| Flat vtable | Split vtable

| Wrapper has exactly one async operation
| Wrapper has multiple async operations

| `any_read_stream` (`read_some`)
| `any_read_source` (`read_some`, `read`)

| `any_write_stream` (`write_some`)
| `any_write_sink` (`write_some`, `write`, `write_eof(bufs)`, `write_eof()`)
|===

== Why the Flat Layout Cannot Scale

With multiple operations, each `construct` call produces a
different concrete awaitable type. The per-awaitable function
pointers (`await_ready`, `await_suspend`, `await_resume`,
`destroy`) must match the type that was constructed. The split
layout solves this by returning the correct `awaitable_ops const*`
from each `construct` call. The flat layout would require
duplicating all four function pointers in the vtable for every
operation -- workable for two operations, unwieldy for four.
diff --git a/doc/modules/ROOT/pages/design/WriteSink.adoc b/doc/modules/ROOT/pages/design/WriteSink.adoc
new file mode 100644
index 00000000..fa85c74c
--- /dev/null
+++ b/doc/modules/ROOT/pages/design/WriteSink.adoc
@@ -0,0 +1,466 @@
= WriteSink Concept Design

== Overview

This document describes the design of the `WriteSink` concept, the rationale
behind each member function, and the relationship between `WriteSink`,
`WriteStream`, and the `write_now` algorithm. The design was arrived at
through deliberation over several alternative approaches, each of which
is discussed here with its trade-offs.

== Concept Hierarchy

The write-side concept hierarchy consists of two concepts:

[source,cpp]
----
// Partial write primitive
template<class T>
concept WriteStream =
    requires(T& stream, const_buffer_archetype buffers)
    {
        { stream.write_some(buffers) } -> IoAwaitable;
        requires awaitable_decomposes_to<
            decltype(stream.write_some(buffers)),
            std::error_code, std::size_t>;
    };

// Complete write with EOF signaling
template<class T>
concept WriteSink =
    WriteStream<T> &&
    requires(T& sink, const_buffer_archetype buffers)
    {
        { sink.write(buffers) } -> IoAwaitable;
        requires awaitable_decomposes_to<
            decltype(sink.write(buffers)),
            std::error_code, std::size_t>;
        { sink.write_eof(buffers) } -> IoAwaitable;
        requires awaitable_decomposes_to<
            decltype(sink.write_eof(buffers)),
            std::error_code, std::size_t>;
        { sink.write_eof() } -> IoAwaitable;
        requires awaitable_decomposes_to<
            decltype(sink.write_eof()),
            std::error_code>;
    };
----

`WriteSink` refines `WriteStream`. Every `WriteSink` is a `WriteStream`.
Algorithms constrained on `WriteStream` accept both raw streams and sinks.

== Member Functions

=== `write_some(buffers)` -- Partial Write

Writes one or more bytes from the buffer sequence. May consume less than
the full sequence. Returns `(error_code, std::size_t)` where `n` is the
number of bytes written.
+ +This is the low-level primitive inherited from `WriteStream`. It is +appropriate when the caller manages its own consumption loop or when +forwarding data incrementally without needing a complete-write guarantee. + +==== Semantics + +- On success: `!ec`, `n >= 1`. +- On error: `ec`, `n == 0`. +- If `buffer_empty(buffers)`: completes immediately, `!ec`, `n == 0`. + +==== When to Use + +- Relay interiors: forwarding chunks of data as they arrive without waiting + for the entire payload to be consumed. +- Backpressure-aware pipelines: writing what the destination can accept + and returning control to the caller. +- Implementing `write` or `write_now` on top of the primitive. + +=== `write(buffers)` -- Complete Write + +Writes the entire buffer sequence. All bytes are consumed before the +operation completes. Returns `(error_code, std::size_t)` where `n` is +the number of bytes written. + +==== Semantics + +- On success: `!ec`, `n == buffer_size(buffers)`. +- On error: `ec`, `n` is the number of bytes written before + the error occurred. +- If `buffer_empty(buffers)`: completes immediately, `!ec`, `n == 0`. + +==== When to Use + +- Writing complete protocol messages or frames. +- Serializing structured data where each fragment must be fully delivered + before producing the next. +- Any context where partial delivery is not meaningful. + +==== Why `write` Belongs in the Concept + +For many concrete types, `write` is the natural primitive, not a loop +over `write_some`: + +- **File sinks**: the OS `write` call is the primitive. `write_some` + would simply delegate to `write`. +- **Buffered writers**: `write` is a `memcpy` into the circular buffer + (or drain-then-copy). It is not a loop over `write_some`. +- **Compression sinks** (deflate, zstd): `write` feeds data to the + compressor and flushes the output. The internal operation is a single + compression call, not iterated partial writes. + +Requiring `write` in the concept lets each type implement the operation +in the way that is natural and efficient for that type. + +=== `write_eof(buffers)` -- Atomic Final Write + +Writes the entire buffer sequence and then signals end-of-stream, as a +single atomic operation. Returns `(error_code, std::size_t)` where `n` +is the number of bytes written. + +After a successful call, no further writes or EOF signals are permitted. + +==== Semantics + +- On success: `!ec`, `n == buffer_size(buffers)`. The sink + is finalized. +- On error: `ec`, `n` is bytes written before the error. The + sink state is unspecified. + +==== Why Atomicity Matters + +Combining the final write with the EOF signal in a single operation +enables optimizations that two separate calls cannot: + +- **HTTP chunked encoding**: `write_eof(data)` can emit the data chunk + followed by the terminal `0\r\n\r\n` in a single system call. Calling + `write(data)` then `write_eof()` separately forces two calls and may + result in two TCP segments. +- **Compression (deflate)**: `write_eof(data)` can pass `Z_FINISH` to + the final `deflate()` call, producing the compressed data and the + stream trailer together. Separate `write` + `write_eof` would require + an extra flush. +- **TLS close-notify**: `write_eof(data)` can coalesce the final + application data with the TLS close-notify alert. + +This optimization cannot be achieved by splitting the operation into +`write(data)` followed by `write_eof()`. + +=== `write_eof()` -- Bare EOF Signal + +Signals end-of-stream without writing any data. Returns `(error_code)`. 
+

After a successful call, no further writes or EOF signals are permitted.

==== Semantics

- On success: `!ec`. The sink is finalized.
- On error: `ec`.

==== When to Use

When the final data has already been written via `write` or `write_some`
and only the EOF signal remains. This is less common than `write_eof(buffers)`
but necessary when the data and EOF are produced at different times.

== Relationship to `write_now`

`write_now` is a composed algorithm that operates on any `WriteStream`.
It loops `write_some` until the entire buffer sequence is consumed. It
has two properties that a plain `write_some` loop does not:

1. **Eager completion**: if every `write_some` call completes
   synchronously (returns `true` from `await_ready`), the entire
   `write_now` operation completes in `await_ready` with zero coroutine
   suspensions.
2. **Frame caching**: the internal coroutine frame is cached and reused
   across calls, eliminating repeated allocation.

`write_now` is the right tool for code constrained on `WriteStream`
alone (for example, writing to a raw TCP socket). Code constrained on
`WriteSink` should use `write` directly, because the concrete type's
`write` may be more efficient than looping `write_some`, and because
`write_now` cannot replicate the atomic `write_eof(buffers)` operation.

== Use Cases

=== Serializing Structured Data

When producing output fragment by fragment (e.g., JSON serialization),
each fragment must be fully consumed before the next is produced. The
final fragment signals EOF.

[source,cpp]
----
template<WriteSink Sink>
task<> serialize_json(Sink& sink, json::value const& jv)
{
    auto [ec1, n1] = co_await sink.write(make_buffer("{"));
    if(ec1)
        co_return;

    auto body = serialize_fields(jv);
    auto [ec2, n2] = co_await sink.write(make_buffer(body));
    if(ec2)
        co_return;

    auto [ec3, n3] = co_await sink.write_eof(make_buffer("}"));
    if(ec3)
        co_return;
}
----

Here `write` guarantees each fragment is fully delivered, and
`write_eof` atomically writes the closing brace and finalizes the sink.

=== Relaying a Streaming Body

When forwarding data from a source to a sink, the interior chunks use
`write_some` for incremental progress. Once the source is exhausted,
the relay signals end-of-stream with `write_eof()`.

[source,cpp]
----
template<ReadStream Source, WriteSink Sink>
task<> relay(Source& src, Sink& dest)
{
    char buf[8192];
    for(;;)
    {
        auto [ec, n] = co_await src.read_some(
            mutable_buffer(buf));
        if(ec == cond::eof)
        {
            // Signal EOF to the destination
            auto [ec2] = co_await dest.write_eof();
            co_return;
        }
        if(ec)
            co_return;

        // Interior: partial write is acceptable
        std::size_t written = 0;
        while(written < n)
        {
            auto [ec2, n2] = co_await dest.write_some(
                const_buffer(buf + written, n - written));
            if(ec2)
                co_return;
            written += n2;
        }
    }
}
----

The interior loop uses `write_some` because the relay does not need
complete-write guarantees for intermediate data. When `read_some`
returns EOF, `n` is 0 (per the `ReadStream` contract), so the relay
signals EOF via `write_eof()` with no data.

=== Writing Complete Messages

When sending discrete messages where each must be fully delivered, `write`
is the natural choice.
+

[source,cpp]
----
template<WriteSink Sink>
task<> send_messages(Sink& sink, std::span<std::string const> messages)
{
    for(auto const& msg : messages)
    {
        auto [ec, n] = co_await sink.write(make_buffer(msg));
        if(ec)
            co_return;
    }
    auto [ec] = co_await sink.write_eof();
    if(ec)
        co_return;
}
----

=== HTTP Response Body

An HTTP response handler writes the body through a type-erased sink.
The concrete implementation handles transfer encoding (content-length,
chunked, compressed) behind the `WriteSink` interface.

[source,cpp]
----
task<> send_response(any_write_sink& body, response const& resp)
{
    // Write headers portion of body
    auto headers = format_headers(resp);
    auto [ec1, n1] = co_await body.write(make_buffer(headers));
    if(ec1)
        co_return;

    // Write body with EOF
    auto [ec2, n2] = co_await body.write_eof(
        make_buffer(resp.body));
    if(ec2)
        co_return;
}
----

The caller does not know whether the body is content-length, chunked,
or compressed. The `WriteSink` interface handles the difference.

=== Compression Pipeline

A deflate sink wraps an underlying `WriteStream` and compresses data
on the fly. `write_eof` sets `Z_FINISH` on the final deflate call.

[source,cpp]
----
template<WriteSink Sink>
task<> compress_and_send(Sink& sink, std::string_view data)
{
    // Write compressed data
    auto [ec, n] = co_await sink.write_eof(make_buffer(data));
    if(ec)
        co_return;
    // sink.write_eof triggered Z_FINISH internally,
    // flushing the final compressed block and trailer
}
----

=== Buffered Writer

A buffered writer interposes a buffer between the caller and the
underlying stream. `write_some` appends to the buffer without draining.
`write` ensures all data is buffered (draining if necessary). `write_eof`
flushes the buffer and signals EOF to the underlying stream.

[source,cpp]
----
template<WriteSink Sink>
task<> buffered_output(Sink& sink)
{
    // Small writes accumulate in the buffer
    auto [ec1, n1] = co_await sink.write(make_buffer("key: "));
    if(ec1)
        co_return;

    auto [ec2, n2] = co_await sink.write(make_buffer("value\r\n"));
    if(ec2)
        co_return;

    // Final write flushes buffer + signals EOF
    auto [ec3, n3] = co_await sink.write_eof(
        make_buffer("end\r\n"));
    if(ec3)
        co_return;
}
----

=== Raw Stream with `write_now`

When only a `WriteStream` is available (no EOF signaling needed), the
`write_now` algorithm provides complete-write behavior with eager
completion and frame caching.

[source,cpp]
----
template<WriteStream Stream>
task<> send_data(Stream& stream)
{
    write_now wn(stream);

    auto [ec1, n1] = co_await wn(make_buffer("hello"));
    if(ec1)
        co_return;

    // Frame is cached; no allocation on second call
    auto [ec2, n2] = co_await wn(make_buffer("world"));
    if(ec2)
        co_return;
}
----

Because `WriteSink` refines `WriteStream`, `write_now` also works on
sinks. This can be useful when a function is generic over `WriteStream`
and does not need EOF signaling.

== Alternatives Considered

=== WriteSink with Only `write` and `write_eof`

The initial design had `WriteSink` require only `write(buffers)`,
`write(buffers, bool eof)`, and `write_eof()`, with no `write_some`.
This made `WriteSink` disjoint from `WriteStream`: a function
constrained on `WriteStream` (using `write_some`) could not accept a
`WriteSink`, and vice versa.

This was rejected because it prevents generic algorithms from working
across both streams and sinks. The refinement relationship
(`WriteSink` refines `WriteStream`) is strictly more useful.
+ +=== WriteSink with Only `write_some` and `write_eof` + +A minimal design was considered where `WriteSink` required only +`write_some` and `write_eof`, with callers using `write_now` for +complete-write behavior. This approach has three problems: + +1. **No atomic final write**: `write_now` over `write_some` followed by + `write_eof()` is two operations. This prevents concrete types from + coalescing the final data with the EOF signal (chunked encoding, + compression trailers, TLS close-notify). + +2. **`write` is the natural primitive for many types**: files, buffered + writers, and compression sinks implement `write` directly, not as a + loop over `write_some`. Forcing these types to express complete-write + semantics through a function called `write_some` is semantically + misleading. + +3. **Implementation burden on callers**: every caller that needs + complete-write behavior must construct a `write_now` object and + manage it, rather than calling `sink.write(buffers)` directly. + +=== `write(buffers, bool eof)` Instead of `write_eof(buffers)` + +An earlier version used `write(buffers, bool eof)` to combine data +writing with optional EOF signaling. This was replaced by +`write_eof(buffers)` because: + +- Boolean parameters are opaque at the call site. `write(data, true)` + does not convey intent as clearly as `write_eof(data)`. +- `write_eof` is self-documenting: the name states that EOF is signaled. +- No risk of accidentally passing the wrong boolean value. + +=== Three-Concept Hierarchy (`WriteStream` / `WriteCloser` / `WriteSink`) + +A three-level hierarchy was considered, with an intermediate concept +(`WriteCloser` or similar) requiring `write_some` + `write_eof` but +not `write`. This was rejected because the intermediate concept serves +no practical purpose: any concrete type that has `write_some` and +`write_eof` can and should provide `write`. There is no use case where +a type offers partial writes and EOF signaling but cannot offer complete +writes. + +== Summary + +[cols="1,2,2"] +|=== +| Function | Contract | Use Case + +| `write_some(buffers)` +| Writes one or more bytes. May consume less than the full sequence. +| Relay interiors, backpressure, implementing composed algorithms. + +| `write(buffers)` +| Writes the entire buffer sequence. +| Protocol messages, serialization, structured output. + +| `write_eof(buffers)` +| Writes the entire buffer sequence and signals EOF atomically. +| Final chunk of a relay, last fragment of serialized output. + +| `write_eof()` +| Signals EOF without data. +| When the final data was already written separately. +|=== + +`WriteSink` refines `WriteStream`. The `write_now` algorithm operates on +any `WriteStream` and provides complete-write behavior with eager +completion and frame caching, but it cannot replicate the atomic +`write_eof(buffers)` that `WriteSink` enables. diff --git a/doc/modules/ROOT/pages/design/WriteStream.adoc b/doc/modules/ROOT/pages/design/WriteStream.adoc new file mode 100644 index 00000000..0af74d75 --- /dev/null +++ b/doc/modules/ROOT/pages/design/WriteStream.adoc @@ -0,0 +1,355 @@ += WriteStream Concept Design + +== Overview + +This document describes the design of the `WriteStream` concept: the +fundamental partial-write primitive in the concept hierarchy. It explains +why `write_some` is the correct building block, how algorithms expressed +directly in terms of `write_some` can outperform composed complete-write +algorithms like `write_now`, and when each approach is appropriate. 
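Before the formal definition, the contract is easiest to see in a
minimal drain loop built on nothing but `write_some`. This is a
sketch; the `drain` helper is hypothetical:

[source,cpp]
----
// Complete-write behavior assembled by hand from the
// partial-write primitive: offer the unwritten tail
// until nothing remains.
template<WriteStream Stream>
task<> drain(Stream& out, char const* p, std::size_t size)
{
    std::size_t written = 0;
    while(written < size)
    {
        auto [ec, n] = co_await out.write_some(
            const_buffer(p + written, size - written));
        if(ec)
            co_return;  // on error, n == 0: nothing to account
        written += n;   // on success, n >= 1: guaranteed progress
    }
}
----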
== Definition

[source,cpp]
----
template<class T>
concept WriteStream =
    requires(T& stream, const_buffer_archetype buffers)
    {
        { stream.write_some(buffers) } -> IoAwaitable;
        requires awaitable_decomposes_to<
            decltype(stream.write_some(buffers)),
            std::error_code, std::size_t>;
    };
----

A `WriteStream` provides a single operation:

=== `write_some(buffers)` -- Partial Write

Writes one or more bytes from the buffer sequence. Returns
`(error_code, std::size_t)` where `n` is the number of bytes written.

==== Semantics

- On success: `!ec`, `n >= 1` and `n \<= buffer_size(buffers)`.
- On error: `ec`, `n == 0`.
- If `buffer_empty(buffers)`: completes immediately, `!ec`, `n == 0`.

The caller must not assume that all bytes are consumed. `write_some`
may write fewer bytes than offered. This is the defining property of a
partial-write primitive.

==== Buffer Lifetime

The caller must ensure that the memory referenced by `buffers` remains
valid until the `co_await` expression returns.

==== Conforming Signatures

[source,cpp]
----
template<ConstBufferSequence Buffers>
IoAwaitable auto write_some(Buffers buffers);
----

Buffer sequences should be accepted by value when the member function
is a coroutine, to ensure the sequence lives in the coroutine frame
across suspension points.

== Concept Hierarchy

`WriteStream` is the base of the write-side hierarchy:

----
WriteStream  { write_some }
     |
     v
WriteSink    { write_some, write, write_eof(buffers), write_eof() }
----

Every `WriteSink` is a `WriteStream`. Algorithms constrained on
`WriteStream` accept both raw streams and sinks. The `WriteSink`
concept adds complete-write and EOF signaling on top of the partial-write
primitive. See the WriteSink design document for details.

== Composed Algorithms

Two composed algorithms build complete-write behavior on top of
`write_some`:

=== `write` (free function)

[source,cpp]
----
auto write(WriteStream auto& stream,
    ConstBufferSequence auto const& buffers)
        -> io_task<std::size_t>;
----

Loops `write_some` until the entire buffer sequence is consumed. Always
suspends (returns a `task`). No frame caching.

=== `write_now` (class template)

[source,cpp]
----
template<WriteStream Stream>
class write_now
{
public:
    explicit write_now(Stream& s) noexcept;

    IoAwaitable auto operator()(ConstBufferSequence auto buffers);
};
----

Loops `write_some` until the entire buffer sequence is consumed, with
two advantages over the free function:

1. **Eager completion**: if every `write_some` returns synchronously
   (its `await_ready` returns `true`), the entire operation completes
   in `await_ready` with zero coroutine suspensions.
2. **Frame caching**: the internal coroutine frame is allocated once and
   reused across calls.

== Buffer Top-Up: Why `write_some` Can Outperform `write_now`

The critical design insight behind `write_some` as a primitive is that
the caller retains control after each partial write. This enables a
pattern called _buffer top-up_: after a partial write consumes some data,
the caller refills the buffer before the next write, keeping the buffer
as full as possible. This maximizes the payload of each system call.

A composed algorithm like `write_now` cannot do this. It receives a fixed
buffer sequence and drains it to completion. When the kernel accepts only
part of the data, `write_now` must send the remainder in a second call --
even though the remainder may be small.
The caller has no opportunity to
read more data from the source between iterations.

=== Diagram: Relaying 100KB from a ReadSource through a TCP Socket

Consider relaying 100KB from a `ReadSource` to a TCP socket. The kernel's
send buffer accepts at most 40KB per call. Compare two approaches:

==== Approach A: `write_some` with Top-Up (3 syscalls)

----
          buffer contents            syscall        kernel accepts
Step 1:   [======== 64KB ========]   write_some --> 40KB, read 36KB (source done)
Step 2:   [======= 60KB =======]     write_some --> 40KB
Step 3:   [== 20KB ==]               write_some --> 20KB
          done. 100KB in 3 syscalls, the first two calls full-sized.
----

==== Approach B: `write_now` Without Top-Up (4 syscalls)

----
          buffer contents            syscall        kernel accepts
Step 1:   [======== 64KB ========]   write_some --> 40KB  (write_now, read 64KB)
Step 2:   [=== 24KB ===]             write_some --> 24KB  (write_now, small payload)
Step 3:   [====== 36KB ======]       write_some --> 20KB  (write_now, read 36KB)
Step 4:   [== 16KB ==]               write_some --> 16KB  (write_now, small payload)
          done. 100KB in 4 syscalls, two calls undersized.
----

Every time `write_now` partially drains a buffer, the remainder is a
small payload that wastes a syscall. With top-up, the caller refills
the ring buffer between calls, keeping each syscall near capacity.

=== Code: `write_some` with Buffer Top-Up

This example reads from a `ReadSource` and writes to a `WriteStream`
using a `circular_dynamic_buffer`. After each partial write frees space
in the ring buffer, the caller reads more data from the source to refill
it before calling `write_some` again.

[source,cpp]
----
template<ReadSource Source, WriteStream Stream>
task<> relay_with_topup(Source& src, Stream& dest)
{
    char storage[65536];
    circular_dynamic_buffer cb(storage, sizeof(storage));

    for(;;)
    {
        // Fill: read from source into free space
        auto mb = cb.prepare(cb.capacity());
        auto [rec, nr] = co_await src.read(mb);
        cb.commit(nr);
        if(rec && rec != cond::eof && nr == 0)
            co_return;

        // Drain: write_some from the ring buffer
        while(cb.size() > 0)
        {
            auto [wec, nw] = co_await dest.write_some(
                cb.data());
            if(wec)
                co_return;

            // consume only what was written
            cb.consume(nw);

            // Top-up: refill freed space before next
            // write_some, so the next call presents
            // the largest possible payload
            if(cb.capacity() > 0 && rec != cond::eof)
            {
                auto mb2 = cb.prepare(cb.capacity());
                auto [rec2, nr2] = co_await src.read(mb2);
                cb.commit(nr2);
                rec = rec2;
            }
            // write_some now sees a full (or nearly full)
            // ring buffer, maximizing the syscall payload
        }

        if(rec == cond::eof)
            co_return;
    }
}
----

After `write_some` accepts 40KB of a 64KB buffer, `consume(40KB)` frees
40KB. The caller immediately reads more data from the source into that
freed space. The next `write_some` again presents a full 64KB payload.

=== Code: `write_now` Without Top-Up

This example reads from a `ReadSource` and writes to a `WriteStream`
using `write_now`. Each chunk is drained to completion before the caller
can read more from the source.

[source,cpp]
----
template<ReadSource Source, WriteStream Stream>
task<> relay_with_write_now(Source& src, Stream& dest)
{
    char buf[65536];
    write_now wn(dest);

    for(;;)
    {
        // Read a chunk from the source
        auto [rec, nr] = co_await src.read(
            mutable_buffer(buf, sizeof(buf)));
        if(rec == cond::eof && nr == 0)
            co_return;

        // write_now drains the chunk to completion.
        // If the kernel accepts 40KB of 64KB, write_now
        // internally calls write_some(24KB) for the
        // remainder -- a small write that wastes a
        // syscall. The caller cannot top up between
        // write_now's internal iterations.
        auto [wec, nw] = co_await wn(
            const_buffer(buf, nr));
        if(wec)
            co_return;

        if(rec == cond::eof)
            co_return;
    }
}
----

After the kernel accepts 40KB of a 64KB chunk, `write_now` must send
the remaining 24KB in a second `write_some`. The caller cannot intervene
to refill the buffer because `write_now` owns the loop. That 24KB write
wastes an opportunity to send a full 64KB payload.

== When to Use Each Approach

[cols="1,2,2"]
|===
| Approach | Best For | Trade-off

| `write_some` directly
| High-throughput relays, producer-consumer loops where the
  caller has more data available and can top up after partial writes.
| Caller manages the loop and buffer refill.

| `write_now`
| Writing discrete complete payloads (a single HTTP header, a
  serialized message) where there is no additional data to top up with,
  or where the write is expected to complete in one call.
| Cannot top up between iterations. Small remainders
  waste syscall payloads.

| `WriteSink::write`
| Sink-oriented code where the concrete type implements complete-write
  natively (buffered writer, file, compressor) and the caller does not
  manage the loop.
| Requires `WriteSink`, not just `WriteStream`.
|===

=== Rule of Thumb

- If the caller reads from a source and relays to a raw byte stream
  (TCP socket), use `write_some` with a `circular_dynamic_buffer`
  for buffer top-up.
- If the caller has a discrete, bounded payload and wants zero-fuss
  complete-write semantics, use `write_now`.
- If the destination is a `WriteSink`, use `write` directly.

== Conforming Types

Examples of types that satisfy `WriteStream`:

- **TCP sockets**: `write_some` maps to a single `send()` or
  `WSASend()` call. Partial writes are normal under load.
- **TLS streams**: `write_some` encrypts and sends one TLS record.
- **Buffered write streams**: `write_some` appends to an internal
  buffer and returns immediately when space is available, or drains
  to the underlying stream when full.
- **QUIC streams**: `write_some` sends one or more QUIC frames.
- **Test mock streams**: `write_some` records data and returns
  configurable results for testing.

All of these types also naturally extend to `WriteSink` by adding
`write`, `write_eof(buffers)`, and `write_eof()`.

== Relationship to `ReadStream`

The read-side counterpart is `ReadStream`, which requires `read_some`.
The same partial-transfer / composed-algorithm decomposition applies:

[cols="1,1"]
|===
| Write Side | Read Side

| `WriteStream::write_some`
| `ReadStream::read_some`

| `write_now` (composed)
| `read` free function (composed)

| `WriteSink::write`
| `ReadSource::read`
|===

The asymmetry is that the read side does not have a `read_now` with
eager completion, because reads depend on data arriving from the
network -- the synchronous fast path is less reliably useful than
for writes into a buffered stream.

== Summary

`WriteStream` provides `write_some` as the single partial-write
primitive. This is deliberately minimal:

- Algorithms that need complete-write semantics use `write_now` (for
  `WriteStream`) or `write` (for `WriteSink`).
- Algorithms that need maximum throughput use `write_some` directly
  with buffer top-up, achieving fewer syscalls than composed algorithms
  by keeping the buffer full between iterations.
- The concept is the base of the hierarchy. `WriteSink` refines it by
  adding `write`, `write_eof(buffers)`, and `write_eof()`.

The choice between `write_some`, `write_now`, and `WriteSink::write`
is a throughput-versus-convenience trade-off. `write_some` gives the
caller maximum control. `write_now` gives the caller maximum simplicity.
`WriteSink::write` gives the concrete type maximum implementation
freedom.
diff --git a/doc/modules/ROOT/pages/examples/stream-pipeline.adoc b/doc/modules/ROOT/pages/examples/stream-pipeline.adoc
index d95462c8..0853e9e0 100644
--- a/doc/modules/ROOT/pages/examples/stream-pipeline.adoc
+++ b/doc/modules/ROOT/pages/examples/stream-pipeline.adoc
@@ -68,7 +68,7 @@ public:
     }
 
     // BufferSource::pull - returns task<> to enable co_await on upstream
-    task<io_result<std::span<const_buffer>>>
+    io_task<std::span<const_buffer>>
     pull(std::span<const_buffer> dest)
     {
         // Already have unconsumed data?
@@ -155,7 +155,7 @@ public:
         }
     }
 
-    task<io_result<std::span<const_buffer>>>
+    io_task<std::span<const_buffer>>
     pull(std::span<const_buffer> dest)
     {
         if (consumed_ < buffer_.size())
@@ -332,7 +332,7 @@ Data flows through the pipeline:
 
 [source,cpp]
 ----
-task<io_result<std::span<const_buffer>>>
+io_task<std::span<const_buffer>>
 pull(std::span<const_buffer> dest)
 {
     // Pull from upstream
diff --git a/doc/modules/ROOT/pages/library/io-result.adoc b/doc/modules/ROOT/pages/library/io-result.adoc
index a35b4ff1..0f34ae83 100644
--- a/doc/modules/ROOT/pages/library/io-result.adoc
+++ b/doc/modules/ROOT/pages/library/io-result.adoc
@@ -174,7 +174,7 @@ Custom awaitables can return `io_result`:
 
 [source,cpp]
 ----
-task<io_result<std::size_t>> read_all(stream& s, buffer& buf)
+io_task<std::size_t> read_all(stream& s, buffer& buf)
 {
     std::size_t total = 0;
     while (buf.size() < buf.max_size())
@@ -259,7 +259,7 @@ task read_all(stream& s, dynamic_buffer& buf)
 
 [source,cpp]
 ----
-task<io_result<>> write_with_retry(stream& s, const_buffer data, int retries)
+io_task<> write_with_retry(stream& s, const_buffer data, int retries)
 {
     for (int i = 0; i < retries; ++i)
     {
diff --git a/doc/modules/ROOT/pages/streams/overview.adoc b/doc/modules/ROOT/pages/streams/overview.adoc
index a5f3f758..b1e2ca89 100644
--- a/doc/modules/ROOT/pages/streams/overview.adoc
+++ b/doc/modules/ROOT/pages/streams/overview.adoc
@@ -70,7 +70,7 @@ auto [ec, n] = co_await source.read(buffer);
 // n == buffer_size(buffer), or ec indicates why not
 
 // WriteSink: writes all data, with explicit EOF
-co_await sink.write(buffers, true);  // true = EOF after this
+co_await sink.write_eof(buffers);    // atomic write + EOF signal
 ----
 
 These are higher-level abstractions built on streams.
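The contrast is easiest to see side by side. A short sketch, assuming
a `ReadStream` named `stream` and a `ReadSource` named `source` as in
the snippets above:

[source,cpp]
----
char buf[1024];

// ReadStream: on success n1 may be anywhere in [1, 1024]
auto [ec1, n1] = co_await stream.read_some(mutable_buffer(buf));

// ReadSource: on success n2 == 1024 exactly; a short count
// can only accompany cond::eof or an error
auto [ec2, n2] = co_await source.read(mutable_buffer(buf));
----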
@@ -167,11 +167,11 @@ task<> echo(any_stream& stream)
     for (;;)
     {
         auto [ec, n] = co_await stream.read_some(mutable_buffer(buf));
-        if (ec.failed())
+        if (ec)
             co_return;
 
         auto [wec, wn] = co_await write(stream, const_buffer(buf, n));
-        if (wec.failed())
+        if (wec)
             co_return;
     }
 }
diff --git a/doc/modules/ROOT/pages/streams/sources-sinks.adoc b/doc/modules/ROOT/pages/streams/sources-sinks.adoc
index 6b8a9f0a..0a325b9b 100644
--- a/doc/modules/ROOT/pages/streams/sources-sinks.adoc
+++ b/doc/modules/ROOT/pages/streams/sources-sinks.adoc
@@ -15,6 +15,7 @@ A `ReadSource` provides complete read operations that fill buffers entirely or s
 ----
 template<class T>
 concept ReadSource =
+    ReadStream<T> &&
     requires(T& source, mutable_buffer_archetype buffers) {
         { source.read(buffers) } -> IoAwaitable;
     };
@@ -24,15 +25,15 @@ concept ReadSource =
 
 [source,cpp]
 ----
-template<MutableBufferSequence MB>
-IoAwaitable auto read(MB const& buffers);
+template<MutableBufferSequence Buffers>
+IoAwaitable auto read(Buffers buffers);
 ----
 
 Returns an awaitable yielding `(error_code, std::size_t)`:
 
-* On success: `!ec.failed()`, and `n == buffer_size(buffers)` (buffer completely filled)
+* On success: `!ec`, and `n == buffer_size(buffers)` (buffer completely filled)
 * On EOF: `ec == cond::eof`, and `n` is bytes read before EOF (partial read)
-* On error: `ec.failed()`, and `n` is bytes read before error
+* On error: `ec`, and `n` is bytes read before error
 
 The key difference from `ReadStream`: a successful read fills the buffer completely.
@@ -57,14 +58,14 @@ task<std::optional<message>> read_message(Source& source)
 
         if (ec == cond::eof && n == 0)
             co_return std::nullopt; // Clean EOF
 
-        if (ec.failed())
+        if (ec)
             throw std::system_error(ec);
 
         // Read variable-size body
         std::vector<char> body(header.body_size);
         auto [ec2, n2] = co_await source.read(make_buffer(body));
 
-        if (ec2.failed())
+        if (ec2)
             throw std::system_error(ec2);
 
         co_return message{header, std::move(body)};
@@ -79,9 +80,10 @@ A `WriteSink` provides complete write operations with explicit EOF signaling:
 
 ----
 template<class T>
 concept WriteSink =
+    WriteStream<T> &&
     requires(T& sink, const_buffer_archetype buffers) {
         { sink.write(buffers) } -> IoAwaitable;
-        { sink.write(buffers, bool{}) } -> IoAwaitable;
+        { sink.write_eof(buffers) } -> IoAwaitable;
         { sink.write_eof() } -> IoAwaitable;
     };
 ----
@@ -90,21 +92,25 @@ concept WriteSink =
 
 [source,cpp]
 ----
-// Write data
-template<ConstBufferSequence CB>
-IoAwaitable auto write(CB const& buffers);
+// Partial write (inherited from WriteStream)
+template<ConstBufferSequence Buffers>
+IoAwaitable auto write_some(Buffers buffers);
 
-// Write data with optional EOF
-template<ConstBufferSequence CB>
-IoAwaitable auto write(CB const& buffers, bool eof);
+// Complete write
+template<ConstBufferSequence Buffers>
+IoAwaitable auto write(Buffers buffers);
+
+// Atomic final write + EOF signal
+template<ConstBufferSequence Buffers>
+IoAwaitable auto write_eof(Buffers buffers);
 
 // Signal EOF without data
 IoAwaitable auto write_eof();
 ----
 
-The `eof` parameter signals end-of-stream after the data is written.
+`write` consumes the entire buffer sequence before returning. `write_eof(buffers)` atomically writes the buffer sequence and signals end-of-stream in a single operation, enabling protocol-level optimizations (e.g., HTTP chunked encoding terminal, compression trailers).
 
-After calling `write_eof()` or `write(buffers, true)`, no further writes are permitted.
+After calling `write_eof(buffers)` or `write_eof()`, no further writes are permitted.
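For orientation, a minimal conforming type might look like the
following sketch. The `counting_sink` shown here is hypothetical: it
merely counts bytes, and real I/O, buffering, and error paths are
omitted:

[source,cpp]
----
// Hypothetical sink that counts bytes and discards them.
class counting_sink
{
    std::size_t total_ = 0;
    bool closed_ = false;

public:
    // WriteStream: partial write (here we accept everything)
    template<ConstBufferSequence Buffers>
    io_task<std::size_t> write_some(Buffers buffers)
    {
        std::size_t n = buffer_size(buffers);
        total_ += n;
        co_return {std::error_code{}, n};
    }

    // WriteSink: complete write
    template<ConstBufferSequence Buffers>
    io_task<std::size_t> write(Buffers buffers)
    {
        auto [ec, n] = co_await write_some(buffers);
        co_return {ec, n};
    }

    // WriteSink: atomic final write + EOF
    template<ConstBufferSequence Buffers>
    io_task<std::size_t> write_eof(Buffers buffers)
    {
        auto [ec, n] = co_await write(buffers);
        closed_ = !ec;
        co_return {ec, n};
    }

    // WriteSink: EOF without data
    io_task<> write_eof()
    {
        closed_ = true;
        co_return {std::error_code{}};
    }

    std::size_t total() const noexcept { return total_; }
};
----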
=== Use Cases

@@ -121,10 +127,14 @@ task<> send_response(Sink& sink, response const& resp)
 {
     // Write headers
     auto headers = format_headers(resp);
-    co_await sink.write(make_buffer(headers));
+    auto [ec, n] = co_await sink.write(make_buffer(headers));
+    if (ec)
+        co_return;
 
-    // Write body with EOF
-    co_await sink.write(make_buffer(resp.body), true); // EOF after body
+    // Write body and signal EOF atomically
+    auto [ec2, n2] = co_await sink.write_eof(make_buffer(resp.body));
+    if (ec2)
+        co_return;
 }
 ----
 
@@ -165,7 +175,7 @@ task<> send_body(any_write_sink& body, std::string_view data)
     // - Compressed stream
     // - Test mock
 
-    co_await body.write(make_buffer(data), true);
+    co_await body.write_eof(make_buffer(data));
 }
 ----
 
@@ -198,7 +208,7 @@ Same `send_body` function, different transfer encodings—the library handles th
 
 | EOF handling
 | Implicit (read returns 0)
-| Explicit (`write_eof()`, EOF flag)
+| Explicit (`write_eof()`, `write_eof(buffers)`)
 
 | Use case
 | Raw I/O, incremental processing
diff --git a/doc/modules/ROOT/pages/streams/streams.adoc b/doc/modules/ROOT/pages/streams/streams.adoc
index ff2bc545..bf0808e3 100644
--- a/doc/modules/ROOT/pages/streams/streams.adoc
+++ b/doc/modules/ROOT/pages/streams/streams.adoc
@@ -24,14 +24,14 @@ concept ReadStream =
 
 [source,cpp]
 ----
-template<MutableBufferSequence MB>
-IoAwaitable auto read_some(MB const& buffers);
+template<MutableBufferSequence Buffers>
+IoAwaitable auto read_some(Buffers buffers);
 ----
 
 Returns an awaitable yielding `(error_code, std::size_t)`:
 
-* On success: `!ec.failed()`, and `n >= 1` bytes were read
-* On error: `ec.failed()`, and `n == 0`
+* On success: `!ec`, and `n >= 1` bytes were read
+* On error: `ec`, and `n == 0`
 * On EOF: `ec == cond::eof`, and `n == 0`
 
 If `buffer_empty(buffers)` is true, completes immediately with `n == 0` and no error.
@@ -45,7 +45,7 @@ If `buffer_empty(buffers)` is true, completes immediately with `n == 0` and no e
 char buf[1024];
 auto [ec, n] = co_await stream.read_some(mutable_buffer(buf));
 // n might be 1, might be 500, might be 1024
-// The only guarantee: if !ec.failed() && n > 0
+// The only guarantee: if !ec && n > 0
 ----
 
 This matches underlying OS behavior—reads return when *some* data is available.
@@ -66,7 +66,7 @@ task<> dump_stream(Stream& stream)
         if (ec == cond::eof)
             break; // End of stream
 
-        if (ec.failed())
+        if (ec)
             throw std::system_error(ec);
 
         std::cout.write(buf, n);
@@ -91,14 +91,14 @@ concept WriteStream =
 
 [source,cpp]
 ----
-template<ConstBufferSequence CB>
-IoAwaitable auto write_some(CB const& buffers);
+template<ConstBufferSequence Buffers>
+IoAwaitable auto write_some(Buffers buffers);
 ----
 
 Returns an awaitable yielding `(error_code, std::size_t)`:
 
-* On success: `!ec.failed()`, and `n >= 1` bytes were written
-* On error: `ec.failed()`, and `n` indicates bytes written before error (may be 0)
+* On success: `!ec`, and `n >= 1` bytes were written
+* On error: `ec`, and `n == 0`
 
 If `buffer_empty(buffers)` is true, completes immediately with `n == 0` and no error.
@@ -197,13 +197,13 @@ task<> handle_connection(any_stream& stream) if (ec == cond::eof) co_return; // Client closed connection - if (ec.failed()) + if (ec) throw std::system_error(ec); // Echo it back auto [wec, wn] = co_await write(stream, const_buffer(buf, n)); - if (wec.failed()) + if (wec) throw std::system_error(wec); } } diff --git a/example/custom-dynamic-buffer/custom_dynamic_buffer.cpp b/example/custom-dynamic-buffer/custom_dynamic_buffer.cpp index 7b4527cc..accf86ce 100644 --- a/example/custom-dynamic-buffer/custom_dynamic_buffer.cpp +++ b/example/custom-dynamic-buffer/custom_dynamic_buffer.cpp @@ -163,17 +163,15 @@ void demo_tracked_buffer() { std::cout << "=== Tracked Buffer Demo ===\n\n"; - // Setup mock stream with test data - test::fuse f; - test::stream mock(f); - mock.provide("Hello, "); - mock.provide("World! "); - mock.provide("This is a test of the custom buffer.\n"); - // Stream returns eof when data is exhausted + auto [reader, writer] = test::make_stream_pair(); + writer.provide("Hello, "); + writer.provide("World! "); + writer.provide("This is a test of the custom buffer.\n"); + writer.close(); tracked_buffer buffer; - test::run_blocking()(read_into_tracked_buffer(mock, buffer)); + test::run_blocking()(read_into_tracked_buffer(reader, buffer)); std::cout << "\nFinal buffer contents: "; auto data = buffer.data(); // const_buffer diff --git a/example/mock-stream-testing/mock_stream_testing.cpp b/example/mock-stream-testing/mock_stream_testing.cpp index beba8b09..89b4bb58 100644 --- a/example/mock-stream-testing/mock_stream_testing.cpp +++ b/example/mock-stream-testing/mock_stream_testing.cpp @@ -67,19 +67,16 @@ void test_happy_path() { std::cout << "Test: happy path\n"; - // Use fuse in disarmed mode (no error injection) for happy path - test::fuse f; // test::fuse - test::stream mock(f); // test::stream - mock.provide("hello\n"); + auto [a, b] = test::make_stream_pair(); + b.provide("hello\n"); - // Wrap mock in any_stream using pointer construction for reference semantics - any_stream stream{&mock}; // any_stream + any_stream stream{&a}; // any_stream bool result = false; // bool test::run_blocking([&](bool r) { result = r; })(echo_line_uppercase(stream)); assert(result == true); - assert(mock.data() == "HELLO\n"); + assert(b.data() == "HELLO\n"); std::cout << " PASSED\n"; } @@ -88,20 +85,17 @@ void test_partial_reads() { std::cout << "Test: partial reads (1 byte at a time)\n"; - // Use fuse in disarmed mode (no error injection) - test::fuse f; // test::fuse - // Mock returns at most 1 byte per read_some - test::stream mock(f, 1); // test::stream, max_read_size = 1 - mock.provide("hi\n"); + auto [a, b] = test::make_stream_pair(); + a.set_max_read_size(1); + b.provide("hi\n"); - // Wrap mock in any_stream using pointer construction for reference semantics - any_stream stream{&mock}; // any_stream + any_stream stream{&a}; // any_stream bool result = false; // bool test::run_blocking([&](bool r) { result = r; })(echo_line_uppercase(stream)); assert(result == true); - assert(mock.data() == "HI\n"); + assert(b.data() == "HI\n"); std::cout << " PASSED\n"; } @@ -117,11 +111,10 @@ void test_with_error_injection() // operation point until all paths are covered test::fuse f; // test::fuse auto r = f.armed([&](test::fuse&) -> task<> { // fuse::result - test::stream mock(f); // test::stream - mock.provide("test\n"); + auto [a, b] = test::make_stream_pair(f); + b.provide("test\n"); - // Wrap mock in any_stream using pointer construction for reference semantics - any_stream 
stream{&mock}; // any_stream + any_stream stream{&a}; // any_stream // Run the protocol - fuse will inject errors at each step bool result = co_await echo_line_uppercase(stream); // bool @@ -130,7 +123,7 @@ void test_with_error_injection() if (result) { ++success_count; - assert(mock.data() == "TEST\n"); + assert(b.data() == "TEST\n"); } else { diff --git a/example/stream-pipeline/stream_pipeline.cpp b/example/stream-pipeline/stream_pipeline.cpp index a84e199f..3b816923 100644 --- a/example/stream-pipeline/stream_pipeline.cpp +++ b/example/stream-pipeline/stream_pipeline.cpp @@ -71,7 +71,7 @@ class uppercase_transform } // BufferSource::pull - returns task<> to enable co_await on upstream - task>> + io_task> pull(std::span dest) { // Already have unconsumed data? @@ -88,7 +88,7 @@ class uppercase_transform // Upstream exhausted? if (exhausted_) - co_return {std::error_code{}, std::span{}}; + co_return {error::eof, std::span{}}; // Pull from upstream buffer_.clear(); @@ -98,14 +98,14 @@ class uppercase_transform // ec: std::error_code, bufs: std::span auto [ec, bufs] = co_await source_->pull(upstream); - if (ec) - co_return {ec, std::span{}}; - - if (bufs.empty()) + if (ec == cond::eof) { exhausted_ = true; - co_return {std::error_code{}, std::span{}}; + co_return {error::eof, std::span{}}; } + + if (ec) + co_return {ec, std::span{}}; // Transform: uppercase each byte for (auto const& buf : bufs) // const_buffer const& @@ -170,7 +170,7 @@ class line_numbering_transform } // BufferSource::pull - returns task<> to enable co_await on upstream - task>> + io_task> pull(std::span dest) { // Already have unconsumed data? @@ -187,7 +187,7 @@ class line_numbering_transform // Upstream exhausted? if (exhausted_) - co_return {std::error_code{}, std::span{}}; + co_return {error::eof, std::span{}}; // Pull from upstream buffer_.clear(); @@ -197,14 +197,14 @@ class line_numbering_transform // ec: std::error_code, bufs: std::span auto [ec, bufs] = co_await source_->pull(upstream); - if (ec) - co_return {ec, std::span{}}; - - if (bufs.empty()) + if (ec == cond::eof) { exhausted_ = true; - co_return {std::error_code{}, std::span{}}; + co_return {error::eof, std::span{}}; } + + if (ec) + co_return {ec, std::span{}}; // Transform: add line numbers for (auto const& buf : bufs) // const_buffer const& @@ -253,12 +253,12 @@ task transfer(any_buffer_source& source, any_write_sink& sink) // ec: std::error_code, spans: std::span auto [ec, spans] = co_await source.pull(bufs); + if (ec == cond::eof) + break; + if (ec) throw std::system_error(ec); - if (spans.empty()) - break; - // Write each buffer to sink for (auto const& buf : spans) // const_buffer const& { diff --git a/example/type-erased-echo/main.cpp b/example/type-erased-echo/main.cpp index cbd4e528..9d096bec 100644 --- a/example/type-erased-echo/main.cpp +++ b/example/type-erased-echo/main.cpp @@ -18,18 +18,17 @@ using namespace boost::capy; void test_with_mock() { - test::fuse f; - test::stream mock(f); - mock.provide("Hello, "); - mock.provide("World!\n"); - // Stream returns eof when no more data is available + auto [a, b] = test::make_stream_pair(); + b.provide("Hello, "); + b.provide("World!\n"); + b.close(); - // Using pointer construction (&mock) for reference semantics - the - // wrapper does not take ownership, so mock must outlive stream. - any_stream stream{&mock}; // any_stream + // Using pointer construction (&a) for reference semantics - the + // wrapper does not take ownership, so a must outlive stream. 
+ any_stream stream{&a}; // any_stream test::run_blocking()(myapp::echo_session(stream)); - std::cout << "Echo output: " << mock.data() << "\n"; + std::cout << "Echo output: " << b.data() << "\n"; } // With real sockets (using Corosio), you would write: diff --git a/include/boost/capy/buffers/buffer_array.hpp b/include/boost/capy/buffers/buffer_array.hpp index 45be2364..731c8b2d 100644 --- a/include/boost/capy/buffers/buffer_array.hpp +++ b/include/boost/capy/buffers/buffer_array.hpp @@ -190,6 +190,60 @@ class buffer_array } } + /** Construct from an iterator range. + + Copies up to N non-empty buffer descriptors from the + range `[first, last)`. If the range contains more than + N non-empty buffers, excess buffers are silently ignored. + + @param first Iterator to the first buffer descriptor. + @param last Iterator past the last buffer descriptor. + */ + template + buffer_array(Iterator first, Iterator last) noexcept + : dummy_(0) + { + while(first != last && n_ < N) + { + value_type b(*first); + if(b.size() != 0) + { + ::new(&arr_[n_++]) value_type(b); + size_ += b.size(); + } + ++first; + } + } + + /** Construct from an iterator range with overflow checking. + + Copies all non-empty buffer descriptors from the range + `[first, last)` into the internal array. + + @param first Iterator to the first buffer descriptor. + @param last Iterator past the last buffer descriptor. + + @throws std::length_error if the range contains more + than N non-empty buffers. + */ + template + buffer_array(std::in_place_t, Iterator first, Iterator last) + : dummy_(0) + { + while(first != last) + { + value_type b(*first); + if(b.size() != 0) + { + if(n_ >= N) + detail::throw_length_error(); + ::new(&arr_[n_++]) value_type(b); + size_ += b.size(); + } + ++first; + } + } + /** Destructor. */ ~buffer_array() diff --git a/include/boost/capy/buffers/buffer_param.hpp b/include/boost/capy/buffers/buffer_param.hpp index ba9a17ef..b92b026a 100644 --- a/include/boost/capy/buffers/buffer_param.hpp +++ b/include/boost/capy/buffers/buffer_param.hpp @@ -30,6 +30,7 @@ #include #include +#include #include #include @@ -139,7 +140,10 @@ class buffer_param private: decltype(begin(std::declval())) it_; decltype(end(std::declval())) end_; - buffer_type arr_[detail::max_iovec_]; + union { + int dummy_; + buffer_type arr_[detail::max_iovec_]; + }; std::size_t size_ = 0; std::size_t pos_ = 0; @@ -152,7 +156,7 @@ class buffer_param { buffer_type buf(*it_); if(buf.size() > 0) - arr_[size_++] = buf; + ::new(&arr_[size_++]) buffer_type(buf); } } @@ -167,6 +171,7 @@ class buffer_param buffer_param(BS const& bs) : it_(begin(bs)) , end_(end(bs)) + , dummy_(0) { refill(); } @@ -193,6 +198,21 @@ class buffer_param return {arr_ + pos_, size_ - pos_}; } + /** Check if more buffers exist beyond the current window. + + Returns `true` if the underlying buffer sequence has + additional buffers that have not yet been loaded into + the current window. Call after @ref data to determine + whether the current window is the last one. + + @return `true` if more buffers remain in the sequence. + */ + bool + more() const noexcept + { + return it_ != end_; + } + /** Consume bytes from the buffer sequence. 
Advances the current position by `n` bytes, consuming diff --git a/include/boost/capy/concept/buffer_sink.hpp b/include/boost/capy/concept/buffer_sink.hpp index 178b3910..62ae918d 100644 --- a/include/boost/capy/concept/buffer_sink.hpp +++ b/include/boost/capy/concept/buffer_sink.hpp @@ -43,9 +43,7 @@ namespace capy { a `std::span` and returning a span of filled buffers @li `T` must provide `commit(n)` returning an @ref IoAwaitable that decomposes to `(error_code)` - @li `T` must provide `commit(n, eof)` returning an @ref IoAwaitable - that decomposes to `(error_code)` - @li `T` must provide `commit_eof()` returning an @ref IoAwaitable + @li `T` must provide `commit_eof(n)` returning an @ref IoAwaitable that decomposes to `(error_code)` @par Semantic Requirements @@ -64,15 +62,11 @@ namespace capy { @li On success: `ec` is `false` @li On error: `ec` is `true` - The `commit` operation with `eof` combines data commit with end-of-stream: - - @li If `eof` is `false`, behaves identically to `commit(n)` - @li If `eof` is `true`, commits data and finalizes the sink - @li After success with `eof == true`, no further operations are permitted - - The `commit_eof` operation signals end-of-stream with no data: + The `commit_eof` operation commits final data and signals end-of-stream: - @li Equivalent to `commit(0, true)` + @li Commits `n` bytes written to the most recent `prepare` buffers + and finalizes the sink + @li After success, no further operations are permitted @li On success: `ec` is `false`, sink is finalized @li On error: `ec` is `true` @@ -87,15 +81,14 @@ namespace capy { std::span prepare( std::span dest ); IoAwaitable auto commit( std::size_t n ); - IoAwaitable auto commit( std::size_t n, bool eof ); - IoAwaitable auto commit_eof(); + IoAwaitable auto commit_eof( std::size_t n ); @endcode @par Example @code template - task> transfer( Source& source, Sink& sink ) + io_task transfer( Source& source, Sink& sink ) { const_buffer src_arr[16]; mutable_buffer dst_arr[16]; @@ -104,14 +97,13 @@ namespace capy { for(;;) { auto [ec1, src_bufs] = co_await source.pull( src_arr ); - if( ec1 ) - co_return {ec1, total}; - - if( src_bufs.empty() ) + if( ec1 == cond::eof ) { - auto [eof_ec] = co_await sink.commit_eof(); + auto [eof_ec] = co_await sink.commit_eof( 0 ); co_return {eof_ec, total}; } + if( ec1 ) + co_return {ec1, total}; auto dst_bufs = sink.prepare( dst_arr ); std::size_t n = buffer_copy( dst_bufs, src_bufs ); @@ -129,7 +121,7 @@ namespace capy { */ template concept BufferSink = - requires(T& sink, std::span dest, std::size_t n, bool eof) + requires(T& sink, std::span dest, std::size_t n) { // Synchronous: get writable buffers from sink's internal storage { sink.prepare(dest) } -> std::same_as>; @@ -140,16 +132,10 @@ concept BufferSink = decltype(sink.commit(n)), std::error_code>; - // Async: commit n bytes with optional EOF - { sink.commit(n, eof) } -> IoAwaitable; - requires awaitable_decomposes_to< - decltype(sink.commit(n, eof)), - std::error_code>; - - // Async: signal end of data - { sink.commit_eof() } -> IoAwaitable; + // Async: commit n final bytes and signal end of data + { sink.commit_eof(n) } -> IoAwaitable; requires awaitable_decomposes_to< - decltype(sink.commit_eof()), + decltype(sink.commit_eof(n)), std::error_code>; }; diff --git a/include/boost/capy/concept/buffer_source.hpp b/include/boost/capy/concept/buffer_source.hpp index f4bcc596..0c5867a1 100644 --- a/include/boost/capy/concept/buffer_source.hpp +++ b/include/boost/capy/concept/buffer_source.hpp @@ -12,6 +12,7 @@ 
#include #include +#include #include #include @@ -51,11 +52,12 @@ namespace capy { from the current unconsumed position. On return, exactly one of the following is true: - @li **Data available**: `ec` is `false` and `bufs.size() > 0`. + @li **Data available**: `!ec` and `bufs.size() > 0`. The returned span contains buffer descriptors. - @li **Source exhausted**: `ec` is `false` and `bufs.empty()`. + @li **Source exhausted**: `ec == cond::eof` and `bufs.empty()`. No more data is available; the transfer is complete. - @li **Error**: `ec` is `true`. An error occurred. + @li **Error**: `ec` is `true` and `ec != cond::eof`. + An error occurred. Calling `pull` multiple times without intervening `consume` returns the same unconsumed data. The `consume` operation advances the read @@ -81,17 +83,17 @@ namespace capy { @code template - task> transfer( Source& source, Stream& stream ) + io_task transfer( Source& source, Stream& stream ) { const_buffer arr[16]; std::size_t total = 0; for(;;) { auto [ec, bufs] = co_await source.pull( arr ); + if( ec == cond::eof ) + co_return {{}, total}; if( ec ) co_return {ec, total}; - if( bufs.empty() ) - co_return {{}, total}; auto [write_ec, n] = co_await stream.write_some( bufs ); if( write_ec ) co_return {write_ec, total}; diff --git a/include/boost/capy/concept/dynamic_buffer.hpp b/include/boost/capy/concept/dynamic_buffer.hpp index 763ddf1d..68425a11 100644 --- a/include/boost/capy/concept/dynamic_buffer.hpp +++ b/include/boost/capy/concept/dynamic_buffer.hpp @@ -37,7 +37,7 @@ void fill(DynamicBuffer auto& buffers); - COROUTINE: Use `DynamicBufferParam auto&&` (forwarding ref) - task> read(DynamicBufferParam auto&& buffers); + io_task read(DynamicBufferParam auto&& buffers); DynamicBufferParam enforces safe passing at compile time: accepts lvalues of any DynamicBuffer, but rvalues only for adapters. @@ -82,7 +82,7 @@ namespace capy { plain `DynamicBuffer` in coroutines allows dangerous rvalue passing that compiles but silently loses data on suspend. @code - task> + io_task read( ReadSource auto& src, DynamicBufferParam auto&& buffers ); @endcode @@ -130,7 +130,7 @@ concept DynamicBuffer = @par Conforming Signatures For coroutine functions, use a forwarding reference: @code - task> + io_task read( ReadSource auto& source, DynamicBufferParam auto&& buffers ); @endcode diff --git a/include/boost/capy/concept/read_source.hpp b/include/boost/capy/concept/read_source.hpp index 3a46af0c..5275af56 100644 --- a/include/boost/capy/concept/read_source.hpp +++ b/include/boost/capy/concept/read_source.hpp @@ -14,6 +14,7 @@ #include #include #include +#include #include #include @@ -22,43 +23,48 @@ namespace boost { namespace capy { -/** Concept for types that provide awaitable read operations from a source. +/** Concept for types providing complete reads from a data source. - A type satisfies `ReadSource` if it provides a `read` member function - that accepts any @ref MutableBufferSequence and is an @ref IoAwaitable - whose return value decomposes to `(error_code, std::size_t)`. + A type satisfies `ReadSource` if it satisfies @ref ReadStream + and additionally provides a `read` member function that accepts + any @ref MutableBufferSequence and is an @ref IoAwaitable whose + return value decomposes to `(error_code, std::size_t)`. - Use this concept when you need to produce data asynchronously, such - as reading HTTP request bodies, streaming file contents, or generating - data through transformations like decompression. + `ReadSource` refines `ReadStream`. 
Every `ReadSource` is a + `ReadStream`. Algorithms constrained on `ReadStream` accept both + raw streams and sources. @tparam T The source type. @par Syntactic Requirements + @li `T` must satisfy @ref ReadStream (provides `read_some`) @li `T` must provide a `read` member function template accepting any @ref MutableBufferSequence - @li The return type must satisfy @ref IoAwaitable + @li The return type of `read` must satisfy @ref IoAwaitable @li The awaitable must decompose to `(error_code, std::size_t)` via structured bindings @par Semantic Requirements - The `read` operation transfers data into the buffer sequence. On - return, exactly one of the following is true: + The inherited `read_some` operation reads one or more bytes + (partial read). See @ref ReadStream. - @li **Success**: `ec` is `false` and `n` equals - `buffer_size( buffers )`. The entire buffer sequence was filled. - @li **End-of-stream or Error**: `ec` is `true` and `n` - indicates the number of bytes transferred before the failure. + The `read` operation fills the entire buffer sequence. On return, + exactly one of the following is true: - If the source reaches end-of-stream before filling the buffer, - the operation returns with `ec` equal to `true`. Successful - partial reads are not permitted; either the entire buffer is filled - or the operation fails with any partial data reported in `n`. + @li **Success**: `!ec` and `n` equals `buffer_size( buffers )`. + The entire buffer sequence was filled. + @li **End-of-stream**: `ec == cond::eof` and `n` indicates the + number of bytes transferred before EOF was reached. + @li **Error**: `ec` and `n` indicates the number of bytes + transferred before the error. + + Successful partial reads are not permitted; either the entire + buffer is filled or the operation returns with an error. If `buffer_empty( buffers )` is `true`, the operation completes - immediately with `ec` equal to `false` and `n` equal to 0. + immediately with `!ec` and `n` equal to 0. When the buffer sequence contains multiple buffers, each buffer is filled completely before proceeding to the next. 
@@ -71,13 +77,11 @@ namespace capy { @par Conforming Signatures @code - template - some_io_awaitable> - read( MB const& buffers ); + template< MutableBufferSequence MB > + IoAwaitable auto read_some( MB buffers ); // inherited from ReadStream - template - some_io_awaitable> - read( MB buffers ); // by-value also permitted + template< MutableBufferSequence MB > + IoAwaitable auto read( MB buffers ); @endcode @warning **Coroutine Buffer Lifetime**: When implementing coroutine @@ -91,28 +95,24 @@ namespace capy { @par Example @code - template - task read_all( Source& source ) + template< ReadSource Source > + task<> read_header( Source& source ) { - std::string result; - char buf[1024]; - for(;;) - { - auto [ec, n] = co_await source.read( mutable_buffer( buf ) ); - if( ec == cond::eof ) - break; - if( ec ) - co_return {}; - result.append( buf, n ); - } - co_return result; + char header[16]; + auto [ec, n] = co_await source.read( + mutable_buffer( header ) ); + if( ec ) + co_return; + // header contains exactly 16 bytes } @endcode - @see IoAwaitable, MutableBufferSequence, awaitable_decomposes_to + @see ReadStream, IoAwaitable, MutableBufferSequence, + awaitable_decomposes_to */ template concept ReadSource = + ReadStream && requires(T& source, mutable_buffer_archetype buffers) { { source.read(buffers) } -> IoAwaitable; diff --git a/include/boost/capy/concept/read_stream.hpp b/include/boost/capy/concept/read_stream.hpp index 108b42dc..0a321b6d 100644 --- a/include/boost/capy/concept/read_stream.hpp +++ b/include/boost/capy/concept/read_stream.hpp @@ -50,6 +50,11 @@ namespace capy { Buffers in the sequence are filled completely before proceeding to the next buffer. + @par Design Rationale + The requirement that `n` is 0 whenever `ec` is set follows + from a consistency constraint with the empty-buffer rule. + See the ReadStream design document for a complete derivation. + @par Buffer Lifetime The caller must ensure that the memory referenced by `buffers` remains valid until the `co_await` expression returns. diff --git a/include/boost/capy/concept/write_sink.hpp b/include/boost/capy/concept/write_sink.hpp index 44c058c3..4c008738 100644 --- a/include/boost/capy/concept/write_sink.hpp +++ b/include/boost/capy/concept/write_sink.hpp @@ -14,6 +14,7 @@ #include #include #include +#include #include #include @@ -22,57 +23,58 @@ namespace boost { namespace capy { -/** Concept for types that provide awaitable write operations to a sink. +/** Concept for types providing complete writes with EOF signaling. - A type satisfies `WriteSink` if it provides `write` and `write_eof` - member functions that are @ref IoAwaitable and whose return values - decompose to `(error_code)` or `(error_code,std::size_t)`. + A type satisfies `WriteSink` if it satisfies @ref WriteStream + and additionally provides `write`, `write_eof(buffers)`, and + `write_eof()` member functions that are @ref IoAwaitable. - Use this concept when you need to consume data asynchronously, such - as writing HTTP response bodies, streaming file contents, or piping - data through transformations like compression. + `WriteSink` refines `WriteStream`. Every `WriteSink` is a + `WriteStream`. Algorithms constrained on `WriteStream` accept + both raw streams and sinks. @tparam T The sink type. 
@par Syntactic Requirements + @li `T` must satisfy @ref WriteStream (provides `write_some`) @li `T` must provide a `write` member function template accepting any @ref ConstBufferSequence, returning an awaitable that decomposes to `(error_code,std::size_t)` - @li `T` must provide a `write` member function template accepting - any @ref ConstBufferSequence and a `bool eof` parameter, - returning an awaitable that decomposes to `(error_code,std::size_t)` - @li `T` must provide a `write_eof` member function taking no arguments, - returning an awaitable that decomposes to `(error_code)` + @li `T` must provide a `write_eof` member function template + accepting any @ref ConstBufferSequence, returning an awaitable + that decomposes to `(error_code,std::size_t)` + @li `T` must provide a `write_eof` member function taking no + arguments, returning an awaitable that decomposes to + `(error_code)` @li All return types must satisfy @ref IoAwaitable @par Semantic Requirements - The `write` operation consumes data from the buffer sequence: + The inherited `write_some` operation writes one or more bytes + (partial write). See @ref WriteStream. + + The `write` operation consumes the entire buffer sequence: - @li On success: `ec` is `false`, and all bytes from the buffer - sequence have been consumed. - @li On error: `ec` is `true`. + @li On success: `!ec`, and `n` equals `buffer_size( buffers )`. + @li On error: `ec`, and `n` indicates the number of bytes + written before the error. - The `write` operation with `eof` combines data writing with end-of-stream - signaling: + The `write_eof(buffers)` operation writes the entire buffer + sequence and signals end-of-stream atomically: - @li If `eof` is `false`, behaves identically to `write(buffers)`. - @li If `eof` is `true`, writes the data and then finalizes the sink - as if `write_eof()` were called. - @li On success: `ec` is `false`, and `n` indicates the number - of bytes written from the caller's buffer. - @li On error: `ec` is `true`, and `n` indicates the number of - bytes written from the caller's buffer before the error occurred. + @li On success: `!ec`, `n` equals `buffer_size( buffers )`, + and the sink is finalized. + @li On error: `ec`, and `n` indicates the number of bytes + written before the error. - The `write_eof` operation signals that no more data will be written: + The `write_eof()` operation signals end-of-stream with no data: - @li On success: `ec` is `false`, and the sink is finalized. - @li On error: `ec` is `true`. + @li On success: `!ec`, and the sink is finalized. + @li On error: `ec`. - After `write_eof` returns successfully, or after `write(buffers, true)` - returns successfully, no further calls to `write` or `write_eof` are - permitted. + After `write_eof` (either overload) returns successfully, no + further writes or EOF signals are permitted. 
@par Buffer Lifetime @@ -82,11 +84,14 @@ namespace capy { @par Conforming Signatures @code + template< ConstBufferSequence Buffers > + IoAwaitable auto write_some( Buffers buffers ); // inherited + template< ConstBufferSequence Buffers > IoAwaitable auto write( Buffers buffers ); template< ConstBufferSequence Buffers > - IoAwaitable auto write( Buffers buffers, bool eof ); + IoAwaitable auto write_eof( Buffers buffers ); IoAwaitable auto write_eof(); @endcode @@ -105,33 +110,38 @@ namespace capy { template< WriteSink Sink > task<> send_body( Sink& sink, std::string_view data ) { - auto [ec, n] = co_await sink.write( make_buffer( data ) ); - if( ec ) - co_return; - auto [ec2] = co_await sink.write_eof(); + // Atomic: write all data and signal EOF + auto [ec, n] = co_await sink.write_eof( + make_buffer( data ) ); } - // Or equivalently using the combined overload: + // Or separately: template< WriteSink Sink > task<> send_body2( Sink& sink, std::string_view data ) { - auto [ec, n] = co_await sink.write( make_buffer( data ), true ); + auto [ec, n] = co_await sink.write( + make_buffer( data ) ); + if( ec ) + co_return; + auto [ec2] = co_await sink.write_eof(); } @endcode - @see IoAwaitable, ConstBufferSequence, awaitable_decomposes_to + @see WriteStream, IoAwaitable, ConstBufferSequence, + awaitable_decomposes_to */ template concept WriteSink = - requires(T& sink, const_buffer_archetype buffers, bool eof) + WriteStream && + requires(T& sink, const_buffer_archetype buffers) { { sink.write(buffers) } -> IoAwaitable; requires awaitable_decomposes_to< decltype(sink.write(buffers)), std::error_code, std::size_t>; - { sink.write(buffers, eof) } -> IoAwaitable; + { sink.write_eof(buffers) } -> IoAwaitable; requires awaitable_decomposes_to< - decltype(sink.write(buffers, eof)), + decltype(sink.write_eof(buffers)), std::error_code, std::size_t>; { sink.write_eof() } -> IoAwaitable; requires awaitable_decomposes_to< diff --git a/include/boost/capy/cond.hpp b/include/boost/capy/cond.hpp index 658e59b7..b15042fd 100644 --- a/include/boost/capy/cond.hpp +++ b/include/boost/capy/cond.hpp @@ -31,7 +31,7 @@ namespace capy { // handle cancellation else if( ec == cond::eof ) // handle end of stream - else if( ec.failed() ) + else if( ec ) // handle other errors @endcode diff --git a/include/boost/capy/io/any_buffer_sink.hpp b/include/boost/capy/io/any_buffer_sink.hpp index fba850e6..6ff4a2d9 100644 --- a/include/boost/capy/io/any_buffer_sink.hpp +++ b/include/boost/capy/io/any_buffer_sink.hpp @@ -21,13 +21,14 @@ #include #include #include -#include +#include #include #include #include #include #include +#include #include #include #include @@ -42,13 +43,33 @@ namespace capy { buffer sink operations. It uses cached awaitable storage to achieve zero steady-state allocation after construction. - The wrapper also satisfies @ref WriteSink through templated - @ref write methods. These methods copy data from the caller's - buffers into the sink's internal storage, incurring one extra - buffer copy compared to using @ref prepare and @ref commit - directly. + The wrapper exposes two interfaces for producing data: + the @ref BufferSink interface (`prepare`, `commit`, `commit_eof`) + and the @ref WriteSink interface (`write_some`, `write`, + `write_eof`). Choose the interface that matches how your data + is produced: + + @par Choosing an Interface + + Use the **BufferSink** interface when you are a generator that + produces data into externally-provided buffers. 
The sink owns + the memory; you call @ref prepare to obtain writable buffers, + fill them, then call @ref commit or @ref commit_eof. + + Use the **WriteSink** interface when you already have buffers + containing the data to write: + - If the entire body is available up front, call + @ref write_eof(buffers) to send everything atomically. + - If data arrives incrementally, call @ref write or + @ref write_some in a loop, then @ref write_eof() when done. + Prefer `write` (complete) unless your streaming pattern + benefits from partial writes via `write_some`. + + If the wrapped type only satisfies @ref BufferSink, the + @ref WriteSink operations are provided automatically. + + @par Construction Modes - The wrapper supports two construction modes: - **Owning**: Pass by value to transfer ownership. The wrapper allocates storage and owns the sink. - **Reference**: Pass a pointer to wrap without ownership. The @@ -73,11 +94,20 @@ namespace capy { some_buffer_sink sink; any_buffer_sink abs(&sink); + // BufferSink interface: generate into callee-owned buffers mutable_buffer arr[16]; auto bufs = abs.prepare(arr); // Write data into bufs[0..bufs.size()) auto [ec] = co_await abs.commit(bytes_written); - auto [ec2] = co_await abs.commit_eof(); + auto [ec2] = co_await abs.commit_eof(0); + + // WriteSink interface: send caller-owned buffers + auto [ec3, n] = co_await abs.write(make_buffer("hello", 5)); + auto [ec4] = co_await abs.write_eof(); + + // Or send everything at once + auto [ec5, n2] = co_await abs.write_eof( + make_buffer(body_data)); @endcode @see any_buffer_source, BufferSink, WriteSink @@ -86,15 +116,18 @@ class any_buffer_sink { struct vtable; struct awaitable_ops; + struct write_awaitable_ops; template struct vtable_for_impl; + // hot-path members first for cache locality void* sink_ = nullptr; vtable const* vt_ = nullptr; void* cached_awaitable_ = nullptr; - void* storage_ = nullptr; awaitable_ops const* active_ops_ = nullptr; + write_awaitable_ops const* active_write_ops_ = nullptr; + void* storage_ = nullptr; public: /** Destructor. @@ -130,8 +163,9 @@ class any_buffer_sink : sink_(std::exchange(other.sink_, nullptr)) , vt_(std::exchange(other.vt_, nullptr)) , cached_awaitable_(std::exchange(other.cached_awaitable_, nullptr)) - , storage_(std::exchange(other.storage_, nullptr)) , active_ops_(std::exchange(other.active_ops_, nullptr)) + , active_write_ops_(std::exchange(other.active_write_ops_, nullptr)) + , storage_(std::exchange(other.storage_, nullptr)) { } @@ -149,7 +183,9 @@ class any_buffer_sink /** Construct by taking ownership of a BufferSink. Allocates storage and moves the sink into this wrapper. - The wrapper owns the sink and will destroy it. + The wrapper owns the sink and will destroy it. If `S` also + satisfies @ref WriteSink, native write operations are + forwarded through the virtual boundary. @param s The sink to take ownership of. */ @@ -160,7 +196,9 @@ class any_buffer_sink /** Construct by wrapping a BufferSink without ownership. Wraps the given sink by pointer. The sink must remain - valid for the lifetime of this wrapper. + valid for the lifetime of this wrapper. If `S` also + satisfies @ref WriteSink, native write operations are + forwarded through the virtual boundary. @param s Pointer to the sink to wrap. */ @@ -221,14 +259,13 @@ class any_buffer_sink auto commit(std::size_t n); - /** Commit bytes written with optional end-of-stream. + /** Commit final bytes and signal end-of-stream. 
Commits `n` bytes written to the buffers returned by the - most recent call to @ref prepare. If `eof` is true, also - signals end-of-stream. + most recent call to @ref prepare and finalizes the sink. + After success, no further operations are permitted. @param n The number of bytes to commit. - @param eof If true, signals end-of-stream after committing. @return An awaitable yielding `(error_code)`. @@ -236,30 +273,36 @@ class any_buffer_sink The wrapper must contain a valid sink (`has_value() == true`). */ auto - commit(std::size_t n, bool eof); + commit_eof(std::size_t n); - /** Signal end-of-stream. + /** Write some data from a buffer sequence. - Indicates that no more data will be written to the sink. - The operation completes when the sink is finalized, or - an error occurs. + Writes one or more bytes from the buffer sequence to the + underlying sink. May consume less than the full sequence. - @return An awaitable yielding `(error_code)`. + When the wrapped type provides native @ref WriteSink support, + the operation forwards directly. Otherwise it is synthesized + from @ref prepare and @ref commit with a buffer copy. + + @param buffers The buffer sequence to write. + + @return An awaitable yielding `(error_code,std::size_t)`. @par Preconditions The wrapper must contain a valid sink (`has_value() == true`). */ - auto - commit_eof(); + template + io_task + write_some(CB buffers); - /** Write data from a buffer sequence. + /** Write all data from a buffer sequence. Writes all data from the buffer sequence to the underlying sink. This method satisfies the @ref WriteSink concept. - @note This operation copies data from the caller's buffers - into the sink's internal buffers. For zero-copy writes, - use @ref prepare and @ref commit directly. + When the wrapped type provides native @ref WriteSink support, + each window is forwarded directly. Otherwise the data is + copied into the sink via @ref prepare and @ref commit. @param buffers The buffer sequence to write. @@ -269,21 +312,20 @@ class any_buffer_sink The wrapper must contain a valid sink (`has_value() == true`). */ template - task> + io_task write(CB buffers); - /** Write data with optional end-of-stream. + /** Atomically write data and signal end-of-stream. Writes all data from the buffer sequence to the underlying - sink, optionally finalizing it afterwards. This method - satisfies the @ref WriteSink concept. + sink and then signals end-of-stream. - @note This operation copies data from the caller's buffers - into the sink's internal buffers. For zero-copy writes, - use @ref prepare and @ref commit directly. + When the wrapped type provides native @ref WriteSink support, + the final window is sent atomically via the underlying + `write_eof(buffers)`. Otherwise the data is synthesized + through @ref prepare, @ref commit, and @ref commit_eof. @param buffers The buffer sequence to write. - @param eof If true, finalize the sink after writing. @return An awaitable yielding `(error_code,std::size_t)`. @@ -291,14 +333,18 @@ class any_buffer_sink The wrapper must contain a valid sink (`has_value() == true`). */ template - task> - write(CB buffers, bool eof); + io_task + write_eof(CB buffers); /** Signal end-of-stream. Indicates that no more data will be written to the sink. This method satisfies the @ref WriteSink concept. + When the wrapped type provides native @ref WriteSink support, + the underlying `write_eof()` is called. Otherwise the + operation is implemented as `commit_eof(0)`. + @return An awaitable yielding `(error_code)`. 
@par Preconditions @@ -328,10 +374,36 @@ class any_buffer_sink std::terminate(); sink_ = &new_sink; } + +private: + /** Forward a partial write through the vtable. + + Constructs the underlying `write_some` awaitable in + cached storage and returns a type-erased awaitable. + */ + auto + write_some_(std::span buffers); + + /** Forward a complete write through the vtable. + + Constructs the underlying `write` awaitable in + cached storage and returns a type-erased awaitable. + */ + auto + write_(std::span buffers); + + /** Forward an atomic write-with-EOF through the vtable. + + Constructs the underlying `write_eof(buffers)` awaitable + in cached storage and returns a type-erased awaitable. + */ + auto + write_eof_buffers_(std::span buffers); }; //---------------------------------------------------------- +/** Type-erased ops for awaitables yielding `io_result<>`. */ struct any_buffer_sink::awaitable_ops { bool (*await_ready)(void*); @@ -340,6 +412,15 @@ struct any_buffer_sink::awaitable_ops void (*destroy)(void*) noexcept; }; +/** Type-erased ops for awaitables yielding `io_result`. */ +struct any_buffer_sink::write_awaitable_ops +{ + bool (*await_ready)(void*); + coro (*await_suspend)(void*, coro, executor_ref, std::stop_token); + io_result (*await_resume)(void*); + void (*destroy)(void*) noexcept; +}; + struct any_buffer_sink::vtable { void (*destroy)(void*) noexcept; @@ -351,9 +432,26 @@ struct any_buffer_sink::vtable awaitable_ops const* (*construct_commit_awaitable)( void* sink, void* storage, - std::size_t n, - bool eof); - awaitable_ops const* (*construct_eof_awaitable)( + std::size_t n); + awaitable_ops const* (*construct_commit_eof_awaitable)( + void* sink, + void* storage, + std::size_t n); + + // WriteSink forwarding (null when wrapped type is BufferSink-only) + write_awaitable_ops const* (*construct_write_some_awaitable)( + void* sink, + void* storage, + std::span buffers); + write_awaitable_ops const* (*construct_write_awaitable)( + void* sink, + void* storage, + std::span buffers); + write_awaitable_ops const* (*construct_write_eof_buffers_awaitable)( + void* sink, + void* storage, + std::span buffers); + awaitable_ops const* (*construct_write_eof_awaitable)( void* sink, void* storage); }; @@ -362,8 +460,9 @@ template struct any_buffer_sink::vtable_for_impl { using CommitAwaitable = decltype(std::declval().commit( - std::size_t{}, false)); - using EofAwaitable = decltype(std::declval().commit_eof()); + std::size_t{})); + using CommitEofAwaitable = decltype(std::declval().commit_eof( + std::size_t{})); static void do_destroy_impl(void* sink) noexcept @@ -384,11 +483,10 @@ struct any_buffer_sink::vtable_for_impl construct_commit_awaitable_impl( void* sink, void* storage, - std::size_t n, - bool eof) + std::size_t n) { auto& s = *static_cast(sink); - ::new(storage) CommitAwaitable(s.commit(n, eof)); + ::new(storage) CommitAwaitable(s.commit(n)); static constexpr awaitable_ops ops = { +[](void* p) { @@ -409,49 +507,233 @@ struct any_buffer_sink::vtable_for_impl } static awaitable_ops const* - construct_eof_awaitable_impl( + construct_commit_eof_awaitable_impl( + void* sink, + void* storage, + std::size_t n) + { + auto& s = *static_cast(sink); + ::new(storage) CommitEofAwaitable(s.commit_eof(n)); + + static constexpr awaitable_ops ops = { + +[](void* p) { + return static_cast(p)->await_ready(); + }, + +[](void* p, coro h, executor_ref ex, std::stop_token token) { + return detail::call_await_suspend( + static_cast(p), h, ex, token); + }, + +[](void* p) { + return 
static_cast(p)->await_resume(); + }, + +[](void* p) noexcept { + static_cast(p)->~CommitEofAwaitable(); + } + }; + return &ops; + } + + //------------------------------------------------------ + // WriteSink forwarding (only instantiated when WriteSink) + + static write_awaitable_ops const* + construct_write_some_awaitable_impl( + void* sink, + void* storage, + std::span buffers) + requires WriteSink + { + using Aw = decltype(std::declval().write_some( + std::span{})); + auto& s = *static_cast(sink); + ::new(storage) Aw(s.write_some(buffers)); + + static constexpr write_awaitable_ops ops = { + +[](void* p) { + return static_cast(p)->await_ready(); + }, + +[](void* p, coro h, executor_ref ex, std::stop_token token) { + return detail::call_await_suspend( + static_cast(p), h, ex, token); + }, + +[](void* p) { + return static_cast(p)->await_resume(); + }, + +[](void* p) noexcept { + static_cast(p)->~Aw(); + } + }; + return &ops; + } + + static write_awaitable_ops const* + construct_write_awaitable_impl( + void* sink, + void* storage, + std::span buffers) + requires WriteSink + { + using Aw = decltype(std::declval().write( + std::span{})); + auto& s = *static_cast(sink); + ::new(storage) Aw(s.write(buffers)); + + static constexpr write_awaitable_ops ops = { + +[](void* p) { + return static_cast(p)->await_ready(); + }, + +[](void* p, coro h, executor_ref ex, std::stop_token token) { + return detail::call_await_suspend( + static_cast(p), h, ex, token); + }, + +[](void* p) { + return static_cast(p)->await_resume(); + }, + +[](void* p) noexcept { + static_cast(p)->~Aw(); + } + }; + return &ops; + } + + static write_awaitable_ops const* + construct_write_eof_buffers_awaitable_impl( + void* sink, + void* storage, + std::span buffers) + requires WriteSink + { + using Aw = decltype(std::declval().write_eof( + std::span{})); + auto& s = *static_cast(sink); + ::new(storage) Aw(s.write_eof(buffers)); + + static constexpr write_awaitable_ops ops = { + +[](void* p) { + return static_cast(p)->await_ready(); + }, + +[](void* p, coro h, executor_ref ex, std::stop_token token) { + return detail::call_await_suspend( + static_cast(p), h, ex, token); + }, + +[](void* p) { + return static_cast(p)->await_resume(); + }, + +[](void* p) noexcept { + static_cast(p)->~Aw(); + } + }; + return &ops; + } + + static awaitable_ops const* + construct_write_eof_awaitable_impl( void* sink, void* storage) + requires WriteSink { + using Aw = decltype(std::declval().write_eof()); auto& s = *static_cast(sink); - ::new(storage) EofAwaitable(s.commit_eof()); + ::new(storage) Aw(s.write_eof()); static constexpr awaitable_ops ops = { +[](void* p) { - return static_cast(p)->await_ready(); + return static_cast(p)->await_ready(); }, +[](void* p, coro h, executor_ref ex, std::stop_token token) { return detail::call_await_suspend( - static_cast(p), h, ex, token); + static_cast(p), h, ex, token); }, +[](void* p) { - return static_cast(p)->await_resume(); + return static_cast(p)->await_resume(); }, +[](void* p) noexcept { - static_cast(p)->~EofAwaitable(); + static_cast(p)->~Aw(); } }; return &ops; } - static constexpr std::size_t max_awaitable_size = - sizeof(CommitAwaitable) > sizeof(EofAwaitable) + //------------------------------------------------------ + + static consteval std::size_t + compute_max_size() noexcept + { + std::size_t s = sizeof(CommitAwaitable) > sizeof(CommitEofAwaitable) ? 
sizeof(CommitAwaitable) - : sizeof(EofAwaitable); + : sizeof(CommitEofAwaitable); + if constexpr (WriteSink) + { + using WS = decltype(std::declval().write_some( + std::span{})); + using W = decltype(std::declval().write( + std::span{})); + using WEB = decltype(std::declval().write_eof( + std::span{})); + using WE = decltype(std::declval().write_eof()); + + if(sizeof(WS) > s) s = sizeof(WS); + if(sizeof(W) > s) s = sizeof(W); + if(sizeof(WEB) > s) s = sizeof(WEB); + if(sizeof(WE) > s) s = sizeof(WE); + } + return s; + } - static constexpr std::size_t max_awaitable_align = - alignof(CommitAwaitable) > alignof(EofAwaitable) + static consteval std::size_t + compute_max_align() noexcept + { + std::size_t a = alignof(CommitAwaitable) > alignof(CommitEofAwaitable) ? alignof(CommitAwaitable) - : alignof(EofAwaitable); - - static constexpr vtable value = { - &do_destroy_impl, - &do_prepare_impl, - max_awaitable_size, - max_awaitable_align, - &construct_commit_awaitable_impl, - &construct_eof_awaitable_impl - }; + : alignof(CommitEofAwaitable); + if constexpr (WriteSink) + { + using WS = decltype(std::declval().write_some( + std::span{})); + using W = decltype(std::declval().write( + std::span{})); + using WEB = decltype(std::declval().write_eof( + std::span{})); + using WE = decltype(std::declval().write_eof()); + + if(alignof(WS) > a) a = alignof(WS); + if(alignof(W) > a) a = alignof(W); + if(alignof(WEB) > a) a = alignof(WEB); + if(alignof(WE) > a) a = alignof(WE); + } + return a; + } + + static consteval vtable + make_vtable() noexcept + { + vtable v{}; + v.destroy = &do_destroy_impl; + v.do_prepare = &do_prepare_impl; + v.awaitable_size = compute_max_size(); + v.awaitable_align = compute_max_align(); + v.construct_commit_awaitable = &construct_commit_awaitable_impl; + v.construct_commit_eof_awaitable = &construct_commit_eof_awaitable_impl; + v.construct_write_some_awaitable = nullptr; + v.construct_write_awaitable = nullptr; + v.construct_write_eof_buffers_awaitable = nullptr; + v.construct_write_eof_awaitable = nullptr; + + if constexpr (WriteSink) + { + v.construct_write_some_awaitable = + &construct_write_some_awaitable_impl; + v.construct_write_awaitable = + &construct_write_awaitable_impl; + v.construct_write_eof_buffers_awaitable = + &construct_write_eof_buffers_awaitable_impl; + v.construct_write_eof_awaitable = + &construct_write_eof_awaitable_impl; + } + return v; + } + + static constexpr vtable value = make_vtable(); }; //---------------------------------------------------------- @@ -485,6 +767,7 @@ any_buffer_sink::operator=(any_buffer_sink&& other) noexcept cached_awaitable_ = std::exchange(other.cached_awaitable_, nullptr); storage_ = std::exchange(other.storage_, nullptr); active_ops_ = std::exchange(other.active_ops_, nullptr); + active_write_ops_ = std::exchange(other.active_write_ops_, nullptr); } return *this; } @@ -510,7 +793,6 @@ any_buffer_sink::any_buffer_sink(S s) storage_ = ::operator new(sizeof(S)); sink_ = ::new(storage_) S(std::move(s)); - // Preallocate the awaitable storage (sized for max of commit/eof) cached_awaitable_ = ::operator new(vt_->awaitable_size); g.committed = true; @@ -521,7 +803,6 @@ any_buffer_sink::any_buffer_sink(S* s) : sink_(s) , vt_(&vtable_for_impl::value) { - // Preallocate the awaitable storage (sized for max of commit/eof) cached_awaitable_ = ::operator new(vt_->awaitable_size); } @@ -534,35 +815,68 @@ any_buffer_sink::prepare(std::span dest) } inline auto -any_buffer_sink::commit(std::size_t n, bool eof) 
+any_buffer_sink::commit(std::size_t n) { struct awaitable { any_buffer_sink* self_; std::size_t n_; - bool eof_; bool - await_ready() const noexcept + await_ready() { - return false; + self_->active_ops_ = self_->vt_->construct_commit_awaitable( + self_->sink_, + self_->cached_awaitable_, + n_); + return self_->active_ops_->await_ready(self_->cached_awaitable_); } coro await_suspend(coro h, executor_ref ex, std::stop_token token) { - // Construct the underlying awaitable into cached storage - self_->active_ops_ = self_->vt_->construct_commit_awaitable( + return self_->active_ops_->await_suspend( + self_->cached_awaitable_, h, ex, token); + } + + io_result<> + await_resume() + { + struct guard { + any_buffer_sink* self; + ~guard() { + self->active_ops_->destroy(self->cached_awaitable_); + self->active_ops_ = nullptr; + } + } g{self_}; + return self_->active_ops_->await_resume( + self_->cached_awaitable_); + } + }; + return awaitable{this, n}; +} + +inline auto +any_buffer_sink::commit_eof(std::size_t n) +{ + struct awaitable + { + any_buffer_sink* self_; + std::size_t n_; + + bool + await_ready() + { + self_->active_ops_ = self_->vt_->construct_commit_eof_awaitable( self_->sink_, self_->cached_awaitable_, - n_, - eof_); - - // Check if underlying is immediately ready - if(self_->active_ops_->await_ready(self_->cached_awaitable_)) - return h; + n_); + return self_->active_ops_->await_ready(self_->cached_awaitable_); + } - // Forward to underlying awaitable + coro + await_suspend(coro h, executor_ref ex, std::stop_token token) + { return self_->active_ops_->await_suspend( self_->cached_awaitable_, h, ex, token); } @@ -581,21 +895,70 @@ any_buffer_sink::commit(std::size_t n, bool eof) self_->cached_awaitable_); } }; - return awaitable{this, n, eof}; + return awaitable{this, n}; } +//---------------------------------------------------------- +// Private helpers for native WriteSink forwarding + inline auto -any_buffer_sink::commit(std::size_t n) +any_buffer_sink::write_some_( + std::span buffers) { - return commit(n, false); + struct awaitable + { + any_buffer_sink* self_; + std::span buffers_; + + bool + await_ready() const noexcept + { + return false; + } + + coro + await_suspend(coro h, executor_ref ex, std::stop_token token) + { + self_->active_write_ops_ = + self_->vt_->construct_write_some_awaitable( + self_->sink_, + self_->cached_awaitable_, + buffers_); + + if(self_->active_write_ops_->await_ready( + self_->cached_awaitable_)) + return h; + + return self_->active_write_ops_->await_suspend( + self_->cached_awaitable_, h, ex, token); + } + + io_result + await_resume() + { + struct guard { + any_buffer_sink* self; + ~guard() { + self->active_write_ops_->destroy( + self->cached_awaitable_); + self->active_write_ops_ = nullptr; + } + } g{self_}; + return self_->active_write_ops_->await_resume( + self_->cached_awaitable_); + } + }; + return awaitable{this, buffers}; } inline auto -any_buffer_sink::commit_eof() +any_buffer_sink::write_( + std::span buffers) { struct awaitable { any_buffer_sink* self_; + std::span buffers_; bool await_ready() const noexcept @@ -606,53 +969,150 @@ any_buffer_sink::commit_eof() coro await_suspend(coro h, executor_ref ex, std::stop_token token) { - // Construct the underlying awaitable into cached storage - self_->active_ops_ = self_->vt_->construct_eof_awaitable( - self_->sink_, + self_->active_write_ops_ = + self_->vt_->construct_write_awaitable( + self_->sink_, + self_->cached_awaitable_, + buffers_); + + if(self_->active_write_ops_->await_ready( + 
self_->cached_awaitable_)) + return h; + + return self_->active_write_ops_->await_suspend( + self_->cached_awaitable_, h, ex, token); + } + + io_result + await_resume() + { + struct guard { + any_buffer_sink* self; + ~guard() { + self->active_write_ops_->destroy( + self->cached_awaitable_); + self->active_write_ops_ = nullptr; + } + } g{self_}; + return self_->active_write_ops_->await_resume( self_->cached_awaitable_); + } + }; + return awaitable{this, buffers}; +} + +inline auto +any_buffer_sink::write_eof_buffers_( + std::span buffers) +{ + struct awaitable + { + any_buffer_sink* self_; + std::span buffers_; - // Check if underlying is immediately ready - if(self_->active_ops_->await_ready(self_->cached_awaitable_)) + bool + await_ready() const noexcept + { + return false; + } + + coro + await_suspend(coro h, executor_ref ex, std::stop_token token) + { + self_->active_write_ops_ = + self_->vt_->construct_write_eof_buffers_awaitable( + self_->sink_, + self_->cached_awaitable_, + buffers_); + + if(self_->active_write_ops_->await_ready( + self_->cached_awaitable_)) return h; - // Forward to underlying awaitable - return self_->active_ops_->await_suspend( + return self_->active_write_ops_->await_suspend( self_->cached_awaitable_, h, ex, token); } - io_result<> + io_result await_resume() { struct guard { any_buffer_sink* self; ~guard() { - self->active_ops_->destroy(self->cached_awaitable_); - self->active_ops_ = nullptr; + self->active_write_ops_->destroy( + self->cached_awaitable_); + self->active_write_ops_ = nullptr; } } g{self_}; - return self_->active_ops_->await_resume( + return self_->active_write_ops_->await_resume( self_->cached_awaitable_); } }; - return awaitable{this}; + return awaitable{this, buffers}; } //---------------------------------------------------------- +// Public WriteSink methods template -task> -any_buffer_sink::write(CB buffers) +io_task +any_buffer_sink::write_some(CB buffers) { - return write(buffers, false); + buffer_param bp(buffers); + auto src = bp.data(); + if(src.empty()) + co_return {{}, 0}; + + // Native WriteSink path + if(vt_->construct_write_some_awaitable) + co_return co_await write_some_(src); + + // Synthesized path: prepare + buffer_copy + commit + mutable_buffer arr[detail::max_iovec_]; + auto dst_bufs = prepare(arr); + if(dst_bufs.empty()) + { + auto [ec] = co_await commit(0); + if(ec) + co_return {ec, 0}; + dst_bufs = prepare(arr); + if(dst_bufs.empty()) + co_return {{}, 0}; + } + + auto n = buffer_copy(dst_bufs, src); + auto [ec] = co_await commit(n); + if(ec) + co_return {ec, 0}; + co_return {{}, n}; } template -task> -any_buffer_sink::write(CB buffers, bool eof) +io_task +any_buffer_sink::write(CB buffers) { buffer_param bp(buffers); std::size_t total = 0; + // Native WriteSink path + if(vt_->construct_write_awaitable) + { + for(;;) + { + auto bufs = bp.data(); + if(bufs.empty()) + break; + + auto [ec, n] = co_await write_(bufs); + total += n; + if(ec) + co_return {ec, total}; + bp.consume(n); + } + co_return {{}, total}; + } + + // Synthesized path: prepare + buffer_copy + commit for(;;) { auto src = bp.data(); @@ -677,24 +1137,137 @@ any_buffer_sink::write(CB buffers, bool eof) total += n; } - if(eof) - { - auto [ec] = co_await commit_eof(); - if(ec) - co_return {ec, total}; - } - co_return {{}, total}; } inline auto any_buffer_sink::write_eof() { - return commit_eof(); + struct awaitable + { + any_buffer_sink* self_; + + bool + await_ready() + { + if(self_->vt_->construct_write_eof_awaitable) + { + // Native WriteSink: forward to 
underlying write_eof() + self_->active_ops_ = + self_->vt_->construct_write_eof_awaitable( + self_->sink_, + self_->cached_awaitable_); + } + else + { + // Synthesized: commit_eof(0) + self_->active_ops_ = + self_->vt_->construct_commit_eof_awaitable( + self_->sink_, + self_->cached_awaitable_, + 0); + } + return self_->active_ops_->await_ready( + self_->cached_awaitable_); + } + + coro + await_suspend(coro h, executor_ref ex, std::stop_token token) + { + return self_->active_ops_->await_suspend( + self_->cached_awaitable_, h, ex, token); + } + + io_result<> + await_resume() + { + struct guard { + any_buffer_sink* self; + ~guard() { + self->active_ops_->destroy(self->cached_awaitable_); + self->active_ops_ = nullptr; + } + } g{self_}; + return self_->active_ops_->await_resume( + self_->cached_awaitable_); + } + }; + return awaitable{this}; +} + +template +io_task +any_buffer_sink::write_eof(CB buffers) +{ + // Native WriteSink path + if(vt_->construct_write_eof_buffers_awaitable) + { + const_buffer_param bp(buffers); + std::size_t total = 0; + + for(;;) + { + auto bufs = bp.data(); + if(bufs.empty()) + { + auto [ec] = co_await write_eof(); + co_return {ec, total}; + } + + if(!bp.more()) + { + // Last window: send atomically with EOF + auto [ec, n] = co_await write_eof_buffers_(bufs); + total += n; + co_return {ec, total}; + } + + auto [ec, n] = co_await write_(bufs); + total += n; + if(ec) + co_return {ec, total}; + bp.consume(n); + } + } + + // Synthesized path: prepare + buffer_copy + commit + commit_eof + buffer_param bp(buffers); + std::size_t total = 0; + + for(;;) + { + auto src = bp.data(); + if(src.empty()) + break; + + mutable_buffer arr[detail::max_iovec_]; + auto dst_bufs = prepare(arr); + if(dst_bufs.empty()) + { + auto [ec] = co_await commit(0); + if(ec) + co_return {ec, total}; + continue; + } + + auto n = buffer_copy(dst_bufs, src); + auto [ec] = co_await commit(n); + if(ec) + co_return {ec, total}; + bp.consume(n); + total += n; + } + + auto [ec] = co_await commit_eof(0); + if(ec) + co_return {ec, total}; + + co_return {{}, total}; } //---------------------------------------------------------- +static_assert(BufferSink); static_assert(WriteSink); } // namespace capy diff --git a/include/boost/capy/io/any_buffer_source.hpp b/include/boost/capy/io/any_buffer_source.hpp index 366cfc84..a905d975 100644 --- a/include/boost/capy/io/any_buffer_source.hpp +++ b/include/boost/capy/io/any_buffer_source.hpp @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -22,7 +23,7 @@ #include #include #include -#include +#include #include #include @@ -41,15 +42,15 @@ namespace capy { This class provides type erasure for any type satisfying the @ref BufferSource concept, enabling runtime polymorphism for - buffer pull operations. The wrapper also satisfies @ref ReadSource, - allowing it to be used with code expecting either interface. - It uses cached awaitable storage to achieve zero steady-state - allocation after construction. + buffer pull operations. It uses cached awaitable storage to achieve + zero steady-state allocation after construction. - The wrapper also satisfies @ref ReadSource through the templated - @ref read method. This method copies data from the source's - internal buffers into the caller's buffers, incurring one extra - buffer copy compared to using @ref pull and @ref consume directly. + The wrapper also satisfies @ref ReadSource. 
When the wrapped type + satisfies only @ref BufferSource, the read operations are + synthesized using @ref pull and @ref consume with an extra + buffer copy. When the wrapped type satisfies both @ref BufferSource + and @ref ReadSource, the native read operations are forwarded + directly across the virtual boundary, avoiding the copy. The wrapper supports two construction modes: - **Owning**: Pass by value to transfer ownership. The wrapper @@ -57,6 +58,15 @@ namespace capy { - **Reference**: Pass a pointer to wrap without ownership. The pointed-to source must outlive this wrapper. + Within each mode, the vtable is populated at compile time based + on whether the wrapped type also satisfies @ref ReadSource: + - **BufferSource only**: @ref read_some and @ref read are + synthesized from @ref pull and @ref consume, incurring one + buffer copy per operation. + - **BufferSource + ReadSource**: All read operations are + forwarded natively through the type-erased boundary with + no extra copy. + @par Awaitable Preallocation The constructor preallocates storage for the type-erased awaitable. This reserves all virtual address space at server startup @@ -78,6 +88,10 @@ namespace capy { const_buffer arr[16]; auto [ec, bufs] = co_await abs.pull(arr); + + // ReadSource interface also available + char buf[64]; + auto [ec2, n] = co_await abs.read_some(mutable_buffer(buf, 64)); @endcode @see any_buffer_sink, BufferSource, ReadSource @@ -86,15 +100,18 @@ class any_buffer_source { struct vtable; struct awaitable_ops; + struct read_awaitable_ops; template struct vtable_for_impl; + // hot-path members first for cache locality void* source_ = nullptr; vtable const* vt_ = nullptr; void* cached_awaitable_ = nullptr; - void* storage_ = nullptr; awaitable_ops const* active_ops_ = nullptr; + read_awaitable_ops const* active_read_ops_ = nullptr; + void* storage_ = nullptr; public: /** Destructor. @@ -130,8 +147,9 @@ class any_buffer_source : source_(std::exchange(other.source_, nullptr)) , vt_(std::exchange(other.vt_, nullptr)) , cached_awaitable_(std::exchange(other.cached_awaitable_, nullptr)) - , storage_(std::exchange(other.storage_, nullptr)) , active_ops_(std::exchange(other.active_ops_, nullptr)) + , active_read_ops_(std::exchange(other.active_read_ops_, nullptr)) + , storage_(std::exchange(other.storage_, nullptr)) { } @@ -149,7 +167,9 @@ class any_buffer_source /** Construct by taking ownership of a BufferSource. Allocates storage and moves the source into this wrapper. - The wrapper owns the source and will destroy it. + The wrapper owns the source and will destroy it. If `S` also + satisfies @ref ReadSource, native read operations are + forwarded through the virtual boundary. @param s The source to take ownership of. */ @@ -160,7 +180,9 @@ class any_buffer_source /** Construct by wrapping a BufferSource without ownership. Wraps the given source by pointer. The source must remain - valid for the lifetime of this wrapper. + valid for the lifetime of this wrapper. If `S` also + satisfies @ref ReadSource, native read operations are + forwarded through the virtual boundary. @param s Pointer to the source to wrap. */ @@ -214,24 +236,46 @@ class any_buffer_source @return An awaitable yielding `(error_code,std::span)`. On success with data, a non-empty span of filled buffers. - On success with empty span, source is exhausted. + On EOF, `ec == cond::eof` and span is empty. @par Preconditions The wrapper must contain a valid source (`has_value() == true`). 
+ The caller must not call this function again after a prior + call returned an error. */ auto pull(std::span dest); - /** Read data into a mutable buffer sequence. + /** Read some data into a mutable buffer sequence. + + Reads one or more bytes into the caller's buffers. May fill + less than the full sequence. + + When the wrapped type provides native @ref ReadSource support, + the operation forwards directly. Otherwise it is synthesized + from @ref pull, @ref buffer_copy, and @ref consume. + + @param buffers The buffer sequence to fill. + + @return An awaitable yielding `(error_code,std::size_t)`. + + @par Preconditions + The wrapper must contain a valid source (`has_value() == true`). + The caller must not call this function again after a prior + call returned an error (including EOF). + + @see pull, consume + */ + template + io_task + read_some(MB buffers); - Fills the provided buffer sequence by pulling data from the - underlying source and copying it into the caller's buffers. - This satisfies @ref ReadSource but incurs a copy; for zero-copy - access, use @ref pull and @ref consume instead. + /** Read data into a mutable buffer sequence. - @note This operation copies data from the source's internal - buffers into the caller's buffers. For zero-copy reads, - use @ref pull and @ref consume directly. + Fills the provided buffer sequence completely. When the + wrapped type provides native @ref ReadSource support, each + window is forwarded directly. Otherwise the data is + synthesized from @ref pull, @ref buffer_copy, and @ref consume. @param buffers The buffer sequence to fill. @@ -241,11 +285,13 @@ class any_buffer_source @par Preconditions The wrapper must contain a valid source (`has_value() == true`). + The caller must not call this function again after a prior + call returned an error (including EOF). @see pull, consume */ template - task> + io_task read(MB buffers); protected: @@ -269,10 +315,28 @@ class any_buffer_source std::terminate(); source_ = &new_source; } + +private: + /** Forward a partial read through the vtable. + + Constructs the underlying `read_some` awaitable in + cached storage and returns a type-erased awaitable. + */ + auto + read_some_(std::span buffers); + + /** Forward a complete read through the vtable. + + Constructs the underlying `read` awaitable in + cached storage and returns a type-erased awaitable. + */ + auto + read_(std::span buffers); }; //---------------------------------------------------------- +/** Type-erased ops for awaitables yielding `io_result>`. */ struct any_buffer_source::awaitable_ops { bool (*await_ready)(void*); @@ -281,8 +345,18 @@ struct any_buffer_source::awaitable_ops void (*destroy)(void*) noexcept; }; +/** Type-erased ops for awaitables yielding `io_result`. 
*/ +struct any_buffer_source::read_awaitable_ops +{ + bool (*await_ready)(void*); + coro (*await_suspend)(void*, coro, executor_ref, std::stop_token); + io_result (*await_resume)(void*); + void (*destroy)(void*) noexcept; +}; + struct any_buffer_source::vtable { + // BufferSource ops (always populated) void (*destroy)(void*) noexcept; void (*do_consume)(void* source, std::size_t n) noexcept; std::size_t awaitable_size; @@ -291,12 +365,22 @@ struct any_buffer_source::vtable void* source, void* storage, std::span dest); + + // ReadSource forwarding (null when wrapped type is BufferSource-only) + read_awaitable_ops const* (*construct_read_some_awaitable)( + void* source, + void* storage, + std::span buffers); + read_awaitable_ops const* (*construct_read_awaitable)( + void* source, + void* storage, + std::span buffers); }; template struct any_buffer_source::vtable_for_impl { - using Awaitable = decltype(std::declval().pull( + using PullAwaitable = decltype(std::declval().pull( std::declval>())); static void @@ -318,33 +402,148 @@ struct any_buffer_source::vtable_for_impl std::span dest) { auto& s = *static_cast(source); - ::new(storage) Awaitable(s.pull(dest)); + ::new(storage) PullAwaitable(s.pull(dest)); static constexpr awaitable_ops ops = { +[](void* p) { - return static_cast(p)->await_ready(); + return static_cast(p)->await_ready(); }, +[](void* p, coro h, executor_ref ex, std::stop_token token) { return detail::call_await_suspend( - static_cast(p), h, ex, token); + static_cast(p), h, ex, token); }, +[](void* p) { - return static_cast(p)->await_resume(); + return static_cast(p)->await_resume(); }, +[](void* p) noexcept { - static_cast(p)->~Awaitable(); + static_cast(p)->~PullAwaitable(); } }; return &ops; } - static constexpr vtable value = { - &do_destroy_impl, - &do_consume_impl, - sizeof(Awaitable), - alignof(Awaitable), - &construct_awaitable_impl - }; + //------------------------------------------------------ + // ReadSource forwarding (only instantiated when ReadSource) + + static read_awaitable_ops const* + construct_read_some_awaitable_impl( + void* source, + void* storage, + std::span buffers) + requires ReadSource + { + using Aw = decltype(std::declval().read_some( + std::span{})); + auto& s = *static_cast(source); + ::new(storage) Aw(s.read_some(buffers)); + + static constexpr read_awaitable_ops ops = { + +[](void* p) { + return static_cast(p)->await_ready(); + }, + +[](void* p, coro h, executor_ref ex, std::stop_token token) { + return detail::call_await_suspend( + static_cast(p), h, ex, token); + }, + +[](void* p) { + return static_cast(p)->await_resume(); + }, + +[](void* p) noexcept { + static_cast(p)->~Aw(); + } + }; + return &ops; + } + + static read_awaitable_ops const* + construct_read_awaitable_impl( + void* source, + void* storage, + std::span buffers) + requires ReadSource + { + using Aw = decltype(std::declval().read( + std::span{})); + auto& s = *static_cast(source); + ::new(storage) Aw(s.read(buffers)); + + static constexpr read_awaitable_ops ops = { + +[](void* p) { + return static_cast(p)->await_ready(); + }, + +[](void* p, coro h, executor_ref ex, std::stop_token token) { + return detail::call_await_suspend( + static_cast(p), h, ex, token); + }, + +[](void* p) { + return static_cast(p)->await_resume(); + }, + +[](void* p) noexcept { + static_cast(p)->~Aw(); + } + }; + return &ops; + } + + //------------------------------------------------------ + + static consteval std::size_t + compute_max_size() noexcept + { + std::size_t s = sizeof(PullAwaitable); + if 
constexpr (ReadSource) + { + using RS = decltype(std::declval().read_some( + std::span{})); + using R = decltype(std::declval().read( + std::span{})); + + if(sizeof(RS) > s) s = sizeof(RS); + if(sizeof(R) > s) s = sizeof(R); + } + return s; + } + + static consteval std::size_t + compute_max_align() noexcept + { + std::size_t a = alignof(PullAwaitable); + if constexpr (ReadSource) + { + using RS = decltype(std::declval().read_some( + std::span{})); + using R = decltype(std::declval().read( + std::span{})); + + if(alignof(RS) > a) a = alignof(RS); + if(alignof(R) > a) a = alignof(R); + } + return a; + } + + static consteval vtable + make_vtable() noexcept + { + vtable v{}; + v.destroy = &do_destroy_impl; + v.do_consume = &do_consume_impl; + v.awaitable_size = compute_max_size(); + v.awaitable_align = compute_max_align(); + v.construct_awaitable = &construct_awaitable_impl; + v.construct_read_some_awaitable = nullptr; + v.construct_read_awaitable = nullptr; + + if constexpr (ReadSource) + { + v.construct_read_some_awaitable = + &construct_read_some_awaitable_impl; + v.construct_read_awaitable = + &construct_read_awaitable_impl; + } + return v; + } + + static constexpr vtable value = make_vtable(); }; //---------------------------------------------------------- @@ -378,6 +577,7 @@ any_buffer_source::operator=(any_buffer_source&& other) noexcept cached_awaitable_ = std::exchange(other.cached_awaitable_, nullptr); storage_ = std::exchange(other.storage_, nullptr); active_ops_ = std::exchange(other.active_ops_, nullptr); + active_read_ops_ = std::exchange(other.active_read_ops_, nullptr); } return *this; } @@ -403,7 +603,6 @@ any_buffer_source::any_buffer_source(S s) storage_ = ::operator new(sizeof(S)); source_ = ::new(storage_) S(std::move(s)); - // Preallocate the awaitable storage cached_awaitable_ = ::operator new(vt_->awaitable_size); g.committed = true; @@ -414,7 +613,6 @@ any_buffer_source::any_buffer_source(S* s) : source_(s) , vt_(&vtable_for_impl::value) { - // Preallocate the awaitable storage cached_awaitable_ = ::operator new(vt_->awaitable_size); } @@ -435,25 +633,18 @@ any_buffer_source::pull(std::span dest) std::span dest_; bool - await_ready() const noexcept - { - return false; - } - - coro - await_suspend(coro h, executor_ref ex, std::stop_token token) + await_ready() { - // Construct the underlying awaitable into cached storage self_->active_ops_ = self_->vt_->construct_awaitable( self_->source_, self_->cached_awaitable_, dest_); + return self_->active_ops_->await_ready(self_->cached_awaitable_); + } - // Check if underlying is immediately ready - if(self_->active_ops_->await_ready(self_->cached_awaitable_)) - return h; - - // Forward to underlying awaitable + coro + await_suspend(coro h, executor_ref ex, std::stop_token token) + { return self_->active_ops_->await_suspend( self_->cached_awaitable_, h, ex, token); } @@ -475,28 +666,178 @@ any_buffer_source::pull(std::span dest) return awaitable{this, dest}; } +//---------------------------------------------------------- +// Private helpers for native ReadSource forwarding + +inline auto +any_buffer_source::read_some_( + std::span buffers) +{ + struct awaitable + { + any_buffer_source* self_; + std::span buffers_; + + bool + await_ready() const noexcept + { + return false; + } + + coro + await_suspend(coro h, executor_ref ex, std::stop_token token) + { + self_->active_read_ops_ = + self_->vt_->construct_read_some_awaitable( + self_->source_, + self_->cached_awaitable_, + buffers_); + + if(self_->active_read_ops_->await_ready( + 
self_->cached_awaitable_)) + return h; + + return self_->active_read_ops_->await_suspend( + self_->cached_awaitable_, h, ex, token); + } + + io_result + await_resume() + { + struct guard { + any_buffer_source* self; + ~guard() { + self->active_read_ops_->destroy( + self->cached_awaitable_); + self->active_read_ops_ = nullptr; + } + } g{self_}; + return self_->active_read_ops_->await_resume( + self_->cached_awaitable_); + } + }; + return awaitable{this, buffers}; +} + +inline auto +any_buffer_source::read_( + std::span buffers) +{ + struct awaitable + { + any_buffer_source* self_; + std::span buffers_; + + bool + await_ready() const noexcept + { + return false; + } + + coro + await_suspend(coro h, executor_ref ex, std::stop_token token) + { + self_->active_read_ops_ = + self_->vt_->construct_read_awaitable( + self_->source_, + self_->cached_awaitable_, + buffers_); + + if(self_->active_read_ops_->await_ready( + self_->cached_awaitable_)) + return h; + + return self_->active_read_ops_->await_suspend( + self_->cached_awaitable_, h, ex, token); + } + + io_result + await_resume() + { + struct guard { + any_buffer_source* self; + ~guard() { + self->active_read_ops_->destroy( + self->cached_awaitable_); + self->active_read_ops_ = nullptr; + } + } g{self_}; + return self_->active_read_ops_->await_resume( + self_->cached_awaitable_); + } + }; + return awaitable{this, buffers}; +} + +//---------------------------------------------------------- +// Public ReadSource methods + template -task> +io_task +any_buffer_source::read_some(MB buffers) +{ + buffer_param bp(buffers); + auto dest = bp.data(); + if(dest.empty()) + co_return {{}, 0}; + + // Native ReadSource path + if(vt_->construct_read_some_awaitable) + co_return co_await read_some_(dest); + + // Synthesized path: pull + buffer_copy + consume + const_buffer arr[detail::max_iovec_]; + auto [ec, bufs] = co_await pull(arr); + if(ec) + co_return {ec, 0}; + + auto n = buffer_copy(dest, bufs); + consume(n); + co_return {{}, n}; +} + +template +io_task any_buffer_source::read(MB buffers) { + buffer_param bp(buffers); std::size_t total = 0; - auto dest = sans_prefix(buffers, 0); - while(!buffer_empty(dest)) + // Native ReadSource path + if(vt_->construct_read_awaitable) { + for(;;) + { + auto dest = bp.data(); + if(dest.empty()) + break; + + auto [ec, n] = co_await read_(dest); + total += n; + if(ec) + co_return {ec, total}; + bp.consume(n); + } + co_return {{}, total}; + } + + // Synthesized path: pull + buffer_copy + consume + for(;;) + { + auto dest = bp.data(); + if(dest.empty()) + break; + const_buffer arr[detail::max_iovec_]; auto [ec, bufs] = co_await pull(arr); if(ec) co_return {ec, total}; - if(bufs.empty()) - co_return {error::eof, total}; - auto n = buffer_copy(dest, bufs); consume(n); total += n; - dest = sans_prefix(dest, n); + bp.consume(n); } co_return {{}, total}; diff --git a/include/boost/capy/io/any_read_source.hpp b/include/boost/capy/io/any_read_source.hpp index ad16df10..5605e77b 100644 --- a/include/boost/capy/io/any_read_source.hpp +++ b/include/boost/capy/io/any_read_source.hpp @@ -13,13 +13,14 @@ #include #include #include +#include #include #include #include #include #include #include -#include +#include #include #include @@ -52,6 +53,11 @@ namespace capy { so memory usage can be measured up front, rather than allocating piecemeal as traffic arrives. 
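+
+    @par Example
+    A minimal usage sketch; `my_source` stands in for any
+    concrete type satisfying @ref ReadSource:
+    @code
+    my_source src;
+    any_read_source ars(&src);
+
+    char buf[4096];
+    auto [ec, n] = co_await ars.read_some(
+        mutable_buffer(buf, sizeof(buf)));
+    @endcode
+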
+ @par Immediate Completion + Operations complete immediately without suspending when the + buffer sequence is empty, or when the underlying source's + awaitable reports readiness via `await_ready`. + @par Thread Safety Not thread-safe. Concurrent operations on the same wrapper are undefined behavior. @@ -178,18 +184,55 @@ class any_read_source return has_value(); } - /** Initiate an asynchronous read operation. + /** Initiate a partial read operation. + + Reads one or more bytes into the provided buffer sequence. + May fill less than the full sequence. + + @param buffers The buffer sequence to read into. + + @return An awaitable yielding `(error_code,std::size_t)`. + + @par Immediate Completion + The operation completes immediately without suspending + the calling coroutine when: + @li The buffer sequence is empty, returning `{error_code{}, 0}`. + @li The underlying source's awaitable reports immediate + readiness via `await_ready`. + + @note This is a partial operation and may not process the + entire buffer sequence. Use @ref read for guaranteed + complete transfer. + + @par Preconditions + The wrapper must contain a valid source (`has_value() == true`). + The caller must not call this function again after a prior + call returned an error (including EOF). + */ + template + auto + read_some(MB buffers); + + /** Initiate a complete read operation. - Reads data into the provided buffer sequence. The operation - completes when the entire buffer sequence is filled, end-of-file - is reached, or an error occurs. + Reads data into the provided buffer sequence by forwarding + to the underlying source's `read` operation. Large buffer + sequences are processed in windows, with each window + forwarded as a separate `read` call to the underlying source. + The operation completes when the entire buffer sequence is + filled, end-of-file is reached, or an error occurs. - @param buffers The buffer sequence to read into. Passed by - value to ensure the sequence lives in the coroutine frame - across suspension points. + @param buffers The buffer sequence to read into. @return An awaitable yielding `(error_code,std::size_t)`. + @par Immediate Completion + The operation completes immediately without suspending + the calling coroutine when: + @li The buffer sequence is empty, returning `{error_code{}, 0}`. + @li The underlying source's `read` awaitable reports + immediate readiness via `await_ready`. + @par Postconditions Exactly one of the following is true on return: @li **Success**: `!ec` and `n == buffer_size(buffers)`. @@ -199,9 +242,11 @@ class any_read_source @par Preconditions The wrapper must contain a valid source (`has_value() == true`). + The caller must not call this function again after a prior + call returned an error (including EOF). 
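+
+    @par Example
+    A sketch of a complete read into a fixed buffer, given an
+    @ref any_read_source `ars`. On success the buffer is full;
+    otherwise `n` reports how many bytes arrived before the
+    error (or EOF):
+    @code
+    char buf[1024];
+    auto [ec, n] = co_await ars.read(
+        mutable_buffer(buf, sizeof(buf)));
+    if(ec && n > 0)
+    {
+        // short read: only buf[0..n) is valid
+    }
+    @endcode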
*/ template - task> + io_task read(MB buffers); protected: @@ -228,11 +273,12 @@ class any_read_source private: auto - read_some_(std::span buffers); + read_(std::span buffers); }; //---------------------------------------------------------- +// ordered by call sequence for cache line coherence struct any_read_source::awaitable_ops { bool (*await_ready)(void*); @@ -241,21 +287,28 @@ struct any_read_source::awaitable_ops void (*destroy)(void*) noexcept; }; +// ordered by call frequency for cache line coherence struct any_read_source::vtable { - void (*destroy)(void*) noexcept; - std::size_t awaitable_size; - std::size_t awaitable_align; - awaitable_ops const* (*construct_awaitable)( + awaitable_ops const* (*construct_read_some_awaitable)( void* source, void* storage, std::span buffers); + awaitable_ops const* (*construct_read_awaitable)( + void* source, + void* storage, + std::span buffers); + std::size_t awaitable_size; + std::size_t awaitable_align; + void (*destroy)(void*) noexcept; }; template struct any_read_source::vtable_for_impl { - using Awaitable = decltype(std::declval().read( + using ReadSomeAwaitable = decltype(std::declval().read_some( + std::span{})); + using ReadAwaitable = decltype(std::declval().read( std::span{})); static void @@ -265,37 +318,74 @@ struct any_read_source::vtable_for_impl } static awaitable_ops const* - construct_awaitable_impl( + construct_read_some_awaitable_impl( + void* source, + void* storage, + std::span buffers) + { + auto& s = *static_cast(source); + ::new(storage) ReadSomeAwaitable(s.read_some(buffers)); + + static constexpr awaitable_ops ops = { + +[](void* p) { + return static_cast(p)->await_ready(); + }, + +[](void* p, coro h, executor_ref ex, std::stop_token token) { + return detail::call_await_suspend( + static_cast(p), h, ex, token); + }, + +[](void* p) { + return static_cast(p)->await_resume(); + }, + +[](void* p) noexcept { + static_cast(p)->~ReadSomeAwaitable(); + } + }; + return &ops; + } + + static awaitable_ops const* + construct_read_awaitable_impl( void* source, void* storage, std::span buffers) { auto& s = *static_cast(source); - ::new(storage) Awaitable(s.read(buffers)); + ::new(storage) ReadAwaitable(s.read(buffers)); static constexpr awaitable_ops ops = { +[](void* p) { - return static_cast(p)->await_ready(); + return static_cast(p)->await_ready(); }, +[](void* p, coro h, executor_ref ex, std::stop_token token) { return detail::call_await_suspend( - static_cast(p), h, ex, token); + static_cast(p), h, ex, token); }, +[](void* p) { - return static_cast(p)->await_resume(); + return static_cast(p)->await_resume(); }, +[](void* p) noexcept { - static_cast(p)->~Awaitable(); + static_cast(p)->~ReadAwaitable(); } }; return &ops; } + static constexpr std::size_t max_awaitable_size = + sizeof(ReadSomeAwaitable) > sizeof(ReadAwaitable) + ? sizeof(ReadSomeAwaitable) + : sizeof(ReadAwaitable); + static constexpr std::size_t max_awaitable_align = + alignof(ReadSomeAwaitable) > alignof(ReadAwaitable) + ? 
alignof(ReadSomeAwaitable) + : alignof(ReadAwaitable); + static constexpr vtable value = { - &do_destroy_impl, - sizeof(Awaitable), - alignof(Awaitable), - &construct_awaitable_impl + &construct_read_some_awaitable_impl, + &construct_read_awaitable_impl, + max_awaitable_size, + max_awaitable_align, + &do_destroy_impl }; }; @@ -310,7 +400,11 @@ any_read_source::~any_read_source() ::operator delete(storage_); } if(cached_awaitable_) + { + if(active_ops_) + active_ops_->destroy(cached_awaitable_); ::operator delete(cached_awaitable_); + } } inline any_read_source& @@ -324,7 +418,11 @@ any_read_source::operator=(any_read_source&& other) noexcept ::operator delete(storage_); } if(cached_awaitable_) + { + if(active_ops_) + active_ops_->destroy(cached_awaitable_); ::operator delete(cached_awaitable_); + } source_ = std::exchange(other.source_, nullptr); vt_ = std::exchange(other.vt_, nullptr); cached_awaitable_ = std::exchange(other.cached_awaitable_, nullptr); @@ -372,8 +470,64 @@ any_read_source::any_read_source(S* s) //---------------------------------------------------------- +template +auto +any_read_source::read_some(MB buffers) +{ + struct awaitable + { + any_read_source* self_; + mutable_buffer_array ba_; + + awaitable(any_read_source* self, MB const& buffers) + : self_(self) + , ba_(buffers) + { + } + + bool + await_ready() const noexcept + { + return ba_.to_span().empty(); + } + + coro + await_suspend(coro h, executor_ref ex, std::stop_token token) + { + self_->active_ops_ = self_->vt_->construct_read_some_awaitable( + self_->source_, + self_->cached_awaitable_, + ba_.to_span()); + + if(self_->active_ops_->await_ready(self_->cached_awaitable_)) + return h; + + return self_->active_ops_->await_suspend( + self_->cached_awaitable_, h, ex, token); + } + + io_result + await_resume() + { + if(ba_.to_span().empty()) + return {{}, 0}; + + struct guard { + any_read_source* self; + ~guard() { + self->active_ops_->destroy(self->cached_awaitable_); + self->active_ops_ = nullptr; + } + } g{self_}; + return self_->active_ops_->await_resume( + self_->cached_awaitable_); + } + }; + return awaitable(this, buffers); +} + inline auto -any_read_source::read_some_(std::span buffers) +any_read_source::read_(std::span buffers) { struct awaitable { @@ -389,17 +543,14 @@ any_read_source::read_some_(std::span buffers) coro await_suspend(coro h, executor_ref ex, std::stop_token token) { - // Construct the underlying awaitable into cached storage - self_->active_ops_ = self_->vt_->construct_awaitable( + self_->active_ops_ = self_->vt_->construct_read_awaitable( self_->source_, self_->cached_awaitable_, buffers_); - // Check if underlying is immediately ready if(self_->active_ops_->await_ready(self_->cached_awaitable_)) return h; - // Forward to underlying awaitable return self_->active_ops_->await_suspend( self_->cached_awaitable_, h, ex, token); } @@ -422,10 +573,10 @@ any_read_source::read_some_(std::span buffers) } template -task> +io_task any_read_source::read(MB buffers) { - buffer_param bp(std::move(buffers)); + buffer_param bp(buffers); std::size_t total = 0; for(;;) @@ -434,7 +585,7 @@ any_read_source::read(MB buffers) if(bufs.empty()) break; - auto [ec, n] = co_await read_some_(bufs); + auto [ec, n] = co_await read_(bufs); total += n; if(ec) co_return {ec, total}; diff --git a/include/boost/capy/io/any_read_stream.hpp b/include/boost/capy/io/any_read_stream.hpp index 4650453f..f6072546 100644 --- a/include/boost/capy/io/any_read_stream.hpp +++ b/include/boost/capy/io/any_read_stream.hpp @@ -13,7 +13,7 @@ 
#include #include #include -#include +#include #include #include #include @@ -51,6 +51,11 @@ namespace capy { so memory usage can be measured up front, rather than allocating piecemeal as traffic arrives. + @par Immediate Completion + When the underlying stream's awaitable reports ready immediately + (e.g. buffered data already available), the wrapper skips + coroutine suspension entirely and returns the result inline. + @par Thread Safety Not thread-safe. Concurrent operations on the same wrapper are undefined behavior. @@ -65,7 +70,7 @@ namespace capy { any_read_stream stream(&sock); mutable_buffer buf(data, size); - auto [ec, n] = co_await stream.read_some(std::span(&buf, 1)); + auto [ec, n] = co_await stream.read_some(buf); @endcode @see any_write_stream, any_stream, ReadStream @@ -73,16 +78,16 @@ namespace capy { class any_read_stream { struct vtable; - struct awaitable_ops; template struct vtable_for_impl; + // ordered for cache line coherence void* stream_ = nullptr; vtable const* vt_ = nullptr; void* cached_awaitable_ = nullptr; void* storage_ = nullptr; - awaitable_ops const* active_ops_ = nullptr; + bool awaitable_active_ = false; public: /** Destructor. @@ -119,7 +124,7 @@ class any_read_stream , vt_(std::exchange(other.vt_, nullptr)) , cached_awaitable_(std::exchange(other.cached_awaitable_, nullptr)) , storage_(std::exchange(other.storage_, nullptr)) - , active_ops_(std::exchange(other.active_ops_, nullptr)) + , awaitable_active_(std::exchange(other.awaitable_active_, false)) { } @@ -189,8 +194,19 @@ class any_read_stream @return An awaitable yielding `(error_code,std::size_t)`. + @par Immediate Completion + The operation completes immediately without suspending + the calling coroutine when the underlying stream's + awaitable reports immediate readiness via `await_ready`. + + @note This is a partial operation and may not process the + entire buffer sequence. Use the composed @ref read algorithm + for guaranteed complete transfer. + @par Preconditions The wrapper must contain a valid stream (`has_value() == true`). + The caller must not call this function again after a prior + call returned an error (including EOF). 
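+
+    @par Example
+    A sketch of a receive loop built on partial reads;
+    `process` is a hypothetical consumer of the bytes:
+    @code
+    char buf[4096];
+    for(;;)
+    {
+        auto [ec, n] = co_await stream.read_some(
+            mutable_buffer(buf, sizeof(buf)));
+        if(n > 0)
+            process(buf, n);
+        if(ec)
+            break; // EOF or failure; do not call again
+    }
+    @endcode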
*/ template auto @@ -221,23 +237,20 @@ class any_read_stream //---------------------------------------------------------- -struct any_read_stream::awaitable_ops +struct any_read_stream::vtable { + // ordered by call frequency for cache line coherence + void (*construct_awaitable)( + void* stream, + void* storage, + std::span buffers); bool (*await_ready)(void*); coro (*await_suspend)(void*, coro, executor_ref, std::stop_token); io_result (*await_resume)(void*); - void (*destroy)(void*) noexcept; -}; - -struct any_read_stream::vtable -{ - void (*destroy)(void*) noexcept; + void (*destroy_awaitable)(void*) noexcept; std::size_t awaitable_size; std::size_t awaitable_align; - awaitable_ops const* (*construct_awaitable)( - void* stream, - void* storage, - std::span buffers); + void (*destroy)(void*) noexcept; }; template @@ -252,7 +265,7 @@ struct any_read_stream::vtable_for_impl static_cast(stream)->~S(); } - static awaitable_ops const* + static void construct_awaitable_impl( void* stream, void* storage, @@ -260,30 +273,26 @@ struct any_read_stream::vtable_for_impl { auto& s = *static_cast(stream); ::new(storage) Awaitable(s.read_some(buffers)); - - static constexpr awaitable_ops ops = { - +[](void* p) { - return static_cast(p)->await_ready(); - }, - +[](void* p, coro h, executor_ref ex, std::stop_token token) { - return detail::call_await_suspend( - static_cast(p), h, ex, token); - }, - +[](void* p) { - return static_cast(p)->await_resume(); - }, - +[](void* p) noexcept { - static_cast(p)->~Awaitable(); - } - }; - return &ops; } static constexpr vtable value = { - &do_destroy_impl, + &construct_awaitable_impl, + +[](void* p) { + return static_cast(p)->await_ready(); + }, + +[](void* p, coro h, executor_ref ex, std::stop_token token) { + return detail::call_await_suspend( + static_cast(p), h, ex, token); + }, + +[](void* p) { + return static_cast(p)->await_resume(); + }, + +[](void* p) noexcept { + static_cast(p)->~Awaitable(); + }, sizeof(Awaitable), alignof(Awaitable), - &construct_awaitable_impl + &do_destroy_impl }; }; @@ -299,8 +308,8 @@ any_read_stream::~any_read_stream() } if(cached_awaitable_) { - if(active_ops_) - active_ops_->destroy(cached_awaitable_); + if(awaitable_active_) + vt_->destroy_awaitable(cached_awaitable_); ::operator delete(cached_awaitable_); } } @@ -317,15 +326,15 @@ any_read_stream::operator=(any_read_stream&& other) noexcept } if(cached_awaitable_) { - if(active_ops_) - active_ops_->destroy(cached_awaitable_); + if(awaitable_active_) + vt_->destroy_awaitable(cached_awaitable_); ::operator delete(cached_awaitable_); } stream_ = std::exchange(other.stream_, nullptr); vt_ = std::exchange(other.vt_, nullptr); cached_awaitable_ = std::exchange(other.cached_awaitable_, nullptr); storage_ = std::exchange(other.storage_, nullptr); - active_ops_ = std::exchange(other.active_ops_, nullptr); + awaitable_active_ = std::exchange(other.awaitable_active_, false); } return *this; } @@ -372,32 +381,30 @@ template auto any_read_stream::read_some(MB buffers) { + // VFALCO in theory, we could use if constexpr to detect a + // span and then pass that through to read_some without the array struct awaitable { any_read_stream* self_; - buffer_param bp_; + mutable_buffer_array ba_; bool - await_ready() const noexcept + await_ready() { - return false; + self_->vt_->construct_awaitable( + self_->stream_, + self_->cached_awaitable_, + ba_.to_span()); + self_->awaitable_active_ = true; + + return self_->vt_->await_ready( + self_->cached_awaitable_); } coro await_suspend(coro h, executor_ref 
ex, std::stop_token token) { - // Construct the underlying awaitable into cached storage - self_->active_ops_ = self_->vt_->construct_awaitable( - self_->stream_, - self_->cached_awaitable_, - bp_.data()); - - // Check if underlying is immediately ready - if(self_->active_ops_->await_ready(self_->cached_awaitable_)) - return h; - - // Forward to underlying awaitable - return self_->active_ops_->await_suspend( + return self_->vt_->await_suspend( self_->cached_awaitable_, h, ex, token); } @@ -407,15 +414,16 @@ any_read_stream::read_some(MB buffers) struct guard { any_read_stream* self; ~guard() { - self->active_ops_->destroy(self->cached_awaitable_); - self->active_ops_ = nullptr; + self->vt_->destroy_awaitable(self->cached_awaitable_); + self->awaitable_active_ = false; } } g{self_}; - return self_->active_ops_->await_resume( + return self_->vt_->await_resume( self_->cached_awaitable_); } }; - return awaitable{this, buffer_param(buffers)}; + return awaitable{this, + mutable_buffer_array(buffers)}; } } // namespace capy diff --git a/include/boost/capy/io/any_write_sink.hpp b/include/boost/capy/io/any_write_sink.hpp index 894c0fd5..0a09d766 100644 --- a/include/boost/capy/io/any_write_sink.hpp +++ b/include/boost/capy/io/any_write_sink.hpp @@ -13,13 +13,14 @@ #include #include #include +#include #include #include #include #include #include #include -#include +#include #include #include @@ -53,6 +54,11 @@ namespace capy { so memory usage can be measured up front, rather than allocating piecemeal as traffic arrives. + @par Immediate Completion + Operations complete immediately without suspending when the + buffer sequence is empty, or when the underlying sink's + awaitable reports readiness via `await_ready`. + @par Thread Safety Not thread-safe. Concurrent operations on the same wrapper are undefined behavior. @@ -183,47 +189,89 @@ class any_write_sink return has_value(); } - /** Initiate an asynchronous write operation. + /** Initiate a partial write operation. + + Writes one or more bytes from the provided buffer sequence. + May consume less than the full sequence. + + @param buffers The buffer sequence containing data to write. + + @return An awaitable yielding `(error_code,std::size_t)`. + + @par Immediate Completion + The operation completes immediately without suspending + the calling coroutine when: + @li The buffer sequence is empty, returning `{error_code{}, 0}`. + @li The underlying sink's awaitable reports immediate + readiness via `await_ready`. + + @note This is a partial operation and may not process the + entire buffer sequence. Use @ref write for guaranteed + complete transfer. + + @par Preconditions + The wrapper must contain a valid sink (`has_value() == true`). + */ + template + auto + write_some(CB buffers); + + /** Initiate a complete write operation. Writes data from the provided buffer sequence. The operation completes when all bytes have been consumed, or an error - occurs. + occurs. Forwards to the underlying sink's `write` operation, + windowed through @ref buffer_param when the sequence exceeds + the per-call buffer limit. @param buffers The buffer sequence containing data to write. - Passed by value to ensure the sequence lives in the - coroutine frame across suspension points. @return An awaitable yielding `(error_code,std::size_t)`. + @par Immediate Completion + The operation completes immediately without suspending + the calling coroutine when: + @li The buffer sequence is empty, returning `{error_code{}, 0}`. 
+ @li Every underlying `write` call completes + immediately (the wrapped sink reports readiness + via `await_ready` on each iteration). + @par Preconditions The wrapper must contain a valid sink (`has_value() == true`). */ template - task> + io_task write(CB buffers); - /** Initiate an asynchronous write operation with optional EOF. + /** Atomically write data and signal end-of-stream. - Writes data from the provided buffer sequence, optionally - finalizing the sink afterwards. The operation completes when - all bytes have been consumed and (if eof is true) the sink - is finalized, or an error occurs. + Writes all data from the buffer sequence and then signals + end-of-stream. The implementation decides how to partition + the data across calls to the underlying sink's @ref write + and `write_eof`. When the caller's buffer sequence is + non-empty, the final call to the underlying sink is always + `write_eof` with a non-empty buffer sequence. When the + caller's buffer sequence is empty, only `write_eof()` with + no data is called. @param buffers The buffer sequence containing data to write. - Passed by value to ensure the sequence lives in the - coroutine frame across suspension points. - - @param eof If `true`, the sink is finalized after writing - the data. @return An awaitable yielding `(error_code,std::size_t)`. + @par Immediate Completion + The operation completes immediately without suspending + the calling coroutine when: + @li The buffer sequence is empty. Only the @ref write_eof() + call is performed. + @li All underlying operations complete immediately (the + wrapped sink reports readiness via `await_ready`). + @par Preconditions The wrapper must contain a valid sink (`has_value() == true`). */ template - task> - write(CB buffers, bool eof); + io_task + write_eof(CB buffers); /** Signal end of data. @@ -233,6 +281,11 @@ class any_write_sink @return An awaitable yielding `(error_code)`. + @par Immediate Completion + The operation completes immediately without suspending + the calling coroutine when the underlying sink's awaitable + reports immediate readiness via `await_ready`. + @par Preconditions The wrapper must contain a valid sink (`has_value() == true`). 
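+
+    @par Example
+    A sketch that sends a complete body and finalizes the
+    sink in one call, given an @ref any_write_sink `sink`:
+    @code
+    std::string body = "hello, world";
+    auto [ec, n] = co_await sink.write_eof(
+        make_buffer(body.data(), body.size()));
+    if(!ec)
+    {
+        // all bytes were written and the sink is finalized
+    }
+    @endcode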
*/ @@ -263,7 +316,13 @@ class any_write_sink private: auto - write_some_(std::span buffers, bool eof); + write_some_(std::span buffers); + + auto + write_(std::span buffers); + + auto + write_eof_buffers_(std::span buffers); }; //---------------------------------------------------------- @@ -286,24 +345,35 @@ struct any_write_sink::eof_awaitable_ops struct any_write_sink::vtable { - void (*destroy)(void*) noexcept; - std::size_t awaitable_size; - std::size_t awaitable_align; + write_awaitable_ops const* (*construct_write_some_awaitable)( + void* sink, + void* storage, + std::span buffers); write_awaitable_ops const* (*construct_write_awaitable)( void* sink, void* storage, - std::span buffers, - bool eof); + std::span buffers); + write_awaitable_ops const* (*construct_write_eof_buffers_awaitable)( + void* sink, + void* storage, + std::span buffers); eof_awaitable_ops const* (*construct_eof_awaitable)( void* sink, void* storage); + std::size_t awaitable_size; + std::size_t awaitable_align; + void (*destroy)(void*) noexcept; }; template struct any_write_sink::vtable_for_impl { + using WriteSomeAwaitable = decltype(std::declval().write_some( + std::span{})); using WriteAwaitable = decltype(std::declval().write( - std::span{}, false)); + std::span{})); + using WriteEofBuffersAwaitable = decltype(std::declval().write_eof( + std::span{})); using EofAwaitable = decltype(std::declval().write_eof()); static void @@ -312,15 +382,41 @@ struct any_write_sink::vtable_for_impl static_cast(sink)->~S(); } + static write_awaitable_ops const* + construct_write_some_awaitable_impl( + void* sink, + void* storage, + std::span buffers) + { + auto& s = *static_cast(sink); + ::new(storage) WriteSomeAwaitable(s.write_some(buffers)); + + static constexpr write_awaitable_ops ops = { + +[](void* p) { + return static_cast(p)->await_ready(); + }, + +[](void* p, coro h, executor_ref ex, std::stop_token token) { + return detail::call_await_suspend( + static_cast(p), h, ex, token); + }, + +[](void* p) { + return static_cast(p)->await_resume(); + }, + +[](void* p) noexcept { + static_cast(p)->~WriteSomeAwaitable(); + } + }; + return &ops; + } + static write_awaitable_ops const* construct_write_awaitable_impl( void* sink, void* storage, - std::span buffers, - bool eof) + std::span buffers) { auto& s = *static_cast(sink); - ::new(storage) WriteAwaitable(s.write(buffers, eof)); + ::new(storage) WriteAwaitable(s.write(buffers)); static constexpr write_awaitable_ops ops = { +[](void* p) { @@ -340,6 +436,33 @@ struct any_write_sink::vtable_for_impl return &ops; } + static write_awaitable_ops const* + construct_write_eof_buffers_awaitable_impl( + void* sink, + void* storage, + std::span buffers) + { + auto& s = *static_cast(sink); + ::new(storage) WriteEofBuffersAwaitable(s.write_eof(buffers)); + + static constexpr write_awaitable_ops ops = { + +[](void* p) { + return static_cast(p)->await_ready(); + }, + +[](void* p, coro h, executor_ref ex, std::stop_token token) { + return detail::call_await_suspend( + static_cast(p), h, ex, token); + }, + +[](void* p) { + return static_cast(p)->await_resume(); + }, + +[](void* p) noexcept { + static_cast(p)->~WriteEofBuffersAwaitable(); + } + }; + return &ops; + } + static eof_awaitable_ops const* construct_eof_awaitable_impl( void* sink, @@ -366,22 +489,35 @@ struct any_write_sink::vtable_for_impl return &ops; } + static constexpr std::size_t max4( + std::size_t a, std::size_t b, + std::size_t c, std::size_t d) noexcept + { + std::size_t ab = a > b ? a : b; + std::size_t cd = c > d ? 
c : d; + return ab > cd ? ab : cd; + } + static constexpr std::size_t max_awaitable_size = - sizeof(WriteAwaitable) > sizeof(EofAwaitable) - ? sizeof(WriteAwaitable) - : sizeof(EofAwaitable); + max4(sizeof(WriteSomeAwaitable), + sizeof(WriteAwaitable), + sizeof(WriteEofBuffersAwaitable), + sizeof(EofAwaitable)); static constexpr std::size_t max_awaitable_align = - alignof(WriteAwaitable) > alignof(EofAwaitable) - ? alignof(WriteAwaitable) - : alignof(EofAwaitable); + max4(alignof(WriteSomeAwaitable), + alignof(WriteAwaitable), + alignof(WriteEofBuffersAwaitable), + alignof(EofAwaitable)); static constexpr vtable value = { - &do_destroy_impl, + &construct_write_some_awaitable_impl, + &construct_write_awaitable_impl, + &construct_write_eof_buffers_awaitable_impl, + &construct_eof_awaitable_impl, max_awaitable_size, max_awaitable_align, - &construct_write_awaitable_impl, - &construct_eof_awaitable_impl + &do_destroy_impl }; }; @@ -396,7 +532,13 @@ any_write_sink::~any_write_sink() ::operator delete(storage_); } if(cached_awaitable_) + { + if(active_write_ops_) + active_write_ops_->destroy(cached_awaitable_); + else if(active_eof_ops_) + active_eof_ops_->destroy(cached_awaitable_); ::operator delete(cached_awaitable_); + } } inline any_write_sink& @@ -410,7 +552,13 @@ any_write_sink::operator=(any_write_sink&& other) noexcept ::operator delete(storage_); } if(cached_awaitable_) + { + if(active_write_ops_) + active_write_ops_->destroy(cached_awaitable_); + else if(active_eof_ops_) + active_eof_ops_->destroy(cached_awaitable_); ::operator delete(cached_awaitable_); + } sink_ = std::exchange(other.sink_, nullptr); vt_ = std::exchange(other.vt_, nullptr); cached_awaitable_ = std::exchange(other.cached_awaitable_, nullptr); @@ -461,14 +609,59 @@ any_write_sink::any_write_sink(S* s) inline auto any_write_sink::write_some_( - std::span buffers, - bool eof) + std::span buffers) +{ + struct awaitable + { + any_write_sink* self_; + std::span buffers_; + + bool + await_ready() const noexcept + { + return false; + } + + coro + await_suspend(coro h, executor_ref ex, std::stop_token token) + { + self_->active_write_ops_ = self_->vt_->construct_write_some_awaitable( + self_->sink_, + self_->cached_awaitable_, + buffers_); + + if(self_->active_write_ops_->await_ready(self_->cached_awaitable_)) + return h; + + return self_->active_write_ops_->await_suspend( + self_->cached_awaitable_, h, ex, token); + } + + io_result + await_resume() + { + struct guard { + any_write_sink* self; + ~guard() { + self->active_write_ops_->destroy(self->cached_awaitable_); + self->active_write_ops_ = nullptr; + } + } g{self_}; + return self_->active_write_ops_->await_resume( + self_->cached_awaitable_); + } + }; + return awaitable{this, buffers}; +} + +inline auto +any_write_sink::write_( + std::span buffers) { struct awaitable { any_write_sink* self_; std::span buffers_; - bool eof_; bool await_ready() const noexcept @@ -479,18 +672,14 @@ any_write_sink::write_some_( coro await_suspend(coro h, executor_ref ex, std::stop_token token) { - // Construct the underlying awaitable into cached storage self_->active_write_ops_ = self_->vt_->construct_write_awaitable( self_->sink_, self_->cached_awaitable_, - buffers_, - eof_); + buffers_); - // Check if underlying is immediately ready if(self_->active_write_ops_->await_ready(self_->cached_awaitable_)) return h; - // Forward to underlying awaitable return self_->active_write_ops_->await_suspend( self_->cached_awaitable_, h, ex, token); } @@ -509,7 +698,7 @@ any_write_sink::write_some_( 
self_->cached_awaitable_); } }; - return awaitable{this, buffers, eof}; + return awaitable{this, buffers}; } inline auto @@ -559,16 +748,115 @@ any_write_sink::write_eof() return awaitable{this}; } +inline auto +any_write_sink::write_eof_buffers_( + std::span buffers) +{ + struct awaitable + { + any_write_sink* self_; + std::span buffers_; + + bool + await_ready() const noexcept + { + return false; + } + + coro + await_suspend(coro h, executor_ref ex, std::stop_token token) + { + self_->active_write_ops_ = + self_->vt_->construct_write_eof_buffers_awaitable( + self_->sink_, + self_->cached_awaitable_, + buffers_); + + if(self_->active_write_ops_->await_ready(self_->cached_awaitable_)) + return h; + + return self_->active_write_ops_->await_suspend( + self_->cached_awaitable_, h, ex, token); + } + + io_result + await_resume() + { + struct guard { + any_write_sink* self; + ~guard() { + self->active_write_ops_->destroy(self->cached_awaitable_); + self->active_write_ops_ = nullptr; + } + } g{self_}; + return self_->active_write_ops_->await_resume( + self_->cached_awaitable_); + } + }; + return awaitable{this, buffers}; +} + template -task> -any_write_sink::write(CB buffers) +auto +any_write_sink::write_some(CB buffers) { - return write(buffers, false); + struct awaitable + { + any_write_sink* self_; + const_buffer_array ba_; + + awaitable( + any_write_sink* self, + CB const& buffers) + : self_(self) + , ba_(buffers) + { + } + + bool + await_ready() const noexcept + { + return ba_.to_span().empty(); + } + + coro + await_suspend(coro h, executor_ref ex, std::stop_token token) + { + self_->active_write_ops_ = self_->vt_->construct_write_some_awaitable( + self_->sink_, + self_->cached_awaitable_, + ba_.to_span()); + + if(self_->active_write_ops_->await_ready(self_->cached_awaitable_)) + return h; + + return self_->active_write_ops_->await_suspend( + self_->cached_awaitable_, h, ex, token); + } + + io_result + await_resume() + { + if(ba_.to_span().empty()) + return {{}, 0}; + + struct guard { + any_write_sink* self; + ~guard() { + self->active_write_ops_->destroy(self->cached_awaitable_); + self->active_write_ops_ = nullptr; + } + } g{self_}; + return self_->active_write_ops_->await_resume( + self_->cached_awaitable_); + } + }; + return awaitable{this, buffers}; } template -task> -any_write_sink::write(CB buffers, bool eof) +io_task +any_write_sink::write(CB buffers) { buffer_param bp(buffers); std::size_t total = 0; @@ -579,21 +867,46 @@ any_write_sink::write(CB buffers, bool eof) if(bufs.empty()) break; - auto [ec, n] = co_await write_some_(bufs, false); + auto [ec, n] = co_await write_(bufs); + total += n; if(ec) - co_return {ec, total + n}; + co_return {ec, total}; bp.consume(n); - total += n; } - if(eof) + co_return {{}, total}; +} + +template +io_task +any_write_sink::write_eof(CB buffers) +{ + const_buffer_param bp(buffers); + std::size_t total = 0; + + for(;;) { - auto [ec] = co_await write_eof(); + auto bufs = bp.data(); + if(bufs.empty()) + { + auto [ec] = co_await write_eof(); + co_return {ec, total}; + } + + if(! 
bp.more()) + { + // Last window — send atomically with EOF + auto [ec, n] = co_await write_eof_buffers_(bufs); + total += n; + co_return {ec, total}; + } + + auto [ec, n] = co_await write_(bufs); + total += n; if(ec) co_return {ec, total}; + bp.consume(n); } - - co_return {{}, total}; } } // namespace capy diff --git a/include/boost/capy/io/any_write_stream.hpp b/include/boost/capy/io/any_write_stream.hpp index 2e389fd9..730e83f3 100644 --- a/include/boost/capy/io/any_write_stream.hpp +++ b/include/boost/capy/io/any_write_stream.hpp @@ -13,7 +13,7 @@ #include #include #include -#include +#include #include #include #include @@ -51,6 +51,11 @@ namespace capy { so memory usage can be measured up front, rather than allocating piecemeal as traffic arrives. + @par Immediate Completion + Operations complete immediately without suspending when the + buffer sequence is empty, or when the underlying stream's + awaitable reports readiness via `await_ready`. + @par Thread Safety Not thread-safe. Concurrent operations on the same wrapper are undefined behavior. @@ -73,16 +78,16 @@ namespace capy { class any_write_stream { struct vtable; - struct awaitable_ops; template struct vtable_for_impl; + // ordered for cache line coherence void* stream_ = nullptr; vtable const* vt_ = nullptr; void* cached_awaitable_ = nullptr; void* storage_ = nullptr; - awaitable_ops const* active_ops_ = nullptr; + bool awaitable_active_ = false; public: /** Destructor. @@ -119,7 +124,7 @@ class any_write_stream , vt_(std::exchange(other.vt_, nullptr)) , cached_awaitable_(std::exchange(other.cached_awaitable_, nullptr)) , storage_(std::exchange(other.storage_, nullptr)) - , active_ops_(std::exchange(other.active_ops_, nullptr)) + , awaitable_active_(std::exchange(other.awaitable_active_, false)) { } @@ -189,6 +194,17 @@ class any_write_stream @return An awaitable yielding `(error_code,std::size_t)`. + @par Immediate Completion + The operation completes immediately without suspending + the calling coroutine when: + @li The buffer sequence is empty, returning `{error_code{}, 0}`. + @li The underlying stream's awaitable reports immediate + readiness via `await_ready`. + + @note This is a partial operation and may not process the + entire buffer sequence. Use the composed @ref write algorithm + for guaranteed complete transfer. + @par Preconditions The wrapper must contain a valid stream (`has_value() == true`). 
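+
+        @par Example
+        A minimal sketch, assuming `ws` is an
+        `any_write_stream` holding a valid stream:
+
+        @code
+        auto [ec, n] = co_await ws.write_some(
+            const_buffer( "hello", 5 ) );
+        if( ec )
+            detail::throw_system_error( ec );
+        // n may be less than 5; loop, or use the
+        // composed write algorithm for full transfer.
+        @endcode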
*/ @@ -221,23 +237,20 @@ class any_write_stream //---------------------------------------------------------- -struct any_write_stream::awaitable_ops +struct any_write_stream::vtable { + // ordered by call frequency for cache line coherence + void (*construct_awaitable)( + void* stream, + void* storage, + std::span buffers); bool (*await_ready)(void*); coro (*await_suspend)(void*, coro, executor_ref, std::stop_token); io_result (*await_resume)(void*); - void (*destroy)(void*) noexcept; -}; - -struct any_write_stream::vtable -{ - void (*destroy)(void*) noexcept; + void (*destroy_awaitable)(void*) noexcept; std::size_t awaitable_size; std::size_t awaitable_align; - awaitable_ops const* (*construct_awaitable)( - void* stream, - void* storage, - std::span buffers); + void (*destroy)(void*) noexcept; }; template @@ -252,7 +265,7 @@ struct any_write_stream::vtable_for_impl static_cast(stream)->~S(); } - static awaitable_ops const* + static void construct_awaitable_impl( void* stream, void* storage, @@ -260,30 +273,26 @@ struct any_write_stream::vtable_for_impl { auto& s = *static_cast(stream); ::new(storage) Awaitable(s.write_some(buffers)); - - static constexpr awaitable_ops ops = { - +[](void* p) { - return static_cast(p)->await_ready(); - }, - +[](void* p, coro h, executor_ref ex, std::stop_token token) { - return detail::call_await_suspend( - static_cast(p), h, ex, token); - }, - +[](void* p) { - return static_cast(p)->await_resume(); - }, - +[](void* p) noexcept { - static_cast(p)->~Awaitable(); - } - }; - return &ops; } static constexpr vtable value = { - &do_destroy_impl, + &construct_awaitable_impl, + +[](void* p) { + return static_cast(p)->await_ready(); + }, + +[](void* p, coro h, executor_ref ex, std::stop_token token) { + return detail::call_await_suspend( + static_cast(p), h, ex, token); + }, + +[](void* p) { + return static_cast(p)->await_resume(); + }, + +[](void* p) noexcept { + static_cast(p)->~Awaitable(); + }, sizeof(Awaitable), alignof(Awaitable), - &construct_awaitable_impl + &do_destroy_impl }; }; @@ -299,8 +308,8 @@ any_write_stream::~any_write_stream() } if(cached_awaitable_) { - if(active_ops_) - active_ops_->destroy(cached_awaitable_); + if(awaitable_active_) + vt_->destroy_awaitable(cached_awaitable_); ::operator delete(cached_awaitable_); } } @@ -317,15 +326,15 @@ any_write_stream::operator=(any_write_stream&& other) noexcept } if(cached_awaitable_) { - if(active_ops_) - active_ops_->destroy(cached_awaitable_); + if(awaitable_active_) + vt_->destroy_awaitable(cached_awaitable_); ::operator delete(cached_awaitable_); } stream_ = std::exchange(other.stream_, nullptr); vt_ = std::exchange(other.vt_, nullptr); cached_awaitable_ = std::exchange(other.cached_awaitable_, nullptr); storage_ = std::exchange(other.storage_, nullptr); - active_ops_ = std::exchange(other.active_ops_, nullptr); + awaitable_active_ = std::exchange(other.awaitable_active_, false); } return *this; } @@ -375,47 +384,55 @@ any_write_stream::write_some(CB buffers) struct awaitable { any_write_stream* self_; - const_buffer_param bp_; + const_buffer_array ba_; + + awaitable( + any_write_stream* self, + CB const& buffers) noexcept + : self_(self) + , ba_(buffers) + { + } bool await_ready() const noexcept { - return false; + return ba_.to_span().empty(); } coro await_suspend(coro h, executor_ref ex, std::stop_token token) { - // Construct the underlying awaitable into cached storage - self_->active_ops_ = self_->vt_->construct_awaitable( + self_->vt_->construct_awaitable( self_->stream_, 
self_->cached_awaitable_, - bp_.data()); + ba_.to_span()); + self_->awaitable_active_ = true; - // Check if underlying is immediately ready - if(self_->active_ops_->await_ready(self_->cached_awaitable_)) + if(self_->vt_->await_ready(self_->cached_awaitable_)) return h; - // Forward to underlying awaitable - return self_->active_ops_->await_suspend( + return self_->vt_->await_suspend( self_->cached_awaitable_, h, ex, token); } io_result await_resume() { + if(!self_->awaitable_active_) + return {{}, 0}; struct guard { any_write_stream* self; ~guard() { - self->active_ops_->destroy(self->cached_awaitable_); - self->active_ops_ = nullptr; + self->vt_->destroy_awaitable(self->cached_awaitable_); + self->awaitable_active_ = false; } } g{self_}; - return self_->active_ops_->await_resume( + return self_->vt_->await_resume( self_->cached_awaitable_); } }; - return awaitable{this, const_buffer_param(buffers)}; + return awaitable{this, buffers}; } } // namespace capy diff --git a/include/boost/capy/io/pull_from.hpp b/include/boost/capy/io/pull_from.hpp index 8dbe66aa..41c9ed20 100644 --- a/include/boost/capy/io/pull_from.hpp +++ b/include/boost/capy/io/pull_from.hpp @@ -16,8 +16,7 @@ #include #include #include -#include -#include +#include #include #include @@ -61,7 +60,7 @@ namespace capy { @see ReadSource, BufferSink, push_to */ template -task> +io_task pull_from(Src& source, Sink& sink) { mutable_buffer dst_arr[detail::max_iovec_]; @@ -92,7 +91,7 @@ pull_from(Src& source, Sink& sink) if(ec == cond::eof) { - auto [eof_ec] = co_await sink.commit_eof(); + auto [eof_ec] = co_await sink.commit_eof(0); co_return {eof_ec, total}; } @@ -142,7 +141,8 @@ pull_from(Src& source, Sink& sink) @see ReadStream, BufferSink, push_to */ template -task> + requires (!ReadSource) +io_task pull_from(Src& source, Sink& sink) { mutable_buffer dst_arr[detail::max_iovec_]; @@ -177,7 +177,7 @@ pull_from(Src& source, Sink& sink) // Check for EOF condition if(ec == cond::eof) { - auto [eof_ec] = co_await sink.commit_eof(); + auto [eof_ec] = co_await sink.commit_eof(0); co_return {eof_ec, total}; } diff --git a/include/boost/capy/io/push_to.hpp b/include/boost/capy/io/push_to.hpp index 710df33c..d56233bf 100644 --- a/include/boost/capy/io/push_to.hpp +++ b/include/boost/capy/io/push_to.hpp @@ -12,11 +12,11 @@ #include #include +#include #include #include #include -#include -#include +#include #include #include @@ -59,7 +59,7 @@ namespace capy { @see BufferSource, WriteSink */ template -task> +io_task push_to(Src& source, Sink& sink) { const_buffer arr[detail::max_iovec_]; @@ -68,14 +68,13 @@ push_to(Src& source, Sink& sink) for(;;) { auto [ec, bufs] = co_await source.pull(arr); - if(ec) - co_return {ec, total}; - - if(bufs.empty()) + if(ec == cond::eof) { auto [eof_ec] = co_await sink.write_eof(); co_return {eof_ec, total}; } + if(ec) + co_return {ec, total}; auto [write_ec, n] = co_await sink.write(bufs); total += n; @@ -124,7 +123,8 @@ push_to(Src& source, Sink& sink) @see BufferSource, WriteStream, pull_from */ template -task> + requires (!WriteSink) +io_task push_to(Src& source, Stream& stream) { const_buffer arr[detail::max_iovec_]; @@ -133,12 +133,11 @@ push_to(Src& source, Stream& stream) for(;;) { auto [ec, bufs] = co_await source.pull(arr); + if(ec == cond::eof) + co_return {{}, total}; if(ec) co_return {ec, total}; - if(bufs.empty()) - co_return {{}, total}; - auto [write_ec, n] = co_await stream.write_some(bufs); if(write_ec) co_return {write_ec, total}; diff --git a/include/boost/capy/io/write_now.hpp 
b/include/boost/capy/io/write_now.hpp new file mode 100644 index 00000000..492c7011 --- /dev/null +++ b/include/boost/capy/io/write_now.hpp @@ -0,0 +1,412 @@ +// +// Copyright (c) 2025 Vinnie Falco (vinnie dot falco at gmail dot com) +// +// Distributed under the Boost Software License, Version 1.0. (See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// +// Official repository: https://github.com/cppalliance/capy +// + +#ifndef BOOST_CAPY_IO_WRITE_NOW_HPP +#define BOOST_CAPY_IO_WRITE_NOW_HPP + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#ifndef BOOST_CAPY_WRITE_NOW_WORKAROUND +# if defined(__GNUC__) && !defined(__clang__) +# define BOOST_CAPY_WRITE_NOW_WORKAROUND 1 +# else +# define BOOST_CAPY_WRITE_NOW_WORKAROUND 0 +# endif +#endif + +namespace boost { +namespace capy { + +/** Eagerly writes complete buffer sequences with frame caching. + + This class wraps a @ref WriteStream and provides an `operator()` + that writes an entire buffer sequence, attempting to complete + synchronously. If every `write_some` completes without suspending, + the entire operation finishes in `await_ready` with no coroutine + suspension. + + The class maintains a one-element coroutine frame cache. After + the first call, subsequent calls reuse the cached frame memory, + avoiding repeated allocation for the internal coroutine. + + @tparam Stream The stream type, must satisfy @ref WriteStream. + + @par Thread Safety + Distinct objects: Safe. + Shared objects: Unsafe. + + @par Preconditions + Only one operation may be outstanding at a time. A new call to + `operator()` must not be made until the previous operation has + completed (i.e., the returned awaitable has been fully consumed). 
+ + @par Example + + @code + template< WriteStream Stream > + task<> send_messages( Stream& stream ) + { + write_now wn( stream ); + auto [ec1, n1] = co_await wn( make_buffer( "hello" ) ); + if( ec1 ) + detail::throw_system_error( ec1 ); + auto [ec2, n2] = co_await wn( make_buffer( "world" ) ); + if( ec2 ) + detail::throw_system_error( ec2 ); + } + @endcode + + @see write, write_some, WriteStream, ConstBufferSequence +*/ +template + requires WriteStream +class write_now +{ + Stream& stream_; + void* cached_frame_ = nullptr; + std::size_t cached_size_ = 0; + + struct [[nodiscard]] BOOST_CAPY_CORO_AWAIT_ELIDABLE + op_type + { + struct promise_type + { + io_result result_; + std::exception_ptr ep_; + coro cont_{nullptr}; + executor_ref ex_; + std::stop_token token_; + bool done_ = false; + + op_type get_return_object() + { + return op_type{ + std::coroutine_handle< + promise_type>::from_promise(*this)}; + } + + auto initial_suspend() noexcept + { +#if BOOST_CAPY_WRITE_NOW_WORKAROUND + return std::suspend_always{}; +#else + return std::suspend_never{}; +#endif + } + + auto final_suspend() noexcept + { + struct awaiter + { + promise_type* p_; + + bool await_ready() const noexcept + { + return false; + } + + coro await_suspend(coro) const noexcept + { + p_->done_ = true; + if(!p_->cont_) + return std::noop_coroutine(); + return p_->cont_; + } + + void await_resume() const noexcept + { + } + }; + return awaiter{this}; + } + + void return_value( + io_result r) noexcept + { + result_ = r; + } + + void unhandled_exception() + { + ep_ = std::current_exception(); + } + + std::suspend_always yield_value(int) noexcept + { + return {}; + } + + template + auto await_transform(A&& a) + { + using decayed = std::decay_t; + if constexpr (IoAwaitable) + { + struct wrapper + { + decayed inner_; + promise_type* p_; + + bool await_ready() + { + return inner_.await_ready(); + } + + coro await_suspend(coro h) + { + return detail::call_await_suspend( + &inner_, h, + p_->ex_, p_->token_); + } + + decltype(auto) await_resume() + { + return inner_.await_resume(); + } + }; + return wrapper{ + std::forward(a), this}; + } + else + { + return std::forward(a); + } + } + + static void* + operator new( + std::size_t size, + write_now& self, + auto&) + { + if(self.cached_frame_ && + self.cached_size_ >= size) + return self.cached_frame_; + void* p = ::operator new(size); + if(self.cached_frame_) + ::operator delete(self.cached_frame_); + self.cached_frame_ = p; + self.cached_size_ = size; + return p; + } + + static void + operator delete(void*, std::size_t) noexcept + { + } + }; + + std::coroutine_handle h_; + + ~op_type() + { + if(h_) + h_.destroy(); + } + + op_type(op_type const&) = delete; + op_type& operator=(op_type const&) = delete; + + op_type(op_type&& other) noexcept + : h_(std::exchange(other.h_, nullptr)) + { + } + + op_type& operator=(op_type&&) = delete; + + bool await_ready() const noexcept + { + return h_.promise().done_; + } + + coro await_suspend( + coro cont, + executor_ref ex, + std::stop_token token) + { + auto& p = h_.promise(); + p.cont_ = cont; + p.ex_ = ex; + p.token_ = token; + return h_; + } + + io_result await_resume() + { + auto& p = h_.promise(); + if(p.ep_) + std::rethrow_exception(p.ep_); + return p.result_; + } + + private: + explicit op_type( + std::coroutine_handle h) + : h_(h) + { + } + }; + +public: + /** Destructor. Frees the cached coroutine frame. */ + ~write_now() + { + if(cached_frame_) + ::operator delete(cached_frame_); + } + + /** Construct from a stream reference. 
+ + @param s The stream to write to. Must outlive this object. + */ + explicit + write_now(Stream& s) noexcept + : stream_(s) + { + } + + write_now(write_now const&) = delete; + write_now& operator=(write_now const&) = delete; + + /** Eagerly write the entire buffer sequence. + + Writes data to the stream by calling `write_some` repeatedly + until the entire buffer sequence is written or an error + occurs. The operation attempts to complete synchronously: + if every `write_some` completes without suspending, the + entire operation finishes in `await_ready`. + + When the fast path cannot complete, the coroutine suspends + and continues asynchronously. The internal coroutine frame + is cached and reused across calls. + + @param buffers The buffer sequence to write. Passed by + value to ensure the sequence lives in the coroutine + frame across suspension points. + + @return An awaitable yielding `(error_code,std::size_t)`. + On success, `n` equals `buffer_size(buffers)`. On + error, `n` is the number of bytes written before the + error. Compare error codes to conditions: + @li `cond::canceled` - Operation was cancelled + @li `std::errc::broken_pipe` - Peer closed connection + + @par Example + + @code + write_now wn( stream ); + auto [ec, n] = co_await wn( make_buffer( body ) ); + if( ec ) + detail::throw_system_error( ec ); + @endcode + + @see write, write_some, WriteStream + */ +// GCC falsely warns that the coroutine promise's +// placement operator new(size_t, write_now&, auto&) +// mismatches operator delete(void*, size_t). Per the +// standard, coroutine deallocation lookup is separate. +#if defined(__GNUC__) && !defined(__clang__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wmismatched-new-delete" +#endif + +#if BOOST_CAPY_WRITE_NOW_WORKAROUND + template + op_type + operator()(Buffers buffers) + { + std::size_t const total_size = buffer_size(buffers); + std::size_t total_written = 0; + consuming_buffers cb(buffers); + while(total_written < total_size) + { + auto r = + co_await stream_.write_some(cb); + if(r.ec) + co_return io_result{ + r.ec, total_written}; + cb.consume(r.t1); + total_written += r.t1; + } + co_return io_result{ + {}, total_written}; + } +#else + template + op_type + operator()(Buffers buffers) + { + std::size_t const total_size = buffer_size(buffers); + std::size_t total_written = 0; + + // GCC ICE in expand_expr_real_1 (expr.cc:11376) + // when consuming_buffers spans a co_yield, so + // the GCC path uses a separate simple coroutine. 
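+            // Fast path: drain the sequence for as long
+            // as each write_some awaitable reports
+            // immediate readiness via await_ready().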
+ consuming_buffers cb(buffers); + while(total_written < total_size) + { + auto inner = stream_.write_some(cb); + if(!inner.await_ready()) + break; + auto r = inner.await_resume(); + if(r.ec) + co_return io_result{ + r.ec, total_written}; + cb.consume(r.t1); + total_written += r.t1; + } + + if(total_written >= total_size) + co_return io_result{ + {}, total_written}; + + co_yield 0; + + while(total_written < total_size) + { + auto r = + co_await stream_.write_some(cb); + if(r.ec) + co_return io_result{ + r.ec, total_written}; + cb.consume(r.t1); + total_written += r.t1; + } + co_return io_result{ + {}, total_written}; + } +#endif + +#if defined(__GNUC__) && !defined(__clang__) +#pragma GCC diagnostic pop +#endif +}; + +template +write_now(S&) -> write_now; + +} // namespace capy +} // namespace boost + +#endif diff --git a/include/boost/capy/read.hpp b/include/boost/capy/read.hpp index 73d403cf..41ca5cc6 100644 --- a/include/boost/capy/read.hpp +++ b/include/boost/capy/read.hpp @@ -12,8 +12,7 @@ #include #include -#include -#include +#include #include #include #include @@ -60,7 +59,7 @@ namespace capy { auto [ec, n] = co_await read( stream, mutable_buffer( header ) ); if( ec == cond::eof ) co_return; // Connection closed - if( ec.failed() ) + if( ec ) detail::throw_system_error( ec ); // header contains exactly 16 bytes } @@ -72,7 +71,7 @@ auto read( ReadStream auto& stream, MutableBufferSequence auto const& buffers) -> - task> + io_task { consuming_buffers consuming(buffers); std::size_t const total_size = buffer_size(buffers); @@ -123,7 +122,7 @@ read( { std::string body; auto [ec, n] = co_await read( stream, string_dynamic_buffer( &body ) ); - if( ec.failed() ) + if( ec ) detail::throw_system_error( ec ); return body; } @@ -136,7 +135,7 @@ read( ReadStream auto& stream, DynamicBufferParam auto&& buffers, std::size_t initial_amount = 2048) -> - task> + io_task { std::size_t amount = initial_amount; std::size_t total_read = 0; @@ -189,7 +188,7 @@ read( { std::string body; auto [ec, n] = co_await read( source, string_dynamic_buffer( &body ) ); - if( ec.failed() ) + if( ec ) detail::throw_system_error( ec ); return body; } @@ -202,7 +201,7 @@ read( ReadSource auto& source, DynamicBufferParam auto&& buffers, std::size_t initial_amount = 2048) -> - task> + io_task { std::size_t amount = initial_amount; std::size_t total_read = 0; diff --git a/include/boost/capy/read_until.hpp b/include/boost/capy/read_until.hpp index 43b1b321..5486c3f7 100644 --- a/include/boost/capy/read_until.hpp +++ b/include/boost/capy/read_until.hpp @@ -96,7 +96,7 @@ read_until_match_impl( auto [ec, n] = co_await stream.read_some(mb); buffers.commit(n); - if(n > 0) + if(!ec) { auto pos = search_buffer_for_match(buffers.data(), match); if(pos != std::string_view::npos) @@ -273,7 +273,7 @@ struct match_delim *hint = 3; // partial "\r\n\r" possible return std::string_view::npos; } ); - if( ec.failed() ) + if( ec ) detail::throw_system_error( ec ); // header contains data through "\r\n\r\n" } @@ -343,7 +343,7 @@ read_until( stream, string_dynamic_buffer( &line ), "\r\n" ); if( ec == cond::eof ) co_return line; // partial line at EOF - if( ec.failed() ) + if( ec ) detail::throw_system_error( ec ); line.resize( n - 2 ); // remove "\r\n" co_return line; diff --git a/include/boost/capy/task.hpp b/include/boost/capy/task.hpp index ddb00085..dc54f84f 100644 --- a/include/boost/capy/task.hpp +++ b/include/boost/capy/task.hpp @@ -79,7 +79,7 @@ struct task_return_base task compute_value() { auto [ec, n] = co_await stream.read_some( 
buf ); - if( ec.failed() ) + if( ec ) co_return 0; co_return process( buf, n ); } diff --git a/include/boost/capy/test/buffer_sink.hpp b/include/boost/capy/test/buffer_sink.hpp index f1dfd1e2..a229633e 100644 --- a/include/boost/capy/test/buffer_sink.hpp +++ b/include/boost/capy/test/buffer_sink.hpp @@ -68,7 +68,7 @@ namespace test { */ class buffer_sink { - fuse* f_; + fuse f_; std::string data_; std::string prepare_buf_; std::size_t prepare_size_ = 0; @@ -84,9 +84,9 @@ class buffer_sink Use to simulate limited buffer space. */ explicit buffer_sink( - fuse& f, + fuse f = {}, std::size_t max_prepare_size = 4096) noexcept - : f_(&f) + : f_(std::move(f)) , max_prepare_size_(max_prepare_size) { prepare_buf_.resize(max_prepare_size_); @@ -186,7 +186,7 @@ class buffer_sink io_result<> await_resume() { - auto ec = self_->f_->maybe_fail(); + auto ec = self_->f_.maybe_fail(); if(ec) return {ec}; @@ -200,26 +200,26 @@ class buffer_sink return awaitable{this, n}; } - /** Commit bytes written with optional end-of-stream. + /** Commit final bytes and signal end-of-stream. Transfers `n` bytes from the prepared buffer to the internal - data buffer. If `eof` is true, marks the sink as finalized. + data buffer and marks the sink as finalized. Before committing, + the attached @ref fuse is consulted to possibly inject an error + for testing fault scenarios. @param n The number of bytes to commit. - @param eof If true, signals end-of-stream after committing. @return An awaitable yielding `(error_code)`. @see fuse */ auto - commit(std::size_t n, bool eof) + commit_eof(std::size_t n) { struct awaitable { buffer_sink* self_; std::size_t n_; - bool eof_; bool await_ready() const noexcept { return true; } @@ -236,7 +236,7 @@ class buffer_sink io_result<> await_resume() { - auto ec = self_->f_->maybe_fail(); + auto ec = self_->f_.maybe_fail(); if(ec) return {ec}; @@ -244,56 +244,11 @@ class buffer_sink self_->data_.append(self_->prepare_buf_.data(), to_commit); self_->prepare_size_ = 0; - if(eof_) - self_->eof_called_ = true; - - return {}; - } - }; - return awaitable{this, n, eof}; - } - - /** Signal end-of-stream. - - Marks the sink as finalized, indicating no more data will be - written. Before signaling, the attached @ref fuse is consulted - to possibly inject an error for testing fault scenarios. - - @return An awaitable yielding `(error_code)`. - - @see fuse - */ - auto - commit_eof() - { - struct awaitable - { - buffer_sink* self_; - - bool await_ready() const noexcept { return true; } - - // This method is required to satisfy Capy's IoAwaitable concept, - // but is never called because await_ready() returns true. - // See the comment on commit(std::size_t) for a detailed explanation. 
- void await_suspend( - coro, - executor_ref, - std::stop_token) const noexcept - { - } - - io_result<> - await_resume() - { - auto ec = self_->f_->maybe_fail(); - if(ec) - return {ec}; - self_->eof_called_ = true; return {}; } }; - return awaitable{this}; + return awaitable{this, n}; } }; diff --git a/include/boost/capy/test/buffer_source.hpp b/include/boost/capy/test/buffer_source.hpp index 372f608c..9bf9a9e2 100644 --- a/include/boost/capy/test/buffer_source.hpp +++ b/include/boost/capy/test/buffer_source.hpp @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -50,11 +51,11 @@ namespace test { auto r = f.armed( [&]( fuse& ) -> task { const_buffer arr[16]; - auto [ec, count] = co_await bs.pull( arr, 16 ); + auto [ec, bufs] = co_await bs.pull( arr ); if( ec ) co_return; - // arr[0..count) contains buffer descriptors - std::size_t n = buffer_size( std::span( arr, count ) ); + // bufs contains buffer descriptors + std::size_t n = buffer_size( bufs ); bs.consume( n ); } ); @endcode @@ -63,7 +64,7 @@ namespace test { */ class buffer_source { - fuse* f_; + fuse f_; std::string data_; std::size_t pos_ = 0; std::size_t max_pull_size_; @@ -77,9 +78,9 @@ class buffer_source Use to simulate chunked delivery. */ explicit buffer_source( - fuse& f, + fuse f = {}, std::size_t max_pull_size = std::size_t(-1)) noexcept - : f_(&f) + : f_(std::move(f)) , max_pull_size_(max_pull_size) { } @@ -174,12 +175,12 @@ class buffer_source io_result> await_resume() { - auto ec = self_->f_->maybe_fail(); + auto ec = self_->f_.maybe_fail(); if(ec) return {ec, {}}; if(self_->pos_ >= self_->data_.size()) - return {{}, {}}; // Source exhausted + return {error::eof, {}}; std::size_t avail = self_->data_.size() - self_->pos_; std::size_t to_return = (std::min)(avail, self_->max_pull_size_); diff --git a/include/boost/capy/test/read_source.hpp b/include/boost/capy/test/read_source.hpp index d279556f..2493dcac 100644 --- a/include/boost/capy/test/read_source.hpp +++ b/include/boost/capy/test/read_source.hpp @@ -35,9 +35,10 @@ namespace test { to consume it. The associated @ref fuse enables error injection at controlled points. - Unlike @ref read_stream which provides partial reads via `read_some`, - this class satisfies the @ref ReadSource concept by providing complete - reads that fill the entire buffer sequence before returning. + This class satisfies the @ref ReadSource concept by providing both + partial reads via `read_some` (satisfying @ref ReadStream) and + complete reads via `read` that fill the entire buffer sequence + before returning. @par Thread Safety Not thread-safe. @@ -63,7 +64,7 @@ namespace test { */ class read_source { - fuse* f_; + fuse f_; std::string data_; std::size_t pos_ = 0; std::size_t max_read_size_; @@ -77,9 +78,9 @@ class read_source Use to simulate chunked delivery. */ explicit read_source( - fuse& f, + fuse f = {}, std::size_t max_read_size = std::size_t(-1)) noexcept - : f_(&f) + : f_(std::move(f)) , max_read_size_(max_read_size) { } @@ -111,22 +112,74 @@ class read_source return data_.size() - pos_; } - /** Asynchronously read data from the source. + /** Asynchronously read some data from the source. Transfers up to `buffer_size( buffers )` bytes from the internal - buffer to the provided mutable buffer sequence, filling buffers - completely before returning. If no data remains, returns - `error::eof`. Before every read, the attached @ref fuse is - consulted to possibly inject an error for testing fault scenarios. 
- The returned `std::size_t` is the number of bytes transferred. + buffer to the provided mutable buffer sequence. If no data + remains, returns `error::eof`. Before every read, the attached + @ref fuse is consulted to possibly inject an error for testing + fault scenarios. + + @param buffers The mutable buffer sequence to receive data. + + @return An awaitable yielding `(error_code,std::size_t)`. + + @see fuse + */ + template + auto + read_some(MB buffers) + { + struct awaitable + { + read_source* self_; + MB buffers_; + + bool await_ready() const noexcept { return true; } + + void await_suspend( + coro, + executor_ref, + std::stop_token) const noexcept + { + } + + io_result + await_resume() + { + if(buffer_empty(buffers_)) + return {{}, 0}; + + auto ec = self_->f_.maybe_fail(); + if(ec) + return {ec, 0}; + + if(self_->pos_ >= self_->data_.size()) + return {error::eof, 0}; + + std::size_t avail = self_->data_.size() - self_->pos_; + if(avail > self_->max_read_size_) + avail = self_->max_read_size_; + auto src = make_buffer(self_->data_.data() + self_->pos_, avail); + std::size_t const n = buffer_copy(buffers_, src); + self_->pos_ += n; + return {{}, n}; + } + }; + return awaitable{this, buffers}; + } - @par Effects - On success, advances the internal read position by the number of - bytes copied. If an error is injected by the fuse, the read position - remains unchanged. + /** Asynchronously read data from the source. + + Fills the entire buffer sequence from the internal data. + If the available data is less than the buffer size, returns + `error::eof` with the number of bytes transferred. Before + every read, the attached @ref fuse is consulted to possibly + inject an error for testing fault scenarios. - @par Exception Safety - No-throw guarantee. + Unlike @ref read_some, this ignores `max_read_size` and + transfers all available data in a single operation, matching + the @ref ReadSource semantic contract. @param buffers The mutable buffer sequence to receive data. @@ -145,17 +198,6 @@ class read_source bool await_ready() const noexcept { return true; } - // This method is required to satisfy Capy's IoAwaitable concept, - // but is never called because await_ready() returns true. - // - // Capy uses a two-layer awaitable system: the promise's - // await_transform wraps awaitables in a transform_awaiter whose - // standard await_suspend(coroutine_handle) calls this custom - // 3-argument overload, passing the executor and stop_token from - // the coroutine's context. For synchronous test awaitables like - // this one, the coroutine never suspends, so this is not invoked. - // The signature exists to allow the same awaitable type to work - // with both synchronous (test) and asynchronous (real I/O) code. 
void await_suspend( coro, executor_ref, @@ -166,7 +208,10 @@ class read_source io_result await_resume() { - auto ec = self_->f_->maybe_fail(); + if(buffer_empty(buffers_)) + return {{}, 0}; + + auto ec = self_->f_.maybe_fail(); if(ec) return {ec, 0}; @@ -174,11 +219,12 @@ class read_source return {error::eof, 0}; std::size_t avail = self_->data_.size() - self_->pos_; - if(avail > self_->max_read_size_) - avail = self_->max_read_size_; auto src = make_buffer(self_->data_.data() + self_->pos_, avail); std::size_t const n = buffer_copy(buffers_, src); self_->pos_ += n; + + if(n < buffer_size(buffers_)) + return {error::eof, n}; return {{}, n}; } }; diff --git a/include/boost/capy/test/read_stream.hpp b/include/boost/capy/test/read_stream.hpp index 4927f93e..6f6e7921 100644 --- a/include/boost/capy/test/read_stream.hpp +++ b/include/boost/capy/test/read_stream.hpp @@ -36,6 +36,8 @@ namespace test { at controlled points. An optional `max_read_size` constructor parameter limits bytes per read to simulate chunked delivery. + This class satisfies the @ref ReadStream concept. + @par Thread Safety Not thread-safe. @@ -56,11 +58,11 @@ namespace test { } ); @endcode - @see fuse + @see fuse, ReadStream */ class read_stream { - fuse* f_; + fuse f_; std::string data_; std::size_t pos_ = 0; std::size_t max_read_size_; @@ -74,9 +76,9 @@ class read_stream Use to simulate chunked network delivery. */ explicit read_stream( - fuse& f, + fuse f = {}, std::size_t max_read_size = std::size_t(-1)) noexcept - : f_(&f) + : f_(std::move(f)) , max_read_size_(max_read_size) { } @@ -162,7 +164,12 @@ class read_stream io_result await_resume() { - auto ec = self_->f_->maybe_fail(); + // Empty buffer is a no-op regardless of + // stream state or fuse. + if(buffer_empty(buffers_)) + return {{}, 0}; + + auto ec = self_->f_.maybe_fail(); if(ec) return {ec, 0}; diff --git a/include/boost/capy/test/stream.hpp b/include/boost/capy/test/stream.hpp index 475fbf6d..ec77b033 100644 --- a/include/boost/capy/test/stream.hpp +++ b/include/boost/capy/test/stream.hpp @@ -18,154 +18,196 @@ #include #include #include +#include +#include #include +#include -#include +#include #include #include #include +#include namespace boost { namespace capy { namespace test { -/** A mock stream for testing both read and write operations. +/** A connected stream for testing bidirectional I/O. - Use this to verify code that performs reads and writes without - needing real I/O. Call @ref provide to supply data for reads, - then @ref read_some to consume it. Call @ref write_some to write - data, then @ref data to retrieve what was written. The associated - @ref fuse enables error injection at controlled points. Optional - `max_read_size` and `max_write_size` constructor parameters limit - bytes per operation to simulate chunked delivery. + Streams are created in pairs via @ref make_stream_pair. + Data written to one end becomes available for reading on + the other. If no data is available when @ref read_some + is called, the calling coroutine suspends until the peer + calls @ref write_some. The shared @ref fuse enables error + injection at controlled points in both directions. + + When the fuse injects an error or throws on one end, the + other end is automatically closed: any suspended reader is + resumed with `error::eof`, and subsequent operations on + both ends return `error::eof`. Calling @ref close on one + end signals eof to the peer's reads after draining any + buffered data, while the peer may still write. @par Thread Safety - Not thread-safe. 
+ Single-threaded only. Both ends of the pair must be + accessed from the same thread. Concurrent access is + undefined behavior. @par Example @code fuse f; - stream s( f ); - s.provide( "Hello, " ); - s.provide( "World!" ); + auto [a, b] = make_stream_pair( f ); - auto r = f.armed( [&]( fuse& ) -> task { - char buf[32]; - auto [ec, n] = co_await s.read_some( - mutable_buffer( buf, sizeof( buf ) ) ); + auto r = f.armed( [&]( fuse& ) -> task<> { + auto [ec, n] = co_await a.write_some( + const_buffer( "hello", 5 ) ); if( ec ) co_return; - // buf contains "Hello, World!" - auto [ec2, n2] = co_await s.write_some( - const_buffer( "Response", 8 ) ); + char buf[32]; + auto [ec2, n2] = co_await b.read_some( + mutable_buffer( buf, sizeof( buf ) ) ); if( ec2 ) co_return; - // s.data() returns "Response" + // buf contains "hello" } ); @endcode - @see fuse, read_stream, write_stream + @see make_stream_pair, fuse */ class stream { - fuse* f_; - std::string read_data_; - std::size_t read_pos_ = 0; - std::string write_data_; - std::string expect_; - std::size_t max_read_size_; - std::size_t max_write_size_; - - std::error_code - consume_match_() noexcept - { - if(write_data_.empty() || expect_.empty()) - return {}; - std::size_t const n = (std::min)(write_data_.size(), expect_.size()); - if(std::string_view(write_data_.data(), n) != - std::string_view(expect_.data(), n)) - return error::test_failure; - write_data_.erase(0, n); - expect_.erase(0, n); - return {}; - } + // Single-threaded only. No concurrent access to either + // end of the pair. Both streams and all operations must + // run on the same thread. -public: - /** Construct a stream. - - @param f The fuse used to inject errors during operations. + struct half + { + std::string buf; + std::size_t max_read_size = std::size_t(-1); + coro pending_h{}; + executor_ref pending_ex; + bool eof = false; + }; + + struct state + { + fuse f; + bool closed = false; + half sides[2]; - @param max_read_size Maximum bytes returned per read. - Use to simulate chunked network delivery. + explicit state(fuse f_) noexcept + : f(std::move(f_)) + { + } - @param max_write_size Maximum bytes transferred per write. - Use to simulate chunked network delivery. - */ - explicit stream( - fuse& f, - std::size_t max_read_size = std::size_t(-1), - std::size_t max_write_size = std::size_t(-1)) noexcept - : f_(&f) - , max_read_size_(max_read_size) - , max_write_size_(max_write_size) + // Set closed and resume any suspended readers + // with eof on both sides. + void close() noexcept + { + closed = true; + for(auto& side : sides) + { + if(side.pending_h) + { + auto h = side.pending_h; + side.pending_h = {}; + auto ex = side.pending_ex; + side.pending_ex = {}; + ex.dispatch(h); + } + } + } + }; + + // Wraps the maybe_fail() call. If the guard is + // not disarmed before destruction (fuse returned + // an error, or threw an exception), closes both + // ends so any suspended peer gets eof. + struct close_guard + { + state* st; + bool armed = true; + void disarm() noexcept { armed = false; } + ~close_guard() { if(armed) st->close(); } + }; + + std::shared_ptr state_; + int index_; + + stream( + std::shared_ptr sp, + int index) noexcept + : state_(std::move(sp)) + , index_(index) { } - //-------------------------------------------- - // - // Read operations - // - //-------------------------------------------- - - /** Append data to be returned by subsequent reads. + friend std::pair + make_stream_pair(fuse); - Multiple calls accumulate data that @ref read_some returns. 
- - @param sv The data to append. +public: + stream(stream const&) = delete; + stream& operator=(stream const&) = delete; + stream(stream&&) = default; + stream& operator=(stream&&) = default; + + /** Signal end-of-stream to the peer. + + Marks the peer's read direction as closed. + If the peer is suspended in @ref read_some, + it is resumed. The peer drains any buffered + data before receiving `error::eof`. Writes + from the peer are unaffected. */ void - provide(std::string_view sv) + close() noexcept { - read_data_.append(sv); + int peer = 1 - index_; + auto& side = state_->sides[peer]; + side.eof = true; + if(side.pending_h) + { + auto h = side.pending_h; + side.pending_h = {}; + auto ex = side.pending_ex; + side.pending_ex = {}; + ex.dispatch(h); + } } - /// Clear all read data and reset the read position. - void - clear() noexcept - { - read_data_.clear(); - read_pos_ = 0; - } + /** Set the maximum bytes returned per read. + + Limits how many bytes @ref read_some returns in + a single call, simulating chunked network delivery. + The default is unlimited. - /// Return the number of bytes available for reading. - std::size_t - available() const noexcept + @param n Maximum bytes per read. + */ + void + set_max_read_size(std::size_t n) noexcept { - return read_data_.size() - read_pos_; + state_->sides[index_].max_read_size = n; } /** Asynchronously read data from the stream. - Transfers up to `buffer_size( buffers )` bytes from the internal - buffer to the provided mutable buffer sequence. If no data remains, - returns `error::eof`. Before every read, the attached @ref fuse is - consulted to possibly inject an error for testing fault scenarios. - The returned `std::size_t` is the number of bytes transferred. - - @par Effects - On success, advances the internal read position by the number of - bytes copied. If an error is injected by the fuse, the read position - remains unchanged. - - @par Exception Safety - No-throw guarantee. + Transfers up to `buffer_size(buffers)` bytes from + data written by the peer. If no data is available, + the calling coroutine suspends until the peer calls + @ref write_some. Before every read, the attached + @ref fuse is consulted to possibly inject an error. + If the fuse fires, the peer is automatically closed. + If the stream is closed, returns `error::eof`. + The returned `std::size_t` is the number of bytes + transferred. @param buffers The mutable buffer sequence to receive data. @return An awaitable yielding `(error_code,std::size_t)`. - @see fuse + @see fuse, close */ template auto @@ -176,107 +218,80 @@ class stream stream* self_; MB buffers_; - bool await_ready() const noexcept { return true; } + bool await_ready() const noexcept + { + if(buffer_empty(buffers_)) + return true; + auto* st = self_->state_.get(); + auto& side = st->sides[self_->index_]; + return st->closed || side.eof || + !side.buf.empty(); + } - // This method is required to satisfy Capy's IoAwaitable concept, - // but is never called because await_ready() returns true. - // - // Capy uses a two-layer awaitable system: the promise's - // await_transform wraps awaitables in a transform_awaiter whose - // standard await_suspend(coroutine_handle) calls this custom - // 3-argument overload, passing the executor and stop_token from - // the coroutine's context. For synchronous test awaitables like - // this one, the coroutine never suspends, so this is not invoked. 
- // The signature exists to allow the same awaitable type to work - // with both synchronous (test) and asynchronous (real I/O) code. - void await_suspend( - coro, - executor_ref, - std::stop_token) const noexcept + coro await_suspend( + coro h, + executor_ref ex, + std::stop_token) noexcept { + auto& side = self_->state_->sides[ + self_->index_]; + side.pending_h = h; + side.pending_ex = ex; + return std::noop_coroutine(); } io_result await_resume() { - auto ec = self_->f_->maybe_fail(); - if(ec) - return {ec, 0}; + if(buffer_empty(buffers_)) + return {{}, 0}; + + auto* st = self_->state_.get(); + auto& side = st->sides[ + self_->index_]; - if(self_->read_pos_ >= self_->read_data_.size()) + if(st->closed) return {error::eof, 0}; - std::size_t avail = self_->read_data_.size() - self_->read_pos_; - if(avail > self_->max_read_size_) - avail = self_->max_read_size_; - auto src = make_buffer( - self_->read_data_.data() + self_->read_pos_, avail); - std::size_t const n = buffer_copy(buffers_, src); - self_->read_pos_ += n; + if(side.eof && side.buf.empty()) + return {error::eof, 0}; + + if(!side.eof) + { + close_guard g{st}; + auto ec = st->f.maybe_fail(); + if(ec) + return {ec, 0}; + g.disarm(); + } + + std::size_t const n = buffer_copy( + buffers_, make_buffer(side.buf), + side.max_read_size); + side.buf.erase(0, n); return {{}, n}; } }; return awaitable{this, buffers}; } - //-------------------------------------------- - // - // Write operations - // - //-------------------------------------------- - - /// Return the written data as a string view. - std::string_view - data() const noexcept - { - return write_data_; - } - - /** Set the expected data for subsequent writes. - - Stores the expected data and immediately tries to match - against any data already written. Matched data is consumed - from both buffers. - - @param sv The expected data. - - @return An error if existing data does not match. - */ - std::error_code - expect(std::string_view sv) - { - expect_.assign(sv); - return consume_match_(); - } - - /// Return the number of bytes written. - std::size_t - size() const noexcept - { - return write_data_.size(); - } - /** Asynchronously write data to the stream. - Transfers up to `buffer_size( buffers )` bytes from the provided - const buffer sequence to the internal buffer. Before every write, - the attached @ref fuse is consulted to possibly inject an error - for testing fault scenarios. The returned `std::size_t` is the - number of bytes transferred. - - @par Effects - On success, appends the written bytes to the internal buffer. - If an error is injected by the fuse, the internal buffer remains - unchanged. - - @par Exception Safety - No-throw guarantee. + Transfers up to `buffer_size(buffers)` bytes to the + peer's incoming buffer. If the peer is suspended in + @ref read_some, it is resumed. Before every write, + the attached @ref fuse is consulted to possibly inject + an error. If the fuse fires, the peer is automatically + closed. If the stream is closed, returns `error::eof`. + The returned `std::size_t` is the number of bytes + transferred. - @param buffers The const buffer sequence containing data to write. + @param buffers The const buffer sequence containing + data to write. @return An awaitable yielding `(error_code,std::size_t)`. 
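+
+        @par Example
+        A short sketch, assuming `a` and `b` come from
+        @ref make_stream_pair:
+
+        @code
+        auto [ec, n] = co_await a.write_some(
+            const_buffer( "ping", 4 ) );
+        // On success the bytes are readable on `b`, and
+        // any coroutine suspended in b.read_some resumes.
+        @endcode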
- @see fuse + @see fuse, close */ template auto @@ -289,17 +304,6 @@ class stream bool await_ready() const noexcept { return true; } - // This method is required to satisfy Capy's IoAwaitable concept, - // but is never called because await_ready() returns true. - // - // Capy uses a two-layer awaitable system: the promise's - // await_transform wraps awaitables in a transform_awaiter whose - // standard await_suspend(coroutine_handle) calls this custom - // 3-argument overload, passing the executor and stop_token from - // the coroutine's context. For synchronous test awaitables like - // this one, the coroutine never suspends, so this is not invoked. - // The signature exists to allow the same awaitable type to work - // with both synchronous (test) and asynchronous (real I/O) code. void await_suspend( coro, executor_ref, @@ -310,31 +314,151 @@ class stream io_result await_resume() { - auto ec = self_->f_->maybe_fail(); - if(ec) - return {ec, 0}; - std::size_t n = buffer_size(buffers_); - n = (std::min)(n, self_->max_write_size_); if(n == 0) return {{}, 0}; - std::size_t const old_size = self_->write_data_.size(); - self_->write_data_.resize(old_size + n); - buffer_copy(make_buffer( - self_->write_data_.data() + old_size, n), buffers_, n); + auto* st = self_->state_.get(); - ec = self_->consume_match_(); + if(st->closed) + return {error::eof, 0}; + + close_guard g{st}; + auto ec = st->f.maybe_fail(); if(ec) - return {ec, n}; + return {ec, 0}; + g.disarm(); + + int peer = 1 - self_->index_; + auto& side = st->sides[peer]; + + std::size_t const old_size = side.buf.size(); + side.buf.resize(old_size + n); + buffer_copy(make_buffer( + side.buf.data() + old_size, n), + buffers_, n); + + if(side.pending_h) + { + auto h = side.pending_h; + side.pending_h = {}; + auto ex = side.pending_ex; + side.pending_ex = {}; + ex.dispatch(h); + } return {{}, n}; } }; return awaitable{this, buffers}; } + + /** Inject data into this stream's peer for reading. + + Appends data directly to the peer's incoming buffer, + bypassing the fuse. If the peer is suspended in + @ref read_some, it is resumed. This is test setup, + not an operation under test. + + @param sv The data to inject. + + @see make_stream_pair + */ + void + provide(std::string_view sv) + { + int peer = 1 - index_; + auto& side = state_->sides[peer]; + side.buf.append(sv); + if(side.pending_h) + { + auto h = side.pending_h; + side.pending_h = {}; + auto ex = side.pending_ex; + side.pending_ex = {}; + ex.dispatch(h); + } + } + + /** Read from this stream and verify the content. + + Reads exactly `expected.size()` bytes from the stream + and compares against the expected string. The read goes + through the normal path including the fuse. + + @param expected The expected content. + + @return A pair of `(error_code, bool)`. The error_code + is set if a read error occurs (e.g. fuse injection). + The bool is true if the data matches. + + @see provide + */ + std::pair + expect(std::string_view expected) + { + std::error_code result; + bool match = false; + run_blocking()([]( + stream& self, + std::string_view expected, + std::error_code& result, + bool& match) -> task<> + { + std::string buf(expected.size(), '\0'); + auto [ec, n] = co_await read( + self, mutable_buffer( + buf.data(), buf.size())); + if(ec) + { + result = ec; + co_return; + } + match = (std::string_view( + buf.data(), n) == expected); + }(*this, expected, result, match)); + return {result, match}; + } + + /** Return the stream's pending read data. 
+
+        Returns a view of the data waiting to be read
+        from this stream. This is a direct peek at the
+        internal buffer, bypassing the fuse.
+
+        @return A view of the pending data.
+
+        @see provide, expect
+    */
+    std::string_view
+    data() const noexcept
+    {
+        return state_->sides[index_].buf;
+    }
 };
 
+/** Create a connected pair of test streams.
+
+    Data written to one stream becomes readable on the other.
+    If a coroutine calls @ref stream::read_some when no data
+    is available, it suspends until the peer writes. Before
+    every read or write, the @ref fuse is consulted to
+    possibly inject an error for testing fault scenarios.
+    When the fuse fires, the peer is automatically closed.
+
+    @param f The fuse used to inject errors during operations.
+
+    @return A pair of connected streams.
+
+    @see stream, fuse
+*/
+inline std::pair<stream, stream>
+make_stream_pair(fuse f = {})
+{
+    auto sp = std::make_shared(std::move(f));
+    return {stream(sp, 0), stream(sp, 1)};
+}
+
 } // test
 } // capy
 } // boost
diff --git a/include/boost/capy/test/write_sink.hpp b/include/boost/capy/test/write_sink.hpp
index 53549ddf..98ef9548 100644
--- a/include/boost/capy/test/write_sink.hpp
+++ b/include/boost/capy/test/write_sink.hpp
@@ -36,9 +36,9 @@ namespace test {
     what was written. The associated @ref fuse enables error injection at controlled points.
 
-    Unlike @ref write_stream which provides partial writes via `write_some`,
-    this class satisfies the @ref WriteSink concept by providing complete
-    writes and EOF signaling.
+    This class satisfies the @ref WriteSink concept by providing partial
+    writes via `write_some` (satisfying @ref WriteStream), complete
+    writes via `write`, and EOF signaling via `write_eof`.
 
     @par Thread Safety
     Not thread-safe.
@@ -64,7 +64,7 @@
 */
 class write_sink
 {
-    fuse* f_;
+    fuse f_;
     std::string data_;
     std::string expect_;
     std::size_t max_write_size_;
@@ -93,9 +93,9 @@
         Use to simulate chunked delivery.
     */
     explicit write_sink(
-        fuse& f,
+        fuse f = {},
         std::size_t max_write_size = std::size_t(-1)) noexcept
-        : f_(&f)
+        : f_(std::move(f))
         , max_write_size_(max_write_size)
     {
     }
@@ -147,21 +147,73 @@
         eof_called_ = false;
     }
 
-    /** Asynchronously write data to the sink.
+    /** Asynchronously write some data to the sink.
 
-        Transfers all bytes from the provided const buffer sequence to
-        the internal buffer. Before every write, the attached @ref fuse
-        is consulted to possibly inject an error for testing fault
-        scenarios. The returned `std::size_t` is the number of bytes
-        transferred.
+        Transfers up to `buffer_size( buffers )` bytes from the provided
+        const buffer sequence to the internal buffer. Before every write,
+        the attached @ref fuse is consulted to possibly inject an error.
 
-        @par Effects
-        On success, appends the written bytes to the internal buffer.
-        If an error is injected by the fuse, the internal buffer remains
-        unchanged.
+        @param buffers The const buffer sequence containing data to write.
 
-        @par Exception Safety
-        No-throw guarantee.
+        @return An awaitable yielding `(error_code,std::size_t)`.
+ + @see fuse + */ + template + auto + write_some(CB buffers) + { + struct awaitable + { + write_sink* self_; + CB buffers_; + + bool await_ready() const noexcept { return true; } + + void await_suspend( + coro, + executor_ref, + std::stop_token) const noexcept + { + } + + io_result + await_resume() + { + if(buffer_empty(buffers_)) + return {{}, 0}; + + auto ec = self_->f_.maybe_fail(); + if(ec) + return {ec, 0}; + + std::size_t n = buffer_size(buffers_); + n = (std::min)(n, self_->max_write_size_); + + std::size_t const old_size = self_->data_.size(); + self_->data_.resize(old_size + n); + buffer_copy(make_buffer( + self_->data_.data() + old_size, n), buffers_, n); + + ec = self_->consume_match_(); + if(ec) + { + self_->data_.resize(old_size); + return {ec, 0}; + } + + return {{}, n}; + } + }; + return awaitable{this, buffers}; + } + + /** Asynchronously write data to the sink. + + Transfers all bytes from the provided const buffer sequence + to the internal buffer. Unlike @ref write_some, this ignores + `max_write_size` and writes all available data, matching the + @ref WriteSink semantic contract. @param buffers The const buffer sequence containing data to write. @@ -180,17 +232,6 @@ class write_sink bool await_ready() const noexcept { return true; } - // This method is required to satisfy Capy's IoAwaitable concept, - // but is never called because await_ready() returns true. - // - // Capy uses a two-layer awaitable system: the promise's - // await_transform wraps awaitables in a transform_awaiter whose - // standard await_suspend(coroutine_handle) calls this custom - // 3-argument overload, passing the executor and stop_token from - // the coroutine's context. For synchronous test awaitables like - // this one, the coroutine never suspends, so this is not invoked. - // The signature exists to allow the same awaitable type to work - // with both synchronous (test) and asynchronous (real I/O) code. void await_suspend( coro, executor_ref, @@ -201,19 +242,18 @@ class write_sink io_result await_resume() { - auto ec = self_->f_->maybe_fail(); + auto ec = self_->f_.maybe_fail(); if(ec) return {ec, 0}; std::size_t n = buffer_size(buffers_); - n = (std::min)(n, self_->max_write_size_); if(n == 0) return {{}, 0}; std::size_t const old_size = self_->data_.size(); self_->data_.resize(old_size + n); buffer_copy(make_buffer( - self_->data_.data() + old_size, n), buffers_, n); + self_->data_.data() + old_size, n), buffers_); ec = self_->consume_match_(); if(ec) @@ -225,17 +265,16 @@ class write_sink return awaitable{this, buffers}; } - /** Asynchronously write data to the sink with optional EOF. + /** Atomically write data and signal end-of-stream. Transfers all bytes from the provided const buffer sequence to - the internal buffer, optionally signaling end-of-stream. Before - every write, the attached @ref fuse is consulted to possibly - inject an error for testing fault scenarios. The returned - `std::size_t` is the number of bytes transferred. + the internal buffer and signals end-of-stream. Before the write, + the attached @ref fuse is consulted to possibly inject an error + for testing fault scenarios. @par Effects - On success, appends the written bytes to the internal buffer. - If `eof` is `true`, marks the sink as finalized. + On success, appends the written bytes to the internal buffer + and marks the sink as finalized. If an error is injected by the fuse, the internal buffer remains unchanged. @@ -243,7 +282,6 @@ class write_sink No-throw guarantee. 
@param buffers The const buffer sequence containing data to write. - @param eof If true, signals end-of-stream after writing. @return An awaitable yielding `(error_code,std::size_t)`. @@ -251,19 +289,15 @@ class write_sink */ template auto - write(CB buffers, bool eof) + write_eof(CB buffers) { struct awaitable { write_sink* self_; CB buffers_; - bool eof_; bool await_ready() const noexcept { return true; } - // This method is required to satisfy Capy's IoAwaitable concept, - // but is never called because await_ready() returns true. - // See the comment on write(CB buffers) for a detailed explanation. void await_suspend( coro, executor_ref, @@ -274,31 +308,29 @@ class write_sink io_result await_resume() { - auto ec = self_->f_->maybe_fail(); + auto ec = self_->f_.maybe_fail(); if(ec) return {ec, 0}; std::size_t n = buffer_size(buffers_); - n = (std::min)(n, self_->max_write_size_); if(n > 0) { std::size_t const old_size = self_->data_.size(); self_->data_.resize(old_size + n); buffer_copy(make_buffer( - self_->data_.data() + old_size, n), buffers_, n); + self_->data_.data() + old_size, n), buffers_); ec = self_->consume_match_(); if(ec) return {ec, n}; } - if(eof_) - self_->eof_called_ = true; + self_->eof_called_ = true; return {{}, n}; } }; - return awaitable{this, buffers, eof}; + return awaitable{this, buffers}; } /** Signal end-of-stream. @@ -340,7 +372,7 @@ class write_sink io_result<> await_resume() { - auto ec = self_->f_->maybe_fail(); + auto ec = self_->f_.maybe_fail(); if(ec) return {ec}; diff --git a/include/boost/capy/test/write_stream.hpp b/include/boost/capy/test/write_stream.hpp index 99271252..26b301ee 100644 --- a/include/boost/capy/test/write_stream.hpp +++ b/include/boost/capy/test/write_stream.hpp @@ -32,11 +32,13 @@ namespace test { /** A mock stream for testing write operations. Use this to verify code that performs writes without needing - real I/O. Call @ref write_some to write data, then @ref str - or @ref data to retrieve what was written. The associated - @ref fuse enables error injection at controlled points. An - optional `max_write_size` constructor parameter limits bytes - per write to simulate chunked delivery. + real I/O. Call @ref write_some to write data, then @ref data + to retrieve what was written. The associated @ref fuse enables + error injection at controlled points. An optional + `max_write_size` constructor parameter limits bytes per write + to simulate chunked delivery. + + This class satisfies the @ref WriteStream concept. @par Thread Safety Not thread-safe. @@ -51,15 +53,15 @@ namespace test { const_buffer( "Hello", 5 ) ); if( ec ) co_return; - // ws.str() returns "Hello" + // ws.data() returns "Hello" } ); @endcode - @see fuse + @see fuse, WriteStream */ class write_stream { - fuse* f_; + fuse f_; std::string data_; std::string expect_; std::size_t max_write_size_; @@ -87,9 +89,9 @@ class write_stream Use to simulate chunked network delivery. 
 */
     explicit write_stream(
-        fuse& f,
+        fuse f = {},
         std::size_t max_write_size = std::size_t(-1)) noexcept
-        : f_(&f)
+        : f_(std::move(f))
         , max_write_size_(max_write_size)
     {
     }
@@ -179,14 +181,15 @@ class write_stream
         io_result<std::size_t>
         await_resume()
         {
-            auto ec = self_->f_->maybe_fail();
+            if(buffer_empty(buffers_))
+                return {{}, 0};
+
+            auto ec = self_->f_.maybe_fail();
             if(ec)
                 return {ec, 0};
 
             std::size_t n = buffer_size(buffers_);
             n = (std::min)(n, self_->max_write_size_);
-            if(n == 0)
-                return {{}, 0};
 
             std::size_t const old_size = self_->data_.size();
             self_->data_.resize(old_size + n);
@@ -195,7 +198,10 @@ class write_stream
             ec = self_->consume_match_();
             if(ec)
-                return {ec, n};
+            {
+                self_->data_.resize(old_size);
+                return {ec, 0};
+            }
 
             return {{}, n};
         }
diff --git a/include/boost/capy/write.hpp b/include/boost/capy/write.hpp
index 12095a49..c36a6053 100644
--- a/include/boost/capy/write.hpp
+++ b/include/boost/capy/write.hpp
@@ -11,8 +11,7 @@
 #define BOOST_CAPY_WRITE_HPP
 
 #include
-#include
-#include
+#include
 #include
 #include
 #include
@@ -54,7 +53,7 @@ namespace capy {
 task<> send_response( WriteStream auto& stream, std::string_view body )
 {
     auto [ec, n] = co_await write( stream, make_buffer( body ) );
-    if( ec.failed() )
+    if( ec )
         detail::throw_system_error( ec );
     // All bytes written successfully
 }
@@ -66,7 +65,7 @@
 auto
 write(
     WriteStream auto& stream,
     ConstBufferSequence auto const& buffers) ->
-    task<io_result<std::size_t>>
+    io_task<std::size_t>
 {
     consuming_buffers consuming(buffers);
     std::size_t const total_size = buffer_size(buffers);
diff --git a/papers/B1005.io-streamables.md b/papers/B1005.io-streamables.md
index 4e4d2ffa..099ddd96 100644
--- a/papers/B1005.io-streamables.md
+++ b/papers/B1005.io-streamables.md
@@ -295,11 +295,11 @@ The library provides composing algorithms that operate on the stream, source, an
 
 ```cpp
 template <class Src, class Sink>
-task<io_result<std::size_t>>
+io_task<std::size_t>
 push_to(Src& source, Sink& sink);
 
 template <class Src, class Stream>
-task<io_result<std::size_t>>
+io_task<std::size_t>
 push_to(Src& source, Stream& stream);
 ```
 
 The function pulls data from the source and writes it to the destination until t
 
 ```cpp
 template <class Src, class Sink>
-task<io_result<std::size_t>>
+io_task<std::size_t>
 pull_from(Src& source, Sink& sink);
 
 template <class Src, class Sink>
-task<io_result<std::size_t>>
+io_task<std::size_t>
 pull_from(Src& source, Sink& sink);
 ```
 
@@ -327,7 +327,7 @@ The function uses the callee-owns-buffers model: the sink provides writable buff
 auto read(
     ReadStream auto& stream,
     MutableBufferSequence auto const& buffers) ->
-    task<io_result<std::size_t>>;
+    io_task<std::size_t>;
 ```
 
 Loops calling `read_some()` until the entire buffer sequence is filled or an error occurs. This converts partial-read semantics into complete-read semantics.
 
 auto read(
     ReadSource auto& source,
     DynamicBufferParam auto&& buffers,
     std::size_t initial_amount = 2048) ->
-    task<io_result<std::size_t>>;
+    io_task<std::size_t>;
 ```
 
 Reads until EOF, growing the buffer using a 1.5x growth strategy. Useful for reading complete responses of unknown size.
 
@@ -350,7 +350,7 @@ Reads until EOF, growing the buffer using a 1.5x growth strategy. Useful for rea
 auto write(
     WriteStream auto& stream,
     ConstBufferSequence auto const& buffers) ->
-    task<io_result<std::size_t>>;
+    io_task<std::size_t>;
 ```
 
 Loops calling `write_some()` until the entire buffer sequence is written or an error occurs.
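The complete-write guarantee is easy to see against the `test::write_stream` mock updated in this patch: even when `max_write_size` caps each `write_some()` call to a few bytes, the composed `write()` keeps looping until the whole sequence is delivered. A minimal usage sketch (the coroutine wiring around it is assumed for illustration, not part of the change):

```cpp
task<>
send_body( std::string_view body )
{
    // Default fuse; at most 4 bytes per write_some() call,
    // the chunked delivery documented on test::write_stream.
    test::write_stream ws( {}, 4 );

    // write() loops write_some() until every byte is written
    auto [ec, n] = co_await write( ws, make_buffer( body ) );
    if( ec )
        co_return;

    // On success, n == body.size() and ws.data() == body
}
```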
@@ -495,7 +495,7 @@ A generic transfer from source to sink:
 
 ```cpp
 template <class Source, class Sink>
-task<io_result<std::size_t>> transfer(Source& source, Sink& sink)
+io_task<std::size_t> transfer(Source& source, Sink& sink)
 {
     const_buffer arr[16];
     std::size_t total = 0;
diff --git a/test/unit/buffers/buffer_array.cpp b/test/unit/buffers/buffer_array.cpp
index 71b0ccb6..33407de4 100644
--- a/test/unit/buffers/buffer_array.cpp
+++ b/test/unit/buffers/buffer_array.cpp
@@ -154,6 +154,82 @@ struct buffer_array_test
         BOOST_TEST_EQ(test::make_string(ba), pat);
     }
 
+    // iterator-pair constructor (fits)
+    {
+        std::vector<const_buffer> v;
+        v.emplace_back(pat.data(), 3);
+        v.emplace_back(pat.data() + 3, 5);
+        v.emplace_back(pat.data() + 8, pat.size() - 8);
+        const_buffer_array<4> ba(v.begin(), v.end());
+        BOOST_TEST_EQ(ba.to_span().size(), 3);
+        BOOST_TEST_EQ(buffer_size(ba), pat.size());
+        BOOST_TEST_EQ(test::make_string(ba), pat);
+    }
+
+    // iterator-pair constructor truncates
+    {
+        std::vector<const_buffer> v;
+        v.emplace_back(pat.data(), 3);
+        v.emplace_back(pat.data() + 3, 5);
+        v.emplace_back(pat.data() + 8, pat.size() - 8);
+        const_buffer_array<2> ba(v.begin(), v.end());
+        BOOST_TEST_EQ(ba.to_span().size(), 2);
+        BOOST_TEST_EQ(buffer_size(ba), 8);
+    }
+
+    // iterator-pair empty range
+    {
+        std::vector<const_buffer> v;
+        const_buffer_array<4> ba(v.begin(), v.end());
+        BOOST_TEST_EQ(ba.to_span().size(), 0);
+        BOOST_TEST_EQ(buffer_size(ba), 0);
+    }
+
+    // iterator-pair skips zero-sized buffers
+    {
+        std::vector<const_buffer> v;
+        v.emplace_back(pat.data(), 0);
+        v.emplace_back(pat.data(), 3);
+        v.emplace_back(pat.data() + 3, 0);
+        v.emplace_back(pat.data() + 3, 5);
+        const_buffer_array<4> ba(v.begin(), v.end());
+        BOOST_TEST_EQ(ba.to_span().size(), 2);
+        BOOST_TEST_EQ(buffer_size(ba), 8);
+    }
+
+    // in_place iterator-pair throws on overflow
+    {
+        std::vector<const_buffer> v;
+        v.emplace_back(pat.data(), 3);
+        v.emplace_back(pat.data() + 3, 5);
+        v.emplace_back(pat.data() + 8, pat.size() - 8);
+        bool threw = false;
+        try
+        {
+            const_buffer_array<2> ba(
+                std::in_place, v.begin(), v.end());
+            (void)ba;
+        }
+        catch(std::length_error const&)
+        {
+            threw = true;
+        }
+        BOOST_TEST(threw);
+    }
+
+    // in_place iterator-pair with exact fit
+    {
+        std::vector<const_buffer> v;
+        v.emplace_back(pat.data(), 3);
+        v.emplace_back(pat.data() + 3, 5);
+        v.emplace_back(pat.data() + 8, pat.size() - 8);
+        const_buffer_array<4> ba(
+            std::in_place, v.begin(), v.end());
+        BOOST_TEST_EQ(ba.to_span().size(), 3);
+        BOOST_TEST_EQ(buffer_size(ba), pat.size());
+        BOOST_TEST_EQ(test::make_string(ba), pat);
+    }
+
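An editorial sketch, not part of the patch, of the two constructor forms these cases exercise: the plain iterator-pair form keeps the first non-empty buffers that fit, while the `std::in_place` form throws `std::length_error` on overflow instead of silently truncating. The string literals here are illustrative:

```cpp
std::vector<const_buffer> v;
v.emplace_back("abc", 3);
v.emplace_back("defgh", 5);
v.emplace_back("ij", 2);

const_buffer_array<2> t(v.begin(), v.end());
// t.to_span().size() == 2, buffer_size(t) == 8: third buffer dropped

const_buffer_array<4> e(std::in_place, v.begin(), v.end());
// all three buffers kept; with capacity 2 this would throw std::length_error
```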
     // slice tests
     {
         for(std::size_t i = 0; i <= pat.size(); ++i)
@@ -293,6 +369,82 @@
         BOOST_TEST_EQ(test::make_string(ba), pat);
     }
 
+    // iterator-pair constructor (fits)
+    {
+        std::vector<mutable_buffer> v;
+        v.emplace_back(pat.data(), 3);
+        v.emplace_back(pat.data() + 3, 5);
+        v.emplace_back(pat.data() + 8, pat.size() - 8);
+        mutable_buffer_array<4> ba(v.begin(), v.end());
+        BOOST_TEST_EQ(ba.to_span().size(), 3);
+        BOOST_TEST_EQ(buffer_size(ba), pat.size());
+        BOOST_TEST_EQ(test::make_string(ba), pat);
+    }
+
+    // iterator-pair constructor truncates
+    {
+        std::vector<mutable_buffer> v;
+        v.emplace_back(pat.data(), 3);
+        v.emplace_back(pat.data() + 3, 5);
+        v.emplace_back(pat.data() + 8, pat.size() - 8);
+        mutable_buffer_array<2> ba(v.begin(), v.end());
+        BOOST_TEST_EQ(ba.to_span().size(), 2);
+        BOOST_TEST_EQ(buffer_size(ba), 8);
+    }
+
+    // iterator-pair empty range
+    {
+        std::vector<mutable_buffer> v;
+        mutable_buffer_array<4> ba(v.begin(), v.end());
+        BOOST_TEST_EQ(ba.to_span().size(), 0);
+        BOOST_TEST_EQ(buffer_size(ba), 0);
+    }
+
+    // iterator-pair skips zero-sized buffers
+    {
+        std::vector<mutable_buffer> v;
+        v.emplace_back(pat.data(), 0);
+        v.emplace_back(pat.data(), 3);
+        v.emplace_back(pat.data() + 3, 0);
+        v.emplace_back(pat.data() + 3, 5);
+        mutable_buffer_array<4> ba(v.begin(), v.end());
+        BOOST_TEST_EQ(ba.to_span().size(), 2);
+        BOOST_TEST_EQ(buffer_size(ba), 8);
+    }
+
+    // in_place iterator-pair throws on overflow
+    {
+        std::vector<mutable_buffer> v;
+        v.emplace_back(pat.data(), 3);
+        v.emplace_back(pat.data() + 3, 5);
+        v.emplace_back(pat.data() + 8, pat.size() - 8);
+        bool threw = false;
+        try
+        {
+            mutable_buffer_array<2> ba(
+                std::in_place, v.begin(), v.end());
+            (void)ba;
+        }
+        catch(std::length_error const&)
+        {
+            threw = true;
+        }
+        BOOST_TEST(threw);
+    }
+
+    // in_place iterator-pair with exact fit
+    {
+        std::vector<mutable_buffer> v;
+        v.emplace_back(pat.data(), 3);
+        v.emplace_back(pat.data() + 3, 5);
+        v.emplace_back(pat.data() + 8, pat.size() - 8);
+        mutable_buffer_array<4> ba(
+            std::in_place, v.begin(), v.end());
+        BOOST_TEST_EQ(ba.to_span().size(), 3);
+        BOOST_TEST_EQ(buffer_size(ba), pat.size());
+        BOOST_TEST_EQ(test::make_string(ba), pat);
+    }
+
     // slice tests
     {
         for(std::size_t i = 0; i <= pat.size(); ++i)
diff --git a/test/unit/buffers/buffer_param.cpp b/test/unit/buffers/buffer_param.cpp
index 22da5e7d..9859bc60 100644
--- a/test/unit/buffers/buffer_param.cpp
+++ b/test/unit/buffers/buffer_param.cpp
@@ -282,6 +282,66 @@ struct buffer_param_test
         }
     }
 
+    void
+    testMore()
+    {
+        // Single buffer — no more after first window
+        {
+            std::string data = "Hello";
+            const_buffer buf(data.data(), data.size());
+            buffer_param bp(buf);
+
+            auto bufs = bp.data();
+            BOOST_TEST(! bufs.empty());
+            BOOST_TEST(! bp.more());
+        }
+
+        // Empty sequence — no more
+        {
+            std::vector<const_buffer> bufs;
+            buffer_param bp(bufs);
+            auto d = bp.data();
+            BOOST_TEST(d.empty());
+            BOOST_TEST(! bp.more());
+        }
+
+        // Exactly max_iovec_ buffers — fits in one window
+        {
+            std::vector<std::string> strings(detail::max_iovec_, "x");
+            std::vector<const_buffer> bufs;
+            for(auto const& s : strings)
+                bufs.emplace_back(s.data(), s.size());
+
+            buffer_param bp(bufs);
+            auto d = bp.data();
+            BOOST_TEST(! d.empty());
+            BOOST_TEST(! bp.more());
+        }
+
+        // One more than max_iovec_ — needs two windows
+        {
+            std::vector<std::string> strings(
+                detail::max_iovec_ + 1, "x");
+            std::vector<const_buffer> bufs;
+            for(auto const& s : strings)
+                bufs.emplace_back(s.data(), s.size());
+
+            buffer_param bp(bufs);
+            auto d = bp.data();
+            BOOST_TEST(! d.empty());
+            BOOST_TEST(bp.more());
+
+            bp.consume(total_size(d));
+            d = bp.data();
+            BOOST_TEST(!
bp.more()); + + bp.consume(total_size(d)); + d = bp.data(); + BOOST_TEST(d.empty()); + } + } + void testPartialByteConsumption() { @@ -327,6 +387,7 @@ struct buffer_param_test testMutableSingleBuffer(); testMutableMultipleBuffers(); testBufferType(); + testMore(); testPartialByteConsumption(); } }; diff --git a/test/unit/concept/buffer_sink.cpp b/test/unit/concept/buffer_sink.cpp index 4e1c085b..ebb2a36b 100644 --- a/test/unit/concept/buffer_sink.cpp +++ b/test/unit/concept/buffer_sink.cpp @@ -94,13 +94,7 @@ struct valid_buffer_sink } mock_commit_awaitable - commit(std::size_t, bool) - { - return {}; - } - - mock_commit_awaitable - commit_eof() + commit_eof(std::size_t) { return {}; } @@ -122,13 +116,7 @@ struct invalid_buffer_sink_wrong_type } mock_commit_awaitable_wrong_type - commit(std::size_t, bool) - { - return {}; - } - - mock_commit_awaitable_wrong_type - commit_eof() + commit_eof(std::size_t) { return {}; } @@ -144,13 +132,7 @@ struct invalid_buffer_sink_no_prepare } mock_commit_awaitable - commit(std::size_t, bool) - { - return {}; - } - - mock_commit_awaitable - commit_eof() + commit_eof(std::size_t) { return {}; } @@ -166,7 +148,7 @@ struct invalid_buffer_sink_no_commit } mock_commit_awaitable - commit_eof() + commit_eof(std::size_t) { return {}; } @@ -186,12 +168,6 @@ struct invalid_buffer_sink_no_commit_eof { return {}; } - - mock_commit_awaitable - commit(std::size_t, bool) - { - return {}; - } }; // Invalid: commit is not IoAwaitable @@ -210,13 +186,7 @@ struct invalid_buffer_sink_not_io } mock_commit_awaitable_not_io - commit(std::size_t, bool) - { - return {}; - } - - mock_commit_awaitable_not_io - commit_eof() + commit_eof(std::size_t) { return {}; } @@ -237,13 +207,7 @@ struct invalid_buffer_sink_prepare_returns_void } mock_commit_awaitable - commit(std::size_t, bool) - { - return {}; - } - - mock_commit_awaitable - commit_eof() + commit_eof(std::size_t) { return {}; } @@ -265,13 +229,7 @@ struct invalid_buffer_sink_wrong_sig } mock_commit_awaitable - commit(std::size_t, bool) - { - return {}; - } - - mock_commit_awaitable - commit_eof() + commit_eof(std::size_t) { return {}; } diff --git a/test/unit/concept/read_source.cpp b/test/unit/concept/read_source.cpp index 5ac28f0c..fbe4547c 100644 --- a/test/unit/concept/read_source.cpp +++ b/test/unit/concept/read_source.cpp @@ -112,9 +112,16 @@ struct mock_source_awaitable_not_io // Mock source types //---------------------------------------------------------- -// Valid ReadSource with templated read (pair) +// Valid ReadSource with both read_some and read (pair) struct valid_read_source_pair { + template + mock_source_awaitable_pair + read_some(MB const&) + { + return {}; + } + template mock_source_awaitable_pair read(MB const&) @@ -123,9 +130,16 @@ struct valid_read_source_pair } }; -// Valid ReadSource with templated read (tuple) +// Valid ReadSource with both read_some and read (tuple) struct valid_read_source_tuple { + template + mock_source_awaitable_tuple + read_some(MB const&) + { + return {}; + } + template mock_source_awaitable_tuple read(MB const&) @@ -137,6 +151,12 @@ struct valid_read_source_tuple // Valid ReadSource accepting mutable_buffer directly (non-templated) struct valid_read_source_not_templated { + mock_source_awaitable_pair + read_some(mutable_buffer const&) + { + return {}; + } + mock_source_awaitable_pair read(mutable_buffer const&) { @@ -144,9 +164,38 @@ struct valid_read_source_not_templated } }; +// Invalid: has read but no read_some (does not satisfy ReadStream) +struct 
invalid_read_source_no_read_some +{ + template + mock_source_awaitable_pair + read(MB const&) + { + return {}; + } +}; + +// Invalid: has read_some but no read +struct invalid_read_source_no_read +{ + template + mock_source_awaitable_pair + read_some(MB const&) + { + return {}; + } +}; + // Invalid: read returns wrong type (just ec instead of ec, size_t) struct invalid_read_source_wrong_type { + template + mock_source_awaitable_pair + read_some(MB const&) + { + return {}; + } + template mock_source_awaitable_wrong_type read(MB const&) @@ -155,14 +204,21 @@ struct invalid_read_source_wrong_type } }; -// Invalid: missing read -struct invalid_read_source_no_read +// Invalid: missing both read_some and read +struct invalid_read_source_empty { }; // Invalid: read is not IoAwaitable struct invalid_read_source_not_io { + template + mock_source_awaitable_pair + read_some(MB const&) + { + return {}; + } + template mock_source_awaitable_not_io read(MB const&) @@ -174,6 +230,13 @@ struct invalid_read_source_not_io // Invalid: read returns non-awaitable struct invalid_read_source_returns_int { + template + mock_source_awaitable_pair + read_some(MB const&) + { + return {}; + } + template int read(MB const&) { return 0; } }; @@ -189,11 +252,17 @@ static_assert(ReadSource); static_assert(ReadSource); static_assert(ReadSource); +// Has read but no read_some: does not satisfy ReadSource +static_assert(!ReadSource); + +// Has read_some but no read: does not satisfy ReadSource +static_assert(!ReadSource); + // Wrong return type does not satisfy ReadSource static_assert(!ReadSource); -// Missing read does not satisfy ReadSource -static_assert(!ReadSource); +// Missing everything does not satisfy ReadSource +static_assert(!ReadSource); // Non-IoAwaitable does not satisfy ReadSource static_assert(!ReadSource); diff --git a/test/unit/concept/write_sink.cpp b/test/unit/concept/write_sink.cpp index 056883d2..44eff5f7 100644 --- a/test/unit/concept/write_sink.cpp +++ b/test/unit/concept/write_sink.cpp @@ -22,7 +22,7 @@ namespace capy { namespace { -// Mock IoAwaitable returning std::error_code (for io_result<>) +// Mock IoAwaitable returning std::error_code (for write_eof()) struct mock_sink_awaitable { bool await_ready() const noexcept { return true; } @@ -41,7 +41,7 @@ struct mock_sink_awaitable } }; -// Mock IoAwaitable returning (error_code, size_t) for write(buffers, eof) +// Mock IoAwaitable returning (error_code, size_t) struct mock_sink_awaitable_with_size { bool await_ready() const noexcept { return true; } @@ -107,9 +107,16 @@ struct mock_sink_awaitable_with_size_not_io // Mock sink types //---------------------------------------------------------- -// Valid WriteSink with templated write +// Valid WriteSink: write_some + write + write_eof(buffers) + write_eof() struct valid_write_sink { + template + mock_sink_awaitable_with_size + write_some(CB const&) + { + return {}; + } + template mock_sink_awaitable_with_size write(CB const&) @@ -119,7 +126,7 @@ struct valid_write_sink template mock_sink_awaitable_with_size - write(CB const&, bool) + write_eof(CB const&) { return {}; } @@ -131,9 +138,15 @@ struct valid_write_sink } }; -// Valid WriteSink accepting const_buffer directly (non-templated) +// Valid WriteSink with non-templated overloads struct valid_write_sink_not_templated { + mock_sink_awaitable_with_size + write_some(const_buffer const&) + { + return {}; + } + mock_sink_awaitable_with_size write(const_buffer const&) { @@ -141,7 +154,31 @@ struct valid_write_sink_not_templated } 
mock_sink_awaitable_with_size - write(const_buffer const&, bool) + write_eof(const_buffer const&) + { + return {}; + } + + mock_sink_awaitable + write_eof() + { + return {}; + } +}; + +// Invalid: missing write_some (does not satisfy WriteStream) +struct invalid_write_sink_no_write_some +{ + template + mock_sink_awaitable_with_size + write(CB const&) + { + return {}; + } + + template + mock_sink_awaitable_with_size + write_eof(CB const&) { return {}; } @@ -156,6 +193,13 @@ struct valid_write_sink_not_templated // Invalid: write returns wrong type (ec instead of ec, size_t) struct invalid_write_sink_wrong_write_type { + template + mock_sink_awaitable_with_size + write_some(CB const&) + { + return {}; + } + template mock_sink_awaitable write(CB const&) @@ -165,7 +209,7 @@ struct invalid_write_sink_wrong_write_type template mock_sink_awaitable_with_size - write(CB const&, bool) + write_eof(CB const&) { return {}; } @@ -177,11 +221,18 @@ struct invalid_write_sink_wrong_write_type } }; -// Invalid: write_eof returns wrong type +// Invalid: write_eof() returns wrong type (ec,size_t instead of ec) struct invalid_write_sink_wrong_eof_type { template - mock_sink_awaitable + mock_sink_awaitable_with_size + write_some(CB const&) + { + return {}; + } + + template + mock_sink_awaitable_with_size write(CB const&) { return {}; @@ -189,7 +240,7 @@ struct invalid_write_sink_wrong_eof_type template mock_sink_awaitable_with_size - write(CB const&, bool) + write_eof(CB const&) { return {}; } @@ -204,6 +255,20 @@ struct invalid_write_sink_wrong_eof_type // Invalid: missing write struct invalid_write_sink_no_write { + template + mock_sink_awaitable_with_size + write_some(CB const&) + { + return {}; + } + + template + mock_sink_awaitable_with_size + write_eof(CB const&) + { + return {}; + } + mock_sink_awaitable write_eof() { @@ -211,11 +276,18 @@ struct invalid_write_sink_no_write } }; -// Invalid: missing write_eof -struct invalid_write_sink_no_write_eof +// Invalid: missing write_eof() (bare) +struct invalid_write_sink_no_bare_eof { template - mock_sink_awaitable + mock_sink_awaitable_with_size + write_some(CB const&) + { + return {}; + } + + template + mock_sink_awaitable_with_size write(CB const&) { return {}; @@ -223,17 +295,24 @@ struct invalid_write_sink_no_write_eof template mock_sink_awaitable_with_size - write(CB const&, bool) + write_eof(CB const&) { return {}; } }; -// Invalid: missing write with eof parameter -struct invalid_write_sink_no_write_eof_param +// Invalid: missing write_eof(buffers) +struct invalid_write_sink_no_write_eof_buffers { template - mock_sink_awaitable + mock_sink_awaitable_with_size + write_some(CB const&) + { + return {}; + } + + template + mock_sink_awaitable_with_size write(CB const&) { return {}; @@ -250,7 +329,14 @@ struct invalid_write_sink_no_write_eof_param struct invalid_write_sink_write_not_io { template - mock_sink_awaitable_not_io + mock_sink_awaitable_with_size + write_some(CB const&) + { + return {}; + } + + template + mock_sink_awaitable_with_size_not_io write(CB const&) { return {}; @@ -258,7 +344,7 @@ struct invalid_write_sink_write_not_io template mock_sink_awaitable_with_size - write(CB const&, bool) + write_eof(CB const&) { return {}; } @@ -270,11 +356,18 @@ struct invalid_write_sink_write_not_io } }; -// Invalid: write_eof is not IoAwaitable +// Invalid: write_eof() is not IoAwaitable struct invalid_write_sink_eof_not_io { template - mock_sink_awaitable + mock_sink_awaitable_with_size + write_some(CB const&) + { + return {}; + } + + template + 
mock_sink_awaitable_with_size write(CB const&) { return {}; @@ -282,7 +375,7 @@ struct invalid_write_sink_eof_not_io template mock_sink_awaitable_with_size - write(CB const&, bool) + write_eof(CB const&) { return {}; } @@ -294,31 +387,18 @@ struct invalid_write_sink_eof_not_io } }; -// Invalid: write returns non-awaitable -struct invalid_write_sink_write_returns_int +// Invalid: write_eof(buffers) returns wrong type +struct invalid_write_sink_wrong_write_eof_buffers_type { - template - int write(CB const&) { return 0; } - template mock_sink_awaitable_with_size - write(CB const&, bool) + write_some(CB const&) { return {}; } - mock_sink_awaitable - write_eof() - { - return {}; - } -}; - -// Invalid: write with eof returns wrong type (ec only instead of ec, size_t) -struct invalid_write_sink_wrong_write_eof_param_type -{ template - mock_sink_awaitable + mock_sink_awaitable_with_size write(CB const&) { return {}; @@ -326,7 +406,7 @@ struct invalid_write_sink_wrong_write_eof_param_type template mock_sink_awaitable - write(CB const&, bool) + write_eof(CB const&) { return {}; } @@ -338,19 +418,22 @@ struct invalid_write_sink_wrong_write_eof_param_type } }; -// Invalid: write with eof is not IoAwaitable -struct invalid_write_sink_write_eof_param_not_io +// Invalid: write returns non-awaitable +struct invalid_write_sink_write_returns_int { template - mock_sink_awaitable - write(CB const&) + mock_sink_awaitable_with_size + write_some(CB const&) { return {}; } template - mock_sink_awaitable_with_size_not_io - write(CB const&, bool) + int write(CB const&) { return 0; } + + template + mock_sink_awaitable_with_size + write_eof(CB const&) { return {}; } @@ -362,6 +445,11 @@ struct invalid_write_sink_write_eof_param_not_io } }; +// Invalid: empty type +struct invalid_write_sink_empty +{ +}; + } // namespace //---------------------------------------------------------- @@ -372,20 +460,23 @@ struct invalid_write_sink_write_eof_param_not_io static_assert(WriteSink); static_assert(WriteSink); +// Missing write_some does not satisfy WriteSink +static_assert(!WriteSink); + // Wrong return types do not satisfy WriteSink static_assert(!WriteSink); static_assert(!WriteSink); -static_assert(!WriteSink); +static_assert(!WriteSink); // Missing methods do not satisfy WriteSink static_assert(!WriteSink); -static_assert(!WriteSink); -static_assert(!WriteSink); +static_assert(!WriteSink); +static_assert(!WriteSink); +static_assert(!WriteSink); // Non-IoAwaitable does not satisfy WriteSink static_assert(!WriteSink); static_assert(!WriteSink); -static_assert(!WriteSink); // Non-awaitable return does not satisfy WriteSink static_assert(!WriteSink); diff --git a/test/unit/ex/immediate.cpp b/test/unit/ex/immediate.cpp index 79f6da00..ca0163c9 100644 --- a/test/unit/ex/immediate.cpp +++ b/test/unit/ex/immediate.cpp @@ -11,7 +11,7 @@ #include #include -#include +#include #include "test/unit/test_helpers.hpp" @@ -125,7 +125,7 @@ struct immediate_test // co_await immediate> { - auto coro = []() -> task> { + auto coro = []() -> io_task { co_return co_await immediate>{{{}, 100}}; }; auto result = run_task(coro()); diff --git a/test/unit/io/any_buffer_sink.cpp b/test/unit/io/any_buffer_sink.cpp index 7cef4210..a0547623 100644 --- a/test/unit/io/any_buffer_sink.cpp +++ b/test/unit/io/any_buffer_sink.cpp @@ -18,18 +18,263 @@ #include #include #include +#include #include "test/unit/test_helpers.hpp" +#include #include +#include #include namespace boost { namespace capy { namespace { -// Static assert that any_buffer_sink satisfies 
BufferSink +// Static assert that any_buffer_sink satisfies BufferSink and WriteSink static_assert(BufferSink); +static_assert(WriteSink); + +//---------------------------------------------------------- +// Mock satisfying both BufferSink and WriteSink. +// Tracks which API was used so tests can verify native +// forwarding vs. synthesized path. + +class buffer_write_sink +{ + test::fuse f_; + std::string data_; + std::string prepare_buf_; + std::size_t prepare_size_ = 0; + std::size_t max_prepare_size_; + bool eof_called_ = false; + bool write_api_used_ = false; + +public: + explicit buffer_write_sink( + test::fuse f = {}, + std::size_t max_prepare_size = 4096) noexcept + : f_(std::move(f)) + , max_prepare_size_(max_prepare_size) + { + prepare_buf_.resize(max_prepare_size_); + } + + std::string_view + data() const noexcept + { + return data_; + } + + bool + eof_called() const noexcept + { + return eof_called_; + } + + /// Return true if the WriteSink API was used. + bool + write_api_used() const noexcept + { + return write_api_used_; + } + + //------------------------------------------------------ + // BufferSink interface + + std::span + prepare(std::span dest) + { + if(dest.empty()) + return {}; + prepare_size_ = max_prepare_size_; + dest[0] = make_buffer(prepare_buf_.data(), prepare_size_); + return dest.first(1); + } + + auto + commit(std::size_t n) + { + struct awaitable + { + buffer_write_sink* self_; + std::size_t n_; + + bool await_ready() const noexcept { return true; } + void await_suspend(coro, executor_ref, std::stop_token) const noexcept {} + + io_result<> + await_resume() + { + auto ec = self_->f_.maybe_fail(); + if(ec) return {ec}; + std::size_t to_commit = (std::min)(n_, self_->prepare_size_); + self_->data_.append(self_->prepare_buf_.data(), to_commit); + self_->prepare_size_ = 0; + return {}; + } + }; + return awaitable{this, n}; + } + + auto + commit_eof(std::size_t n) + { + struct awaitable + { + buffer_write_sink* self_; + std::size_t n_; + + bool await_ready() const noexcept { return true; } + void await_suspend(coro, executor_ref, std::stop_token) const noexcept {} + + io_result<> + await_resume() + { + auto ec = self_->f_.maybe_fail(); + if(ec) return {ec}; + std::size_t to_commit = (std::min)(n_, self_->prepare_size_); + self_->data_.append(self_->prepare_buf_.data(), to_commit); + self_->prepare_size_ = 0; + self_->eof_called_ = true; + return {}; + } + }; + return awaitable{this, n}; + } + + //------------------------------------------------------ + // WriteSink interface + + template + auto + write_some(CB buffers) + { + struct awaitable + { + buffer_write_sink* self_; + CB buffers_; + + bool await_ready() const noexcept { return true; } + void await_suspend(coro, executor_ref, std::stop_token) const noexcept {} + + io_result + await_resume() + { + self_->write_api_used_ = true; + if(buffer_empty(buffers_)) + return {{}, 0}; + auto ec = self_->f_.maybe_fail(); + if(ec) return {ec, 0}; + + std::size_t n = buffer_size(buffers_); + std::size_t const old_size = self_->data_.size(); + self_->data_.resize(old_size + n); + buffer_copy(make_buffer( + self_->data_.data() + old_size, n), buffers_, n); + return {{}, n}; + } + }; + return awaitable{this, buffers}; + } + + template + auto + write(CB buffers) + { + struct awaitable + { + buffer_write_sink* self_; + CB buffers_; + + bool await_ready() const noexcept { return true; } + void await_suspend(coro, executor_ref, std::stop_token) const noexcept {} + + io_result + await_resume() + { + self_->write_api_used_ = true; + 
auto ec = self_->f_.maybe_fail(); + if(ec) return {ec, 0}; + + std::size_t n = buffer_size(buffers_); + if(n == 0) return {{}, 0}; + std::size_t const old_size = self_->data_.size(); + self_->data_.resize(old_size + n); + buffer_copy(make_buffer( + self_->data_.data() + old_size, n), buffers_); + return {{}, n}; + } + }; + return awaitable{this, buffers}; + } + + template + auto + write_eof(CB buffers) + { + struct awaitable + { + buffer_write_sink* self_; + CB buffers_; + + bool await_ready() const noexcept { return true; } + void await_suspend(coro, executor_ref, std::stop_token) const noexcept {} + + io_result + await_resume() + { + self_->write_api_used_ = true; + auto ec = self_->f_.maybe_fail(); + if(ec) return {ec, 0}; + + std::size_t n = buffer_size(buffers_); + if(n > 0) + { + std::size_t const old_size = self_->data_.size(); + self_->data_.resize(old_size + n); + buffer_copy(make_buffer( + self_->data_.data() + old_size, n), buffers_); + } + self_->eof_called_ = true; + return {{}, n}; + } + }; + return awaitable{this, buffers}; + } + + auto + write_eof() + { + struct awaitable + { + buffer_write_sink* self_; + + bool await_ready() const noexcept { return true; } + void await_suspend(coro, executor_ref, std::stop_token) const noexcept {} + + io_result<> + await_resume() + { + self_->write_api_used_ = true; + auto ec = self_->f_.maybe_fail(); + if(ec) return {ec}; + self_->eof_called_ = true; + return {}; + } + }; + return awaitable{this}; + } +}; + +// Verify concepts at compile time +static_assert(BufferSink); +static_assert(WriteSink); + +// Verify BufferSink-only mock does NOT satisfy WriteSink +static_assert(!WriteSink); + +//---------------------------------------------------------- class any_buffer_sink_test { @@ -44,7 +289,7 @@ class any_buffer_sink_test BOOST_TEST(!abs); } - // Construct from sink + // Construct from BufferSink-only (reference) { test::fuse f; test::buffer_sink bs(f); @@ -52,6 +297,55 @@ class any_buffer_sink_test BOOST_TEST(abs.has_value()); BOOST_TEST(static_cast(abs)); } + + // Construct from BufferSink+WriteSink (reference) + { + test::fuse f; + buffer_write_sink bws(f); + any_buffer_sink abs(&bws); + BOOST_TEST(abs.has_value()); + BOOST_TEST(static_cast(abs)); + } + } + + void + testConstructOwning() + { + // Owning construct by value (BufferSink-only) + { + test::fuse f; + test::buffer_sink bs(f); + any_buffer_sink abs(std::move(bs)); + BOOST_TEST(abs.has_value()); + } + + // Owning construct by value (BufferSink+WriteSink) + { + test::fuse f; + buffer_write_sink bws(f); + any_buffer_sink abs(std::move(bws)); + BOOST_TEST(abs.has_value()); + } + + // Owning construct, then use + { + test::fuse f; + auto r = f.armed([&](test::fuse&) -> task<> { + test::buffer_sink bs(f); + any_buffer_sink abs(std::move(bs)); + + mutable_buffer arr[detail::max_iovec_]; + auto bufs = abs.prepare(arr); + BOOST_TEST_EQ(bufs.size(), 1u); + + std::memcpy(bufs[0].data(), "owned", 5); + + auto [ec] = co_await abs.commit_eof(5); + if(ec) + co_return; + }); + BOOST_TEST(r.success); + } } void @@ -68,13 +362,31 @@ class any_buffer_sink_test BOOST_TEST(abs2.has_value()); BOOST_TEST(!abs1.has_value()); - // Move assign + // Move assign into empty any_buffer_sink abs3; abs3 = std::move(abs2); BOOST_TEST(abs3.has_value()); BOOST_TEST(!abs2.has_value()); } + void + testMoveAssignOverExisting() + { + test::fuse f; + test::buffer_sink bs1(f); + test::buffer_sink bs2(f); + + any_buffer_sink abs1(&bs1); + any_buffer_sink abs2(&bs2); + BOOST_TEST(abs1.has_value()); + 
BOOST_TEST(abs2.has_value()); + + // Move assign over a wrapper that already holds a sink + abs1 = std::move(abs2); + BOOST_TEST(abs1.has_value()); + BOOST_TEST(!abs2.has_value()); + } + void testPrepareCommit() { @@ -114,7 +426,7 @@ class any_buffer_sink_test std::memcpy(bufs[0].data(), "world", 5); - auto [ec] = co_await abs.commit(5, true); + auto [ec] = co_await abs.commit_eof(5); if(ec) co_return; @@ -142,7 +454,7 @@ class any_buffer_sink_test if(ec1) co_return; - auto [ec2] = co_await abs.commit_eof(); + auto [ec2] = co_await abs.commit_eof(0); if(ec2) co_return; @@ -181,7 +493,7 @@ class any_buffer_sink_test std::memcpy(bufs[0].data(), "world", 5); - auto [ec] = co_await abs.commit(5, true); + auto [ec] = co_await abs.commit_eof(5); if(ec) co_return; } @@ -200,7 +512,7 @@ class any_buffer_sink_test test::buffer_sink bs(f); any_buffer_sink abs(&bs); - auto [ec] = co_await abs.commit_eof(); + auto [ec] = co_await abs.commit_eof(0); if(ec) co_return; @@ -210,6 +522,582 @@ class any_buffer_sink_test BOOST_TEST(r.success); } + //------------------------------------------------------ + // Synthesized WriteSink tests (BufferSink-only) + + void + testWriteSome() + { + test::fuse f; + auto r = f.armed([&](test::fuse&) -> task<> { + test::buffer_sink bs(f); + any_buffer_sink abs(&bs); + + auto buf = make_buffer("hello", 5); + auto [ec, n] = co_await abs.write_some(buf); + if(ec) + co_return; + + BOOST_TEST(n > 0); + BOOST_TEST(n <= 5u); + BOOST_TEST_EQ(bs.data(), + std::string_view("hello", n)); + }); + BOOST_TEST(r.success); + } + + void + testWrite() + { + test::fuse f; + auto r = f.armed([&](test::fuse&) -> task<> { + test::buffer_sink bs(f); + any_buffer_sink abs(&bs); + + auto buf = make_buffer("hello world", 11); + auto [ec, n] = co_await abs.write(buf); + if(ec) + co_return; + + BOOST_TEST_EQ(n, 11u); + BOOST_TEST_EQ(bs.data(), "hello world"); + }); + BOOST_TEST(r.success); + } + + void + testWriteEofWithBuffers() + { + test::fuse f; + auto r = f.armed([&](test::fuse&) -> task<> { + test::buffer_sink bs(f); + any_buffer_sink abs(&bs); + + auto buf = make_buffer("final", 5); + auto [ec, n] = co_await abs.write_eof(buf); + if(ec) + co_return; + + BOOST_TEST_EQ(n, 5u); + BOOST_TEST_EQ(bs.data(), "final"); + BOOST_TEST(bs.eof_called()); + }); + BOOST_TEST(r.success); + } + + void + testWriteEofNoArg() + { + test::fuse f; + auto r = f.armed([&](test::fuse&) -> task<> { + test::buffer_sink bs(f); + any_buffer_sink abs(&bs); + + auto [ec] = co_await abs.write_eof(); + if(ec) + co_return; + + BOOST_TEST(bs.data().empty()); + BOOST_TEST(bs.eof_called()); + }); + BOOST_TEST(r.success); + } + + void + testWriteThenEof() + { + test::fuse f; + auto r = f.armed([&](test::fuse&) -> task<> { + test::buffer_sink bs(f); + any_buffer_sink abs(&bs); + + auto buf = make_buffer("payload", 7); + auto [ec1, n] = co_await abs.write(buf); + if(ec1) + co_return; + + BOOST_TEST_EQ(n, 7u); + BOOST_TEST(!bs.eof_called()); + + auto [ec2] = co_await abs.write_eof(); + if(ec2) + co_return; + + BOOST_TEST_EQ(bs.data(), "payload"); + BOOST_TEST(bs.eof_called()); + }); + BOOST_TEST(r.success); + } + + void + testFuseErrorCommit() + { + int success_count = 0; + int error_count = 0; + + test::fuse f; + auto r = f.armed([&](test::fuse&) -> task<> { + test::buffer_sink bs(f); + any_buffer_sink abs(&bs); + + mutable_buffer arr[detail::max_iovec_]; + auto bufs = abs.prepare(arr); + std::memcpy(bufs[0].data(), "data", 4); + + auto [ec] = co_await abs.commit(4); + if(ec) + { + ++error_count; + co_return; + } + ++success_count; + 
}); + + BOOST_TEST(r.success); + BOOST_TEST(error_count > 0); + BOOST_TEST(success_count > 0); + } + + void + testFuseErrorCommitEof() + { + int success_count = 0; + int error_count = 0; + + test::fuse f; + auto r = f.armed([&](test::fuse&) -> task<> { + test::buffer_sink bs(f); + any_buffer_sink abs(&bs); + + auto [ec] = co_await abs.commit_eof(0); + if(ec) + { + ++error_count; + co_return; + } + ++success_count; + }); + + BOOST_TEST(r.success); + BOOST_TEST(error_count > 0); + BOOST_TEST(success_count > 0); + } + + void + testWriteSomeEmpty() + { + test::fuse f; + auto r = f.armed([&](test::fuse&) -> task<> { + test::buffer_sink bs(f); + any_buffer_sink abs(&bs); + + auto [ec, n] = co_await abs.write_some(const_buffer{}); + BOOST_TEST(!ec); + BOOST_TEST_EQ(n, 0u); + BOOST_TEST(bs.data().empty()); + }); + BOOST_TEST(r.success); + } + + void + testWriteEmpty() + { + test::fuse f; + auto r = f.armed([&](test::fuse&) -> task<> { + test::buffer_sink bs(f); + any_buffer_sink abs(&bs); + + auto [ec, n] = co_await abs.write(const_buffer{}); + BOOST_TEST(!ec); + BOOST_TEST_EQ(n, 0u); + BOOST_TEST(bs.data().empty()); + }); + BOOST_TEST(r.success); + } + + void + testWriteEofEmpty() + { + test::fuse f; + auto r = f.armed([&](test::fuse&) -> task<> { + test::buffer_sink bs(f); + any_buffer_sink abs(&bs); + + auto [ec, n] = co_await abs.write_eof(const_buffer{}); + if(ec) + co_return; + + BOOST_TEST_EQ(n, 0u); + BOOST_TEST(bs.data().empty()); + BOOST_TEST(bs.eof_called()); + }); + BOOST_TEST(r.success); + } + + void + testFuseErrorWriteSome() + { + int success_count = 0; + int error_count = 0; + + test::fuse f; + auto r = f.armed([&](test::fuse&) -> task<> { + test::buffer_sink bs(f); + any_buffer_sink abs(&bs); + + auto buf = make_buffer("hello", 5); + auto [ec, n] = co_await abs.write_some(buf); + if(ec) + { + ++error_count; + co_return; + } + ++success_count; + }); + + BOOST_TEST(r.success); + BOOST_TEST(error_count > 0); + BOOST_TEST(success_count > 0); + } + + void + testFuseErrorWrite() + { + int success_count = 0; + int error_count = 0; + + test::fuse f; + auto r = f.armed([&](test::fuse&) -> task<> { + test::buffer_sink bs(f); + any_buffer_sink abs(&bs); + + auto buf = make_buffer("hello world", 11); + auto [ec, n] = co_await abs.write(buf); + if(ec) + { + ++error_count; + co_return; + } + ++success_count; + }); + + BOOST_TEST(r.success); + BOOST_TEST(error_count > 0); + BOOST_TEST(success_count > 0); + } + + void + testFuseErrorWriteEofBuffers() + { + int success_count = 0; + int error_count = 0; + + test::fuse f; + auto r = f.armed([&](test::fuse&) -> task<> { + test::buffer_sink bs(f); + any_buffer_sink abs(&bs); + + auto buf = make_buffer("final", 5); + auto [ec, n] = co_await abs.write_eof(buf); + if(ec) + { + ++error_count; + co_return; + } + ++success_count; + }); + + BOOST_TEST(r.success); + BOOST_TEST(error_count > 0); + BOOST_TEST(success_count > 0); + } + + void + testFuseErrorWriteEof() + { + int success_count = 0; + int error_count = 0; + + test::fuse f; + auto r = f.armed([&](test::fuse&) -> task<> { + test::buffer_sink bs(f); + any_buffer_sink abs(&bs); + + auto [ec] = co_await abs.write_eof(); + if(ec) + { + ++error_count; + co_return; + } + ++success_count; + }); + + BOOST_TEST(r.success); + BOOST_TEST(error_count > 0); + BOOST_TEST(success_count > 0); + } + + void + testMoveOwning() + { + // Move construct from owning + { + test::fuse f; + test::buffer_sink bs(f); + any_buffer_sink abs1(std::move(bs)); + BOOST_TEST(abs1.has_value()); + + any_buffer_sink 
abs2(std::move(abs1)); + BOOST_TEST(abs2.has_value()); + BOOST_TEST(!abs1.has_value()); + } + + // Move assign from owning into empty + { + test::fuse f; + test::buffer_sink bs(f); + any_buffer_sink abs1(std::move(bs)); + + any_buffer_sink abs2; + abs2 = std::move(abs1); + BOOST_TEST(abs2.has_value()); + BOOST_TEST(!abs1.has_value()); + } + + // Move assign from owning over existing + { + test::fuse f; + test::buffer_sink bs1(f); + test::buffer_sink bs2(f); + any_buffer_sink abs1(std::move(bs1)); + any_buffer_sink abs2(std::move(bs2)); + + abs1 = std::move(abs2); + BOOST_TEST(abs1.has_value()); + BOOST_TEST(!abs2.has_value()); + } + } + + void + testSelfMoveAssign() + { + test::fuse f; + test::buffer_sink bs(f); + any_buffer_sink abs(&bs); + BOOST_TEST(abs.has_value()); + + any_buffer_sink* p = &abs; + any_buffer_sink* q = p; + *p = std::move(*q); + BOOST_TEST(abs.has_value()); + } + + //------------------------------------------------------ + // Native WriteSink forwarding tests (BufferSink+WriteSink) + + void + testNativeWriteSome() + { + test::fuse f; + auto r = f.armed([&](test::fuse&) -> task<> { + buffer_write_sink bws(f); + any_buffer_sink abs(&bws); + + auto buf = make_buffer("hello", 5); + auto [ec, n] = co_await abs.write_some(buf); + if(ec) + co_return; + + BOOST_TEST(n > 0); + BOOST_TEST(n <= 5u); + BOOST_TEST(bws.write_api_used()); + BOOST_TEST_EQ(bws.data(), + std::string_view("hello", n)); + }); + BOOST_TEST(r.success); + } + + void + testNativeWrite() + { + test::fuse f; + auto r = f.armed([&](test::fuse&) -> task<> { + buffer_write_sink bws(f); + any_buffer_sink abs(&bws); + + auto buf = make_buffer("hello world", 11); + auto [ec, n] = co_await abs.write(buf); + if(ec) + co_return; + + BOOST_TEST_EQ(n, 11u); + BOOST_TEST(bws.write_api_used()); + BOOST_TEST_EQ(bws.data(), "hello world"); + }); + BOOST_TEST(r.success); + } + + void + testNativeWriteEofWithBuffers() + { + test::fuse f; + auto r = f.armed([&](test::fuse&) -> task<> { + buffer_write_sink bws(f); + any_buffer_sink abs(&bws); + + auto buf = make_buffer("final", 5); + auto [ec, n] = co_await abs.write_eof(buf); + if(ec) + co_return; + + BOOST_TEST_EQ(n, 5u); + BOOST_TEST(bws.write_api_used()); + BOOST_TEST_EQ(bws.data(), "final"); + BOOST_TEST(bws.eof_called()); + }); + BOOST_TEST(r.success); + } + + void + testNativeWriteEofNoArg() + { + test::fuse f; + auto r = f.armed([&](test::fuse&) -> task<> { + buffer_write_sink bws(f); + any_buffer_sink abs(&bws); + + auto [ec] = co_await abs.write_eof(); + if(ec) + co_return; + + BOOST_TEST(bws.write_api_used()); + BOOST_TEST(bws.data().empty()); + BOOST_TEST(bws.eof_called()); + }); + BOOST_TEST(r.success); + } + + void + testNativeWriteThenEof() + { + test::fuse f; + auto r = f.armed([&](test::fuse&) -> task<> { + buffer_write_sink bws(f); + any_buffer_sink abs(&bws); + + auto buf = make_buffer("payload", 7); + auto [ec1, n] = co_await abs.write(buf); + if(ec1) + co_return; + + BOOST_TEST_EQ(n, 7u); + BOOST_TEST(!bws.eof_called()); + + auto [ec2] = co_await abs.write_eof(); + if(ec2) + co_return; + + BOOST_TEST_EQ(bws.data(), "payload"); + BOOST_TEST(bws.eof_called()); + }); + BOOST_TEST(r.success); + } + + void + testNativeWriteSomeEmpty() + { + test::fuse f; + auto r = f.armed([&](test::fuse&) -> task<> { + buffer_write_sink bws(f); + any_buffer_sink abs(&bws); + + auto [ec, n] = co_await abs.write_some(const_buffer{}); + BOOST_TEST(!ec); + BOOST_TEST_EQ(n, 0u); + }); + BOOST_TEST(r.success); + } + + void + testNativeWriteEmpty() + { + test::fuse f; + auto r = 
f.armed([&](test::fuse&) -> task<> { + buffer_write_sink bws(f); + any_buffer_sink abs(&bws); + + auto [ec, n] = co_await abs.write(const_buffer{}); + BOOST_TEST(!ec); + BOOST_TEST_EQ(n, 0u); + }); + BOOST_TEST(r.success); + } + + void + testNativeWriteEofEmpty() + { + test::fuse f; + auto r = f.armed([&](test::fuse&) -> task<> { + buffer_write_sink bws(f); + any_buffer_sink abs(&bws); + + auto [ec, n] = co_await abs.write_eof(const_buffer{}); + if(ec) + co_return; + + BOOST_TEST_EQ(n, 0u); + BOOST_TEST(bws.eof_called()); + }); + BOOST_TEST(r.success); + } + + void + testNativeOwning() + { + test::fuse f; + auto r = f.armed([&](test::fuse&) -> task<> { + buffer_write_sink bws(f); + any_buffer_sink abs(std::move(bws)); + + auto buf = make_buffer("owned", 5); + auto [ec, n] = co_await abs.write_eof(buf); + if(ec) + co_return; + + BOOST_TEST_EQ(n, 5u); + }); + BOOST_TEST(r.success); + } + + void + testNativePrepareCommit() + { + // BufferSink API still works when WriteSink is also present + test::fuse f; + auto r = f.armed([&](test::fuse&) -> task<> { + buffer_write_sink bws(f); + any_buffer_sink abs(&bws); + + mutable_buffer arr[detail::max_iovec_]; + auto bufs = abs.prepare(arr); + BOOST_TEST_EQ(bufs.size(), 1u); + + std::memcpy(bufs[0].data(), "buf-api", 7); + + auto [ec] = co_await abs.commit(7); + if(ec) + co_return; + + // BufferSink API used, not WriteSink + BOOST_TEST(!bws.write_api_used()); + BOOST_TEST_EQ(bws.data(), "buf-api"); + }); + BOOST_TEST(r.success); + } + + //------------------------------------------------------ + // pull_from tests + void testPullFromReadStream() { @@ -384,12 +1272,40 @@ class any_buffer_sink_test run() { testConstruct(); + testConstructOwning(); testMove(); + testMoveAssignOverExisting(); testPrepareCommit(); testCommitWithEof(); testCommitEof(); testMultipleCommits(); testEmptyCommit(); + testWriteSome(); + testWrite(); + testWriteEofWithBuffers(); + testWriteEofNoArg(); + testWriteThenEof(); + testFuseErrorCommit(); + testFuseErrorCommitEof(); + testWriteSomeEmpty(); + testWriteEmpty(); + testWriteEofEmpty(); + testFuseErrorWriteSome(); + testFuseErrorWrite(); + testFuseErrorWriteEofBuffers(); + testFuseErrorWriteEof(); + testMoveOwning(); + testSelfMoveAssign(); + testNativeWriteSome(); + testNativeWrite(); + testNativeWriteEofWithBuffers(); + testNativeWriteEofNoArg(); + testNativeWriteThenEof(); + testNativeWriteSomeEmpty(); + testNativeWriteEmpty(); + testNativeWriteEofEmpty(); + testNativeOwning(); + testNativePrepareCommit(); testPullFromReadStream(); testPullFromReadStreamTypeErased(); testPullFromReadStreamChunked(); diff --git a/test/unit/io/any_buffer_source.cpp b/test/unit/io/any_buffer_source.cpp index f50049e2..acc75be1 100644 --- a/test/unit/io/any_buffer_source.cpp +++ b/test/unit/io/any_buffer_source.cpp @@ -14,12 +14,16 @@ #include #include +#include #include #include #include #include "test/unit/test_helpers.hpp" +#include +#include +#include #include namespace boost { @@ -28,6 +32,183 @@ namespace { // Static assert that any_buffer_source satisfies BufferSource static_assert(BufferSource); +static_assert(ReadSource); + +//---------------------------------------------------------- +// Mock satisfying both BufferSource and ReadSource. +// Tracks which API was used so tests can verify native +// forwarding vs. synthesized path. 
+ +class buffer_read_source +{ + test::fuse f_; + std::string data_; + std::size_t pos_ = 0; + std::size_t max_pull_size_; + bool read_api_used_ = false; + +public: + explicit buffer_read_source( + test::fuse f = {}, + std::size_t max_pull_size = std::size_t(-1)) noexcept + : f_(std::move(f)) + , max_pull_size_(max_pull_size) + { + } + + void + provide(std::string_view sv) + { + data_.append(sv); + } + + std::size_t + available() const noexcept + { + return data_.size() - pos_; + } + + /// Return true if the ReadSource API was used. + bool + read_api_used() const noexcept + { + return read_api_used_; + } + + //------------------------------------------------------ + // BufferSource interface + + void + consume(std::size_t n) noexcept + { + pos_ += n; + } + + auto + pull(std::span dest) + { + struct awaitable + { + buffer_read_source* self_; + std::span dest_; + + bool await_ready() const noexcept { return true; } + void await_suspend(coro, executor_ref, std::stop_token) const noexcept {} + + io_result> + await_resume() + { + auto ec = self_->f_.maybe_fail(); + if(ec) + return {ec, {}}; + + if(self_->pos_ >= self_->data_.size()) + return {error::eof, {}}; + + std::size_t avail = self_->data_.size() - self_->pos_; + std::size_t to_return = (std::min)(avail, self_->max_pull_size_); + + if(dest_.empty()) + return {{}, {}}; + + dest_[0] = make_buffer( + self_->data_.data() + self_->pos_, + to_return); + return {{}, dest_.first(1)}; + } + }; + return awaitable{this, dest}; + } + + //------------------------------------------------------ + // ReadSource interface + + template + auto + read_some(MB buffers) + { + struct awaitable + { + buffer_read_source* self_; + MB buffers_; + + bool await_ready() const noexcept { return true; } + void await_suspend(coro, executor_ref, std::stop_token) const noexcept {} + + io_result + await_resume() + { + self_->read_api_used_ = true; + if(buffer_empty(buffers_)) + return {{}, 0}; + auto ec = self_->f_.maybe_fail(); + if(ec) + return {ec, 0}; + + if(self_->pos_ >= self_->data_.size()) + return {error::eof, 0}; + + std::size_t avail = self_->data_.size() - self_->pos_; + if(avail > self_->max_pull_size_) + avail = self_->max_pull_size_; + auto src = make_buffer( + self_->data_.data() + self_->pos_, avail); + std::size_t const n = buffer_copy(buffers_, src); + self_->pos_ += n; + return {{}, n}; + } + }; + return awaitable{this, buffers}; + } + + template + auto + read(MB buffers) + { + struct awaitable + { + buffer_read_source* self_; + MB buffers_; + + bool await_ready() const noexcept { return true; } + void await_suspend(coro, executor_ref, std::stop_token) const noexcept {} + + io_result + await_resume() + { + self_->read_api_used_ = true; + if(buffer_empty(buffers_)) + return {{}, 0}; + auto ec = self_->f_.maybe_fail(); + if(ec) + return {ec, 0}; + + if(self_->pos_ >= self_->data_.size()) + return {error::eof, 0}; + + std::size_t avail = self_->data_.size() - self_->pos_; + auto src = make_buffer( + self_->data_.data() + self_->pos_, avail); + std::size_t const n = buffer_copy(buffers_, src); + self_->pos_ += n; + + if(n < buffer_size(buffers_)) + return {error::eof, n}; + return {{}, n}; + } + }; + return awaitable{this, buffers}; + } +}; + +// Verify concepts at compile time +static_assert(BufferSource); +static_assert(ReadSource); + +// Verify BufferSource-only mock does NOT satisfy ReadSource +static_assert(!ReadSource); + +//---------------------------------------------------------- class any_buffer_source_test { @@ -42,7 +223,7 @@ class 
any_buffer_source_test BOOST_TEST(!abs); } - // Construct from source + // Construct from BufferSource-only (reference) { test::fuse f; test::buffer_source bs(f); @@ -50,6 +231,21 @@ class any_buffer_source_test BOOST_TEST(abs.has_value()); BOOST_TEST(static_cast(abs)); } + + // Construct from BufferSource+ReadSource (reference) + { + test::fuse f; + buffer_read_source brs(f); + any_buffer_source abs(&brs); + BOOST_TEST(abs.has_value()); + } + + // Owning construct from BufferSource+ReadSource + { + test::fuse f; + any_buffer_source abs((buffer_read_source(f))); + BOOST_TEST(abs.has_value()); + } } void @@ -73,6 +269,27 @@ class any_buffer_source_test BOOST_TEST(!abs2.has_value()); } + void + testMoveNative() + { + test::fuse f; + buffer_read_source brs(f); + + any_buffer_source abs1(&brs); + BOOST_TEST(abs1.has_value()); + + // Move construct + any_buffer_source abs2(std::move(abs1)); + BOOST_TEST(abs2.has_value()); + BOOST_TEST(!abs1.has_value()); + + // Move assign + any_buffer_source abs3; + abs3 = std::move(abs2); + BOOST_TEST(abs3.has_value()); + BOOST_TEST(!abs2.has_value()); + } + void testPull() { @@ -127,9 +344,9 @@ class any_buffer_source_test // Consume rest abs.consume(6); - // Third pull returns empty (exhausted) + // Third pull returns eof (exhausted) auto [ec3, bufs3] = co_await abs.pull(arr); - if(ec3) + if(ec3 != capy::cond::eof) co_return; BOOST_TEST(bufs3.empty()); }); @@ -182,10 +399,10 @@ class any_buffer_source_test { const_buffer arr[detail::max_iovec_]; auto [ec, bufs] = co_await abs.pull(arr); + if(ec == capy::cond::eof) + break; if(ec) co_return; - if(bufs.empty()) - break; for(auto const& buf : bufs) { total += buf.size(); @@ -210,14 +427,273 @@ class any_buffer_source_test const_buffer arr[detail::max_iovec_]; auto [ec, bufs] = co_await abs.pull(arr); + if(ec != capy::cond::eof) + co_return; + BOOST_TEST(bufs.empty()); + }); + BOOST_TEST(r.success); + } + + //------------------------------------------------------ + // Synthesized ReadSource tests (BufferSource-only mock) + + void + testSynthesizedReadSome() + { + test::fuse f; + auto r = f.armed([&](test::fuse&) -> task<> { + test::buffer_source bs(f); + bs.provide("hello world"); + + any_buffer_source abs(&bs); + + char buf[64]; + auto [ec, n] = co_await abs.read_some( + mutable_buffer(buf, sizeof(buf))); + if(ec) + co_return; + + BOOST_TEST(n > 0); + BOOST_TEST(n <= 11u); + BOOST_TEST_EQ( + std::string_view(buf, n), + std::string_view("hello world", n)); + }); + BOOST_TEST(r.success); + } + + void + testSynthesizedRead() + { + test::fuse f; + auto r = f.armed([&](test::fuse&) -> task<> { + test::buffer_source bs(f); + bs.provide("hello world"); + + any_buffer_source abs(&bs); + + char buf[11]; + auto [ec, n] = co_await abs.read( + mutable_buffer(buf, sizeof(buf))); + if(ec) + co_return; + + BOOST_TEST_EQ(n, 11u); + BOOST_TEST_EQ( + std::string_view(buf, n), + "hello world"); + }); + BOOST_TEST(r.success); + } + + void + testSynthesizedReadSomeEmpty() + { + test::fuse f; + auto r = f.armed([&](test::fuse&) -> task<> { + test::buffer_source bs(f); + bs.provide("data"); + + any_buffer_source abs(&bs); + + // Empty buffer returns 0 immediately + auto [ec, n] = co_await abs.read_some( + mutable_buffer(nullptr, 0)); + BOOST_TEST(!ec); + BOOST_TEST_EQ(n, 0u); + }); + BOOST_TEST(r.success); + } + + //------------------------------------------------------ + // Native ReadSource tests (BufferSource+ReadSource mock) + + void + testNativeReadSome() + { + test::fuse f; + auto r = f.armed([&](test::fuse&) -> task<> { + 
buffer_read_source brs(f); + brs.provide("hello world"); + + any_buffer_source abs(&brs); + + char buf[64]; + auto [ec, n] = co_await abs.read_some( + mutable_buffer(buf, sizeof(buf))); + if(ec) + co_return; + + BOOST_TEST(n > 0); + BOOST_TEST(n <= 11u); + BOOST_TEST(brs.read_api_used()); + BOOST_TEST_EQ( + std::string_view(buf, n), + std::string_view("hello world", n)); + }); + BOOST_TEST(r.success); + } + + void + testNativeRead() + { + test::fuse f; + auto r = f.armed([&](test::fuse&) -> task<> { + buffer_read_source brs(f); + brs.provide("hello world"); + + any_buffer_source abs(&brs); + + char buf[11]; + auto [ec, n] = co_await abs.read( + mutable_buffer(buf, sizeof(buf))); + if(ec) + co_return; + + BOOST_TEST_EQ(n, 11u); + BOOST_TEST(brs.read_api_used()); + BOOST_TEST_EQ( + std::string_view(buf, n), + "hello world"); + }); + BOOST_TEST(r.success); + } + + void + testNativeReadSomeEmpty() + { + test::fuse f; + auto r = f.armed([&](test::fuse&) -> task<> { + buffer_read_source brs(f); + brs.provide("data"); + + any_buffer_source abs(&brs); + + // Empty buffer returns 0 immediately + auto [ec, n] = co_await abs.read_some( + mutable_buffer(nullptr, 0)); + BOOST_TEST(!ec); + BOOST_TEST_EQ(n, 0u); + // ReadSource API should NOT be called for empty buffers + BOOST_TEST(!brs.read_api_used()); + }); + BOOST_TEST(r.success); + } + + void + testNativePullAndConsume() + { + // Verify that pull/consume still works even when + // the wrapped type satisfies ReadSource + test::fuse f; + auto r = f.armed([&](test::fuse&) -> task<> { + buffer_read_source brs(f); + brs.provide("hello"); + + any_buffer_source abs(&brs); + + const_buffer arr[detail::max_iovec_]; + auto [ec, bufs] = co_await abs.pull(arr); + if(ec) + co_return; + + BOOST_TEST_EQ(bufs.size(), 1u); + BOOST_TEST_EQ(bufs[0].size(), 5u); + abs.consume(5); + + // Read API should NOT be used for pull/consume + BOOST_TEST(!brs.read_api_used()); + }); + BOOST_TEST(r.success); + } + + void + testNativeOwning() + { + // Verify owning construction forwards native ReadSource + test::fuse f; + auto r = f.armed([&](test::fuse&) -> task<> { + buffer_read_source brs(f); + brs.provide("hello world"); + + any_buffer_source abs(std::move(brs)); + + char buf[11]; + auto [ec, n] = co_await abs.read( + mutable_buffer(buf, sizeof(buf))); if(ec) co_return; - BOOST_TEST(bufs.empty()); // Source exhausted + BOOST_TEST_EQ(n, 11u); + BOOST_TEST_EQ( + std::string_view(buf, n), + "hello world"); }); BOOST_TEST(r.success); } + void + testNativeReadEof() + { + // Verify EOF handling through native path + test::fuse f; + auto r = f.armed([&](test::fuse&) -> task<> { + buffer_read_source brs(f); + brs.provide("hi"); + + any_buffer_source abs(&brs); + + // Try to read more than available + char buf[64]; + auto [ec, n] = co_await abs.read( + mutable_buffer(buf, sizeof(buf))); + + // Fuse may inject a non-eof error + if(ec && ec != capy::cond::eof) + co_return; + + // Should get partial data + EOF + BOOST_TEST(ec == capy::cond::eof); + BOOST_TEST_EQ(n, 2u); + BOOST_TEST(brs.read_api_used()); + BOOST_TEST_EQ(std::string_view(buf, n), "hi"); + }); + BOOST_TEST(r.success); + } + + void + testNativeReadSomeChunked() + { + // Verify chunked native read_some + test::fuse f; + auto r = f.armed([&](test::fuse&) -> task<> { + buffer_read_source brs(f, 3); + brs.provide("hello world"); + + any_buffer_source abs(&brs); + + std::string result; + for(;;) + { + char buf[64]; + auto [ec, n] = co_await abs.read_some( + mutable_buffer(buf, sizeof(buf))); + if(ec == capy::cond::eof) + break; + 
if(ec) + co_return; + result.append(buf, n); + } + + BOOST_TEST(brs.read_api_used()); + BOOST_TEST_EQ(result, "hello world"); + }); + BOOST_TEST(r.success); + } + + //------------------------------------------------------ + // push_to tests + void testPushTo() { @@ -309,11 +785,22 @@ class any_buffer_source_test { testConstruct(); testMove(); + testMoveNative(); testPull(); testConsume(); testPullWithoutConsume(); testPullMultiple(); testPullEmpty(); + testSynthesizedReadSome(); + testSynthesizedRead(); + testSynthesizedReadSomeEmpty(); + testNativeReadSome(); + testNativeRead(); + testNativeReadSomeEmpty(); + testNativePullAndConsume(); + testNativeOwning(); + testNativeReadEof(); + testNativeReadSomeChunked(); testPushTo(); testPushToTypeErased(); testPushToChunked(); diff --git a/test/unit/io/any_read_source.cpp b/test/unit/io/any_read_source.cpp index e1284bf2..99bd621c 100644 --- a/test/unit/io/any_read_source.cpp +++ b/test/unit/io/any_read_source.cpp @@ -12,18 +12,53 @@ #include #include +#include +#include #include #include +#include #include "test/unit/test_helpers.hpp" +#include + #include +#include +#include #include namespace boost { namespace capy { + +static_assert(ReadSource); + namespace { +struct pending_source_awaitable +{ + int* counter_; + pending_source_awaitable(int* c) : counter_(c) {} + pending_source_awaitable(pending_source_awaitable&& o) noexcept + : counter_(std::exchange(o.counter_, nullptr)) {} + ~pending_source_awaitable() { if(counter_) ++(*counter_); } + bool await_ready() const noexcept { return false; } + coro await_suspend(coro, executor_ref, std::stop_token) + { return std::noop_coroutine(); } + io_result await_resume() + { return {{}, 0}; } +}; + +struct pending_read_source +{ + int* counter_; + pending_source_awaitable read_some( + MutableBufferSequence auto) + { return pending_source_awaitable{counter_}; } + pending_source_awaitable read( + MutableBufferSequence auto) + { return pending_source_awaitable{counter_}; } +}; + class any_read_source_test { public: @@ -47,6 +82,29 @@ class any_read_source_test } } + void + testConstructOwning() + { + test::fuse f; + auto r = f.armed([&](test::fuse&) -> task<> { + test::read_source rs(f); + rs.provide("owned"); + + any_read_source ars(std::move(rs)); + BOOST_TEST(ars.has_value()); + BOOST_TEST(static_cast(ars)); + + char buf[5] = {}; + auto [ec, n] = co_await ars.read_some(make_buffer(buf)); + if(ec) + co_return; + + BOOST_TEST_EQ(n, 5u); + BOOST_TEST_EQ(std::string_view(buf, n), "owned"); + }); + BOOST_TEST(r.success); + } + void testMove() { @@ -61,13 +119,190 @@ class any_read_source_test BOOST_TEST(ars2.has_value()); BOOST_TEST(!ars1.has_value()); - // Move assign + // Move assign to empty any_read_source ars3; ars3 = std::move(ars2); BOOST_TEST(ars3.has_value()); BOOST_TEST(!ars2.has_value()); } + void + testMoveAssignNonEmpty() + { + test::fuse f; + test::read_source rs1(f); + test::read_source rs2(f); + + any_read_source ars1(&rs1); + any_read_source ars2(&rs2); + BOOST_TEST(ars1.has_value()); + BOOST_TEST(ars2.has_value()); + + // Move assign over non-empty target + ars1 = std::move(ars2); + BOOST_TEST(ars1.has_value()); + BOOST_TEST(!ars2.has_value()); + } + + void + testSelfAssign() + { + test::fuse f; + test::read_source rs(f); + + any_read_source ars(&rs); + BOOST_TEST(ars.has_value()); + + // Indirect self-assignment should be a no-op + auto& ref = ars; + ars = std::move(ref); + BOOST_TEST(ars.has_value()); + } + + void + testReadSome() + { + test::fuse f; + auto r = f.armed([&](test::fuse&) -> 
task<> { + test::read_source rs(f); + rs.provide("hello world"); + + any_read_source ars(&rs); + + char buf[32] = {}; + auto [ec, n] = co_await ars.read_some(make_buffer(buf)); + if(ec) + co_return; + + BOOST_TEST_EQ(n, 11u); + BOOST_TEST_EQ(std::string_view(buf, n), "hello world"); + }); + BOOST_TEST(r.success); + } + + void + testReadSomePartial() + { + test::fuse f; + auto r = f.armed([&](test::fuse&) -> task<> { + test::read_source rs(f); + rs.provide("hello world"); + + any_read_source ars(&rs); + + char buf[5] = {}; + auto [ec, n] = co_await ars.read_some(make_buffer(buf)); + if(ec) + co_return; + + BOOST_TEST_EQ(n, 5u); + BOOST_TEST_EQ(std::string_view(buf, n), "hello"); + BOOST_TEST_EQ(rs.available(), 6u); + }); + BOOST_TEST(r.success); + } + + void + testReadSomeMultiple() + { + test::fuse f; + auto r = f.armed([&](test::fuse&) -> task<> { + test::read_source rs(f); + rs.provide("abcdefghij"); + + any_read_source ars(&rs); + + char buf[3] = {}; + + auto [ec1, n1] = co_await ars.read_some(make_buffer(buf)); + if(ec1) + co_return; + BOOST_TEST_EQ(n1, 3u); + BOOST_TEST_EQ(std::string_view(buf, n1), "abc"); + + auto [ec2, n2] = co_await ars.read_some(make_buffer(buf)); + if(ec2) + co_return; + BOOST_TEST_EQ(n2, 3u); + BOOST_TEST_EQ(std::string_view(buf, n2), "def"); + + auto [ec3, n3] = co_await ars.read_some(make_buffer(buf)); + if(ec3) + co_return; + BOOST_TEST_EQ(n3, 3u); + BOOST_TEST_EQ(std::string_view(buf, n3), "ghi"); + }); + BOOST_TEST(r.success); + } + + void + testReadSomeEof() + { + test::fuse f; + auto r = f.armed([&](test::fuse&) -> task<> { + test::read_source rs(f); + + any_read_source ars(&rs); + + char buf[32] = {}; + auto [ec, n] = co_await ars.read_some(make_buffer(buf)); + if(ec && ec != cond::eof) + co_return; + + BOOST_TEST(ec == cond::eof); + BOOST_TEST_EQ(n, 0u); + }); + BOOST_TEST(r.success); + } + + void + testReadSomeBufferSequence() + { + test::fuse f; + auto r = f.armed([&](test::fuse&) -> task<> { + test::read_source rs(f); + rs.provide("helloworld"); + + any_read_source ars(&rs); + + char buf1[5] = {}; + char buf2[5] = {}; + std::array buffers = {{ + make_buffer(buf1), + make_buffer(buf2) + }}; + + auto [ec, n] = co_await ars.read_some(buffers); + if(ec) + co_return; + + BOOST_TEST_EQ(n, 10u); + BOOST_TEST_EQ(std::string_view(buf1, 5), "hello"); + BOOST_TEST_EQ(std::string_view(buf2, 5), "world"); + }); + BOOST_TEST(r.success); + } + + void + testReadSomeEmptyBuffer() + { + test::fuse f; + auto r = f.armed([&](test::fuse&) -> task<> { + test::read_source rs(f); + rs.provide("data"); + + any_read_source ars(&rs); + + auto [ec, n] = co_await ars.read_some(mutable_buffer()); + if(ec) + co_return; + + BOOST_TEST_EQ(n, 0u); + BOOST_TEST_EQ(rs.available(), 4u); + }); + BOOST_TEST(r.success); + } + void testRead() { @@ -318,11 +553,11 @@ class any_read_source_test void testReadWithMaxReadSize() { - // Verify any_read_source loops to fill buffer even when - // underlying source has max_read_size limitation + // Verify read forwards to underlying source's read which + // fills the buffer ignoring max_read_size test::fuse f; auto r = f.armed([&](test::fuse&) -> task<> { - test::read_source rs(f, 5); // max 5 bytes per read + test::read_source rs(f, 5); rs.provide("hello world"); any_read_source ars(&rs); @@ -332,7 +567,6 @@ class any_read_source_test if(ec) co_return; - // Should fill entire buffer by looping BOOST_TEST_EQ(n, 11u); BOOST_TEST_EQ(std::string_view(buf, n), "hello world"); }); @@ -342,24 +576,22 @@ class any_read_source_test void 
testReadWithMaxReadSizeMultiple() { - // Verify multiple reads with max_read_size, each filling buffer + // Verify multiple reads forward to underlying source's read test::fuse f; auto r = f.armed([&](test::fuse&) -> task<> { - test::read_source rs(f, 3); // max 3 bytes per read + test::read_source rs(f, 3); rs.provide("abcdefghij"); any_read_source ars(&rs); char buf[5] = {}; - // First read: fills 5 bytes by looping (3 + 2) auto [ec1, n1] = co_await ars.read(make_buffer(buf)); if(ec1) co_return; BOOST_TEST_EQ(n1, 5u); BOOST_TEST_EQ(std::string_view(buf, n1), "abcde"); - // Second read: fills 5 bytes by looping (3 + 2) auto [ec2, n2] = co_await ars.read(make_buffer(buf)); if(ec2) co_return; @@ -369,11 +601,131 @@ class any_read_source_test BOOST_TEST(r.success); } + void + testReadManyBuffers() + { + // Buffer sequence exceeds max_iovec_ -- verifies the + // windowed loop fills every buffer in the sequence. + constexpr unsigned N = detail::max_iovec_ + 4; + + test::fuse f; + auto r = f.armed([&](test::fuse&) -> task<> { + // Build data: "abcd..." repeating, one byte per buffer + std::string data; + for(unsigned i = 0; i < N; ++i) + data.push_back(static_cast('a' + (i % 26))); + + test::read_source rs(f); + rs.provide(data); + + any_read_source ars(&rs); + + char storage[N] = {}; + std::array buffers; + for(unsigned i = 0; i < N; ++i) + buffers[i] = mutable_buffer(&storage[i], 1); + + auto [ec, n] = co_await ars.read(buffers); + if(ec) + co_return; + + BOOST_TEST_EQ(n, std::size_t(N)); + for(unsigned i = 0; i < N; ++i) + BOOST_TEST_EQ(storage[i], data[i]); + }); + BOOST_TEST(r.success); + } + + void + testReadManyBuffersEof() + { + // Buffer sequence exceeds max_iovec_ but data runs out + // mid-way through the second window. + constexpr unsigned N = detail::max_iovec_ + 4; + constexpr unsigned avail = detail::max_iovec_ + 2; + + test::fuse f; + auto r = f.armed([&](test::fuse&) -> task<> { + std::string data; + for(unsigned i = 0; i < avail; ++i) + data.push_back(static_cast('a' + (i % 26))); + + test::read_source rs(f); + rs.provide(data); + + any_read_source ars(&rs); + + char storage[N] = {}; + std::array buffers; + for(unsigned i = 0; i < N; ++i) + buffers[i] = mutable_buffer(&storage[i], 1); + + auto [ec, n] = co_await ars.read(buffers); + if(ec && ec != cond::eof) + co_return; + + BOOST_TEST(ec == cond::eof); + BOOST_TEST_EQ(n, std::size_t(avail)); + for(unsigned i = 0; i < avail; ++i) + BOOST_TEST_EQ(storage[i], data[i]); + }); + BOOST_TEST(r.success); + } + + void + testDestroyWithActiveAwaitable() + { + // Split vtable: active_ops_ set in await_suspend. 
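+        // The wrapper goes out of scope while the inner awaitable
+        // is still in flight; the pending mock's destructor bumps
+        // 'destroyed', so destroyed == 1 proves the cached awaitable
+        // was torn down exactly once and not leaked.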
+ int destroyed = 0; + pending_read_source ps{&destroyed}; + { + any_read_source ars(&ps); + char buf[1]; + auto aw = ars.read_some(mutable_buffer(buf, 1)); + BOOST_TEST(!aw.await_ready()); + + test::inline_executor ex; + aw.await_suspend( + std::noop_coroutine(), executor_ref(ex), {}); + } + BOOST_TEST_EQ(destroyed, 1); + } + + void + testMoveAssignWithActiveAwaitable() + { + int destroyed = 0; + pending_read_source ps{&destroyed}; + { + any_read_source ars(&ps); + char buf[1]; + auto aw = ars.read_some(mutable_buffer(buf, 1)); + BOOST_TEST(!aw.await_ready()); + + test::inline_executor ex; + aw.await_suspend( + std::noop_coroutine(), executor_ref(ex), {}); + + any_read_source empty; + ars = std::move(empty); + BOOST_TEST_EQ(destroyed, 1); + } + } + void run() { testConstruct(); + testConstructOwning(); testMove(); + testMoveAssignNonEmpty(); + testSelfAssign(); + testReadSome(); + testReadSomePartial(); + testReadSomeMultiple(); + testReadSomeEof(); + testReadSomeBufferSequence(); + testReadSomeEmptyBuffer(); testRead(); testReadPartial(); testReadMultiple(); @@ -386,6 +738,10 @@ class any_read_source_test testReadEmpty(); testReadWithMaxReadSize(); testReadWithMaxReadSizeMultiple(); + testReadManyBuffers(); + testReadManyBuffersEof(); + testDestroyWithActiveAwaitable(); + testMoveAssignWithActiveAwaitable(); } }; diff --git a/test/unit/io/any_read_stream.cpp b/test/unit/io/any_read_stream.cpp index 37cd2b81..fd7cdc61 100644 --- a/test/unit/io/any_read_stream.cpp +++ b/test/unit/io/any_read_stream.cpp @@ -12,18 +12,48 @@ #include #include +#include #include #include #include "test/unit/test_helpers.hpp" +#include + #include +#include #include +#include namespace boost { namespace capy { + +static_assert(ReadStream); + namespace { +struct pending_read_awaitable +{ + int* counter_; + pending_read_awaitable(int* c) : counter_(c) {} + pending_read_awaitable(pending_read_awaitable&& o) noexcept + : counter_(std::exchange(o.counter_, nullptr)) {} + ~pending_read_awaitable() { if(counter_) ++(*counter_); } + bool await_ready() const noexcept { return false; } + coro await_suspend(coro, executor_ref, std::stop_token) + { return std::noop_coroutine(); } + io_result await_resume() + { return {{}, 0}; } +}; + +struct pending_read_stream +{ + int* counter_; + pending_read_awaitable read_some( + MutableBufferSequence auto) + { return pending_read_awaitable{counter_}; } +}; + class any_read_stream_test { public: @@ -45,6 +75,14 @@ class any_read_stream_test BOOST_TEST(ars.has_value()); BOOST_TEST(static_cast(ars)); } + + // Owning construct + { + test::fuse f; + any_read_stream ars(test::read_stream{f}); + BOOST_TEST(ars.has_value()); + BOOST_TEST(static_cast(ars)); + } } void @@ -66,6 +104,21 @@ class any_read_stream_test ars3 = std::move(ars2); BOOST_TEST(ars3.has_value()); BOOST_TEST(!ars2.has_value()); + + // Move assign over live wrapper + { + test::fuse f2; + test::read_stream rs2(f2); + + any_read_stream a(&rs); + any_read_stream b(&rs2); + BOOST_TEST(a.has_value()); + BOOST_TEST(b.has_value()); + + a = std::move(b); + BOOST_TEST(a.has_value()); + BOOST_TEST(!b.has_value()); + } } void @@ -251,6 +304,196 @@ class any_read_stream_test BOOST_TEST(r.success); } + void + testReadSomeEmptyBuffer() + { + test::fuse f; + auto r = f.armed([&](test::fuse&) -> task<> { + test::read_stream rs(f); + rs.provide("data"); + + any_read_stream ars(&rs); + + auto [ec, n] = co_await ars.read_some(mutable_buffer()); + if(ec) + co_return; + + BOOST_TEST_EQ(n, 0u); + BOOST_TEST_EQ(rs.available(), 4u); + }); + 
BOOST_TEST(r.success); + } + + // Trichotomy conformance: success implies !ec and n >= 1 + void + testTrichotomySuccess() + { + test::fuse f; + auto r = f.inert([&](test::fuse&) -> task<> { + test::read_stream rs(f); + rs.provide("hello"); + + any_read_stream ars(&rs); + + char buf[32] = {}; + auto [ec, n] = co_await ars.read_some( + mutable_buffer(buf, sizeof(buf))); + BOOST_TEST(!ec); + BOOST_TEST_GE(n, 1u); + BOOST_TEST_EQ(n, 5u); + }); + BOOST_TEST(r.success); + } + + // Trichotomy conformance: error implies n == 0 + void + testTrichotomyError() + { + test::fuse f; + auto r = f.armed([&](test::fuse&) -> task<> { + test::read_stream rs(f); + rs.provide("hello"); + + any_read_stream ars(&rs); + + char buf[32] = {}; + auto [ec, n] = co_await ars.read_some( + mutable_buffer(buf, sizeof(buf))); + if(ec) + { + BOOST_TEST_EQ(n, 0u); + co_return; + } + BOOST_TEST_GE(n, 1u); + }); + BOOST_TEST(r.success); + } + + // Trichotomy conformance: EOF after draining data + // returns {eof, 0}, not {eof, n} + void + testTrichotomyEofAfterDrain() + { + test::fuse f; + auto r = f.inert([&](test::fuse&) -> task<> { + test::read_stream rs(f); + rs.provide("hi"); + + any_read_stream ars(&rs); + + // Drain all data + char buf[32] = {}; + auto [ec1, n1] = co_await ars.read_some( + mutable_buffer(buf, sizeof(buf))); + BOOST_TEST(!ec1); + BOOST_TEST_EQ(n1, 2u); + + // Next read discovers EOF + auto [ec2, n2] = co_await ars.read_some( + mutable_buffer(buf, sizeof(buf))); + BOOST_TEST(ec2 == cond::eof); + BOOST_TEST_EQ(n2, 0u); + }); + BOOST_TEST(r.success); + } + + // Trichotomy conformance: empty buffer on exhausted + // stream returns {success, 0}, not {eof, 0} + void + testTrichotomyEmptyBufferExhausted() + { + test::fuse f; + auto r = f.inert([&](test::fuse&) -> task<> { + test::read_stream rs(f); + rs.provide("hi"); + + any_read_stream ars(&rs); + + // Drain all data + char buf[32] = {}; + auto [ec1, n1] = co_await ars.read_some( + mutable_buffer(buf, sizeof(buf))); + BOOST_TEST(!ec1); + BOOST_TEST_EQ(n1, 2u); + + // Empty buffer on exhausted stream is a no-op + auto [ec2, n2] = co_await ars.read_some( + mutable_buffer()); + BOOST_TEST(!ec2); + BOOST_TEST_EQ(n2, 0u); + }); + BOOST_TEST(r.success); + } + + void + testReadSomeManyBuffers() + { + // read_some is a partial operation — with more than + // max_iovec_ buffers it processes only the first window. + constexpr unsigned N = detail::max_iovec_ + 4; + + test::fuse f; + auto r = f.armed([&](test::fuse&) -> task<> { + std::string data; + for(unsigned i = 0; i < N; ++i) + data.push_back(static_cast('a' + (i % 26))); + + test::read_stream rs(f); + rs.provide(data); + + any_read_stream ars(&rs); + + char storage[N] = {}; + std::vector buffers; + for(unsigned i = 0; i < N; ++i) + buffers.emplace_back(&storage[i], 1); + + auto [ec, n] = co_await ars.read_some(buffers); + if(ec) + co_return; + + // Partial — at most max_iovec_ bytes + BOOST_TEST(!ec); + BOOST_TEST(n >= 1u); + BOOST_TEST(n <= std::size_t(detail::max_iovec_)); + }); + BOOST_TEST(r.success); + } + + void + testDestroyWithActiveAwaitable() + { + // Verify destructor cleans up an in-flight awaitable. + // Flat vtable: await_ready constructs the inner awaitable + // and sets awaitable_active_ = true. 
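+        // Unlike the split-vtable source tests, no await_suspend
+        // call is needed here: await_ready alone arms the state,
+        // so destroying the wrapper must still run the inner
+        // awaitable's destructor.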
+ int destroyed = 0; + pending_read_stream ps{&destroyed}; + { + any_read_stream ars(&ps); + char buf[1]; + auto aw = ars.read_some(mutable_buffer(buf, 1)); + BOOST_TEST(!aw.await_ready()); + } + BOOST_TEST_EQ(destroyed, 1); + } + + void + testMoveAssignWithActiveAwaitable() + { + int destroyed = 0; + pending_read_stream ps{&destroyed}; + { + any_read_stream ars(&ps); + char buf[1]; + auto aw = ars.read_some(mutable_buffer(buf, 1)); + BOOST_TEST(!aw.await_ready()); + + any_read_stream empty; + ars = std::move(empty); + BOOST_TEST_EQ(destroyed, 1); + } + } + void run() { @@ -263,6 +506,14 @@ class any_read_stream_test testReadSomeBufferSequence(); testReadSomeSingleBuffer(); testReadSomeArray(); + testReadSomeEmptyBuffer(); + testTrichotomySuccess(); + testTrichotomyError(); + testTrichotomyEofAfterDrain(); + testTrichotomyEmptyBufferExhausted(); + testReadSomeManyBuffers(); + testDestroyWithActiveAwaitable(); + testMoveAssignWithActiveAwaitable(); } }; diff --git a/test/unit/io/any_stream.cpp b/test/unit/io/any_stream.cpp index 63b6fe4c..3271a377 100644 --- a/test/unit/io/any_stream.cpp +++ b/test/unit/io/any_stream.cpp @@ -23,6 +23,10 @@ namespace boost { namespace capy { + +static_assert(ReadStream); +static_assert(WriteStream); + namespace { // Simple bidirectional mock stream for testing any_stream diff --git a/test/unit/io/any_write_sink.cpp b/test/unit/io/any_write_sink.cpp index 229be29a..3399aeac 100644 --- a/test/unit/io/any_write_sink.cpp +++ b/test/unit/io/any_write_sink.cpp @@ -11,18 +11,73 @@ #include #include +#include +#include #include +#include #include #include "test/unit/test_helpers.hpp" +#include + #include +#include +#include #include +#include namespace boost { namespace capy { + +static_assert(WriteSink); + namespace { +struct pending_sink_awaitable +{ + int* counter_; + pending_sink_awaitable(int* c) : counter_(c) {} + pending_sink_awaitable(pending_sink_awaitable&& o) noexcept + : counter_(std::exchange(o.counter_, nullptr)) {} + ~pending_sink_awaitable() { if(counter_) ++(*counter_); } + bool await_ready() const noexcept { return false; } + coro await_suspend(coro, executor_ref, std::stop_token) + { return std::noop_coroutine(); } + io_result await_resume() + { return {{}, 0}; } +}; + +struct pending_sink_eof_awaitable +{ + int* counter_; + pending_sink_eof_awaitable(int* c) : counter_(c) {} + pending_sink_eof_awaitable(pending_sink_eof_awaitable&& o) noexcept + : counter_(std::exchange(o.counter_, nullptr)) {} + ~pending_sink_eof_awaitable() { if(counter_) ++(*counter_); } + bool await_ready() const noexcept { return false; } + coro await_suspend(coro, executor_ref, std::stop_token) + { return std::noop_coroutine(); } + io_result<> await_resume() + { return {}; } +}; + +struct pending_write_sink +{ + int* counter_; + pending_sink_awaitable write_some( + ConstBufferSequence auto) + { return pending_sink_awaitable{counter_}; } + pending_sink_awaitable write( + ConstBufferSequence auto) + { return pending_sink_awaitable{counter_}; } + pending_sink_awaitable write_eof( + ConstBufferSequence auto) + { return pending_sink_awaitable{counter_}; } + pending_sink_eof_awaitable write_eof() + { return pending_sink_eof_awaitable{counter_}; } +}; + class any_write_sink_test { public: @@ -67,6 +122,116 @@ class any_write_sink_test BOOST_TEST(!aws2.has_value()); } + void + testWriteSome() + { + test::fuse f; + auto r = f.armed([&](test::fuse&) -> task<> { + test::write_sink ws(f); + + any_write_sink aws(&ws); + + auto [ec, n] = co_await aws.write_some( + make_buffer("hello 
world", 11)); + if(ec) + co_return; + + BOOST_TEST_EQ(n, 11u); + BOOST_TEST_EQ(ws.data(), "hello world"); + }); + BOOST_TEST(r.success); + } + + void + testWriteSomePartial() + { + test::fuse f; + auto r = f.armed([&](test::fuse&) -> task<> { + test::write_sink ws(f, 5); + + any_write_sink aws(&ws); + + auto [ec, n] = co_await aws.write_some( + make_buffer("hello world", 11)); + if(ec) + co_return; + + BOOST_TEST_EQ(n, 5u); + BOOST_TEST_EQ(ws.data(), "hello"); + }); + BOOST_TEST(r.success); + } + + void + testWriteSomeMultiple() + { + test::fuse f; + auto r = f.armed([&](test::fuse&) -> task<> { + test::write_sink ws(f); + + any_write_sink aws(&ws); + + auto [ec1, n1] = co_await aws.write_some( + make_buffer("hello", 5)); + if(ec1) + co_return; + BOOST_TEST_EQ(n1, 5u); + + auto [ec2, n2] = co_await aws.write_some( + make_buffer(" ", 1)); + if(ec2) + co_return; + BOOST_TEST_EQ(n2, 1u); + + auto [ec3, n3] = co_await aws.write_some( + make_buffer("world", 5)); + if(ec3) + co_return; + BOOST_TEST_EQ(n3, 5u); + + BOOST_TEST_EQ(ws.data(), "hello world"); + }); + BOOST_TEST(r.success); + } + + void + testWriteSomeEmptyBuffer() + { + test::fuse f; + auto r = f.armed([&](test::fuse&) -> task<> { + test::write_sink ws(f); + + any_write_sink aws(&ws); + + auto [ec, n] = co_await aws.write_some(const_buffer()); + if(ec) + co_return; + + BOOST_TEST_EQ(n, 0u); + BOOST_TEST(ws.data().empty()); + }); + BOOST_TEST(r.success); + } + + void + testWriteEmptyBuffer() + { + test::fuse f; + auto r = f.armed([&](test::fuse&) -> task<> { + test::write_sink ws(f); + + any_write_sink aws(&ws); + + auto [ec, n] = co_await aws.write(const_buffer()); + if(ec) + co_return; + + BOOST_TEST_EQ(n, 0u); + BOOST_TEST(ws.data().empty()); + }); + BOOST_TEST(r.success); + } + void testWrite() { @@ -165,28 +330,7 @@ class any_write_sink_test } void - testWriteWithEofFalse() - { - test::fuse f; - auto r = f.armed([&](test::fuse&) -> task<> { - test::write_sink ws(f); - - any_write_sink aws(&ws); - - auto [ec, n] = co_await aws.write( - make_buffer("hello", 5), false); - if(ec) - co_return; - - BOOST_TEST_EQ(n, 5u); - BOOST_TEST_EQ(ws.data(), "hello"); - BOOST_TEST(!ws.eof_called()); - }); - BOOST_TEST(r.success); - } - - void - testWriteWithEofTrue() + testWriteEofWithBuffers() { test::fuse f; auto r = f.armed([&](test::fuse&) -> task<> { @@ -194,8 +338,8 @@ class any_write_sink_test any_write_sink aws(&ws); - auto [ec, n] = co_await aws.write( - make_buffer("hello", 5), true); + auto [ec, n] = co_await aws.write_eof( + make_buffer("hello", 5)); if(ec) co_return; @@ -207,7 +351,7 @@ class any_write_sink_test } void - testWriteWithEofEmpty() + testWriteEofWithEmptyBuffers() { test::fuse f; auto r = f.armed([&](test::fuse&) -> task<> { @@ -215,7 +359,7 @@ class any_write_sink_test any_write_sink aws(&ws); - auto [ec, n] = co_await aws.write(const_buffer(), true); + auto [ec, n] = co_await aws.write_eof(const_buffer()); if(ec) co_return; @@ -316,7 +460,7 @@ class any_write_sink_test } void - testWriteWithEofPartial() + testWriteEofWithBuffersPartial() { // Verify that any_write_sink loops to consume all data // and signals eof even when underlying sink has max_write_size @@ -326,8 +470,8 @@ class any_write_sink_test any_write_sink aws(&ws); - auto [ec, n] = co_await aws.write( - make_buffer("hello world", 11), true); + auto [ec, n] = co_await aws.write_eof( + make_buffer("hello world", 11)); if(ec) co_return; @@ -338,23 +482,179 @@ class any_write_sink_test BOOST_TEST(r.success); } + void + testConstructOwning() + { + test::fuse f; + 
auto r = f.armed([&](test::fuse&) -> task<> { + test::write_sink ws(f); + any_write_sink aws{std::move(ws)}; + BOOST_TEST(aws.has_value()); + + auto [ec, n] = co_await aws.write_some( + make_buffer("hello", 5)); + if(ec) + co_return; + BOOST_TEST_EQ(n, 5u); + }); + BOOST_TEST(r.success); + } + + void + testWriteManyBuffers() + { + // Buffer sequence exceeds max_iovec_ -- verifies the + // windowed loop writes every buffer in the sequence. + constexpr unsigned N = detail::max_iovec_ + 4; + + test::fuse f; + auto r = f.armed([&](test::fuse&) -> task<> { + test::write_sink ws(f); + any_write_sink aws(&ws); + + std::string expected; + std::vector strings; + std::vector buffers; + for(unsigned i = 0; i < N; ++i) + { + strings.push_back(std::string(1, + static_cast('a' + (i % 26)))); + expected += strings.back(); + } + for(auto const& s : strings) + buffers.emplace_back(s.data(), s.size()); + + auto [ec, n] = co_await aws.write(buffers); + if(ec) + co_return; + + BOOST_TEST_EQ(n, std::size_t(N)); + BOOST_TEST_EQ(ws.data(), expected); + BOOST_TEST(!ws.eof_called()); + }); + BOOST_TEST(r.success); + } + + void + testWriteEofManyBuffers() + { + // Buffer sequence exceeds max_iovec_ -- verifies the + // last window is sent atomically with EOF via write_eof(buffers). + constexpr unsigned N = detail::max_iovec_ + 4; + + test::fuse f; + auto r = f.armed([&](test::fuse&) -> task<> { + test::write_sink ws(f); + any_write_sink aws(&ws); + + std::string expected; + std::vector strings; + std::vector buffers; + for(unsigned i = 0; i < N; ++i) + { + strings.push_back(std::string(1, + static_cast('a' + (i % 26)))); + expected += strings.back(); + } + for(auto const& s : strings) + buffers.emplace_back(s.data(), s.size()); + + auto [ec, n] = co_await aws.write_eof(buffers); + if(ec) + co_return; + + BOOST_TEST_EQ(n, std::size_t(N)); + BOOST_TEST_EQ(ws.data(), expected); + BOOST_TEST(ws.eof_called()); + }); + BOOST_TEST(r.success); + } + + void + testDestroyWithActiveWriteAwaitable() + { + // Split vtable: active_write_ops_ set in await_suspend. + int destroyed = 0; + pending_write_sink ps{&destroyed}; + { + any_write_sink aws(&ps); + char const data[] = "x"; + auto aw = aws.write_some(const_buffer(data, 1)); + BOOST_TEST(!aw.await_ready()); + + test::inline_executor ex; + aw.await_suspend( + std::noop_coroutine(), executor_ref(ex), {}); + } + BOOST_TEST_EQ(destroyed, 1); + } + + void + testDestroyWithActiveEofAwaitable() + { + // Split vtable: active_eof_ops_ set in await_suspend. 
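+        // write_eof() with no buffers uses its own awaitable type
+        // (pending_sink_eof_awaitable), so its cleanup is verified
+        // separately from the write_some path above.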
+ int destroyed = 0; + pending_write_sink ps{&destroyed}; + { + any_write_sink aws(&ps); + auto aw = aws.write_eof(); + BOOST_TEST(!aw.await_ready()); + + test::inline_executor ex; + aw.await_suspend( + std::noop_coroutine(), executor_ref(ex), {}); + } + BOOST_TEST_EQ(destroyed, 1); + } + + void + testMoveAssignWithActiveAwaitable() + { + int destroyed = 0; + pending_write_sink ps{&destroyed}; + { + any_write_sink aws(&ps); + char const data[] = "x"; + auto aw = aws.write_some(const_buffer(data, 1)); + BOOST_TEST(!aw.await_ready()); + + test::inline_executor ex; + aw.await_suspend( + std::noop_coroutine(), executor_ref(ex), {}); + + any_write_sink empty; + aws = std::move(empty); + BOOST_TEST_EQ(destroyed, 1); + } + } + void run() { testConstruct(); + testConstructOwning(); testMove(); + testWriteSome(); + testWriteSomePartial(); + testWriteSomeMultiple(); + testWriteSomeEmptyBuffer(); + testWriteEmptyBuffer(); testWrite(); testWriteMultiple(); testWriteBufferSequence(); testWriteSingleBuffer(); - testWriteWithEofFalse(); - testWriteWithEofTrue(); - testWriteWithEofEmpty(); + testWriteManyBuffers(); + testWriteEofWithBuffers(); + testWriteEofWithEmptyBuffers(); testWriteEof(); testWriteThenWriteEof(); testWriteArray(); testWritePartial(); - testWriteWithEofPartial(); + testWriteEofWithBuffersPartial(); + testWriteEofManyBuffers(); + testDestroyWithActiveWriteAwaitable(); + testDestroyWithActiveEofAwaitable(); + testMoveAssignWithActiveAwaitable(); } }; diff --git a/test/unit/io/any_write_stream.cpp b/test/unit/io/any_write_stream.cpp index 9689e8e3..5f5e49b0 100644 --- a/test/unit/io/any_write_stream.cpp +++ b/test/unit/io/any_write_stream.cpp @@ -11,18 +11,51 @@ #include #include +#include +#include #include +#include #include #include "test/unit/test_helpers.hpp" +#include + #include +#include +#include #include +#include namespace boost { namespace capy { + +static_assert(WriteStream); + namespace { +struct pending_write_awaitable +{ + int* counter_; + pending_write_awaitable(int* c) : counter_(c) {} + pending_write_awaitable(pending_write_awaitable&& o) noexcept + : counter_(std::exchange(o.counter_, nullptr)) {} + ~pending_write_awaitable() { if(counter_) ++(*counter_); } + bool await_ready() const noexcept { return false; } + coro await_suspend(coro, executor_ref, std::stop_token) + { return std::noop_coroutine(); } + io_result await_resume() + { return {{}, 0}; } +}; + +struct pending_write_stream +{ + int* counter_; + pending_write_awaitable write_some( + ConstBufferSequence auto) + { return pending_write_awaitable{counter_}; } +}; + class any_write_stream_test { public: @@ -44,6 +77,14 @@ class any_write_stream_test BOOST_TEST(aws.has_value()); BOOST_TEST(static_cast(aws)); } + + // Owning construct + { + test::fuse f; + any_write_stream aws(test::write_stream{f}); + BOOST_TEST(aws.has_value()); + BOOST_TEST(static_cast(aws)); + } } void @@ -65,6 +106,21 @@ class any_write_stream_test aws3 = std::move(aws2); BOOST_TEST(aws3.has_value()); BOOST_TEST(!aws2.has_value()); + + // Move assign over live wrapper + { + test::fuse f2; + test::write_stream ws2(f2); + + any_write_stream a(&ws); + any_write_stream b(&ws2); + BOOST_TEST(a.has_value()); + BOOST_TEST(b.has_value()); + + a = std::move(b); + BOOST_TEST(a.has_value()); + BOOST_TEST(!b.has_value()); + } } void @@ -220,6 +276,165 @@ class any_write_stream_test BOOST_TEST(r.success); } + void + testWriteSomeEmptyBuffer() + { + test::fuse f; + auto r = f.armed([&](test::fuse&) -> task<> { + test::write_stream ws(f); + + any_write_stream 
aws(&ws); + + // Empty span of buffers + auto [ec, n] = co_await aws.write_some( + std::span{}); + BOOST_TEST(!ec); + BOOST_TEST_EQ(n, 0u); + BOOST_TEST_EQ(ws.data(), ""); + }); + BOOST_TEST(r.success); + } + + void + testWriteSomeZeroSizedBuffer() + { + test::fuse f; + auto r = f.armed([&](test::fuse&) -> task<> { + test::write_stream ws(f); + + any_write_stream aws(&ws); + + // Buffer with zero size + const_buffer cb(nullptr, 0); + auto [ec, n] = co_await aws.write_some( + std::span(&cb, 1)); + BOOST_TEST(!ec); + BOOST_TEST_EQ(n, 0u); + BOOST_TEST_EQ(ws.data(), ""); + }); + BOOST_TEST(r.success); + } + + // Trichotomy conformance: success implies !ec and n >= 1 + void + testTrichotomySuccess() + { + test::fuse f; + auto r = f.inert([&](test::fuse&) -> task<> { + test::write_stream ws(f); + + any_write_stream aws(&ws); + + char const data[] = "hello"; + const_buffer cb(data, 5); + auto [ec, n] = co_await aws.write_some( + std::span(&cb, 1)); + BOOST_TEST(!ec); + BOOST_TEST_GE(n, 1u); + BOOST_TEST_EQ(n, 5u); + }); + BOOST_TEST(r.success); + } + + // Trichotomy conformance: error implies n == 0 + void + testTrichotomyError() + { + test::fuse f; + auto r = f.armed([&](test::fuse&) -> task<> { + test::write_stream ws(f); + + any_write_stream aws(&ws); + + char const data[] = "hello"; + const_buffer cb(data, 5); + auto [ec, n] = co_await aws.write_some( + std::span(&cb, 1)); + if(ec) + { + BOOST_TEST_EQ(n, 0u); + co_return; + } + BOOST_TEST_GE(n, 1u); + }); + BOOST_TEST(r.success); + } + + void + testWriteSomeManyBuffers() + { + // write_some is a partial operation — with more than + // max_iovec_ buffers it processes only the first window. + constexpr unsigned N = detail::max_iovec_ + 4; + + test::fuse f; + auto r = f.armed([&](test::fuse&) -> task<> { + test::write_stream ws(f); + any_write_stream aws(&ws); + + std::vector strings; + std::vector buffers; + for(unsigned i = 0; i < N; ++i) + { + strings.push_back(std::string(1, + static_cast('a' + (i % 26)))); + } + for(auto const& s : strings) + buffers.emplace_back(s.data(), s.size()); + + auto [ec, n] = co_await aws.write_some(buffers); + if(ec) + co_return; + + // Partial — at most max_iovec_ bytes + BOOST_TEST(!ec); + BOOST_TEST(n >= 1u); + BOOST_TEST(n <= std::size_t(detail::max_iovec_)); + }); + BOOST_TEST(r.success); + } + + void + testDestroyWithActiveAwaitable() + { + // Flat vtable, construct-in-await_suspend variant: + // await_suspend constructs the inner awaitable. 
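+        // Here the inner awaitable only exists once await_suspend
+        // has run, so the test must suspend before destroying the
+        // wrapper to make the cleanup observable.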
+ int destroyed = 0; + pending_write_stream ps{&destroyed}; + { + any_write_stream aws(&ps); + char const data[] = "x"; + auto aw = aws.write_some(const_buffer(data, 1)); + BOOST_TEST(!aw.await_ready()); + + test::inline_executor ex; + aw.await_suspend( + std::noop_coroutine(), executor_ref(ex), {}); + } + BOOST_TEST_EQ(destroyed, 1); + } + + void + testMoveAssignWithActiveAwaitable() + { + int destroyed = 0; + pending_write_stream ps{&destroyed}; + { + any_write_stream aws(&ps); + char const data[] = "x"; + auto aw = aws.write_some(const_buffer(data, 1)); + BOOST_TEST(!aw.await_ready()); + + test::inline_executor ex; + aw.await_suspend( + std::noop_coroutine(), executor_ref(ex), {}); + + any_write_stream empty; + aws = std::move(empty); + BOOST_TEST_EQ(destroyed, 1); + } + } + void run() { @@ -231,6 +446,13 @@ class any_write_stream_test testWriteSomeBufferSequence(); testWriteSomeSingleBuffer(); testWriteSomeArray(); + testWriteSomeEmptyBuffer(); + testWriteSomeZeroSizedBuffer(); + testWriteSomeManyBuffers(); + testTrichotomySuccess(); + testTrichotomyError(); + testDestroyWithActiveAwaitable(); + testMoveAssignWithActiveAwaitable(); } }; diff --git a/test/unit/io/write_now.cpp b/test/unit/io/write_now.cpp new file mode 100644 index 00000000..8ebd8069 --- /dev/null +++ b/test/unit/io/write_now.cpp @@ -0,0 +1,278 @@ +// +// Copyright (c) 2025 Vinnie Falco (vinnie dot falco at gmail dot com) +// +// Distributed under the Boost Software License, Version 1.0. (See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// +// Official repository: https://github.com/cppalliance/capy +// + +// Test that header file is self-contained. +#include + +#include +#include +#include +#include +#include + +#include "test/unit/test_helpers.hpp" + +#include +#include +#include + +namespace boost { +namespace capy { +namespace { + +class write_now_test +{ +public: + void + testSingleBuffer() + { + // Complete write + { + test::fuse f; + auto r = f.armed([&](test::fuse&) -> task<> { + test::write_stream ws(f); + write_now wn(ws); + + std::string_view sv("hello world"); + auto [ec, n] = co_await wn(make_buffer(sv)); + if(ec) + co_return; + + BOOST_TEST_EQ(n, 11u); + BOOST_TEST_EQ(ws.data(), "hello world"); + }); + BOOST_TEST(r.success); + } + + // Exact size + { + test::fuse f; + auto r = f.armed([&](test::fuse&) -> task<> { + test::write_stream ws(f); + write_now wn(ws); + + std::string_view sv("exact"); + auto [ec, n] = co_await wn(make_buffer(sv)); + if(ec) + co_return; + + BOOST_TEST_EQ(n, 5u); + BOOST_TEST_EQ(ws.data(), "exact"); + }); + BOOST_TEST(r.success); + } + } + + void + testEmptyBuffer() + { + test::fuse f; + auto r = f.armed([&](test::fuse&) -> task<> { + test::write_stream ws(f); + write_now wn(ws); + + auto [ec, n] = co_await wn(const_buffer()); + if(ec) + co_return; + + BOOST_TEST_EQ(n, 0u); + BOOST_TEST(ws.data().empty()); + }); + BOOST_TEST(r.success); + } + + void + testBufferArray() + { + // Two buffers + { + test::fuse f; + auto r = f.armed([&](test::fuse&) -> task<> { + test::write_stream ws(f); + write_now wn(ws); + + std::string s1("hello"); + std::string s2("world"); + std::array bufs{{ + const_buffer(s1.data(), s1.size()), + const_buffer(s2.data(), s2.size()) + }}; + + auto [ec, n] = co_await wn(bufs); + if(ec) + co_return; + + BOOST_TEST_EQ(n, 10u); + BOOST_TEST_EQ(ws.data(), "helloworld"); + }); + BOOST_TEST(r.success); + } + + // First buffer empty + { + test::fuse f; + auto r = f.armed([&](test::fuse&) -> task<> { + test::write_stream ws(f); + 
write_now wn(ws); + + std::string s2("world"); + std::array bufs{{ + const_buffer(), + const_buffer(s2.data(), s2.size()) + }}; + + auto [ec, n] = co_await wn(bufs); + if(ec) + co_return; + + BOOST_TEST_EQ(n, 5u); + BOOST_TEST_EQ(ws.data(), "world"); + }); + BOOST_TEST(r.success); + } + } + + void + testBufferPair() + { + test::fuse f; + auto r = f.armed([&](test::fuse&) -> task<> { + test::write_stream ws(f); + write_now wn(ws); + + std::string s1("ab"); + std::string s2("cdefgh"); + const_buffer_pair bp{{ + const_buffer(s1.data(), s1.size()), + const_buffer(s2.data(), s2.size()) + }}; + + auto [ec, n] = co_await wn(bp); + if(ec) + co_return; + + BOOST_TEST_EQ(n, 8u); + BOOST_TEST_EQ(ws.data(), "abcdefgh"); + }); + BOOST_TEST(r.success); + } + + void + testLargeData() + { + test::fuse f; + auto r = f.armed([&](test::fuse&) -> task<> { + test::write_stream ws(f); + write_now wn(ws); + + std::string large(10000, 'x'); + auto [ec, n] = co_await wn(make_buffer(large)); + if(ec) + co_return; + + BOOST_TEST_EQ(n, 10000u); + BOOST_TEST_EQ(ws.data().size(), 10000u); + BOOST_TEST(ws.data() == large); + }); + BOOST_TEST(r.success); + } + + void + testChunkedWrite() + { + // write_stream with max_write_size forces partial writes, + // which means multiple loop iterations in the fast path. + test::fuse f; + auto r = f.armed([&](test::fuse&) -> task<> { + test::write_stream ws(f, 3); + write_now wn(ws); + + std::string_view sv("hello world"); + auto [ec, n] = co_await wn(make_buffer(sv)); + if(ec) + co_return; + + BOOST_TEST_EQ(n, 11u); + BOOST_TEST_EQ(ws.data(), "hello world"); + }); + BOOST_TEST(r.success); + } + + void + testFuseError() + { + int error_count = 0; + int success_count = 0; + + test::fuse f; + auto r = f.armed([&](test::fuse&) -> task<> { + test::write_stream ws(f); + write_now wn(ws); + + std::string_view sv("test data"); + auto [ec, n] = co_await wn(make_buffer(sv)); + if(ec) + { + ++error_count; + co_return; + } + ++success_count; + }); + + BOOST_TEST(r.success); + BOOST_TEST(error_count > 0); + BOOST_TEST(success_count > 0); + } + + void + testFrameReuse() + { + // Multiple calls on the same write_now to exercise + // the frame cache. + test::fuse f; + auto r = f.armed([&](test::fuse&) -> task<> { + test::write_stream ws(f); + write_now wn(ws); + + std::string_view sv1("first"); + auto [ec1, n1] = co_await wn(make_buffer(sv1)); + if(ec1) + co_return; + + std::string_view sv2("second"); + auto [ec2, n2] = co_await wn(make_buffer(sv2)); + if(ec2) + co_return; + + BOOST_TEST_EQ(n1, 5u); + BOOST_TEST_EQ(n2, 6u); + BOOST_TEST_EQ(ws.data(), "firstsecond"); + }); + BOOST_TEST(r.success); + } + + void + run() + { + testSingleBuffer(); + testEmptyBuffer(); + testBufferArray(); + testBufferPair(); + testLargeData(); + testChunkedWrite(); + testFuseError(); + testFrameReuse(); + } +}; + +TEST_SUITE(write_now_test, "boost.capy.io.write_now"); + +} // namespace +} // capy +} // boost diff --git a/test/unit/test/buffer_sink.cpp b/test/unit/test/buffer_sink.cpp new file mode 100644 index 00000000..d04f8372 --- /dev/null +++ b/test/unit/test/buffer_sink.cpp @@ -0,0 +1,303 @@ +// +// Copyright (c) 2025 Vinnie Falco (vinnie.falco@gmail.com) +// +// Distributed under the Boost Software License, Version 1.0. (See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// +// Official repository: https://github.com/cppalliance/capy +// + +// Test that header file is self-contained. 
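+// Exercises the test::buffer_sink fixture itself: prepare/commit/
+// commit_eof bookkeeping, clear(), and fuse error injection, so
+// the any_buffer_sink suites above can rely on its behavior.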
+#include + +#include +#include +#include + +#include "test/unit/test_helpers.hpp" + +#include +#include + +namespace boost { +namespace capy { +namespace test { + +static_assert(BufferSink); + +class buffer_sink_test +{ +public: + void + testConstruct() + { + fuse f; + auto r = f.armed([&](fuse&) { + buffer_sink bs(f); + BOOST_TEST_EQ(bs.size(), 0u); + BOOST_TEST(bs.data().empty()); + BOOST_TEST(! bs.eof_called()); + }); + BOOST_TEST(r.success); + } + + void + testPrepareCommit() + { + fuse f; + auto r = f.armed([&](fuse&) -> task<> { + buffer_sink bs(f); + + mutable_buffer arr[16]; + auto bufs = bs.prepare(arr); + BOOST_TEST_EQ(bufs.size(), 1u); + BOOST_TEST(bufs[0].size() > 0); + + std::memcpy(bufs[0].data(), "hello", 5); + + auto [ec] = co_await bs.commit(5); + if(ec) + co_return; + + BOOST_TEST_EQ(bs.data(), "hello"); + BOOST_TEST_EQ(bs.size(), 5u); + }); + BOOST_TEST(r.success); + } + + void + testPrepareEmpty() + { + fuse f; + auto r = f.armed([&](fuse&) { + buffer_sink bs(f); + + std::span empty_span; + auto bufs = bs.prepare(empty_span); + BOOST_TEST(bufs.empty()); + }); + BOOST_TEST(r.success); + } + + void + testMultipleCommits() + { + fuse f; + auto r = f.armed([&](fuse&) -> task<> { + buffer_sink bs(f); + + { + mutable_buffer arr[16]; + auto bufs = bs.prepare(arr); + std::memcpy(bufs[0].data(), "hello ", 6); + auto [ec] = co_await bs.commit(6); + if(ec) + co_return; + } + + { + mutable_buffer arr[16]; + auto bufs = bs.prepare(arr); + std::memcpy(bufs[0].data(), "world", 5); + auto [ec] = co_await bs.commit(5); + if(ec) + co_return; + } + + BOOST_TEST_EQ(bs.data(), "hello world"); + BOOST_TEST_EQ(bs.size(), 11u); + }); + BOOST_TEST(r.success); + } + + void + testCommitWithEof() + { + fuse f; + auto r = f.armed([&](fuse&) -> task<> { + buffer_sink bs(f); + + mutable_buffer arr[16]; + auto bufs = bs.prepare(arr); + std::memcpy(bufs[0].data(), "data", 4); + + auto [ec] = co_await bs.commit_eof(4); + if(ec) + co_return; + + BOOST_TEST_EQ(bs.data(), "data"); + BOOST_TEST(bs.eof_called()); + }); + BOOST_TEST(r.success); + } + + void + testCommitEof() + { + fuse f; + auto r = f.armed([&](fuse&) -> task<> { + buffer_sink bs(f); + + auto [ec] = co_await bs.commit_eof(0); + if(ec) + co_return; + + BOOST_TEST(bs.data().empty()); + BOOST_TEST(bs.eof_called()); + }); + BOOST_TEST(r.success); + } + + void + testCommitThenEof() + { + fuse f; + auto r = f.armed([&](fuse&) -> task<> { + buffer_sink bs(f); + + mutable_buffer arr[16]; + auto bufs = bs.prepare(arr); + std::memcpy(bufs[0].data(), "hello", 5); + + auto [ec1] = co_await bs.commit(5); + if(ec1) + co_return; + BOOST_TEST(! 
bs.eof_called()); + + auto [ec2] = co_await bs.commit_eof(0); + if(ec2) + co_return; + BOOST_TEST(bs.eof_called()); + BOOST_TEST_EQ(bs.data(), "hello"); + }); + BOOST_TEST(r.success); + } + + void + testMaxPrepareSize() + { + fuse f; + auto r = f.armed([&](fuse&) -> task<> { + buffer_sink bs(f, 8); + + mutable_buffer arr[16]; + auto bufs = bs.prepare(arr); + BOOST_TEST_EQ(bufs.size(), 1u); + BOOST_TEST_EQ(bufs[0].size(), 8u); + + std::memcpy(bufs[0].data(), "12345678", 8); + + auto [ec] = co_await bs.commit(8); + if(ec) + co_return; + + BOOST_TEST_EQ(bs.data(), "12345678"); + }); + BOOST_TEST(r.success); + } + + void + testClear() + { + fuse f; + auto r = f.armed([&](fuse&) -> task<> { + buffer_sink bs(f); + + mutable_buffer arr[16]; + auto bufs = bs.prepare(arr); + std::memcpy(bufs[0].data(), "hello", 5); + + auto [ec1] = co_await bs.commit(5); + if(ec1) + co_return; + + auto [ec2] = co_await bs.commit_eof(0); + if(ec2) + co_return; + + BOOST_TEST_EQ(bs.data(), "hello"); + BOOST_TEST(bs.eof_called()); + + bs.clear(); + + BOOST_TEST(bs.data().empty()); + BOOST_TEST_EQ(bs.size(), 0u); + BOOST_TEST(! bs.eof_called()); + }); + BOOST_TEST(r.success); + } + + void + testFuseErrorInjectionCommit() + { + int commit_success_count = 0; + int commit_error_count = 0; + + fuse f; + auto r = f.armed([&](fuse&) -> task<> { + buffer_sink bs(f); + + mutable_buffer arr[16]; + auto bufs = bs.prepare(arr); + std::memcpy(bufs[0].data(), "data", 4); + + auto [ec] = co_await bs.commit(4); + if(ec) + { + ++commit_error_count; + co_return; + } + ++commit_success_count; + }); + + BOOST_TEST(r.success); + BOOST_TEST(commit_error_count > 0); + BOOST_TEST(commit_success_count > 0); + } + + void + testFuseErrorInjectionCommitEof() + { + int eof_success_count = 0; + int eof_error_count = 0; + + fuse f; + auto r = f.armed([&](fuse&) -> task<> { + buffer_sink bs(f); + + auto [ec] = co_await bs.commit_eof(0); + if(ec) + { + ++eof_error_count; + co_return; + } + ++eof_success_count; + }); + + BOOST_TEST(r.success); + BOOST_TEST(eof_error_count > 0); + BOOST_TEST(eof_success_count > 0); + } + + void + run() + { + testConstruct(); + testPrepareCommit(); + testPrepareEmpty(); + testMultipleCommits(); + testCommitWithEof(); + testCommitEof(); + testCommitThenEof(); + testMaxPrepareSize(); + testClear(); + testFuseErrorInjectionCommit(); + testFuseErrorInjectionCommitEof(); + } +}; + +TEST_SUITE(buffer_sink_test, "boost.capy.test.buffer_sink"); + +} // test +} // capy +} // boost diff --git a/test/unit/test/buffer_source.cpp b/test/unit/test/buffer_source.cpp new file mode 100644 index 00000000..ba788579 --- /dev/null +++ b/test/unit/test/buffer_source.cpp @@ -0,0 +1,319 @@ +// +// Copyright (c) 2025 Vinnie Falco (vinnie.falco@gmail.com) +// +// Distributed under the Boost Software License, Version 1.0. (See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// +// Official repository: https://github.com/cppalliance/capy +// + +// Test that header file is self-contained. 
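+// Exercises the test::buffer_source fixture itself: provide/pull/
+// consume, max_pull_size limits, and fuse error injection, as used
+// by the any_buffer_source suites above.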
+#include + +#include +#include +#include +#include +#include + +#include "test/unit/test_helpers.hpp" + +#include + +namespace boost { +namespace capy { +namespace test { + +static_assert(BufferSource); + +class buffer_source_test +{ +public: + void + testConstruct() + { + fuse f; + auto r = f.armed([&](fuse&) { + buffer_source bs(f); + BOOST_TEST_EQ(bs.available(), 0u); + }); + BOOST_TEST(r.success); + } + + void + testProvide() + { + fuse f; + auto r = f.armed([&](fuse&) { + buffer_source bs(f); + bs.provide("hello"); + BOOST_TEST_EQ(bs.available(), 5u); + + bs.provide(" world"); + BOOST_TEST_EQ(bs.available(), 11u); + }); + BOOST_TEST(r.success); + } + + void + testClear() + { + fuse f; + auto r = f.armed([&](fuse&) { + buffer_source bs(f); + bs.provide("data"); + BOOST_TEST_EQ(bs.available(), 4u); + + bs.clear(); + BOOST_TEST_EQ(bs.available(), 0u); + }); + BOOST_TEST(r.success); + } + + void + testPull() + { + fuse f; + auto r = f.armed([&](fuse&) -> task<> { + buffer_source bs(f); + bs.provide("hello world"); + + const_buffer arr[16]; + auto [ec, bufs] = co_await bs.pull(arr); + if(ec) + co_return; + + BOOST_TEST_EQ(bufs.size(), 1u); + BOOST_TEST_EQ(bufs[0].size(), 11u); + BOOST_TEST_EQ( + buffer_to_string(bufs), "hello world"); + }); + BOOST_TEST(r.success); + } + + void + testConsume() + { + fuse f; + auto r = f.armed([&](fuse&) -> task<> { + buffer_source bs(f); + bs.provide("hello world"); + + const_buffer arr[16]; + + auto [ec1, bufs1] = co_await bs.pull(arr); + if(ec1) + co_return; + BOOST_TEST_EQ(bufs1.size(), 1u); + BOOST_TEST_EQ(bufs1[0].size(), 11u); + + bs.consume(5); + BOOST_TEST_EQ(bs.available(), 6u); + + auto [ec2, bufs2] = co_await bs.pull(arr); + if(ec2) + co_return; + BOOST_TEST_EQ(bufs2.size(), 1u); + BOOST_TEST_EQ(bufs2[0].size(), 6u); + BOOST_TEST_EQ( + buffer_to_string(bufs2), " world"); + + bs.consume(6); + + auto [ec3, bufs3] = co_await bs.pull(arr); + if(ec3 != cond::eof) + co_return; + BOOST_TEST(bufs3.empty()); + }); + BOOST_TEST(r.success); + } + + void + testPullWithoutConsume() + { + fuse f; + auto r = f.armed([&](fuse&) -> task<> { + buffer_source bs(f); + bs.provide("test"); + + const_buffer arr[16]; + + auto [ec1, bufs1] = co_await bs.pull(arr); + if(ec1) + co_return; + BOOST_TEST_EQ(bufs1.size(), 1u); + BOOST_TEST_EQ(bufs1[0].size(), 4u); + + auto [ec2, bufs2] = co_await bs.pull(arr); + if(ec2) + co_return; + BOOST_TEST_EQ(bufs2.size(), 1u); + BOOST_TEST_EQ(bufs2[0].size(), 4u); + + bs.consume(4); + }); + BOOST_TEST(r.success); + } + + void + testPullEmpty() + { + fuse f; + auto r = f.armed([&](fuse&) -> task<> { + buffer_source bs(f); + + const_buffer arr[16]; + auto [ec, bufs] = co_await bs.pull(arr); + if(ec != cond::eof) + co_return; + BOOST_TEST(bufs.empty()); + }); + BOOST_TEST(r.success); + } + + void + testPullEmptyDest() + { + fuse f; + auto r = f.armed([&](fuse&) -> task<> { + buffer_source bs(f); + bs.provide("data"); + + std::span empty_span; + auto [ec, bufs] = co_await bs.pull(empty_span); + if(ec) + co_return; + BOOST_TEST(bufs.empty()); + BOOST_TEST_EQ(bs.available(), 4u); + }); + BOOST_TEST(r.success); + } + + void + testMaxPullSize() + { + fuse f; + auto r = f.armed([&](fuse&) -> task<> { + buffer_source bs(f, 5); + bs.provide("hello world"); + + const_buffer arr[16]; + auto [ec, bufs] = co_await bs.pull(arr); + if(ec) + co_return; + + BOOST_TEST_EQ(bufs.size(), 1u); + BOOST_TEST_EQ(bufs[0].size(), 5u); + BOOST_TEST_EQ( + buffer_to_string(bufs), "hello"); + }); + BOOST_TEST(r.success); + } + + void + testMaxPullSizeMultiple() + { + 
fuse f; + auto r = f.armed([&](fuse&) -> task<> { + buffer_source bs(f, 5); + bs.provide("hello world"); + + std::size_t total = 0; + for(;;) + { + const_buffer arr[16]; + auto [ec, bufs] = co_await bs.pull(arr); + if(ec == cond::eof) + break; + if(ec) + co_return; + for(auto const& buf : bufs) + { + total += buf.size(); + bs.consume(buf.size()); + } + } + + BOOST_TEST_EQ(total, 11u); + }); + BOOST_TEST(r.success); + } + + void + testFuseErrorInjection() + { + int pull_success_count = 0; + int pull_error_count = 0; + + fuse f; + auto r = f.armed([&](fuse&) -> task<> { + buffer_source bs(f); + bs.provide("test data"); + + const_buffer arr[16]; + auto [ec, bufs] = co_await bs.pull(arr); + if(ec) + { + ++pull_error_count; + co_return; + } + ++pull_success_count; + }); + + BOOST_TEST(r.success); + BOOST_TEST(pull_error_count > 0); + BOOST_TEST(pull_success_count > 0); + } + + void + testClearAndReuse() + { + fuse f; + auto r = f.armed([&](fuse&) -> task<> { + buffer_source bs(f); + bs.provide("first"); + + const_buffer arr[16]; + + auto [ec1, bufs1] = co_await bs.pull(arr); + if(ec1) + co_return; + BOOST_TEST_EQ( + buffer_to_string(bufs1), "first"); + + bs.consume(5); + bs.clear(); + bs.provide("second"); + + auto [ec2, bufs2] = co_await bs.pull(arr); + if(ec2) + co_return; + BOOST_TEST_EQ( + buffer_to_string(bufs2), "second"); + }); + BOOST_TEST(r.success); + } + + void + run() + { + testConstruct(); + testProvide(); + testClear(); + testPull(); + testConsume(); + testPullWithoutConsume(); + testPullEmpty(); + testPullEmptyDest(); + testMaxPullSize(); + testMaxPullSizeMultiple(); + testFuseErrorInjection(); + testClearAndReuse(); + } +}; + +TEST_SUITE(buffer_source_test, "boost.capy.test.buffer_source"); + +} // test +} // capy +} // boost diff --git a/test/unit/test/read_source.cpp b/test/unit/test/read_source.cpp index b742c34f..b9632471 100644 --- a/test/unit/test/read_source.cpp +++ b/test/unit/test/read_source.cpp @@ -135,9 +135,11 @@ class read_source_test BOOST_TEST_EQ(n3, 3u); BOOST_TEST_EQ(std::string_view(buf, n3), "ghi"); + // Last byte: read returns EOF with partial transfer auto [ec4, n4] = co_await rs.read(make_buffer(buf)); - if(ec4) + if(ec4 && ec4 != cond::eof) co_return; + BOOST_TEST(ec4 == cond::eof); BOOST_TEST_EQ(n4, 1u); BOOST_TEST_EQ(std::string_view(buf, n4), "j"); }); @@ -238,7 +240,7 @@ class read_source_test read_source rs(f); rs.provide("test data"); - char buf[32] = {}; + char buf[9] = {}; auto [ec, n] = co_await rs.read(make_buffer(buf)); if(ec) { @@ -282,13 +284,14 @@ class read_source_test void testMaxReadSize() { + // max_read_size only affects read_some, not read fuse f; auto r = f.armed([&](fuse&) -> task<> { - read_source rs(f, 5); // max 5 bytes per read + read_source rs(f, 5); rs.provide("hello world"); char buf[32] = {}; - auto [ec, n] = co_await rs.read(make_buffer(buf)); + auto [ec, n] = co_await rs.read_some(make_buffer(buf)); if(ec) co_return; BOOST_TEST_EQ(n, 5u); @@ -301,26 +304,27 @@ class read_source_test void testMaxReadSizeMultiple() { + // max_read_size only affects read_some, not read fuse f; auto r = f.armed([&](fuse&) -> task<> { - read_source rs(f, 3); // max 3 bytes per read + read_source rs(f, 3); rs.provide("abcdefgh"); char buf[32] = {}; - auto [ec1, n1] = co_await rs.read(make_buffer(buf)); + auto [ec1, n1] = co_await rs.read_some(make_buffer(buf)); if(ec1) co_return; BOOST_TEST_EQ(n1, 3u); BOOST_TEST_EQ(std::string_view(buf, n1), "abc"); - auto [ec2, n2] = co_await rs.read(make_buffer(buf)); + auto [ec2, n2] = co_await 
rs.read_some(make_buffer(buf)); if(ec2) co_return; BOOST_TEST_EQ(n2, 3u); BOOST_TEST_EQ(std::string_view(buf, n2), "def"); - auto [ec3, n3] = co_await rs.read(make_buffer(buf)); + auto [ec3, n3] = co_await rs.read_some(make_buffer(buf)); if(ec3) co_return; BOOST_TEST_EQ(n3, 2u); @@ -329,6 +333,169 @@ class read_source_test BOOST_TEST(r.success); } + //-------------------------------------------- + // + // read_some tests (ReadStream refinement) + // + //-------------------------------------------- + + void + testReadSome() + { + fuse f; + auto r = f.armed([&](fuse&) -> task<> { + read_source rs(f); + rs.provide("hello world"); + + char buf[32] = {}; + auto [ec, n] = co_await rs.read_some(make_buffer(buf)); + if(ec) + co_return; + BOOST_TEST_EQ(n, 11u); + BOOST_TEST_EQ(std::string_view(buf, n), "hello world"); + }); + BOOST_TEST(r.success); + } + + void + testReadSomePartial() + { + fuse f; + auto r = f.armed([&](fuse&) -> task<> { + read_source rs(f); + rs.provide("hello world"); + + char buf[5] = {}; + auto [ec, n] = co_await rs.read_some(make_buffer(buf)); + if(ec) + co_return; + BOOST_TEST_EQ(n, 5u); + BOOST_TEST_EQ(std::string_view(buf, n), "hello"); + BOOST_TEST_EQ(rs.available(), 6u); + }); + BOOST_TEST(r.success); + } + + void + testReadSomeEof() + { + fuse f; + auto r = f.armed([&](fuse&) -> task<> { + read_source rs(f); + + char buf[32] = {}; + auto [ec, n] = co_await rs.read_some(make_buffer(buf)); + if(ec && ec != cond::eof) + co_return; + BOOST_TEST(ec == cond::eof); + BOOST_TEST_EQ(n, 0u); + }); + BOOST_TEST(r.success); + } + + void + testReadSomeEmpty() + { + fuse f; + auto r = f.armed([&](fuse&) -> task<> { + read_source rs(f); + rs.provide("data"); + + auto [ec, n] = co_await rs.read_some(mutable_buffer()); + if(ec) + co_return; + BOOST_TEST_EQ(n, 0u); + BOOST_TEST_EQ(rs.available(), 4u); + }); + BOOST_TEST(r.success); + } + + void + testReadSomeEmptyExhausted() + { + // Empty buffers should succeed even when source is exhausted + fuse f; + auto r = f.armed([&](fuse&) -> task<> { + read_source rs(f); + + auto [ec, n] = co_await rs.read_some(mutable_buffer()); + if(ec) + co_return; + BOOST_TEST_EQ(n, 0u); + }); + BOOST_TEST(r.success); + } + + void + testReadSomeMaxReadSize() + { + fuse f; + auto r = f.armed([&](fuse&) -> task<> { + read_source rs(f, 3); + rs.provide("hello world"); + + char buf[32] = {}; + auto [ec, n] = co_await rs.read_some(make_buffer(buf)); + if(ec) + co_return; + BOOST_TEST_EQ(n, 3u); + BOOST_TEST_EQ(std::string_view(buf, n), "hel"); + BOOST_TEST_EQ(rs.available(), 8u); + }); + BOOST_TEST(r.success); + } + + void + testReadSomeBufferSequence() + { + fuse f; + auto r = f.armed([&](fuse&) -> task<> { + read_source rs(f); + rs.provide("helloworld"); + + char buf1[5] = {}; + char buf2[5] = {}; + std::array buffers = {{ + make_buffer(buf1), + make_buffer(buf2) + }}; + + auto [ec, n] = co_await rs.read_some(buffers); + if(ec) + co_return; + BOOST_TEST_EQ(n, 10u); + BOOST_TEST_EQ(std::string_view(buf1, 5), "hello"); + BOOST_TEST_EQ(std::string_view(buf2, 5), "world"); + }); + BOOST_TEST(r.success); + } + + void + testReadSomeFuseErrorInjection() + { + int read_success_count = 0; + int read_error_count = 0; + + fuse f; + auto r = f.armed([&](fuse&) -> task<> { + read_source rs(f); + rs.provide("test data"); + + char buf[32] = {}; + auto [ec, n] = co_await rs.read_some(make_buffer(buf)); + if(ec) + { + ++read_error_count; + co_return; + } + ++read_success_count; + }); + + BOOST_TEST(r.success); + BOOST_TEST(read_error_count > 0); + BOOST_TEST(read_success_count 
> 0);
+    }
+
     void
     run()
     {
@@ -346,6 +513,16 @@ class read_source_test
         testClearAndReuse();
         testMaxReadSize();
         testMaxReadSizeMultiple();
+
+        // read_some tests (ReadStream refinement)
+        testReadSome();
+        testReadSomePartial();
+        testReadSomeEof();
+        testReadSomeEmpty();
+        testReadSomeEmptyExhausted();
+        testReadSomeMaxReadSize();
+        testReadSomeBufferSequence();
+        testReadSomeFuseErrorInjection();
     }
 };
diff --git a/test/unit/test/read_stream.cpp b/test/unit/test/read_stream.cpp
index c71dba9a..5e3f7dcc 100644
--- a/test/unit/test/read_stream.cpp
+++ b/test/unit/test/read_stream.cpp
@@ -11,6 +11,7 @@
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -25,6 +26,7 @@ namespace capy {
 namespace test {
 
 static_assert(ReadStream<read_stream>);
+static_assert(!ReadSource<read_stream>);
 
 class read_stream_test
 {
@@ -279,6 +281,21 @@ class read_stream_test
         BOOST_TEST(r.success);
     }
 
+    void
+    testReadSomeEmptyExhausted()
+    {
+        fuse f;
+        auto r = f.armed([&](fuse&) -> task<> {
+            read_stream rs(f);
+
+            auto [ec, n] = co_await rs.read_some(mutable_buffer());
+            if(ec)
+                co_return;
+            BOOST_TEST_EQ(n, 0u);
+        });
+        BOOST_TEST(r.success);
+    }
+
     void
     testMaxReadSize()
     {
@@ -342,6 +359,7 @@ class read_stream_test
         testReadSomeEofAfterData();
         testReadSomeBufferSequence();
         testReadSomeEmpty();
+        testReadSomeEmptyExhausted();
         testFuseErrorInjection();
         testClearAndReuse();
         testMaxReadSize();
diff --git a/test/unit/test/stream.cpp b/test/unit/test/stream.cpp
index 7de0581c..77b0d4a9 100644
--- a/test/unit/test/stream.cpp
+++ b/test/unit/test/stream.cpp
@@ -12,9 +12,12 @@
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
+#include 
+#include 
 
 #include "test/unit/test_helpers.hpp"
 
@@ -25,24 +28,9 @@ namespace boost {
 namespace capy {
 namespace test {
 
-static_assert(ReadStream<stream>);
-static_assert(WriteStream<stream>);
-
 class stream_test
 {
 public:
-    void
-    testConstruct()
-    {
-        fuse f;
-        auto r = f.armed([&](fuse&) {
-            stream s(f);
-            BOOST_TEST(s.available() == 0);
-            BOOST_TEST(s.size() == 0);
-            BOOST_TEST(s.data().empty());
-        });
-        BOOST_TEST(r.success);
-    }
 
     //--------------------------------------------
     //
@@ -50,50 +38,22 @@ class stream_test
     //
     //--------------------------------------------
 
-    void
-    testProvide()
-    {
-        fuse f;
-        auto r = f.armed([&](fuse&) {
-            stream s(f);
-            s.provide("hello");
-            BOOST_TEST_EQ(s.available(), 5u);
-
-            s.provide(" world");
-            BOOST_TEST_EQ(s.available(), 11u);
-        });
-        BOOST_TEST(r.success);
-    }
-
-    void
-    testClear()
-    {
-        fuse f;
-        auto r = f.armed([&](fuse&) {
-            stream s(f);
-            s.provide("data");
-            BOOST_TEST_EQ(s.available(), 4u);
-
-            s.clear();
-            BOOST_TEST_EQ(s.available(), 0u);
-        });
-        BOOST_TEST(r.success);
-    }
-
     void
     testReadSome()
     {
         fuse f;
         auto r = f.armed([&](fuse&) -> task<> {
-            stream s(f);
-            s.provide("hello world");
+            auto [a, b] = make_stream_pair(f);
+            b.provide("hello world");
 
             char buf[32] = {};
-            auto [ec, n] = co_await s.read_some(make_buffer(buf));
+            auto [ec, n] = co_await a.read_some(
+                make_buffer(buf));
             if(ec)
                 co_return;
             BOOST_TEST_EQ(n, 11u);
-            BOOST_TEST_EQ(std::string_view(buf, n), "hello world");
+            BOOST_TEST_EQ(
+                std::string_view(buf, n), "hello world");
         });
         BOOST_TEST(r.success);
     }
@@ -103,16 +63,17 @@ class stream_test
     {
         fuse f;
         auto r = f.armed([&](fuse&) -> task<> {
-            stream s(f);
-            s.provide("hello world");
+            auto [a, b] = make_stream_pair(f);
+            b.provide("hello world");
 
             char buf[5] = {};
-            auto [ec, n] = co_await s.read_some(make_buffer(buf));
+            auto [ec, n] = co_await a.read_some(
+                make_buffer(buf));
             if(ec)
                 co_return;
             BOOST_TEST_EQ(n, 5u);
-            BOOST_TEST_EQ(std::string_view(buf, 
n), "hello"); - BOOST_TEST_EQ(s.available(), 6u); + BOOST_TEST_EQ( + std::string_view(buf, n), "hello"); }); BOOST_TEST(r.success); } @@ -122,34 +83,42 @@ class stream_test { fuse f; auto r = f.armed([&](fuse&) -> task<> { - stream s(f); - s.provide("abcdefghij"); + auto [a, b] = make_stream_pair(f); + b.provide("abcdefghij"); char buf[3] = {}; - auto [ec1, n1] = co_await s.read_some(make_buffer(buf)); + auto [ec1, n1] = co_await a.read_some( + make_buffer(buf)); if(ec1) co_return; BOOST_TEST_EQ(n1, 3u); - BOOST_TEST_EQ(std::string_view(buf, n1), "abc"); + BOOST_TEST_EQ( + std::string_view(buf, n1), "abc"); - auto [ec2, n2] = co_await s.read_some(make_buffer(buf)); + auto [ec2, n2] = co_await a.read_some( + make_buffer(buf)); if(ec2) co_return; BOOST_TEST_EQ(n2, 3u); - BOOST_TEST_EQ(std::string_view(buf, n2), "def"); + BOOST_TEST_EQ( + std::string_view(buf, n2), "def"); - auto [ec3, n3] = co_await s.read_some(make_buffer(buf)); + auto [ec3, n3] = co_await a.read_some( + make_buffer(buf)); if(ec3) co_return; BOOST_TEST_EQ(n3, 3u); - BOOST_TEST_EQ(std::string_view(buf, n3), "ghi"); + BOOST_TEST_EQ( + std::string_view(buf, n3), "ghi"); - auto [ec4, n4] = co_await s.read_some(make_buffer(buf)); + auto [ec4, n4] = co_await a.read_some( + make_buffer(buf)); if(ec4) co_return; BOOST_TEST_EQ(n4, 1u); - BOOST_TEST_EQ(std::string_view(buf, n4), "j"); + BOOST_TEST_EQ( + std::string_view(buf, n4), "j"); }); BOOST_TEST(r.success); } @@ -157,82 +126,86 @@ class stream_test void testReadSomeEof() { - fuse f; - auto r = f.armed([&](fuse&) -> task<> { - stream s(f); + run_blocking()([]() -> task<> { + auto [a, b] = make_stream_pair(); + b.close(); char buf[32] = {}; - auto [ec, n] = co_await s.read_some(make_buffer(buf)); - if(ec && ec != cond::eof) - co_return; + auto [ec, n] = co_await a.read_some( + make_buffer(buf)); BOOST_TEST(ec == cond::eof); BOOST_TEST_EQ(n, 0u); - }); - BOOST_TEST(r.success); + }()); } void - testReadSomeEofAfterData() + testReadSomeBufferSequence() { fuse f; auto r = f.armed([&](fuse&) -> task<> { - stream s(f); - s.provide("x"); + auto [a, b] = make_stream_pair(f); + b.provide("helloworld"); - char buf[32] = {}; - - auto [ec1, n1] = co_await s.read_some(make_buffer(buf)); - if(ec1) - co_return; - BOOST_TEST_EQ(n1, 1u); + char buf1[5] = {}; + char buf2[5] = {}; + std::array buffers = {{ + make_buffer(buf1), + make_buffer(buf2) + }}; - auto [ec2, n2] = co_await s.read_some(make_buffer(buf)); - if(ec2 && ec2 != cond::eof) + auto [ec, n] = co_await a.read_some(buffers); + if(ec) co_return; - BOOST_TEST(ec2 == cond::eof); - BOOST_TEST_EQ(n2, 0u); + BOOST_TEST_EQ(n, 10u); + BOOST_TEST_EQ( + std::string_view(buf1, 5), "hello"); + BOOST_TEST_EQ( + std::string_view(buf2, 5), "world"); }); BOOST_TEST(r.success); } void - testReadSomeBufferSequence() + testReadSomeEmpty() { fuse f; auto r = f.armed([&](fuse&) -> task<> { - stream s(f); - s.provide("helloworld"); - - char buf1[5] = {}; - char buf2[5] = {}; - std::array buffers = {{ - make_buffer(buf1), - make_buffer(buf2) - }}; + auto [a, b] = make_stream_pair(f); + b.provide("data"); - auto [ec, n] = co_await s.read_some(buffers); + auto [ec, n] = co_await a.read_some( + mutable_buffer()); if(ec) co_return; - BOOST_TEST_EQ(n, 10u); - BOOST_TEST_EQ(std::string_view(buf1, 5), "hello"); - BOOST_TEST_EQ(std::string_view(buf2, 5), "world"); + BOOST_TEST_EQ(n, 0u); + + // Data is preserved + char buf[32] = {}; + auto [ec2, n2] = co_await a.read_some( + make_buffer(buf)); + if(ec2) + co_return; + BOOST_TEST_EQ(n2, 4u); + BOOST_TEST_EQ( + 
std::string_view(buf, n2), "data"); }); BOOST_TEST(r.success); } void - testReadSomeEmpty() + testReadSomeEmptyNoData() { + // Empty buffers must complete immediately even + // when no data is available (should not suspend). fuse f; auto r = f.armed([&](fuse&) -> task<> { - stream s(f); - s.provide("data"); + auto [a, b] = make_stream_pair(f); - auto [ec, n] = co_await s.read_some(mutable_buffer()); + auto [ec, n] = co_await a.read_some( + mutable_buffer()); if(ec) co_return; BOOST_TEST_EQ(n, 0u); - BOOST_TEST_EQ(s.available(), 4u); }); BOOST_TEST(r.success); } @@ -242,16 +215,18 @@ class stream_test { fuse f; auto r = f.armed([&](fuse&) -> task<> { - stream s(f, 3); - s.provide("hello world"); + auto [a, b] = make_stream_pair(f); + a.set_max_read_size(3); + b.provide("hello world"); char buf[32] = {}; - auto [ec, n] = co_await s.read_some(make_buffer(buf)); + auto [ec, n] = co_await a.read_some( + make_buffer(buf)); if(ec) co_return; BOOST_TEST_EQ(n, 3u); - BOOST_TEST_EQ(std::string_view(buf, n), "hel"); - BOOST_TEST_EQ(s.available(), 8u); + BOOST_TEST_EQ( + std::string_view(buf, n), "hel"); }); BOOST_TEST(r.success); } @@ -261,28 +236,35 @@ class stream_test { fuse f; auto r = f.armed([&](fuse&) -> task<> { - stream s(f, 4); - s.provide("abcdefghij"); + auto [a, b] = make_stream_pair(f); + a.set_max_read_size(4); + b.provide("abcdefghij"); char buf[32] = {}; - auto [ec1, n1] = co_await s.read_some(make_buffer(buf)); + auto [ec1, n1] = co_await a.read_some( + make_buffer(buf)); if(ec1) co_return; BOOST_TEST_EQ(n1, 4u); - BOOST_TEST_EQ(std::string_view(buf, n1), "abcd"); + BOOST_TEST_EQ( + std::string_view(buf, n1), "abcd"); - auto [ec2, n2] = co_await s.read_some(make_buffer(buf)); + auto [ec2, n2] = co_await a.read_some( + make_buffer(buf)); if(ec2) co_return; BOOST_TEST_EQ(n2, 4u); - BOOST_TEST_EQ(std::string_view(buf, n2), "efgh"); + BOOST_TEST_EQ( + std::string_view(buf, n2), "efgh"); - auto [ec3, n3] = co_await s.read_some(make_buffer(buf)); + auto [ec3, n3] = co_await a.read_some( + make_buffer(buf)); if(ec3) co_return; BOOST_TEST_EQ(n3, 2u); - BOOST_TEST_EQ(std::string_view(buf, n3), "ij"); + BOOST_TEST_EQ( + std::string_view(buf, n3), "ij"); }); BOOST_TEST(r.success); } @@ -297,15 +279,19 @@ class stream_test testWriteSome() { fuse f; - auto r = f.armed([&](fuse&) -> task { - stream s(f); + auto r = f.armed([&](fuse&) -> task<> { + auto [a, b] = make_stream_pair(f); - auto [ec, n] = co_await s.write_some( + auto [ec, n] = co_await a.write_some( make_buffer("hello world", 11)); if(ec) co_return; BOOST_TEST_EQ(n, 11u); - BOOST_TEST_EQ(s.data(), "hello world"); + + auto [ec2, ok] = b.expect("hello world"); + if(ec2) + co_return; + BOOST_TEST(ok); }); BOOST_TEST(r.success); } @@ -314,29 +300,31 @@ class stream_test testWriteSomeMultiple() { fuse f; - auto r = f.armed([&](fuse&) -> task { - stream s(f); + auto r = f.armed([&](fuse&) -> task<> { + auto [a, b] = make_stream_pair(f); - auto [ec1, n1] = co_await s.write_some( + auto [ec1, n1] = co_await a.write_some( make_buffer("hello", 5)); if(ec1) co_return; BOOST_TEST_EQ(n1, 5u); - auto [ec2, n2] = co_await s.write_some( + auto [ec2, n2] = co_await a.write_some( make_buffer(" ", 1)); if(ec2) co_return; BOOST_TEST_EQ(n2, 1u); - auto [ec3, n3] = co_await s.write_some( + auto [ec3, n3] = co_await a.write_some( make_buffer("world", 5)); if(ec3) co_return; BOOST_TEST_EQ(n3, 5u); - BOOST_TEST_EQ(s.data(), "hello world"); - BOOST_TEST_EQ(s.size(), 11u); + auto [ec4, ok] = b.expect("hello world"); + if(ec4) + co_return; + BOOST_TEST(ok); }); 
BOOST_TEST(r.success); } @@ -345,19 +333,23 @@ class stream_test testWriteSomeBufferSequence() { fuse f; - auto r = f.armed([&](fuse&) -> task { - stream s(f); + auto r = f.armed([&](fuse&) -> task<> { + auto [a, b] = make_stream_pair(f); std::array buffers = {{ make_buffer("hello", 5), make_buffer("world", 5) }}; - auto [ec, n] = co_await s.write_some(buffers); + auto [ec, n] = co_await a.write_some(buffers); if(ec) co_return; BOOST_TEST_EQ(n, 10u); - BOOST_TEST_EQ(s.data(), "helloworld"); + + auto [ec2, ok] = b.expect("helloworld"); + if(ec2) + co_return; + BOOST_TEST(ok); }); BOOST_TEST(r.success); } @@ -366,14 +358,13 @@ class stream_test testWriteSomeEmpty() { fuse f; - auto r = f.armed([&](fuse&) -> task { - stream s(f); + auto r = f.armed([&](fuse&) -> task<> { + auto [a, b] = make_stream_pair(f); - auto [ec, n] = co_await s.write_some(const_buffer()); - if(ec) - co_return; + auto [ec, n] = co_await a.write_some( + const_buffer()); + BOOST_TEST(! ec); BOOST_TEST_EQ(n, 0u); - BOOST_TEST(s.data().empty()); }); BOOST_TEST(r.success); } @@ -382,73 +373,39 @@ class stream_test testExpect() { fuse f; - auto r = f.armed([&](fuse&) -> task { - stream s(f); - auto ec = s.expect("hello"); - BOOST_TEST(! ec); + auto r = f.armed([&](fuse&) -> task<> { + auto [a, b] = make_stream_pair(f); - auto [ec2, n] = co_await s.write_some( + auto [ec, n] = co_await a.write_some( make_buffer("hello", 5)); - if(ec2) + if(ec) co_return; BOOST_TEST_EQ(n, 5u); - BOOST_TEST(s.data().empty()); - }); - BOOST_TEST(r.success); - } - - void - testExpectMismatch() - { - fuse f; - auto r = f.armed([&](fuse&) -> task { - stream s(f); - auto ec = s.expect("hello"); - BOOST_TEST(! ec); - auto [ec2, n] = co_await s.write_some( - make_buffer("world", 5)); - if(! ec2) + auto [ec2, ok] = b.expect("hello"); + if(ec2) co_return; - BOOST_TEST(ec2 == error::test_failure); + BOOST_TEST(ok); }); BOOST_TEST(r.success); } void - testExpectWithExistingData() + testExpectMismatch() { fuse f; - auto r = f.armed([&](fuse&) -> task { - stream s(f); + auto r = f.armed([&](fuse&) -> task<> { + auto [a, b] = make_stream_pair(f); - auto [ec, n] = co_await s.write_some( - make_buffer("hello", 5)); + auto [ec, n] = co_await a.write_some( + make_buffer("world", 5)); if(ec) co_return; - BOOST_TEST_EQ(s.data(), "hello"); - - auto ec2 = s.expect("hello"); - BOOST_TEST(! ec2); - BOOST_TEST(s.data().empty()); - }); - BOOST_TEST(r.success); - } - - void - testExpectMismatchWithExistingData() - { - fuse f; - auto r = f.armed([&](fuse&) -> task { - stream s(f); - auto [ec, n] = co_await s.write_some( - make_buffer("hello", 5)); - if(ec) + auto [ec2, ok] = b.expect("hello"); + if(ec2) co_return; - - auto ec2 = s.expect("world"); - BOOST_TEST(ec2 == error::test_failure); + BOOST_TEST(! ok); }); BOOST_TEST(r.success); } @@ -457,24 +414,25 @@ class stream_test testExpectPartialMatch() { fuse f; - auto r = f.armed([&](fuse&) -> task { - stream s(f); - auto ec = s.expect("helloworld"); - BOOST_TEST(! 
ec); + auto r = f.armed([&](fuse&) -> task<> { + auto [a, b] = make_stream_pair(f); - auto [ec2, n] = co_await s.write_some( + auto [ec1, n1] = co_await a.write_some( make_buffer("hello", 5)); - if(ec2) + if(ec1) co_return; - BOOST_TEST_EQ(n, 5u); - BOOST_TEST(s.data().empty()); + BOOST_TEST_EQ(n1, 5u); - auto [ec3, n2] = co_await s.write_some( + auto [ec2, n2] = co_await a.write_some( make_buffer("world", 5)); - if(ec3) + if(ec2) co_return; BOOST_TEST_EQ(n2, 5u); - BOOST_TEST(s.data().empty()); + + auto [ec3, ok] = b.expect("helloworld"); + if(ec3) + co_return; + BOOST_TEST(ok); }); BOOST_TEST(r.success); } @@ -483,17 +441,29 @@ class stream_test testExpectExcessData() { fuse f; - auto r = f.armed([&](fuse&) -> task { - stream s(f); - auto ec = s.expect("hi"); - BOOST_TEST(! ec); + auto r = f.armed([&](fuse&) -> task<> { + auto [a, b] = make_stream_pair(f); - auto [ec2, n] = co_await s.write_some( + auto [ec1, n1] = co_await a.write_some( make_buffer("hi there", 8)); + if(ec1) + co_return; + BOOST_TEST_EQ(n1, 8u); + + auto [ec2, ok] = b.expect("hi"); if(ec2) co_return; - BOOST_TEST_EQ(n, 8u); - BOOST_TEST_EQ(s.data(), " there"); + BOOST_TEST(ok); + + // Remaining data still readable + char buf[32] = {}; + auto [ec3, n3] = co_await b.read_some( + make_buffer(buf)); + if(ec3) + co_return; + BOOST_TEST_EQ(n3, 6u); + BOOST_TEST_EQ( + std::string_view(buf, n3), " there"); }); BOOST_TEST(r.success); } @@ -502,15 +472,24 @@ class stream_test testMaxWriteSize() { fuse f; - auto r = f.armed([&](fuse&) -> task { - stream s(f, std::size_t(-1), 3); + auto r = f.armed([&](fuse&) -> task<> { + auto [a, b] = make_stream_pair(f); + b.set_max_read_size(3); - auto [ec, n] = co_await s.write_some( + auto [ec1, n1] = co_await a.write_some( make_buffer("hello world", 11)); - if(ec) + if(ec1) co_return; - BOOST_TEST_EQ(n, 3u); - BOOST_TEST_EQ(s.data(), "hel"); + BOOST_TEST_EQ(n1, 11u); + + char buf[32] = {}; + auto [ec2, n2] = co_await b.read_some( + make_buffer(buf)); + if(ec2) + co_return; + BOOST_TEST_EQ(n2, 3u); + BOOST_TEST_EQ( + std::string_view(buf, n2), "hel"); }); BOOST_TEST(r.success); } @@ -519,29 +498,41 @@ class stream_test testMaxWriteSizeMultiple() { fuse f; - auto r = f.armed([&](fuse&) -> task { - stream s(f, std::size_t(-1), 4); + auto r = f.armed([&](fuse&) -> task<> { + auto [a, b] = make_stream_pair(f); + b.set_max_read_size(4); - auto [ec1, n1] = co_await s.write_some( + auto [ec1, n1] = co_await a.write_some( make_buffer("abcdefghij", 10)); if(ec1) co_return; - BOOST_TEST_EQ(n1, 4u); - BOOST_TEST_EQ(s.data(), "abcd"); + BOOST_TEST_EQ(n1, 10u); - auto [ec2, n2] = co_await s.write_some( - make_buffer("efghij", 6)); + char buf[32] = {}; + + auto [ec2, n2] = co_await b.read_some( + make_buffer(buf)); if(ec2) co_return; BOOST_TEST_EQ(n2, 4u); - BOOST_TEST_EQ(s.data(), "abcdefgh"); + BOOST_TEST_EQ( + std::string_view(buf, n2), "abcd"); - auto [ec3, n3] = co_await s.write_some( - make_buffer("ij", 2)); + auto [ec3, n3] = co_await b.read_some( + make_buffer(buf)); if(ec3) co_return; - BOOST_TEST_EQ(n3, 2u); - BOOST_TEST_EQ(s.data(), "abcdefghij"); + BOOST_TEST_EQ(n3, 4u); + BOOST_TEST_EQ( + std::string_view(buf, n3), "efgh"); + + auto [ec4, n4] = co_await b.read_some( + make_buffer(buf)); + if(ec4) + co_return; + BOOST_TEST_EQ(n4, 2u); + BOOST_TEST_EQ( + std::string_view(buf, n4), "ij"); }); BOOST_TEST(r.success); } @@ -556,23 +547,29 @@ class stream_test testReadWrite() { fuse f; - auto r = f.armed([&](fuse&) -> task { - stream s(f); - s.provide("request"); + auto r = f.armed([&](fuse&) -> 
task<> { + auto [a, b] = make_stream_pair(f); + b.provide("request"); char buf[32] = {}; - auto [ec1, n1] = co_await s.read_some(make_buffer(buf)); + auto [ec1, n1] = co_await a.read_some( + make_buffer(buf)); if(ec1) co_return; BOOST_TEST_EQ(n1, 7u); - BOOST_TEST_EQ(std::string_view(buf, n1), "request"); + BOOST_TEST_EQ( + std::string_view(buf, n1), "request"); - auto [ec2, n2] = co_await s.write_some( + auto [ec2, n2] = co_await a.write_some( make_buffer("response", 8)); if(ec2) co_return; BOOST_TEST_EQ(n2, 8u); - BOOST_TEST_EQ(s.data(), "response"); + + auto [ec3, ok] = b.expect("response"); + if(ec3) + co_return; + BOOST_TEST(ok); }); BOOST_TEST(r.success); } @@ -581,25 +578,29 @@ class stream_test testLoopback() { fuse f; - auto r = f.armed([&](fuse&) -> task { - stream s(f, 4, 4); - s.provide("hello world!"); + auto r = f.armed([&](fuse&) -> task<> { + auto [a, b] = make_stream_pair(f); + a.set_max_read_size(4); + b.provide("hello world!"); - std::string received; char buf[4]; - while(s.available() > 0) + for(int i = 0; i < 3; ++i) { - auto [ec, n] = co_await s.read_some(make_buffer(buf)); + auto [ec, n] = co_await a.read_some( + make_buffer(buf)); if(ec) co_return; - auto [ec2, n2] = co_await s.write_some( + auto [ec2, n2] = co_await a.write_some( make_buffer(buf, n)); if(ec2) co_return; } - BOOST_TEST_EQ(s.data(), "hello world!"); + auto [ec, ok] = b.expect("hello world!"); + if(ec) + co_return; + BOOST_TEST(ok); }); BOOST_TEST(r.success); } @@ -612,11 +613,12 @@ class stream_test fuse f; auto r = f.armed([&](fuse&) -> task<> { - stream s(f); - s.provide("test data"); + auto [a, b] = make_stream_pair(f); + b.provide("test data"); char buf[32] = {}; - auto [ec, n] = co_await s.read_some(make_buffer(buf)); + auto [ec, n] = co_await a.read_some( + make_buffer(buf)); if(ec) { ++read_error_count; @@ -637,10 +639,10 @@ class stream_test int write_error_count = 0; fuse f; - auto r = f.armed([&](fuse&) -> task { - stream s(f); + auto r = f.armed([&](fuse&) -> task<> { + auto [a, b] = make_stream_pair(f); - auto [ec, n] = co_await s.write_some( + auto [ec, n] = co_await a.write_some( make_buffer("test data", 9)); if(ec) { @@ -655,47 +657,17 @@ class stream_test BOOST_TEST(write_success_count > 0); } - void - testClearAndReuse() - { - fuse f; - auto r = f.armed([&](fuse&) -> task<> { - stream s(f); - s.provide("first"); - - char buf[32] = {}; - - auto [ec1, n1] = co_await s.read_some(make_buffer(buf)); - if(ec1) - co_return; - BOOST_TEST_EQ(std::string_view(buf, n1), "first"); - - s.clear(); - s.provide("second"); - - auto [ec2, n2] = co_await s.read_some(make_buffer(buf)); - if(ec2) - co_return; - BOOST_TEST_EQ(std::string_view(buf, n2), "second"); - }); - BOOST_TEST(r.success); - } - void run() { - testConstruct(); - // Read operations - testProvide(); - testClear(); testReadSome(); testReadSomePartial(); testReadSomeMultiple(); testReadSomeEof(); - testReadSomeEofAfterData(); testReadSomeBufferSequence(); testReadSomeEmpty(); + testReadSomeEmptyNoData(); testMaxReadSize(); testMaxReadSizeMultiple(); @@ -706,8 +678,6 @@ class stream_test testWriteSomeEmpty(); testExpect(); testExpectMismatch(); - testExpectWithExistingData(); - testExpectMismatchWithExistingData(); testExpectPartialMatch(); testExpectExcessData(); testMaxWriteSize(); @@ -718,12 +688,467 @@ class stream_test testLoopback(); testFuseReadErrorInjection(); testFuseWriteErrorInjection(); - testClearAndReuse(); } }; TEST_SUITE(stream_test, "boost.capy.test.stream"); +//-------------------------------------------- + 
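+// A stream pair is two connected in-memory stream
+// endpoints: bytes written to either end become
+// readable at the other, and close() is directional.
+// Illustrative usage sketch (not itself a test; it
+// mirrors the semantics exercised by stream_pair_test
+// below):
+//
+//   auto [a, b] = make_stream_pair(f);
+//   co_await a.write_some(make_buffer("ping", 4)); // a -> b
+//   char buf[4];
+//   co_await b.read_some(make_buffer(buf));        // reads "ping"
+//   b.close(); // a's next read_some yields cond::eof
+//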
+static_assert(Stream); +static_assert(ReadStream); +static_assert(WriteStream); + +class stream_pair_test +{ +public: + void + testWriteThenRead() + { + fuse f; + auto r = f.armed([&](fuse&) -> task<> { + auto [a, b] = make_stream_pair(f); + + auto [ec1, n1] = co_await a.write_some( + make_buffer("hello", 5)); + if(ec1) + co_return; + BOOST_TEST_EQ(n1, 5u); + + char buf[32] = {}; + auto [ec2, n2] = co_await b.read_some( + make_buffer(buf)); + if(ec2) + co_return; + BOOST_TEST_EQ(n2, 5u); + BOOST_TEST_EQ( + std::string_view(buf, n2), "hello"); + }); + BOOST_TEST(r.success); + } + + void + testReadThenWrite() + { + // Suspension path: reader suspends, writer wakes it. + // Guard closes peer on fuse error so no deadlock. + fuse f; + auto r = f.armed([&](fuse&) -> task<> { + auto [a, b] = make_stream_pair(f); + + co_await when_all( + [&a]() -> task<> { + char buf[32] = {}; + auto [ec, n] = co_await a.read_some( + make_buffer(buf)); + if(ec) + co_return; + BOOST_TEST_EQ(n, 5u); + BOOST_TEST_EQ( + std::string_view(buf, n), + "hello"); + }(), + [&b]() -> task<> { + auto [ec, n] = co_await b.write_some( + make_buffer("hello", 5)); + if(ec) + co_return; + BOOST_TEST_EQ(n, 5u); + }() + ); + }); + BOOST_TEST(r.success); + } + + void + testBidirectional() + { + fuse f; + auto r = f.armed([&](fuse&) -> task<> { + auto [a, b] = make_stream_pair(f); + + // a -> b + auto [ec1, n1] = co_await a.write_some( + make_buffer("from-a", 6)); + if(ec1) + co_return; + BOOST_TEST_EQ(n1, 6u); + + // b -> a + auto [ec2, n2] = co_await b.write_some( + make_buffer("from-b", 6)); + if(ec2) + co_return; + BOOST_TEST_EQ(n2, 6u); + + // read on b (data from a) + char buf1[32] = {}; + auto [ec3, n3] = co_await b.read_some( + make_buffer(buf1)); + if(ec3) + co_return; + BOOST_TEST_EQ(n3, 6u); + BOOST_TEST_EQ( + std::string_view(buf1, n3), "from-a"); + + // read on a (data from b) + char buf2[32] = {}; + auto [ec4, n4] = co_await a.read_some( + make_buffer(buf2)); + if(ec4) + co_return; + BOOST_TEST_EQ(n4, 6u); + BOOST_TEST_EQ( + std::string_view(buf2, n4), "from-b"); + }); + BOOST_TEST(r.success); + } + + void + testPartialRead() + { + fuse f; + auto r = f.armed([&](fuse&) -> task<> { + auto [a, b] = make_stream_pair(f); + + auto [ec1, n1] = co_await a.write_some( + make_buffer("hello world", 11)); + if(ec1) + co_return; + BOOST_TEST_EQ(n1, 11u); + + // Small buffer reads partial data + char buf[5] = {}; + auto [ec2, n2] = co_await b.read_some( + make_buffer(buf)); + if(ec2) + co_return; + BOOST_TEST_EQ(n2, 5u); + BOOST_TEST_EQ( + std::string_view(buf, n2), "hello"); + + // Remainder still available + char buf2[32] = {}; + auto [ec3, n3] = co_await b.read_some( + make_buffer(buf2)); + if(ec3) + co_return; + BOOST_TEST_EQ(n3, 6u); + BOOST_TEST_EQ( + std::string_view(buf2, n3), " world"); + }); + BOOST_TEST(r.success); + } + + void + testMultipleWritesAccumulate() + { + fuse f; + auto r = f.armed([&](fuse&) -> task<> { + auto [a, b] = make_stream_pair(f); + + auto [ec1, n1] = co_await a.write_some( + make_buffer("aaa", 3)); + if(ec1) + co_return; + BOOST_TEST_EQ(n1, 3u); + + auto [ec2, n2] = co_await a.write_some( + make_buffer("bbb", 3)); + if(ec2) + co_return; + BOOST_TEST_EQ(n2, 3u); + + char buf[32] = {}; + auto [ec3, n3] = co_await b.read_some( + make_buffer(buf)); + if(ec3) + co_return; + BOOST_TEST_EQ(n3, 6u); + BOOST_TEST_EQ( + std::string_view(buf, n3), "aaabbb"); + }); + BOOST_TEST(r.success); + } + + void + testEmptyWrite() + { + fuse f; + auto r = f.armed([&](fuse&) -> task<> { + auto [a, b] = make_stream_pair(f); 
+ + auto [ec, n] = co_await a.write_some( + const_buffer()); + // Empty write does not consult fuse + BOOST_TEST(! ec); + BOOST_TEST_EQ(n, 0u); + }); + BOOST_TEST(r.success); + } + + void + testBufferSequenceRead() + { + fuse f; + auto r = f.armed([&](fuse&) -> task<> { + auto [a, b] = make_stream_pair(f); + + auto [ec1, n1] = co_await a.write_some( + make_buffer("helloworld", 10)); + if(ec1) + co_return; + + char buf1[5] = {}; + char buf2[5] = {}; + std::array bufs = {{ + make_buffer(buf1), + make_buffer(buf2) + }}; + + auto [ec2, n2] = co_await b.read_some(bufs); + if(ec2) + co_return; + BOOST_TEST_EQ(n2, 10u); + BOOST_TEST_EQ( + std::string_view(buf1, 5), "hello"); + BOOST_TEST_EQ( + std::string_view(buf2, 5), "world"); + }); + BOOST_TEST(r.success); + } + + void + testBufferSequenceWrite() + { + fuse f; + auto r = f.armed([&](fuse&) -> task<> { + auto [a, b] = make_stream_pair(f); + std::array bufs = {{ + make_buffer("hello", 5), + make_buffer("world", 5) + }}; + + auto [ec1, n1] = co_await a.write_some(bufs); + if(ec1) + co_return; + BOOST_TEST_EQ(n1, 10u); + + char buf[32] = {}; + auto [ec2, n2] = co_await b.read_some( + make_buffer(buf)); + if(ec2) + co_return; + BOOST_TEST_EQ(n2, 10u); + BOOST_TEST_EQ( + std::string_view(buf, n2), "helloworld"); + }); + BOOST_TEST(r.success); + } + + void + testSuspendedReadWithPartialBuffer() + { + fuse f; + auto r = f.armed([&](fuse&) -> task<> { + auto [a, b] = make_stream_pair(f); + + co_await when_all( + [&a]() -> task<> { + char buf[3] = {}; + auto [ec, n] = co_await a.read_some( + make_buffer(buf)); + if(ec) + co_return; + BOOST_TEST_EQ(n, 3u); + BOOST_TEST_EQ( + std::string_view(buf, n), + "hel"); + }(), + [&b]() -> task<> { + auto [ec, n] = co_await b.write_some( + make_buffer("hello", 5)); + if(ec) + co_return; + BOOST_TEST_EQ(n, 5u); + }() + ); + }); + BOOST_TEST(r.success); + } + + void + testFuseWriteErrorInjection() + { + int write_success_count = 0; + int write_error_count = 0; + + fuse f; + auto r = f.armed([&](fuse&) -> task<> { + auto [a, b] = make_stream_pair(f); + + auto [ec, n] = co_await a.write_some( + make_buffer("data", 4)); + if(ec) + { + ++write_error_count; + co_return; + } + ++write_success_count; + }); + + BOOST_TEST(r.success); + BOOST_TEST(write_error_count > 0); + BOOST_TEST(write_success_count > 0); + } + + void + testFuseReadErrorInjection() + { + int read_success_count = 0; + int read_error_count = 0; + + fuse f; + auto r = f.armed([&](fuse&) -> task<> { + auto [a, b] = make_stream_pair(f); + + // Write first so data is available + auto [ec1, n1] = co_await a.write_some( + make_buffer("data", 4)); + if(ec1) + co_return; + + char buf[32] = {}; + auto [ec2, n2] = co_await b.read_some( + make_buffer(buf)); + if(ec2) + { + ++read_error_count; + co_return; + } + ++read_success_count; + }); + + BOOST_TEST(r.success); + BOOST_TEST(read_error_count > 0); + BOOST_TEST(read_success_count > 0); + } + + void + testClose() + { + // close() resumes a suspended reader with eof + fuse f; + run_blocking()([&]() -> task<> { + auto [a, b] = make_stream_pair(f); + + co_await when_all( + [&a]() -> task<> { + char buf[32] = {}; + auto [ec, n] = co_await a.read_some( + make_buffer(buf)); + BOOST_TEST(ec == cond::eof); + BOOST_TEST_EQ(n, 0u); + }(), + [&b]() -> task<> { + b.close(); + co_return; + }() + ); + }()); + } + + void + testCloseSubsequentOps() + { + // Directional close: a.close() signals eof to + // b's reads, but b can still write to a. 
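+        // (Comparable to a one-way socket shutdown:
+        // only the a-to-b byte direction ends; the
+        // reverse direction stays usable, as the
+        // reads and writes below verify.)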
+ fuse f; + run_blocking()([&]() -> task<> { + auto [a, b] = make_stream_pair(f); + a.close(); + + // b reads eof (no data was buffered) + char buf[32] = {}; + auto [ec1, n1] = co_await b.read_some( + make_buffer(buf)); + BOOST_TEST(ec1 == cond::eof); + BOOST_TEST_EQ(n1, 0u); + + // b can still write to a + auto [ec2, n2] = co_await b.write_some( + make_buffer("data", 4)); + BOOST_TEST(! ec2); + BOOST_TEST_EQ(n2, 4u); + + // a can still read b's writes + auto [ec3, n3] = co_await a.read_some( + make_buffer(buf)); + BOOST_TEST(! ec3); + BOOST_TEST_EQ(n3, 4u); + BOOST_TEST_EQ( + std::string_view(buf, n3), "data"); + }()); + } + + void + testFuseClosesOtherEnd() + { + // Fuse error on writer closes the suspended reader + fuse f; + auto r = f.armed([&](fuse&) -> task<> { + auto [a, b] = make_stream_pair(f); + + co_await when_all( + [&a]() -> task<> { + // Reader suspends waiting for data. + // Gets data, eof from peer's guard, + // or its own fuse error on resume. + char buf[32] = {}; + auto [ec, n] = co_await a.read_some( + make_buffer(buf)); + if(ec) + co_return; + BOOST_TEST_EQ(n, 5u); + }(), + [&b]() -> task<> { + // Writer may get fuse error, which + // closes the peer via the guard + auto [ec, n] = co_await b.write_some( + make_buffer("hello", 5)); + if(ec) + co_return; + BOOST_TEST_EQ(n, 5u); + }() + ); + }); + BOOST_TEST(r.success); + } + + void + run() + { + testWriteThenRead(); + testReadThenWrite(); + testBidirectional(); + testPartialRead(); + testMultipleWritesAccumulate(); + testEmptyWrite(); + testBufferSequenceRead(); + testBufferSequenceWrite(); + testSuspendedReadWithPartialBuffer(); + testFuseWriteErrorInjection(); + testFuseReadErrorInjection(); + testClose(); + testCloseSubsequentOps(); + testFuseClosesOtherEnd(); + } +}; + +TEST_SUITE( + stream_pair_test, + "boost.capy.test.stream_pair"); + } // test } // capy } // boost diff --git a/test/unit/test/write_sink.cpp b/test/unit/test/write_sink.cpp index c101e021..0009a762 100644 --- a/test/unit/test/write_sink.cpp +++ b/test/unit/test/write_sink.cpp @@ -128,32 +128,14 @@ class write_sink_test } void - testWriteWithEofFalse() + testWriteEofWithBuffers() { fuse f; auto r = f.armed([&](fuse&) -> task<> { write_sink ws(f); - auto [ec, n] = co_await ws.write( - make_buffer("hello", 5), false); - if(ec) - co_return; - BOOST_TEST_EQ(n, 5u); - BOOST_TEST_EQ(ws.data(), "hello"); - BOOST_TEST(! 
ws.eof_called()); - }); - BOOST_TEST(r.success); - } - - void - testWriteWithEofTrue() - { - fuse f; - auto r = f.armed([&](fuse&) -> task<> { - write_sink ws(f); - - auto [ec, n] = co_await ws.write( - make_buffer("hello", 5), true); + auto [ec, n] = co_await ws.write_eof( + make_buffer("hello", 5)); if(ec) co_return; BOOST_TEST_EQ(n, 5u); @@ -164,13 +146,13 @@ class write_sink_test } void - testWriteWithEofEmpty() + testWriteEofWithEmptyBuffers() { fuse f; auto r = f.armed([&](fuse&) -> task<> { write_sink ws(f); - auto [ec, n] = co_await ws.write(const_buffer(), true); + auto [ec, n] = co_await ws.write_eof(const_buffer()); if(ec) co_return; BOOST_TEST_EQ(n, 0u); @@ -378,38 +360,166 @@ class write_sink_test void testWritePartial() { + // write() ignores max_write_size and writes all data fuse f; auto r = f.armed([&](fuse&) -> task<> { - write_sink ws(f, 5); // max 5 bytes per write + write_sink ws(f, 5); auto [ec, n] = co_await ws.write( make_buffer("hello world", 11)); if(ec) co_return; - BOOST_TEST_EQ(n, 5u); - BOOST_TEST_EQ(ws.data(), "hello"); + BOOST_TEST_EQ(n, 11u); + BOOST_TEST_EQ(ws.data(), "hello world"); }); BOOST_TEST(r.success); } void - testWriteWithEofPartial() + testWriteEofWithBuffersPartial() { + // write_eof(buffers) ignores max_write_size and writes all data fuse f; auto r = f.armed([&](fuse&) -> task<> { - write_sink ws(f, 5); // max 5 bytes per write + write_sink ws(f, 5); - auto [ec, n] = co_await ws.write( - make_buffer("hello world", 11), true); + auto [ec, n] = co_await ws.write_eof( + make_buffer("hello world", 11)); + if(ec) + co_return; + BOOST_TEST_EQ(n, 11u); + BOOST_TEST_EQ(ws.data(), "hello world"); + BOOST_TEST(ws.eof_called()); + }); + BOOST_TEST(r.success); + } + + //-------------------------------------------- + // + // write_some tests (WriteStream refinement) + // + //-------------------------------------------- + + void + testWriteSome() + { + fuse f; + auto r = f.armed([&](fuse&) -> task<> { + write_sink ws(f); + + auto [ec, n] = co_await ws.write_some( + make_buffer("hello world", 11)); + if(ec) + co_return; + BOOST_TEST_EQ(n, 11u); + BOOST_TEST_EQ(ws.data(), "hello world"); + }); + BOOST_TEST(r.success); + } + + void + testWriteSomePartial() + { + fuse f; + auto r = f.armed([&](fuse&) -> task<> { + write_sink ws(f, 5); + + auto [ec, n] = co_await ws.write_some( + make_buffer("hello world", 11)); if(ec) co_return; BOOST_TEST_EQ(n, 5u); BOOST_TEST_EQ(ws.data(), "hello"); - BOOST_TEST(ws.eof_called()); }); BOOST_TEST(r.success); } + void + testWriteSomeEmpty() + { + fuse f; + auto r = f.armed([&](fuse&) -> task<> { + write_sink ws(f); + + auto [ec, n] = co_await ws.write_some(const_buffer()); + if(ec) + co_return; + BOOST_TEST_EQ(n, 0u); + BOOST_TEST(ws.data().empty()); + }); + BOOST_TEST(r.success); + } + + void + testWriteSomeBufferSequence() + { + fuse f; + auto r = f.armed([&](fuse&) -> task<> { + write_sink ws(f); + + std::array buffers = {{ + make_buffer("hello", 5), + make_buffer("world", 5) + }}; + + auto [ec, n] = co_await ws.write_some(buffers); + if(ec) + co_return; + BOOST_TEST_EQ(n, 10u); + BOOST_TEST_EQ(ws.data(), "helloworld"); + }); + BOOST_TEST(r.success); + } + + void + testWriteSomeMaxWriteSize() + { + fuse f; + auto r = f.armed([&](fuse&) -> task<> { + write_sink ws(f, 3); + + auto [ec1, n1] = co_await ws.write_some( + make_buffer("abcdefgh", 8)); + if(ec1) + co_return; + BOOST_TEST_EQ(n1, 3u); + BOOST_TEST_EQ(ws.data(), "abc"); + + auto [ec2, n2] = co_await ws.write_some( + make_buffer("defgh", 5)); + if(ec2) + co_return; + 
BOOST_TEST_EQ(n2, 3u);
+            BOOST_TEST_EQ(ws.data(), "abcdef");
+        });
+        BOOST_TEST(r.success);
+    }
+
+    void
+    testWriteSomeFuseErrorInjection()
+    {
+        int write_success_count = 0;
+        int write_error_count = 0;
+
+        fuse f;
+        auto r = f.armed([&](fuse&) -> task<> {
+            write_sink ws(f);
+
+            auto [ec, n] = co_await ws.write_some(
+                make_buffer("test data", 9));
+            if(ec)
+            {
+                ++write_error_count;
+                co_return;
+            }
+            ++write_success_count;
+        });
+
+        BOOST_TEST(r.success);
+        BOOST_TEST(write_error_count > 0);
+        BOOST_TEST(write_success_count > 0);
+    }
+
     void
     run()
     {
@@ -418,9 +528,8 @@ class write_sink_test
         testWriteMultiple();
         testWriteBufferSequence();
         testWriteEmpty();
-        testWriteWithEofFalse();
-        testWriteWithEofTrue();
-        testWriteWithEofEmpty();
+        testWriteEofWithBuffers();
+        testWriteEofWithEmptyBuffers();
         testWriteEof();
         testWriteThenWriteEof();
         testFuseErrorInjection();
@@ -431,7 +540,15 @@ class write_sink_test
         testExpectMismatchWithExistingData();
         testClear();
         testWritePartial();
-        testWriteWithEofPartial();
+        testWriteEofWithBuffersPartial();
+
+        // write_some tests (WriteStream refinement)
+        testWriteSome();
+        testWriteSomePartial();
+        testWriteSomeEmpty();
+        testWriteSomeBufferSequence();
+        testWriteSomeMaxWriteSize();
+        testWriteSomeFuseErrorInjection();
     }
 };
diff --git a/test/unit/test/write_stream.cpp b/test/unit/test/write_stream.cpp
index f6a7273c..7c2249ec 100644
--- a/test/unit/test/write_stream.cpp
+++ b/test/unit/test/write_stream.cpp
@@ -11,6 +11,7 @@
 #include 
 #include 
+#include 
 #include 
 #include 
@@ -24,6 +25,7 @@ namespace capy {
 namespace test {
 
 static_assert(WriteStream<write_stream>);
+static_assert(!WriteSink<write_stream>);
 
 class write_stream_test
 {
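+    // The assertions above pin down the concept
+    // surface of the test double: write_stream is a
+    // WriteStream but deliberately not a WriteSink.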