9 changes: 9 additions & 0 deletions python/rapidsmpf/rapidsmpf/coll/allgather.pyi
@@ -2,6 +2,8 @@
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations

from typing import Any

from rapidsmpf.communicator.communicator import Communicator
from rapidsmpf.memory.buffer_resource import BufferResource
from rapidsmpf.memory.packed_data import PackedData
@@ -17,6 +19,13 @@ class AllGather:
    ) -> None: ...
    @property
    def comm(self) -> Communicator: ...
    def __enter__(self) -> AllGather: ...
    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_value: BaseException | None,
        traceback: Any | None,
    ) -> bool: ...
    def insert(self, sequence_number: int, packed_data: PackedData) -> None: ...
    def insert_finished(self) -> None: ...
    def wait_and_extract(
7 changes: 7 additions & 0 deletions python/rapidsmpf/rapidsmpf/coll/allgather.pyx
@@ -116,6 +116,13 @@ cdef class AllGather:
        with nogil:
            deref(self._handle).insert_finished()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc, tb):
        self.insert_finished()
        return False  # do not suppress exceptions

    def wait_and_extract(self, bool ordered = True, int timeout_ms = -1):
        """
        Wait for completion and extract all gathered data.
10 changes: 10 additions & 0 deletions python/rapidsmpf/rapidsmpf/streaming/coll/allgather.pyi
@@ -1,5 +1,8 @@
# SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES.
# SPDX-License-Identifier: Apache-2.0

from typing import Any

from rapidsmpf.communicator.communicator import Communicator
from rapidsmpf.memory.packed_data import PackedData
from rapidsmpf.streaming.chunks.packed_data import PackedDataChunk
@@ -11,6 +14,13 @@ class AllGather:
    def __init__(self, ctx: Context, comm: Communicator, op_id: int) -> None: ...
    @property
    def comm(self) -> Communicator: ...
    def __enter__(self) -> AllGather: ...
    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_value: BaseException | None,
        traceback: Any | None,
    ) -> bool: ...
    def insert(self, sequence_number: int, packed_data: PackedData) -> None: ...
    def insert_finished(self) -> None: ...
    async def extract_all(self, ctx: Context, *, ordered: bool) -> list[PackedData]: ...
7 changes: 7 additions & 0 deletions python/rapidsmpf/rapidsmpf/streaming/coll/allgather.pyx
@@ -124,6 +124,13 @@ cdef class AllGather:
        with nogil:
            deref(self._handle).insert_finished()

    def __enter__(self):
Contributor:
I think we have to be careful in the streaming case because of

/**
* @brief Asynchronous (coroutine) interface to `coll::AllGather`.
*
* Once the AllGather is created, many tasks may insert data into it. If multiple tasks
* insert data, the user is responsible for arranging that `insert_finished` is only
* called after all `insert`ions have completed. A single consumer task should extract
* data.
*/

It's the caller's responsibility to do this. We should probably only do the "bulk" case in this PR and keep using AllGatherManager.Inserter in cudf-polars.
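To illustrate the concern, a hypothetical sketch (not code from this PR; `gather` is a streaming AllGather and `recv_chunks` an assumed async source of (sequence_number, packed_data) pairs): if several inserter tasks each wrap their work in the context manager, the first task to exit calls insert_finished() while the others may still be inserting.

async def inserter_task(gather, recv_chunks):
    # UNSAFE with multiple concurrent inserters: __exit__ calls
    # insert_finished() as soon as *this* task leaves the block.
    with gather:
        async for seq, packed in recv_chunks():
            gather.insert(seq, packed)

The bulk case (a single owner entering the context manager) is fine; otherwise the caller must coordinate the inserters and call insert_finished() exactly once, after all inserts have completed.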

Contributor Author:
Sure, we can still use the AllGatherManager.Inserter API in cudf_polars, but this would help simplify that implementation too; with this, inserting could become

@contextmanager
def inserting(self):
    with self.allgather:
        yield self

and we could move Inserter.insert onto AllGatherManager.insert if desired.
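A rough sketch of how that could look (assuming the cudf-polars AllGatherManager keeps an allgather attribute; names here are illustrative, not a final API):

from contextlib import contextmanager

class AllGatherManager:
    def __init__(self, allgather):
        self.allgather = allgather

    @contextmanager
    def inserting(self):
        # Exiting the AllGather context manager calls insert_finished(),
        # so the caller controls when insertion is sealed.
        with self.allgather:
            yield self

    def insert(self, sequence_number, packed_data):
        # Formerly Inserter.insert.
        self.allgather.insert(sequence_number, packed_data)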

Contributor:
Oh ok, I think that works, thanks.

Contributor:
Yeah, I think we can remove the Inserter class, but we have to keep the inserting cm so the caller can control when insert_finished is called?

Contributor Author (@mroeschke, May 15, 2026):
Yeah, it's kinda nice to keep an API like inserting to signal that "after exiting the cm, this calls insert_finished".
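Caller-side, that could read as follows (a sketch; manager and local_chunks are assumed names), making explicit that insert_finished() only runs when the inserting block exits:

with manager.inserting() as m:
    for seq, packed in local_chunks:
        m.insert(seq, packed)
# insert_finished() has been called exactly once; safe to extract.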

        return self

    def __exit__(self, exc_type, exc, tb):
        self.insert_finished()
        return False  # do not suppress exceptions

    async def extract_all(self, Context ctx, *, bool ordered):
        """
        Suspend and extract all data from the AllGather.
@@ -125,10 +125,10 @@ async def allgather_and_concat(
     op_id: int,
 ) -> None:
     gather = AllGather(context, comm, op_id)
-    while (msg := await ch_in.recv(context)) is not None:
-        chunk = PackedDataChunk.from_message(msg, br=context.br()).to_packed_data()
-        gather.insert(msg.sequence_number, chunk)
-    gather.insert_finished()
+    with gather as ag:
+        while (msg := await ch_in.recv(context)) is not None:
+            chunk = PackedDataChunk.from_message(msg, br=context.br()).to_packed_data()
+            ag.insert(msg.sequence_number, chunk)
     gathered = await gather.extract_all(context, ordered=True)
     stream = context.get_stream_from_pool()
     table = unpack_and_concat(gathered, stream, context.br())
16 changes: 7 additions & 9 deletions python/rapidsmpf/rapidsmpf/tests/test_allgather.py
@@ -145,15 +145,13 @@ def test_basic_allgather(
     this_rank = comm.rank
     n_ranks = comm.nranks

-    # Insert data from this rank
-    for i in range(n_inserts):
-        packed_data = generate_packed_data(
-            n_elements, gen_offset(i, this_rank), stream, br
-        )
-        allgather.insert(i, packed_data)
-
-    # Mark this rank as finished
-    allgather.insert_finished()
+    # Insert data from this rank and mark as finished
+    with allgather as ag:
+        for i in range(n_inserts):
+            packed_data = generate_packed_data(
+                n_elements, gen_offset(i, this_rank), stream, br
+            )
+            ag.insert(i, packed_data)

     # Wait for completion and extract results
     results = allgather.wait_and_extract(ordered=ordered)