Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 5 additions & 0 deletions newsfragments/3657.feature.rst
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
``RequestBatcher.execute`` and ``RequestBatcher.async_execute`` now accept
``raise_on_error`` (default ``True``). Passing ``False`` returns per-request
errors as raw RPC response dicts instead of aborting the whole batch on the
first failing entry, so callers can inspect the ``error`` field alongside
successful results.
30 changes: 30 additions & 0 deletions tests/core/providers/test_http_provider.py
Original file line number Diff line number Diff line change
Expand Up @@ -137,6 +137,36 @@ def test_http_empty_batch_response(mock_post):
assert not w3.provider._is_batching


@patch(
    "web3._utils.http_session_manager.HTTPSessionManager.make_post_request",
    new_callable=Mock,
)
def test_http_batch_request_raise_on_error_false_returns_per_request_errors(
    mock_post,
):
    # Batch of two requests where entry 0 errors and entry 1 succeeds.
    # Under the default (raise_on_error=True) the failing entry would abort
    # the whole batch via Web3RPCError; #3657 asks for a way to opt out and
    # receive both entries instead.
    mocked_payload = (
        b'[{"jsonrpc":"2.0","id":0,"error":{"code":-32000,"message":"bad request"}},'
        b'{"jsonrpc":"2.0","id":1,"result":"0x539"}]'
    )
    mock_post.return_value = mocked_payload

    w3 = Web3(HTTPProvider())
    with w3.batch_requests() as batch:
        batch.add(w3.eth.chain_id)
        batch.add(w3.eth.chain_id)
        responses = batch.execute(raise_on_error=False)

    assert not w3.provider._is_batching
    assert len(responses) == 2

    failed, succeeded = responses
    # The failing entry comes back as the raw RPC response dict so the
    # caller can inspect its ``error`` field.
    assert "error" in failed
    assert failed["error"]["message"] == "bad request"
    # The successful entry is still run through the chain_id result
    # formatter (hex string -> int).
    assert succeeded == 0x539


def test_user_provided_session_shared_across_threads():
"""
Test that when a user provides an explicit session to HTTPProvider,
Expand Down
29 changes: 25 additions & 4 deletions web3/_utils/batching.py
Original file line number Diff line number Diff line change
Expand Up @@ -131,9 +131,20 @@ def add_mapping(
for param in params:
self.add(method(param))

def execute(self) -> list["RPCResponse"]:
def execute(self, raise_on_error: bool = True) -> list["RPCResponse"]:
    """
    Send every queued request as one JSON-RPC batch and return the results.

    :param raise_on_error: when ``False``, an entry whose response carries
        an ``error`` field is returned as the raw RPC response dict rather
        than raising, so a single failing request does not discard the rest
        of the batch (#3657). A batch-level envelope error (the server
        returned a single error object instead of an array) still raises.
    """
    self._validate_is_batching()
    batched = self.web3.manager._make_batch_request(
        self._requests_info, raise_on_error=raise_on_error
    )
    self._end_batching()
    return batched

Expand All @@ -160,15 +171,25 @@ def __exit__(

# -- async -- #

async def async_execute(self) -> list["RPCResponse"]:
async def async_execute(
    self, raise_on_error: bool = True
) -> list["RPCResponse"]:
    """
    Asynchronous counterpart of ``execute``.

    :param raise_on_error: same semantics as ``execute``. Persistent-
        connection providers always raise per-request errors at the socket
        layer today, so this flag is only honored for stateless (HTTP)
        providers.
    """
    self._validate_is_batching()
    manager = self.web3.manager
    if not self._provider.has_persistent_connection:
        results = await manager._async_make_batch_request(
            self._async_requests_info, raise_on_error=raise_on_error
        )
    else:
        # Socket batching goes through its own request pipeline, which
        # does not plumb ``raise_on_error`` through.
        results = await manager._async_make_socket_batch_request(
            self._async_requests_info
        )
    self._end_batching()
    return results
Expand Down
44 changes: 33 additions & 11 deletions web3/manager.py
Original file line number Diff line number Diff line change
Expand Up @@ -258,10 +258,18 @@ def _batch_requests(self) -> RequestBatcher[Method[Callable[..., Any]]]:
return RequestBatcher(self.w3)

def _make_batch_request(
self, requests_info: list[tuple[tuple["RPCEndpoint", Any], tuple[Any, ...]]]
self,
requests_info: list[tuple[tuple["RPCEndpoint", Any], tuple[Any, ...]]],
raise_on_error: bool = True,
) -> list[RPCResponse]:
"""
Make a batch request using the provider

When ``raise_on_error`` is ``False`` per-request errors in the batch are
returned as raw RPC response dicts instead of raising. The batch-level
envelope error (a JSON object instead of a list) still raises so the
caller can distinguish "no responses came back" from "some responses
carry an error".
"""
provider = cast(JSONBaseProvider, self.provider)
request_func = provider.batch_request_func(
Expand All @@ -277,7 +285,9 @@ def _make_batch_request(
if isinstance(response, list):
# expected format
formatted_responses = [
self._format_batched_response(info, cast(RPCResponse, resp))
self._format_batched_response(
info, cast(RPCResponse, resp), raise_on_error=raise_on_error
)
for info, resp in zip(requests_info, response)
]
return list(formatted_responses)
Expand All @@ -290,9 +300,12 @@ async def _async_make_batch_request(
requests_info: list[
Coroutine[Any, Any, tuple[tuple["RPCEndpoint", Any], tuple[Any]]]
],
raise_on_error: bool = True,
) -> list[RPCResponse]:
"""
Make an asynchronous batch request using the provider
Make an asynchronous batch request using the provider.

See ``_make_batch_request`` for ``raise_on_error`` semantics.
"""
provider = cast(AsyncJSONBaseProvider, self.provider)
request_func = await provider.batch_request_func(
Expand All @@ -313,7 +326,9 @@ async def _async_make_batch_request(
# expected format
response = cast(list[RPCResponse], response)
formatted_responses = [
self._format_batched_response(info, resp)
self._format_batched_response(
info, resp, raise_on_error=raise_on_error
)
for info, resp in zip(unpacked_requests_info, response)
]
return list(formatted_responses)
Expand Down Expand Up @@ -405,15 +420,22 @@ def _format_batched_response(
self,
requests_info: tuple[tuple[RPCEndpoint, Any], Sequence[Any]],
response: RPCResponse,
raise_on_error: bool = True,
) -> RPCResponse:
result_formatters, error_formatters, null_result_formatters = requests_info[1]
validate_rpc_response_and_raise_if_error(
response,
error_formatters,
is_subscription_response=False,
logger=self.logger,
params=requests_info[0][1],
)
if raise_on_error:
validate_rpc_response_and_raise_if_error(
response,
error_formatters,
is_subscription_response=False,
logger=self.logger,
params=requests_info[0][1],
)
elif "error" in response:
# Caller opted out of per-request raising. Return the raw RPC
# response dict so they can inspect ``error`` themselves without
# losing the rest of the batch (#3657).
return response
return apply_result_formatters(
result_formatters,
self.formatted_response(
Expand Down