From 872b2f68bc5a20390145e8f100f759ef49a7e5ff Mon Sep 17 00:00:00 2001
From: Peter Sharpe
Date: Sat, 20 Dec 2025 12:09:01 -0500
Subject: [PATCH 01/11] Fixes a non-deterministic test in MeshGraphNet
 suite...

---
 test/models/meshgraphnet/test_meshgraphnet.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/test/models/meshgraphnet/test_meshgraphnet.py b/test/models/meshgraphnet/test_meshgraphnet.py
index 17c299d3f7..7e9006abac 100644
--- a/test/models/meshgraphnet/test_meshgraphnet.py
+++ b/test/models/meshgraphnet/test_meshgraphnet.py
@@ -174,6 +174,11 @@ def test_meshgraphnet_checkpoint(device, pytestconfig, set_physicsnemo_force_te)
 
     from physicsnemo.models.meshgraphnet import MeshGraphNet
 
+    # Seed for reproducibility
+    torch.manual_seed(0)
+    np.random.seed(0)
+    random.seed(0)
+
     # Construct MGN model
     model_1 = MeshGraphNet(
         input_dim_nodes=4,

From e8a3e35d06d145cef71940735d765dbd74694563 Mon Sep 17 00:00:00 2001
From: Peter Sharpe
Date: Sat, 20 Dec 2025 13:29:46 -0500
Subject: [PATCH 02/11] seeds randomness

---
 test/mesh/io/io_pyvista/test_error_handling.py |  1 +
 test/mesh/primitives/test_procedural_noise.py  |  4 ++++
 test/mesh/utilities/test_cache.py              | 18 ++++++++++++++++++
 test/mesh/utilities/test_scatter_ops.py        |  1 +
 4 files changed, 24 insertions(+)

diff --git a/test/mesh/io/io_pyvista/test_error_handling.py b/test/mesh/io/io_pyvista/test_error_handling.py
index 83c6a46ac6..7a86f7f44a 100644
--- a/test/mesh/io/io_pyvista/test_error_handling.py
+++ b/test/mesh/io/io_pyvista/test_error_handling.py
@@ -214,6 +214,7 @@ class TestToPyvistaErrors:
     def test_unsupported_manifold_dims_raises(self):
         """Test that unsupported manifold dimensions raise ValueError."""
         # Create a mesh with 4 manifold dims (not supported by PyVista)
+        torch.manual_seed(0)
         points = torch.randn(10, 5)  # 5D spatial
         cells = torch.randint(0, 10, (5, 5))  # 4-manifold (5 vertices per cell)
         mesh = Mesh(points=points, cells=cells)
diff --git a/test/mesh/primitives/test_procedural_noise.py b/test/mesh/primitives/test_procedural_noise.py
index fb1d8d29c9..681ad9684b 100644
--- a/test/mesh/primitives/test_procedural_noise.py
+++ b/test/mesh/primitives/test_procedural_noise.py
@@ -34,6 +34,7 @@ class TestPerlinNoiseND:
     def test_dimension_agnostic(self, n_dims):
         """Test that Perlin noise works for any number of dimensions."""
         n_points = 50
+        torch.manual_seed(0)
         points = torch.randn(n_points, n_dims)
 
         noise = perlin_noise_nd(points, scale=1.0, seed=42)
@@ -50,6 +51,7 @@ def test_dimension_agnostic(self, n_dims):
     @pytest.mark.parametrize("seed", [0, 42, 123, 999])
     def test_reproducibility(self, seed):
         """Test that same seed produces same output."""
+        torch.manual_seed(0)
         points = torch.randn(100, 3)
 
         noise1 = perlin_noise_nd(points, scale=1.0, seed=seed)
@@ -59,6 +61,7 @@ def test_reproducibility(self, seed):
 
     def test_different_seeds_produce_different_output(self):
         """Test that different seeds produce different noise patterns."""
+        torch.manual_seed(0)
         points = torch.randn(100, 3)
 
         noise1 = perlin_noise_nd(points, scale=1.0, seed=42)
@@ -73,6 +76,7 @@ def test_different_seeds_produce_different_output(self):
     @pytest.mark.parametrize("scale", [0.1, 0.5, 1.0, 2.0, 5.0])
     def test_scale_parameter(self, scale):
         """Test that scale parameter affects noise frequency."""
+        torch.manual_seed(0)
         points = torch.randn(100, 3)
 
         noise = perlin_noise_nd(points, scale=scale, seed=42)
diff --git a/test/mesh/utilities/test_cache.py b/test/mesh/utilities/test_cache.py
index 9225969fda..ae735acc8e 100644
--- a/test/mesh/utilities/test_cache.py
+++ b/test/mesh/utilities/test_cache.py
@@ -41,6 +41,7 @@ def test_get_cached_returns_none_when_not_set(self):
     def test_get_cached_returns_none_for_missing_key(self):
         """Test that get_cached returns None for missing key in existing cache."""
         data = TensorDict({}, batch_size=[10])
+        torch.manual_seed(0)
         data["_cache"] = TensorDict({"centroids": torch.randn(10, 3)}, batch_size=[10])
 
         result = get_cached(data, "areas")
@@ -50,6 +51,7 @@ def test_get_cached_returns_none_for_missing_key(self):
     def test_get_cached_returns_value_when_set(self):
         """Test that get_cached returns the cached value when present."""
         data = TensorDict({}, batch_size=[10])
+        torch.manual_seed(0)
         cached_value = torch.randn(10, 3)
         data["_cache"] = TensorDict({"centroids": cached_value}, batch_size=[10])
@@ -73,6 +75,7 @@ class TestSetCached:
     def test_set_cached_creates_cache_if_missing(self):
         """Test that set_cached creates _cache TensorDict if not present."""
         data = TensorDict({}, batch_size=[10])
+        torch.manual_seed(0)
         value = torch.randn(10, 3)
 
         set_cached(data, "centroids", value)
@@ -83,6 +86,7 @@ def test_set_cached_creates_cache_if_missing(self):
     def test_set_cached_stores_value(self):
         """Test that set_cached stores the value correctly."""
         data = TensorDict({}, batch_size=[10])
+        torch.manual_seed(0)
         value = torch.randn(10, 3)
 
         set_cached(data, "areas", value)
@@ -93,6 +97,7 @@ def test_set_cached_stores_value(self):
     def test_set_cached_overwrites_existing(self):
         """Test that set_cached overwrites existing cached value."""
         data = TensorDict({}, batch_size=[10])
+        torch.manual_seed(0)
         old_value = torch.randn(10, 3)
         new_value = torch.randn(10, 3)
@@ -106,6 +111,7 @@ def test_set_cached_overwrites_existing(self):
     def test_set_cached_multiple_keys(self):
         """Test that set_cached can store multiple keys."""
         data = TensorDict({}, batch_size=[10])
+        torch.manual_seed(0)
         centroids = torch.randn(10, 3)
         areas = torch.randn(10)
         normals = torch.randn(10, 3)
@@ -136,6 +142,7 @@ def test_roundtrip_scalar(self):
     def test_roundtrip_1d(self):
         """Test round-trip with 1D tensor."""
         data = TensorDict({}, batch_size=[10])
+        torch.manual_seed(0)
         value = torch.randn(10)
 
         set_cached(data, "areas", value)
@@ -147,6 +154,7 @@ def test_roundtrip_1d(self):
     def test_roundtrip_2d(self):
         """Test round-trip with 2D tensor."""
         data = TensorDict({}, batch_size=[10])
+        torch.manual_seed(0)
         value = torch.randn(10, 3)
 
         set_cached(data, "centroids", value)
@@ -158,6 +166,7 @@ def test_roundtrip_2d(self):
     def test_roundtrip_3d(self):
         """Test round-trip with 3D tensor."""
         data = TensorDict({}, batch_size=[10])
+        torch.manual_seed(0)
         value = torch.randn(10, 3, 3)
 
         set_cached(data, "stress", value)
@@ -172,6 +181,7 @@ class TestCacheWithExistingData:
 
     def test_cache_does_not_affect_existing_data(self):
         """Test that caching doesn't affect existing non-cache data."""
+        torch.manual_seed(0)
         data = TensorDict({"temperature": torch.randn(10)}, batch_size=[10])
         original_temp = data["temperature"].clone()
@@ -181,6 +191,7 @@ def test_cache_does_not_affect_existing_data(self):
 
     def test_get_cached_ignores_non_cache_keys(self):
         """Test that get_cached only looks in _cache namespace."""
+        torch.manual_seed(0)
         data = TensorDict({"areas": torch.randn(10)}, batch_size=[10])
 
         # Even though "areas" exists at top level, get_cached looks in _cache
@@ -190,6 +201,7 @@ def test_get_cached_ignores_non_cache_keys(self):
 
     def test_cache_coexists_with_data(self):
         """Test that cache and regular data coexist."""
+        torch.manual_seed(0)
         data = TensorDict(
             {
                 "temperature": torch.randn(10),
@@ -212,6 +224,7 @@ class TestCacheDevices:
     def test_cache_cpu(self):
         """Test caching on CPU TensorDict."""
         data = TensorDict({}, batch_size=[10], device="cpu")
+        torch.manual_seed(0)
         value = torch.randn(10, 3, device="cpu")
 
         set_cached(data, "centroids", value)
@@ -224,6 +237,7 @@ def test_cache_cpu(self):
     def test_cache_cuda(self):
         """Test caching on CUDA TensorDict."""
         data = TensorDict({}, batch_size=[10], device="cuda")
+        torch.manual_seed(0)
         value = torch.randn(10, 3, device="cuda")
 
         set_cached(data, "centroids", value)
@@ -242,6 +256,7 @@ class TestCacheDtypes:
     def test_cache_various_dtypes(self, dtype):
         """Test caching with various dtypes."""
         data = TensorDict({}, batch_size=[10])
+        torch.manual_seed(0)
         if dtype in [torch.float32, torch.float64]:
             value = torch.randn(10, dtype=dtype)
         else:
@@ -267,6 +282,7 @@ def test_cell_data_cache_pattern(self):
         assert cached_areas is None
 
         # Compute and cache
+        torch.manual_seed(0)
         computed_areas = torch.randn(100)
         set_cached(cell_data, "areas", computed_areas)
@@ -281,6 +297,7 @@ def test_point_data_cache_pattern(self):
         point_data = TensorDict({}, batch_size=[500])
 
         # Cache point normals
+        torch.manual_seed(0)
         normals = torch.randn(500, 3)
         set_cached(point_data, "normals", normals)
@@ -294,6 +311,7 @@ def test_multiple_caches_pattern(self):
         cell_data = TensorDict({}, batch_size=[100])
 
         # Cache multiple properties
+        torch.manual_seed(0)
         set_cached(cell_data, "centroids", torch.randn(100, 3))
         set_cached(cell_data, "areas", torch.randn(100))
         set_cached(cell_data, "normals", torch.randn(100, 3))
diff --git a/test/mesh/utilities/test_scatter_ops.py b/test/mesh/utilities/test_scatter_ops.py
index 0915a2e150..6be139de00 100644
--- a/test/mesh/utilities/test_scatter_ops.py
+++ b/test/mesh/utilities/test_scatter_ops.py
@@ -309,6 +309,7 @@ def test_all_aggregation_modes(self, aggregation):
     @pytest.mark.parametrize("n_dst", [1, 2, 5, 10])
     def test_various_n_dst(self, n_dst):
         """Test with various destination counts."""
+        torch.manual_seed(0)
         src_data = torch.randn(20)
         src_to_dst = torch.randint(0, n_dst, (20,))

From fab719989216e1f1cc633da01e01509c10b22f64 Mon Sep 17 00:00:00 2001
From: Peter Sharpe
Date: Sat, 20 Dec 2025 13:37:37 -0500
Subject: [PATCH 03/11] need to seed this

---
 test/metrics/test_metrics_climate.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/test/metrics/test_metrics_climate.py b/test/metrics/test_metrics_climate.py
index 1ccb359d2c..347b043d3c 100644
--- a/test/metrics/test_metrics_climate.py
+++ b/test/metrics/test_metrics_climate.py
@@ -105,6 +105,7 @@ def test_climate_acc_mse(test_data, device, rtol: float = 1e-3, atol: float = 1e
 
 def test_climate_reductions(test_data, device, rtol: float = 1e-3, atol: float = 1e-3):
+    torch.manual_seed(0)
     channels, lon, lat, pred_tensor_np, targ_tensor_np, time_means = test_data
     pred_tensor = torch.from_numpy(pred_tensor_np).expand(channels, -1, -1).to(device)
     lat = torch.from_numpy(lat).to(device)
@@ -247,6 +248,7 @@ def test_climate_reductions(test_data, device, rtol: float = 1e-3, atol: float =
 
 def test_climate_efi(test_data, device, rtol: float = 1e-1, atol: float = 1e-1):
+    torch.manual_seed(0)
     one = torch.ones((1, 1), dtype=torch.float32, device=device)
     bin_edges = hist.linspace(-10 * one, 10 * one, 30)
     bin_mids = 0.5 * bin_edges[1:] + 0.5 * bin_edges[:-1]

From 81d9815ab6d1157b10b1448b3cedda40146c91b2 Mon Sep 17 00:00:00 2001
From: Peter Sharpe
Date: Sat, 20 Dec 2025 13:38:56 -0500
Subject: [PATCH 04/11] must seed these

---
 test/metrics/test_metrics_general.py | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/test/metrics/test_metrics_general.py b/test/metrics/test_metrics_general.py
index 6e9559c9a1..63d37e0433 100644
--- a/test/metrics/test_metrics_general.py
+++ b/test/metrics/test_metrics_general.py
@@ -66,6 +66,8 @@ def get_disagreements(inputs, bins, counts, test):
 @pytest.mark.parametrize("input_shape", [(1, 72, 144)])
 def test_histogram(device, input_shape, rtol: float = 1e-3, atol: float = 1e-3):
+    torch.manual_seed(0)
+    np.random.seed(0)
     x = torch.randn([10, *input_shape], device=device)
     y = torch.randn([5, *input_shape], device=device)
@@ -224,6 +226,7 @@ def fair_crps(pred, obs, dim=-1):
 
 def test_fair_crps_greater_than_zero(device):
+    torch.manual_seed(0)
     pred = torch.randn(5, 10, device=device)
     obs = torch.tensor([0.0, 0.0, 0.0, 0.0, 0.0], device=device)
     assert torch.all(fair_crps(pred, obs, dim=-1) > 0)
@@ -233,6 +236,7 @@ def test_fair_crps_is_fair(device):
     # fair means that a random prediction should outperform a non-random one on average
     # This is not always true of ``crps``...try replacing fair_crps function
     # below with ``crps``
+    torch.manual_seed(0)
     n = 256
     random_pred = torch.randn(n, 2, device=device)
     cheating_pred = torch.zeros((n, 2), device=device)
@@ -341,6 +345,7 @@ def test_crps_finite(device, num, biased):
 def test_crps(device, rtol: float = 1e-3, atol: float = 1e-3):
     # Uses eq (5) from Gneiting et al. https://doi.org/10.1175/MWR2904.1
     # crps(N(0, 1), 0.0) = 2 / sqrt(2*pi) - 1/sqrt(pi) ~= 0.23...
+    torch.manual_seed(0)
     x = torch.randn((1_000_000, 1), device=device, dtype=torch.float32)
     y = torch.zeros((1,), device=device, dtype=torch.float32)
@@ -526,6 +531,7 @@ def test_crps(device, rtol: float = 1e-3, atol: float = 1e-3):
 @pytest.mark.parametrize("mean", [3.0])
 @pytest.mark.parametrize("variance", [0.1])
 def test_wasserstein(device, mean, variance, rtol: float = 1e-3, atol: float = 1e-3):
+    torch.manual_seed(0)
     mean = torch.as_tensor([mean], device=device, dtype=torch.float32)
     variance = torch.as_tensor([variance], device=device, dtype=torch.float32)
@@ -596,6 +602,7 @@ def test_means_var(device, rtol: float = 1e-3, atol: float = 1e-3):
     if not torch.cuda.is_available():
         pytest.skip("CUDA required for this test.")
 
+    torch.manual_seed(0)
     DistributedManager._shared_state = {}
     if (device == "cuda:0") and (not DistributedManager.is_initialized()):
         os.environ["MASTER_ADDR"] = "localhost"
@@ -697,6 +704,8 @@ def test_means_var(device, rtol: float = 1e-3, atol: float = 1e-3):
 
 def test_calibration(device, rtol: float = 1e-2, atol: float = 1e-2):
+    torch.manual_seed(0)
+    np.random.seed(0)
     x = torch.randn((10_000, 30, 30), device=device, dtype=torch.float32)
     y = torch.randn((30, 30), device=device, dtype=torch.float32)
@@ -766,6 +775,7 @@ def test_calibration(device, rtol: float = 1e-2, atol: float = 1e-2):
 
 def test_entropy(device, rtol: float = 1e-2, atol: float = 1e-2):
+    torch.manual_seed(0)
     one = torch.ones([1], device=device, dtype=torch.float32)
 
     x = torch.randn((50_000, 10, 10), device=device, dtype=torch.float32)

From 37f1269b62a55779ab2042be492e9c6ccb150312 Mon Sep 17 00:00:00 2001
From: Peter Sharpe
Date: Sat, 20 Dec 2025 13:43:08 -0500
Subject: [PATCH 05/11] must be seeded

---
 test/utils/test_checkpoint.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/test/utils/test_checkpoint.py b/test/utils/test_checkpoint.py
index 0136334ed4..fcfa29e054 100644
--- a/test/utils/test_checkpoint.py
+++ b/test/utils/test_checkpoint.py
@@ -100,6 +100,7 @@ def test_model_checkpointing(
     # Initialize DistributedManager first since save_checkpoint instantiates it
     DistributedManager.initialize()
 
+    torch.manual_seed(0)
     mlp_model_1 = model_generator(8).to(device)
     mlp_model_2 = model_generator(4).to(device)
@@ -187,6 +188,7 @@ def test_compiled_model_checkpointing(
 
     from physicsnemo.utils import load_checkpoint, save_checkpoint
 
+    torch.manual_seed(0)
     # Create and compile a simple model
     in_feats = 4
     base_model = FullyConnected(

From 862d0ae2a859b153ae9a82dcc8945bdad4e5d40 Mon Sep 17 00:00:00 2001
From: Peter Sharpe
Date: Sat, 20 Dec 2025 13:43:42 -0500
Subject: [PATCH 06/11] Should seed the points, since we are delegating to VTK
 here and don't want to debug any issues with that

---
 test/utils/test_mesh_utils.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/test/utils/test_mesh_utils.py b/test/utils/test_mesh_utils.py
index 9abae4b4e8..b6e6a87fc5 100644
--- a/test/utils/test_mesh_utils.py
+++ b/test/utils/test_mesh_utils.py
@@ -72,6 +72,9 @@ def _create_random_vtp_mesh(num_points: int, num_triangles: int, dir: str) -> tu
         tuple: A tuple containing the random VTP mesh (vtk.vtkPolyData).
     """
+    import random
+    random.seed(0)
+
     # make directory if it does not exist
     os.makedirs(dir, exist_ok=True)

From cf080e166a249b5c7f692e98a34a343ad6d5ff7b Mon Sep 17 00:00:00 2001
From: Peter Sharpe
Date: Tue, 6 Jan 2026 09:56:21 -0500
Subject: [PATCH 07/11] format fix

---
 test/datapipes/test_mesh_datapipe.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/test/datapipes/test_mesh_datapipe.py b/test/datapipes/test_mesh_datapipe.py
index 86cf6eeac0..4f52cab32c 100644
--- a/test/datapipes/test_mesh_datapipe.py
+++ b/test/datapipes/test_mesh_datapipe.py
@@ -15,7 +15,6 @@
 # limitations under the License.
 
 import os
-import random
 
 import pytest
@@ -51,6 +50,10 @@ def _create_random_vtp_vtu_mesh(
         tuple: A tuple containing the random VTP mesh (vtk.vtkPolyData) and the
             random VTU mesh (vtk.vtkUnstructuredGrid).
     """
+    import random
+
+    random.seed(0)
+
     # make directory if it does not exist
     os.makedirs(dir, exist_ok=True)

From 9df7f15c129e172da905ea4d7964208277a35c11 Mon Sep 17 00:00:00 2001
From: Peter Sharpe
Date: Tue, 6 Jan 2026 10:02:44 -0500
Subject: [PATCH 08/11] Ensure deterministic test results by seeding random
 number generators in various test files.

---
 test/active_learning/test_checkpointing.py         |  4 ++++
 test/active_learning/test_loop.py                  |  3 +++
 test/core/test_from_torch.py                       |  6 ++++++
 test/datapipes/test_domino_datapipe.py             |  4 ++++
 test/domain_parallel/test_initialization.py        |  3 +++
 test/domain_parallel/test_reductions.py            |  1 +
 test/mesh/utilities/test_cache.py                  | 14 --------------
 test/metrics/test_metrics_climate.py               |  1 -
 .../models/dlwp_healpix/test_healpix_unet_model.py |  4 ++++
 test/models/graphcast/test_graphcast.py            |  8 ++++++++
 test/models/meshgraphnet/test_meshgraphnet.py      | 11 +++++++++++
 test/nn/test_kan_layers.py                         |  1 +
 test/nn/test_layer_norm.py                         |  1 +
 test/nn/test_layers_activations.py                 | 10 ++++++++++
 test/nn/test_layers_dgm.py                         |  1 +
 test/nn/test_nd_conv_layers.py                     |  2 ++
 test/utils/test_mesh_utils.py                      |  1 +
 17 files changed, 60 insertions(+), 15 deletions(-)

diff --git a/test/active_learning/test_checkpointing.py b/test/active_learning/test_checkpointing.py
index 5df0fe9db7..5dbbe177d2 100644
--- a/test/active_learning/test_checkpointing.py
+++ b/test/active_learning/test_checkpointing.py
@@ -235,6 +235,7 @@ def test_queue_serialization_with_to_list(
     )
 
     # Add items to queues
+    torch.manual_seed(0)
    test_data = MockDataStructure(inputs=torch.randn(16, 64))
     driver.query_queue.put(test_data)
     driver.label_queue.put(test_data)
@@ -278,6 +279,7 @@ def test_queue_serialization_fallback(
     )
 
     # Add item to queue
+    torch.manual_seed(0)
     test_data = MockDataStructure(inputs=torch.randn(16, 64))
     driver.query_queue.put(test_data)
@@ -617,6 +619,7 @@ def test_checkpoint_preserves_model_weights(
     driver_config, mock_module, strategies_config, temp_checkpoint_dir
 ):
     """Test that model weights are correctly saved and loaded."""
+    torch.manual_seed(0)
     driver = Driver(
         config=driver_config,
         learner=mock_module,
@@ -754,6 +757,7 @@ def test_resume_from_query_phase_skips_earlier_phases(
     # Create checkpoint at query phase
     driver.active_learning_step_idx = 1
     driver.current_phase = p.ActiveLearningPhase.QUERY
+    torch.manual_seed(0)
     driver.query_queue.put(MockDataStructure(inputs=torch.randn(16, 64)))
 
     checkpoint_path = temp_checkpoint_dir / "resume_query_test"
diff --git a/test/active_learning/test_loop.py b/test/active_learning/test_loop.py
index 4d17163d03..40ec1d1ab0 100644
--- a/test/active_learning/test_loop.py
+++ b/test/active_learning/test_loop.py
@@ -189,6 +189,7 @@ def test_call_without_train_step_raises_error(self, mock_module):
 
     def test_basic_training_loop_execution(self, mock_module):
         """Test basic execution of training loop with mocked components."""
+        torch.manual_seed(0)
         # Create a mock train step that returns a loss with backward method
         mock_loss = MagicMock()
         mock_loss.detach.return_value.item.return_value = 0.5
@@ -229,6 +230,7 @@ def mock_train_step(model, batch, *args, **kwargs):
 
     def test_training_with_validation(self, mock_module):
         """Test training loop with validation step."""
+        torch.manual_seed(0)
         # Create mock train step
         mock_loss = MagicMock()
         mock_loss.detach.return_value.item.return_value = 0.5
@@ -279,6 +281,7 @@ def mock_train_step(model, batch, *args, **kwargs):
 
     def test_training_with_lr_scheduler(self, mock_module):
         """Test training loop with learning rate scheduler."""
+        torch.manual_seed(0)
         mock_loss = MagicMock()
         mock_loss.detach.return_value.item.return_value = 0.5
         mock_loss.backward = MagicMock()
diff --git a/test/core/test_from_torch.py b/test/core/test_from_torch.py
index ec38a3e81d..f7a269e840 100644
--- a/test/core/test_from_torch.py
+++ b/test/core/test_from_torch.py
@@ -87,6 +87,8 @@ def test_from_torch_optims(device):
     def setup_model():
         """Set up fresh model and inputs for each optim test"""
         # Construct CustomPhysicsNeMoModel
+        torch.manual_seed(0)
+        random.seed(0)
         CustomPhysicsNeMoModel = Module.from_torch(CustomModel, CustomMetaData())
         model = CustomPhysicsNeMoModel(in_features=32, out_features=8).to(device)
@@ -121,6 +123,8 @@ def setup_model():
 
 def test_from_torch_checkpoint(device):
     """Test checkpoint save/load from PyTorch"""
+    torch.manual_seed(0)
+    random.seed(0)
     # Construct CustomPhysicsNeMoModel
     CustomPhysicsNeMoModel = Module.from_torch(
         CustomModel, CustomMetaData(), register=True
     )
@@ -139,6 +143,8 @@
 @common.check_ort_version()
 def test_from_torch_deploy(device):
     """Test deployment support from PyTorch"""
+    torch.manual_seed(0)
+    random.seed(0)
     # Construct CustomPhysicsNeMoModel
     CustomPhysicsNeMoModel = Module.from_torch(CustomModel, CustomMetaData())
     model = CustomPhysicsNeMoModel(in_features=4, out_features=4).to(device)
diff --git a/test/datapipes/test_domino_datapipe.py b/test/datapipes/test_domino_datapipe.py
index 039f16596e..f462cd05d6 100644
--- a/test/datapipes/test_domino_datapipe.py
+++ b/test/datapipes/test_domino_datapipe.py
@@ -96,6 +96,10 @@ def synthetic_domino_data(
 ):
     """Generate synthetic domino data and save to temporary directory structure
     using zarr."""
+    import numpy as np
+
+    np.random.seed(0)
+
     # Create temporary directory
     temp_dir = Path(tempfile.mkdtemp())
diff --git a/test/domain_parallel/test_initialization.py b/test/domain_parallel/test_initialization.py
index a62e53ac5b..39bf5b8544 100644
--- a/test/domain_parallel/test_initialization.py
+++ b/test/domain_parallel/test_initialization.py
@@ -161,6 +161,9 @@ def shard_tensor_initialization_from_local_chunks_worker(mesh):
 
     dm = DistributedManager()
 
+    random.seed(0)
+    torch.manual_seed(0)
+
     # Create a mesh right from the inputs:
     global_shape, placements = init_global_shape_and_placements(
         mesh,
diff --git a/test/domain_parallel/test_reductions.py b/test/domain_parallel/test_reductions.py
index 09fcb1ee9b..381461bf63 100644
--- a/test/domain_parallel/test_reductions.py
+++ b/test/domain_parallel/test_reductions.py
@@ -95,6 +95,7 @@ def test_shard_tensor_reduction(
 ):
     dm = DistributedManager()
 
+    torch.manual_seed(0)
     # Create a random-valued tensor of at least rank 3:
     full_input = torch.randn(2, 128, 2, requires_grad=backward).to(dm.device)
diff --git a/test/mesh/utilities/test_cache.py b/test/mesh/utilities/test_cache.py
index ae735acc8e..5f9ba89ff6 100644
--- a/test/mesh/utilities/test_cache.py
+++ b/test/mesh/utilities/test_cache.py
@@ -41,7 +41,6 @@ def test_get_cached_returns_none_when_not_set(self):
     def test_get_cached_returns_none_for_missing_key(self):
         """Test that get_cached returns None for missing key in existing cache."""
         data = TensorDict({}, batch_size=[10])
-        torch.manual_seed(0)
         data["_cache"] = TensorDict({"centroids": torch.randn(10, 3)}, batch_size=[10])
 
         result = get_cached(data, "areas")
@@ -50,7 +51,6 @@ def test_get_cached_returns_none_for_missing_key(self):
     def test_get_cached_returns_value_when_set(self):
         """Test that get_cached returns the cached value when present."""
         data = TensorDict({}, batch_size=[10])
-        torch.manual_seed(0)
         cached_value = torch.randn(10, 3)
         data["_cache"] = TensorDict({"centroids": cached_value}, batch_size=[10])
@@ -73,6 +75,7 @@ class TestSetCached:
     def test_set_cached_creates_cache_if_missing(self):
         """Test that set_cached creates _cache TensorDict if not present."""
         data = TensorDict({}, batch_size=[10])
-        torch.manual_seed(0)
         value = torch.randn(10, 3)
 
         set_cached(data, "centroids", value)
@@ -86,7 +83,6 @@ def test_set_cached_creates_cache_if_missing(self):
     def test_set_cached_stores_value(self):
         """Test that set_cached stores the value correctly."""
         data = TensorDict({}, batch_size=[10])
-        torch.manual_seed(0)
         value = torch.randn(10, 3)
 
         set_cached(data, "areas", value)
@@ -97,7 +93,6 @@ def test_set_cached_stores_value(self):
     def test_set_cached_overwrites_existing(self):
         """Test that set_cached overwrites existing cached value."""
         data = TensorDict({}, batch_size=[10])
-        torch.manual_seed(0)
         old_value = torch.randn(10, 3)
         new_value = torch.randn(10, 3)
@@ -111,6 +106,7 @@ def test_set_cached_overwrites_existing(self):
     def test_set_cached_multiple_keys(self):
         """Test that set_cached can store multiple keys."""
         data = TensorDict({}, batch_size=[10])
-        torch.manual_seed(0)
         centroids = torch.randn(10, 3)
         areas = torch.randn(10)
         normals = torch.randn(10, 3)
@@ -142,7 +136,6 @@ def test_roundtrip_scalar(self):
     def test_roundtrip_1d(self):
         """Test round-trip with 1D tensor."""
         data = TensorDict({}, batch_size=[10])
-        torch.manual_seed(0)
         value = torch.randn(10)
 
         set_cached(data, "areas", value)
@@ -154,7 +147,6 @@ def test_roundtrip_1d(self):
     def test_roundtrip_2d(self):
         """Test round-trip with 2D tensor."""
         data = TensorDict({}, batch_size=[10])
-        torch.manual_seed(0)
         value = torch.randn(10, 3)
 
         set_cached(data, "centroids", value)
@@ -166,7 +158,6 @@ def test_roundtrip_2d(self):
     def test_roundtrip_3d(self):
         """Test round-trip with 3D tensor."""
         data = TensorDict({}, batch_size=[10])
-        torch.manual_seed(0)
         value = torch.randn(10, 3, 3)
 
         set_cached(data, "stress", value)
@@ -181,7 +172,6 @@ class TestCacheWithExistingData:
 
     def test_cache_does_not_affect_existing_data(self):
         """Test that caching doesn't affect existing non-cache data."""
-        torch.manual_seed(0)
         data = TensorDict({"temperature": torch.randn(10)}, batch_size=[10])
         original_temp = data["temperature"].clone()
@@ -191,7 +181,6 @@ def test_cache_does_not_affect_existing_data(self):
 
     def test_get_cached_ignores_non_cache_keys(self):
         """Test that get_cached only looks in _cache namespace."""
-        torch.manual_seed(0)
         data = TensorDict({"areas": torch.randn(10)}, batch_size=[10])
 
         # Even though "areas" exists at top level, get_cached looks in _cache
@@ -201,7 +190,6 @@ def test_get_cached_ignores_non_cache_keys(self):
 
     def test_cache_coexists_with_data(self):
         """Test that cache and regular data coexist."""
-        torch.manual_seed(0)
         data = TensorDict(
             {
                 "temperature": torch.randn(10),
@@ -224,7 +212,6 @@ class TestCacheDevices:
     def test_cache_cpu(self):
         """Test caching on CPU TensorDict."""
         data = TensorDict({}, batch_size=[10], device="cpu")
-        torch.manual_seed(0)
         value = torch.randn(10, 3, device="cpu")
 
         set_cached(data, "centroids", value)
@@ -237,7 +224,6 @@ def test_cache_cpu(self):
     def test_cache_cuda(self):
         """Test caching on CUDA TensorDict."""
         data = TensorDict({}, batch_size=[10], device="cuda")
-        torch.manual_seed(0)
         value = torch.randn(10, 3, device="cuda")
 
         set_cached(data, "centroids", value)
diff --git a/test/metrics/test_metrics_climate.py b/test/metrics/test_metrics_climate.py
index 347b043d3c..6220036be9 100644
--- a/test/metrics/test_metrics_climate.py
+++ b/test/metrics/test_metrics_climate.py
@@ -105,7 +105,6 @@ def test_climate_acc_mse(test_data, device, rtol: float = 1e-3, atol: float = 1e
 
 def test_climate_reductions(test_data, device, rtol: float = 1e-3, atol: float = 1e-3):
-    torch.manual_seed(0)
     channels, lon, lat, pred_tensor_np, targ_tensor_np, time_means = test_data
     pred_tensor = torch.from_numpy(pred_tensor_np).expand(channels, -1, -1).to(device)
     lat = torch.from_numpy(lat).to(device)
diff --git a/test/models/dlwp_healpix/test_healpix_unet_model.py b/test/models/dlwp_healpix/test_healpix_unet_model.py
index 50c4651307..fb0ac5d2c1 100644
--- a/test/models/dlwp_healpix/test_healpix_unet_model.py
+++ b/test/models/dlwp_healpix/test_healpix_unet_model.py
@@ -90,6 +90,7 @@ def test_data():
     def generate_test_data(
         batch_size=8, time_dim=1, channels=7, img_size=16, device="cpu"
     ):
+        torch.manual_seed(42)
         test_data = torch.randn(batch_size, 12, time_dim, channels, img_size, img_size)
 
         return test_data.to(device)
@@ -101,6 +102,7 @@ def generate_test_data(
 def constant_data():
     # create dummy data
     def generate_constant_data(channels=2, img_size=16, device="cpu"):
+        torch.manual_seed(42)
         constants = torch.randn(12, channels, img_size, img_size)
 
         return constants.to(device)
@@ -112,6 +114,7 @@ def generate_constant_data(channels=2, img_size=16, device="cpu"):
 def insolation_data():
     # create dummy data
     def generate_insolation_data(batch_size=8, time_dim=1, img_size=16, device="cpu"):
+        torch.manual_seed(42)
         insolation = torch.randn(batch_size, 12, time_dim, 1, img_size, img_size)
 
         return insolation.to(device)
@@ -232,6 +235,7 @@ def test_HEALPixUNet_initialize(
 def test_HEALPixUNet_integration_steps(
     device, unet_encoder_dict, unet_decoder_dict, pytestconfig
 ):
+    fix_random_seeds(seed=42)
     in_channels = 2
     out_channels = 2
     n_constants = 1
diff --git a/test/models/graphcast/test_graphcast.py b/test/models/graphcast/test_graphcast.py
index 20fefb0ad1..577495a2e3 100644
--- a/test/models/graphcast/test_graphcast.py
+++ b/test/models/graphcast/test_graphcast.py
@@ -364,6 +364,8 @@ def test_graphcast_checkpoint(
 
     from physicsnemo.models.graphcast.graph_cast_net import GraphCastNet
 
+    fix_random_seeds()
+
     model_kwds = {
         "mesh_level": 1,
         "input_res": (res_h, res_w),
@@ -405,6 +407,8 @@ def test_graphcast_checkpoint_te(
 
     from physicsnemo.models.graphcast.graph_cast_net import GraphCastNet
 
+    fix_random_seeds()
+
     device = "cuda:0"
 
     model_kwds = {
@@ -455,6 +459,8 @@ def test_GraphCast_deploy(
 
     from physicsnemo.models.graphcast.graph_cast_net import GraphCastNet
 
+    fix_random_seeds()
+
     model_kwds = {
         "mesh_level": 1,
         "input_res": (res_h, res_w),
@@ -493,6 +499,8 @@ def test_GraphCast_deploy_te(
 
     from physicsnemo.models.graphcast.graph_cast_net import GraphCastNet
 
+    fix_random_seeds()
+
     device = "cuda:0"
 
     model_kwds = {
diff --git a/test/models/meshgraphnet/test_meshgraphnet.py b/test/models/meshgraphnet/test_meshgraphnet.py
index 7e9006abac..709ef9ca0c 100644
--- a/test/models/meshgraphnet/test_meshgraphnet.py
+++ b/test/models/meshgraphnet/test_meshgraphnet.py
@@ -37,6 +37,9 @@ def test_meshgraphnet_forward(device, pytestconfig, set_physicsnemo_force_te):
     torch.manual_seed(0)
     np.random.seed(0)
+    import random
+
+    random.seed(0)
     # Construct MGN model
     model = MeshGraphNet(
         input_dim_nodes=4,
@@ -73,6 +76,9 @@ def test_mehsgraphnet_constructor(device, pytestconfig, set_physicsnemo_force_te
     """Test mehsgraphnet constructor options"""
     import torch_geometric as pyg
 
+    random.seed(0)
+    torch.manual_seed(0)
+
     # Define dictionary of constructor args
     arg_list = [
         {
@@ -136,6 +142,8 @@ def test_meshgraphnet_optims(device, pytestconfig, set_physicsnemo_force_te):
 
     def setup_model():
         """Set up fresh model and inputs for each optim test"""
+        torch.manual_seed(0)
+        random.seed(0)
         # Construct MGN model
         model = MeshGraphNet(
             input_dim_nodes=2,
@@ -219,6 +227,9 @@ def test_meshgraphnet_deploy(device, pytestconfig, set_physicsnemo_force_te):
 
     from physicsnemo.models.meshgraphnet import MeshGraphNet
 
+    torch.manual_seed(0)
+    random.seed(0)
+
     # Construct MGN model
     model = MeshGraphNet(
         input_dim_nodes=4,
diff --git a/test/nn/test_kan_layers.py b/test/nn/test_kan_layers.py
index 1919ec8bd0..aa2d7a3c7b 100644
--- a/test/nn/test_kan_layers.py
+++ b/test/nn/test_kan_layers.py
@@ -33,6 +33,7 @@ def test_kan_initialization(device):
 
 @pytest.mark.parametrize("bias_flag", [True, False])
 def test_kan_forward_pass(device, bias_flag):
+    torch.manual_seed(0)
     batch, in_dim, out_dim = 8, 5, 2
     kan = KolmogorovArnoldNetwork(
         in_dim, out_dim, num_harmonics=4, add_bias=bias_flag
diff --git a/test/nn/test_layer_norm.py b/test/nn/test_layer_norm.py
index afc205e3b1..19d22ef213 100644
--- a/test/nn/test_layer_norm.py
+++ b/test/nn/test_layer_norm.py
@@ -157,6 +157,7 @@ def forward(self, x):
     ln = FakeModel().cuda()
     print(ln.state_dict().keys())
 
+    torch.manual_seed(0)
     x = torch.randn(2, 8).cuda()
     y = ln(x)
     print(f"Y shape: {y.shape}")
diff --git a/test/nn/test_layers_activations.py b/test/nn/test_layers_activations.py
index 9c6d24fffc..11abecfdd5 100644
--- a/test/nn/test_layers_activations.py
+++ b/test/nn/test_layers_activations.py
@@ -31,6 +31,8 @@ def test_activation_identity(device):
     """Test identity function in physicsnemo.nn"""
 
+    torch.manual_seed(0)
+    random.seed(0)
     func = Identity().to(device)
     # Random tensor of random size
     tensor_dim = random.randint(1, 5)
@@ -43,6 +45,8 @@ def test_activation_identity(device):
 def test_activation_stan(device):
     """Test Stan function in physicsnemo.nn"""
 
+    torch.manual_seed(0)
+    random.seed(0)
     func = Stan(out_features=2).to(device)
     # Doc string example handles accuracy
     bsize = random.randint(1, 8)
@@ -64,6 +68,8 @@ def test_activation_stan(device):
 def test_activation_squareplus(device):
     """Test square plus function in physicsnemo.nn"""
 
+    torch.manual_seed(0)
+    random.seed(0)
     func = SquarePlus().to(device)
     func.b = 0
     # Ones tensor of random size
@@ -77,6 +83,8 @@ def test_activation_squareplus(device):
 def test_activation_capped_leaky_relu(device):
     """Test capped_gelu function in physicsnemo.nn"""
 
+    torch.manual_seed(0)
+    random.seed(0)
     func = CappedLeakyReLU(cap_value=1.0).to(device)
     leaky_relu_func = torch.nn.LeakyReLU()
@@ -101,6 +109,8 @@ def test_activation_capped_leaky_relu(device):
 def test_activation_capped_gelu(device):
     """Test capped_gelu function in physicsnemo.nn"""
 
+    torch.manual_seed(0)
+    random.seed(0)
     func = CappedGELU(cap_value=1.0).to(device)
     gelu_func = torch.nn.GELU()
diff --git a/test/nn/test_layers_dgm.py b/test/nn/test_layers_dgm.py
index 017fb6e7cb..9f5bf8efd8 100644
--- a/test/nn/test_layers_dgm.py
+++ b/test/nn/test_layers_dgm.py
@@ -33,6 +33,7 @@ def test_dgm_layer_forward_pass(device):
 
 def test_dgm_layer_parameters_update(device):
+    torch.manual_seed(0)
     input_tensor_1 = torch.Tensor([[1, 1]]).to(device)
     input_tensor_2 = torch.Tensor([[2, 2]]).to(device)
     layer = DGMLayer(2, 2, 2).to(device)
diff --git a/test/nn/test_nd_conv_layers.py b/test/nn/test_nd_conv_layers.py
index b68c3b95a6..0d885a1a75 100644
--- a/test/nn/test_nd_conv_layers.py
+++ b/test/nn/test_nd_conv_layers.py
@@ -228,6 +228,8 @@ def forward(self, x):
 def test_conv_nd(device, dimension):
     """compare output of ConvNdKernel1Layer with that of layer for specfic n_dim"""
 
+    torch.manual_seed(0)
+    random.seed(0)
     bsize = 2
     in_channels = 4
     out_channels = 2
diff --git a/test/utils/test_mesh_utils.py b/test/utils/test_mesh_utils.py
index b6e6a87fc5..d40e509ae4 100644
--- a/test/utils/test_mesh_utils.py
+++ b/test/utils/test_mesh_utils.py
@@ -73,6 +73,7 @@ def _create_random_vtp_mesh(num_points: int, num_triangles: int, dir: str) -> tu
     """
     import random
+
     random.seed(0)
 
     # make directory if it does not exist

From 56fd60aa71979f0a1e62348377b2cfdee5cb2e35 Mon Sep 17 00:00:00 2001
From: Peter Sharpe
Date: Tue, 6 Jan 2026 14:57:51 -0500
Subject: [PATCH 09/11] Refactors with Kelvin's better fix, which seeds every
 test automatically.

---
 test/active_learning/test_checkpointing.py     |  4 ---
 test/active_learning/test_loop.py              |  3 ---
 test/conftest.py                               | 25 +++++++++++++++++++
 test/core/test_from_torch.py                   |  6 -----
 test/datapipes/test_domino_datapipe.py         |  4 ---
 test/datapipes/test_mesh_datapipe.py           |  5 +---
 test/domain_parallel/test_initialization.py    |  3 ---
 test/domain_parallel/test_reductions.py        |  1 -
 .../mesh/io/io_pyvista/test_error_handling.py  |  1 -
 test/mesh/primitives/test_procedural_noise.py  |  4 ---
 test/mesh/utilities/test_cache.py              |  4 ---
 test/mesh/utilities/test_scatter_ops.py        |  1 -
 test/metrics/test_metrics_climate.py           |  1 -
 test/metrics/test_metrics_general.py           | 10 --------
 .../dlwp_healpix/test_healpix_unet_model.py    |  4 ---
 test/models/graphcast/test_graphcast.py        |  8 ------
 test/models/meshgraphnet/test_meshgraphnet.py  | 16 ------------
 test/nn/test_kan_layers.py                     |  1 -
 test/nn/test_layer_norm.py                     |  1 -
 test/nn/test_layers_activations.py             | 10 --------
 test/nn/test_layers_dgm.py                     |  1 -
 test/nn/test_nd_conv_layers.py                 |  2 --
 test/utils/test_checkpoint.py                  |  2 --
 test/utils/test_mesh_utils.py                  |  4 ---
 24 files changed, 26 insertions(+), 95 deletions(-)

diff --git a/test/active_learning/test_checkpointing.py b/test/active_learning/test_checkpointing.py
index 5dbbe177d2..5df0fe9db7 100644
--- a/test/active_learning/test_checkpointing.py
+++ b/test/active_learning/test_checkpointing.py
@@ -235,7 +235,6 @@ def test_queue_serialization_with_to_list(
     )
 
     # Add items to queues
-    torch.manual_seed(0)
     test_data = MockDataStructure(inputs=torch.randn(16, 64))
     driver.query_queue.put(test_data)
     driver.label_queue.put(test_data)
@@ -279,7 +278,6 @@ def test_queue_serialization_fallback(
     )
 
     # Add item to queue
-    torch.manual_seed(0)
     test_data = MockDataStructure(inputs=torch.randn(16, 64))
     driver.query_queue.put(test_data)
@@ -619,7 +617,6 @@ def test_checkpoint_preserves_model_weights(
     driver_config, mock_module, strategies_config, temp_checkpoint_dir
 ):
     """Test that model weights are correctly saved and loaded."""
-    torch.manual_seed(0)
     driver = Driver(
         config=driver_config,
         learner=mock_module,
@@ -757,7 +754,6 @@ def test_resume_from_query_phase_skips_earlier_phases(
     # Create checkpoint at query phase
     driver.active_learning_step_idx = 1
     driver.current_phase = p.ActiveLearningPhase.QUERY
-    torch.manual_seed(0)
     driver.query_queue.put(MockDataStructure(inputs=torch.randn(16, 64)))
 
     checkpoint_path = temp_checkpoint_dir / "resume_query_test"
diff --git a/test/active_learning/test_loop.py b/test/active_learning/test_loop.py
index 40ec1d1ab0..4d17163d03 100644
--- a/test/active_learning/test_loop.py
+++ b/test/active_learning/test_loop.py
@@ -189,7 +189,6 @@ def test_call_without_train_step_raises_error(self, mock_module):
 
     def test_basic_training_loop_execution(self, mock_module):
         """Test basic execution of training loop with mocked components."""
-        torch.manual_seed(0)
         # Create a mock train step that returns a loss with backward method
         mock_loss = MagicMock()
         mock_loss.detach.return_value.item.return_value = 0.5
@@ -230,7 +229,6 @@ def mock_train_step(model, batch, *args, **kwargs):
 
     def test_training_with_validation(self, mock_module):
         """Test training loop with validation step."""
-        torch.manual_seed(0)
         # Create mock train step
         mock_loss = MagicMock()
         mock_loss.detach.return_value.item.return_value = 0.5
@@ -281,7 +279,6 @@ def mock_train_step(model, batch, *args, **kwargs):
 
     def test_training_with_lr_scheduler(self, mock_module):
         """Test training loop with learning rate scheduler."""
-        torch.manual_seed(0)
         mock_loss = MagicMock()
         mock_loss.detach.return_value.item.return_value = 0.5
         mock_loss.backward = MagicMock()
diff --git a/test/conftest.py b/test/conftest.py
index 60b9dfa9b4..ec03bfc6c6 100644
--- a/test/conftest.py
+++ b/test/conftest.py
@@ -17,8 +17,10 @@
 import importlib
 import os
 import pathlib
+import random
 from collections import defaultdict
 
+import numpy as np
 import pytest
 import torch
@@ -171,3 +173,26 @@ def requires_module(names):
 def device(request):
     """Device fixture that automatically skips CUDA tests when not available."""
     return request.param
+
+
+@pytest.fixture(autouse=True, scope="function")
+def seed_random_state():
+    """Reset all random number generators to a fixed seed before each test.
+
+    This ensures test reproducibility and isolation - each test starts with
+    identical RNG state regardless of test execution order or subset.
+
+    Tests that need a specific seed can still call torch.manual_seed() etc.
+    explicitly, which will override this fixture's seeding.
+    """
+    SEED = 95051
+
+    random.seed(SEED)
+    np.random.seed(SEED)
+    torch.manual_seed(SEED)
+
+    # CUDA seeding (no-op if CUDA unavailable)
+    if torch.cuda.is_available():
+        torch.cuda.manual_seed_all(SEED)
+
+    yield
diff --git a/test/core/test_from_torch.py b/test/core/test_from_torch.py
index f7a269e840..ec38a3e81d 100644
--- a/test/core/test_from_torch.py
+++ b/test/core/test_from_torch.py
@@ -87,8 +87,6 @@ def test_from_torch_optims(device):
     def setup_model():
         """Set up fresh model and inputs for each optim test"""
         # Construct CustomPhysicsNeMoModel
-        torch.manual_seed(0)
-        random.seed(0)
         CustomPhysicsNeMoModel = Module.from_torch(CustomModel, CustomMetaData())
         model = CustomPhysicsNeMoModel(in_features=32, out_features=8).to(device)
@@ -123,8 +121,6 @@ def setup_model():
 
 def test_from_torch_checkpoint(device):
     """Test checkpoint save/load from PyTorch"""
-    torch.manual_seed(0)
-    random.seed(0)
     # Construct CustomPhysicsNeMoModel
     CustomPhysicsNeMoModel = Module.from_torch(
         CustomModel, CustomMetaData(), register=True
     )
@@ -143,8 +139,6 @@
 @common.check_ort_version()
 def test_from_torch_deploy(device):
     """Test deployment support from PyTorch"""
-    torch.manual_seed(0)
-    random.seed(0)
     # Construct CustomPhysicsNeMoModel
     CustomPhysicsNeMoModel = Module.from_torch(CustomModel, CustomMetaData())
     model = CustomPhysicsNeMoModel(in_features=4, out_features=4).to(device)
diff --git a/test/datapipes/test_domino_datapipe.py b/test/datapipes/test_domino_datapipe.py
index f462cd05d6..039f16596e 100644
--- a/test/datapipes/test_domino_datapipe.py
+++ b/test/datapipes/test_domino_datapipe.py
@@ -96,10 +96,6 @@ def synthetic_domino_data(
 ):
     """Generate synthetic domino data and save to temporary directory structure
     using zarr."""
-    import numpy as np
-
-    np.random.seed(0)
-
     # Create temporary directory
     temp_dir = Path(tempfile.mkdtemp())
diff --git a/test/datapipes/test_mesh_datapipe.py b/test/datapipes/test_mesh_datapipe.py
index 4f52cab32c..86cf6eeac0 100644
--- a/test/datapipes/test_mesh_datapipe.py
+++ b/test/datapipes/test_mesh_datapipe.py
@@ -15,6 +15,7 @@
 # limitations under the License.
 
 import os
+import random
 
 import pytest
@@ -50,10 +51,6 @@ def _create_random_vtp_vtu_mesh(
         tuple: A tuple containing the random VTP mesh (vtk.vtkPolyData) and the
             random VTU mesh (vtk.vtkUnstructuredGrid).
     """
-    import random
-
-    random.seed(0)
-
     # make directory if it does not exist
     os.makedirs(dir, exist_ok=True)
diff --git a/test/domain_parallel/test_initialization.py b/test/domain_parallel/test_initialization.py
index 39bf5b8544..a62e53ac5b 100644
--- a/test/domain_parallel/test_initialization.py
+++ b/test/domain_parallel/test_initialization.py
@@ -161,9 +161,6 @@ def shard_tensor_initialization_from_local_chunks_worker(mesh):
 
     dm = DistributedManager()
 
-    random.seed(0)
-    torch.manual_seed(0)
-
     # Create a mesh right from the inputs:
     global_shape, placements = init_global_shape_and_placements(
         mesh,
diff --git a/test/domain_parallel/test_reductions.py b/test/domain_parallel/test_reductions.py
index 381461bf63..09fcb1ee9b 100644
--- a/test/domain_parallel/test_reductions.py
+++ b/test/domain_parallel/test_reductions.py
@@ -95,7 +95,6 @@ def test_shard_tensor_reduction(
 ):
     dm = DistributedManager()
 
-    torch.manual_seed(0)
     # Create a random-valued tensor of at least rank 3:
     full_input = torch.randn(2, 128, 2, requires_grad=backward).to(dm.device)
diff --git a/test/mesh/io/io_pyvista/test_error_handling.py b/test/mesh/io/io_pyvista/test_error_handling.py
index 7a86f7f44a..83c6a46ac6 100644
--- a/test/mesh/io/io_pyvista/test_error_handling.py
+++ b/test/mesh/io/io_pyvista/test_error_handling.py
@@ -214,7 +214,6 @@ class TestToPyvistaErrors:
     def test_unsupported_manifold_dims_raises(self):
         """Test that unsupported manifold dimensions raise ValueError."""
         # Create a mesh with 4 manifold dims (not supported by PyVista)
-        torch.manual_seed(0)
         points = torch.randn(10, 5)  # 5D spatial
         cells = torch.randint(0, 10, (5, 5))  # 4-manifold (5 vertices per cell)
         mesh = Mesh(points=points, cells=cells)
diff --git a/test/mesh/primitives/test_procedural_noise.py b/test/mesh/primitives/test_procedural_noise.py
index 681ad9684b..fb1d8d29c9 100644
--- a/test/mesh/primitives/test_procedural_noise.py
+++ b/test/mesh/primitives/test_procedural_noise.py
@@ -34,7 +34,6 @@ class TestPerlinNoiseND:
     def test_dimension_agnostic(self, n_dims):
         """Test that Perlin noise works for any number of dimensions."""
         n_points = 50
-        torch.manual_seed(0)
         points = torch.randn(n_points, n_dims)
 
         noise = perlin_noise_nd(points, scale=1.0, seed=42)
@@ -50,7 +51,6 @@ def test_dimension_agnostic(self, n_dims):
     @pytest.mark.parametrize("seed", [0, 42, 123, 999])
     def test_reproducibility(self, seed):
         """Test that same seed produces same output."""
-        torch.manual_seed(0)
         points = torch.randn(100, 3)
 
         noise1 = perlin_noise_nd(points, scale=1.0, seed=seed)
@@ -61,7 +59,6 @@ def test_reproducibility(self, seed):
 
     def test_different_seeds_produce_different_output(self):
         """Test that different seeds produce different noise patterns."""
-        torch.manual_seed(0)
         points = torch.randn(100, 3)
 
         noise1 = perlin_noise_nd(points, scale=1.0, seed=42)
@@ -76,7 +73,6 @@ def test_different_seeds_produce_different_output(self):
     @pytest.mark.parametrize("scale", [0.1, 0.5, 1.0, 2.0, 5.0])
     def test_scale_parameter(self, scale):
         """Test that scale parameter affects noise frequency."""
-        torch.manual_seed(0)
         points = torch.randn(100, 3)
 
         noise = perlin_noise_nd(points, scale=scale, seed=42)
diff --git a/test/mesh/utilities/test_cache.py b/test/mesh/utilities/test_cache.py
index 5f9ba89ff6..9225969fda 100644
--- a/test/mesh/utilities/test_cache.py
+++ b/test/mesh/utilities/test_cache.py
@@ -242,7 +242,6 @@ class TestCacheDtypes:
     def test_cache_various_dtypes(self, dtype):
         """Test caching with various dtypes."""
         data = TensorDict({}, batch_size=[10])
-        torch.manual_seed(0)
         if dtype in [torch.float32, torch.float64]:
             value = torch.randn(10, dtype=dtype)
         else:
@@ -268,7 +267,6 @@ def test_cell_data_cache_pattern(self):
         assert cached_areas is None
 
         # Compute and cache
-        torch.manual_seed(0)
         computed_areas = torch.randn(100)
         set_cached(cell_data, "areas", computed_areas)
@@ -283,7 +281,6 @@ def test_point_data_cache_pattern(self):
         point_data = TensorDict({}, batch_size=[500])
 
         # Cache point normals
-        torch.manual_seed(0)
         normals = torch.randn(500, 3)
         set_cached(point_data, "normals", normals)
@@ -297,7 +294,6 @@ def test_multiple_caches_pattern(self):
         cell_data = TensorDict({}, batch_size=[100])
 
         # Cache multiple properties
-        torch.manual_seed(0)
         set_cached(cell_data, "centroids", torch.randn(100, 3))
         set_cached(cell_data, "areas", torch.randn(100))
         set_cached(cell_data, "normals", torch.randn(100, 3))
diff --git a/test/mesh/utilities/test_scatter_ops.py b/test/mesh/utilities/test_scatter_ops.py
index 6be139de00..0915a2e150 100644
--- a/test/mesh/utilities/test_scatter_ops.py
+++ b/test/mesh/utilities/test_scatter_ops.py
@@ -309,7 +309,6 @@ def test_all_aggregation_modes(self, aggregation):
     @pytest.mark.parametrize("n_dst", [1, 2, 5, 10])
     def test_various_n_dst(self, n_dst):
         """Test with various destination counts."""
-        torch.manual_seed(0)
         src_data = torch.randn(20)
         src_to_dst = torch.randint(0, n_dst, (20,))
diff --git a/test/metrics/test_metrics_climate.py b/test/metrics/test_metrics_climate.py
index 6220036be9..1ccb359d2c 100644
--- a/test/metrics/test_metrics_climate.py
+++ b/test/metrics/test_metrics_climate.py
@@ -247,7 +247,6 @@ def test_climate_reductions(test_data, device, rtol: float = 1e-3, atol: float =
 
 def test_climate_efi(test_data, device, rtol: float = 1e-1, atol: float = 1e-1):
-    torch.manual_seed(0)
     one = torch.ones((1, 1), dtype=torch.float32, device=device)
     bin_edges = hist.linspace(-10 * one, 10 * one, 30)
     bin_mids = 0.5 * bin_edges[1:] + 0.5 * bin_edges[:-1]
diff --git a/test/metrics/test_metrics_general.py b/test/metrics/test_metrics_general.py
index 63d37e0433..6e9559c9a1 100644
--- a/test/metrics/test_metrics_general.py
+++ b/test/metrics/test_metrics_general.py
@@ -66,8 +66,6 @@ def get_disagreements(inputs, bins, counts, test):
 @pytest.mark.parametrize("input_shape", [(1, 72, 144)])
 def test_histogram(device, input_shape, rtol: float = 1e-3, atol: float = 1e-3):
-    torch.manual_seed(0)
-    np.random.seed(0)
     x = torch.randn([10, *input_shape], device=device)
     y = torch.randn([5, *input_shape], device=device)
@@ -226,7 +224,6 @@ def fair_crps(pred, obs, dim=-1):
 
 def test_fair_crps_greater_than_zero(device):
-    torch.manual_seed(0)
     pred = torch.randn(5, 10, device=device)
     obs = torch.tensor([0.0, 0.0, 0.0, 0.0, 0.0], device=device)
     assert torch.all(fair_crps(pred, obs, dim=-1) > 0)
@@ -236,7 +233,6 @@ def test_fair_crps_is_fair(device):
     # fair means that a random prediction should outperform a non-random one on average
     # This is not always true of ``crps``...try replacing fair_crps function
     # below with ``crps``
-    torch.manual_seed(0)
     n = 256
     random_pred = torch.randn(n, 2, device=device)
     cheating_pred = torch.zeros((n, 2), device=device)
@@ -345,6 +341,7 @@ def test_crps_finite(device, num, biased):
 
 def test_crps(device, rtol: float = 1e-3, atol: float = 1e-3):
     # Uses eq (5) from Gneiting et al. https://doi.org/10.1175/MWR2904.1
     # crps(N(0, 1), 0.0) = 2 / sqrt(2*pi) - 1/sqrt(pi) ~= 0.23...
-    torch.manual_seed(0)
     x = torch.randn((1_000_000, 1), device=device, dtype=torch.float32)
     y = torch.zeros((1,), device=device, dtype=torch.float32)
@@ -531,7 +526,6 @@ def test_crps(device, rtol: float = 1e-3, atol: float = 1e-3):
 @pytest.mark.parametrize("mean", [3.0])
 @pytest.mark.parametrize("variance", [0.1])
 def test_wasserstein(device, mean, variance, rtol: float = 1e-3, atol: float = 1e-3):
-    torch.manual_seed(0)
     mean = torch.as_tensor([mean], device=device, dtype=torch.float32)
     variance = torch.as_tensor([variance], device=device, dtype=torch.float32)
@@ -602,7 +596,6 @@ def test_means_var(device, rtol: float = 1e-3, atol: float = 1e-3):
     if not torch.cuda.is_available():
         pytest.skip("CUDA required for this test.")
 
-    torch.manual_seed(0)
     DistributedManager._shared_state = {}
     if (device == "cuda:0") and (not DistributedManager.is_initialized()):
         os.environ["MASTER_ADDR"] = "localhost"
@@ -704,8 +697,6 @@ def test_means_var(device, rtol: float = 1e-3, atol: float = 1e-3):
 
 def test_calibration(device, rtol: float = 1e-2, atol: float = 1e-2):
-    torch.manual_seed(0)
-    np.random.seed(0)
     x = torch.randn((10_000, 30, 30), device=device, dtype=torch.float32)
     y = torch.randn((30, 30), device=device, dtype=torch.float32)
@@ -775,7 +766,6 @@ def test_calibration(device, rtol: float = 1e-2, atol: float = 1e-2):
 
 def test_entropy(device, rtol: float = 1e-2, atol: float = 1e-2):
-    torch.manual_seed(0)
     one = torch.ones([1], device=device, dtype=torch.float32)
 
     x = torch.randn((50_000, 10, 10), device=device, dtype=torch.float32)
diff --git a/test/models/dlwp_healpix/test_healpix_unet_model.py b/test/models/dlwp_healpix/test_healpix_unet_model.py
index fb0ac5d2c1..50c4651307 100644
--- a/test/models/dlwp_healpix/test_healpix_unet_model.py
+++ b/test/models/dlwp_healpix/test_healpix_unet_model.py
@@ -90,7 +90,6 @@ def test_data():
     def generate_test_data(
         batch_size=8, time_dim=1, channels=7, img_size=16, device="cpu"
     ):
-        torch.manual_seed(42)
         test_data = torch.randn(batch_size, 12, time_dim, channels, img_size, img_size)
 
         return test_data.to(device)
@@ -102,7 +101,6 @@ def generate_test_data(
 def constant_data():
     # create dummy data
     def generate_constant_data(channels=2, img_size=16, device="cpu"):
-        torch.manual_seed(42)
         constants = torch.randn(12, channels, img_size, img_size)
 
         return constants.to(device)
@@ -114,7 +112,6 @@ def generate_constant_data(channels=2, img_size=16, device="cpu"):
 def insolation_data():
     # create dummy data
     def generate_insolation_data(batch_size=8, time_dim=1, img_size=16, device="cpu"):
-        torch.manual_seed(42)
         insolation = torch.randn(batch_size, 12, time_dim, 1, img_size, img_size)
 
         return insolation.to(device)
@@ -235,7 +232,6 @@ def test_HEALPixUNet_initialize(
 def test_HEALPixUNet_integration_steps(
     device, unet_encoder_dict, unet_decoder_dict, pytestconfig
 ):
-    fix_random_seeds(seed=42)
     in_channels = 2
     out_channels = 2
     n_constants = 1
diff --git a/test/models/graphcast/test_graphcast.py b/test/models/graphcast/test_graphcast.py
index 577495a2e3..20fefb0ad1 100644
--- a/test/models/graphcast/test_graphcast.py
+++ b/test/models/graphcast/test_graphcast.py
@@ -364,8 +364,6 @@ def test_graphcast_checkpoint(
 
     from physicsnemo.models.graphcast.graph_cast_net import GraphCastNet
 
-    fix_random_seeds()
-
     model_kwds = {
         "mesh_level": 1,
         "input_res": (res_h, res_w),
@@ -407,8 +405,6 @@ def test_graphcast_checkpoint_te(
 
     from physicsnemo.models.graphcast.graph_cast_net import GraphCastNet
 
-    fix_random_seeds()
-
     device = "cuda:0"
 
     model_kwds = {
@@ -459,8 +455,6 @@ def test_GraphCast_deploy(
 
     from physicsnemo.models.graphcast.graph_cast_net import GraphCastNet
 
-    fix_random_seeds()
-
     model_kwds = {
         "mesh_level": 1,
         "input_res": (res_h, res_w),
@@ -499,8 +493,6 @@ def test_GraphCast_deploy_te(
 
     from physicsnemo.models.graphcast.graph_cast_net import GraphCastNet
 
-    fix_random_seeds()
-
     device = "cuda:0"
 
     model_kwds = {
diff --git a/test/models/meshgraphnet/test_meshgraphnet.py b/test/models/meshgraphnet/test_meshgraphnet.py
index 709ef9ca0c..17c299d3f7 100644
--- a/test/models/meshgraphnet/test_meshgraphnet.py
+++ b/test/models/meshgraphnet/test_meshgraphnet.py
@@ -37,9 +37,6 @@ def test_meshgraphnet_forward(device, pytestconfig, set_physicsnemo_force_te):
     torch.manual_seed(0)
     np.random.seed(0)
-    import random
-
-    random.seed(0)
     # Construct MGN model
     model = MeshGraphNet(
         input_dim_nodes=4,
@@ -76,9 +73,6 @@ def test_mehsgraphnet_constructor(device, pytestconfig, set_physicsnemo_force_te
     """Test mehsgraphnet constructor options"""
     import torch_geometric as pyg
 
-    random.seed(0)
-    torch.manual_seed(0)
-
     # Define dictionary of constructor args
     arg_list = [
         {
@@ -142,8 +136,6 @@ def test_meshgraphnet_optims(device, pytestconfig, set_physicsnemo_force_te):
 
     def setup_model():
         """Set up fresh model and inputs for each optim test"""
-        torch.manual_seed(0)
-        random.seed(0)
         # Construct MGN model
         model = MeshGraphNet(
             input_dim_nodes=2,
@@ -182,11 +174,6 @@ def test_meshgraphnet_checkpoint(device, pytestconfig, set_physicsnemo_force_te)
 
     from physicsnemo.models.meshgraphnet import MeshGraphNet
 
-    # Seed for reproducibility
-    torch.manual_seed(0)
-    np.random.seed(0)
-    random.seed(0)
-
     # Construct MGN model
     model_1 = MeshGraphNet(
         input_dim_nodes=4,
@@ -227,9 +214,6 @@ def test_meshgraphnet_deploy(device, pytestconfig, set_physicsnemo_force_te):
 
     from physicsnemo.models.meshgraphnet import MeshGraphNet
 
-    torch.manual_seed(0)
-    random.seed(0)
-
     # Construct MGN model
     model = MeshGraphNet(
         input_dim_nodes=4,
diff --git a/test/nn/test_kan_layers.py b/test/nn/test_kan_layers.py
index aa2d7a3c7b..1919ec8bd0 100644
--- a/test/nn/test_kan_layers.py
+++ b/test/nn/test_kan_layers.py
@@ -33,7 +33,6 @@ def test_kan_initialization(device):
 
 @pytest.mark.parametrize("bias_flag", [True, False])
 def test_kan_forward_pass(device, bias_flag):
-    torch.manual_seed(0)
     batch, in_dim, out_dim = 8, 5, 2
     kan = KolmogorovArnoldNetwork(
         in_dim, out_dim, num_harmonics=4, add_bias=bias_flag
diff --git a/test/nn/test_layer_norm.py b/test/nn/test_layer_norm.py
index 19d22ef213..afc205e3b1 100644
--- a/test/nn/test_layer_norm.py
+++ b/test/nn/test_layer_norm.py
@@ -157,7 +157,6 @@ def forward(self, x):
     ln = FakeModel().cuda()
     print(ln.state_dict().keys())
 
-    torch.manual_seed(0)
     x = torch.randn(2, 8).cuda()
     y = ln(x)
     print(f"Y shape: {y.shape}")
diff --git a/test/nn/test_layers_activations.py b/test/nn/test_layers_activations.py
index 11abecfdd5..9c6d24fffc 100644
--- a/test/nn/test_layers_activations.py
+++ b/test/nn/test_layers_activations.py
@@ -31,8 +31,6 @@ def test_activation_identity(device):
     """Test identity function in physicsnemo.nn"""
 
-    torch.manual_seed(0)
-    random.seed(0)
     func = Identity().to(device)
     # Random tensor of random size
     tensor_dim = random.randint(1, 5)
@@ -45,8 +43,6 @@ def test_activation_identity(device):
 def test_activation_stan(device):
     """Test Stan function in physicsnemo.nn"""
 
-    torch.manual_seed(0)
-    random.seed(0)
     func = Stan(out_features=2).to(device)
     # Doc string example handles accuracy
     bsize = random.randint(1, 8)
@@ -68,8 +64,6 @@ def test_activation_stan(device):
 def test_activation_squareplus(device):
     """Test square plus function in physicsnemo.nn"""
 
-    torch.manual_seed(0)
-    random.seed(0)
     func = SquarePlus().to(device)
     func.b = 0
     # Ones tensor of random size
@@ -83,8 +77,6 @@ def test_activation_squareplus(device):
 def test_activation_capped_leaky_relu(device):
     """Test capped_gelu function in physicsnemo.nn"""
 
-    torch.manual_seed(0)
-    random.seed(0)
     func = CappedLeakyReLU(cap_value=1.0).to(device)
     leaky_relu_func = torch.nn.LeakyReLU()
@@ -109,8 +101,6 @@ def test_activation_capped_leaky_relu(device):
 def test_activation_capped_gelu(device):
     """Test capped_gelu function in physicsnemo.nn"""
 
-    torch.manual_seed(0)
-    random.seed(0)
     func = CappedGELU(cap_value=1.0).to(device)
     gelu_func = torch.nn.GELU()
diff --git a/test/nn/test_layers_dgm.py b/test/nn/test_layers_dgm.py
index 9f5bf8efd8..017fb6e7cb 100644
--- a/test/nn/test_layers_dgm.py
+++ b/test/nn/test_layers_dgm.py
@@ -33,7 +33,6 @@ def test_dgm_layer_forward_pass(device):
 
 def test_dgm_layer_parameters_update(device):
-    torch.manual_seed(0)
     input_tensor_1 = torch.Tensor([[1, 1]]).to(device)
     input_tensor_2 = torch.Tensor([[2, 2]]).to(device)
     layer = DGMLayer(2, 2, 2).to(device)
diff --git a/test/nn/test_nd_conv_layers.py b/test/nn/test_nd_conv_layers.py
index 0d885a1a75..b68c3b95a6 100644
--- a/test/nn/test_nd_conv_layers.py
+++ b/test/nn/test_nd_conv_layers.py
@@ -228,8 +228,6 @@ def forward(self, x):
 def test_conv_nd(device, dimension):
     """compare output of ConvNdKernel1Layer with that of layer for specfic n_dim"""
 
-    torch.manual_seed(0)
-    random.seed(0)
     bsize = 2
     in_channels = 4
     out_channels = 2
diff --git a/test/utils/test_checkpoint.py b/test/utils/test_checkpoint.py
index fcfa29e054..0136334ed4 100644
--- a/test/utils/test_checkpoint.py
+++ b/test/utils/test_checkpoint.py
@@ -100,7 +100,6 @@ def test_model_checkpointing(
     # Initialize DistributedManager first since save_checkpoint instantiates it
     DistributedManager.initialize()
 
-    torch.manual_seed(0)
     mlp_model_1 = model_generator(8).to(device)
     mlp_model_2 = model_generator(4).to(device)
@@ -188,7 +187,6 @@ def test_compiled_model_checkpointing(
 
     from physicsnemo.utils import load_checkpoint, save_checkpoint
 
-    torch.manual_seed(0)
     # Create and compile a simple model
     in_feats = 4
     base_model = FullyConnected(
diff --git a/test/utils/test_mesh_utils.py b/test/utils/test_mesh_utils.py
index d40e509ae4..9abae4b4e8 100644
--- a/test/utils/test_mesh_utils.py
+++ b/test/utils/test_mesh_utils.py
@@ -72,10 +72,6 @@ def _create_random_vtp_mesh(num_points: int, num_triangles: int, dir: str) -> tu
         tuple: A tuple containing the random VTP mesh (vtk.vtkPolyData).
     """
-    import random
-
-    random.seed(0)
-
     # make directory if it does not exist
     os.makedirs(dir, exist_ok=True)

From b8ffd7602089710ff9ce8632d77ca2f3b9db7444 Mon Sep 17 00:00:00 2001
From: Peter Sharpe
Date: Tue, 6 Jan 2026 16:48:17 -0500
Subject: [PATCH 10/11] Remove np.random.seed(0) calls from various test files
 to ensure deterministic behavior is handled globally, improving test
 reliability.
---
 .../io/io_pyvista/test_data_array_shapes.py | 5 -----
 .../io/io_pyvista/test_data_preservation.py | 3 ---
 test/mesh/io/io_pyvista/test_from_pyvista_0d.py | 3 ---
 test/mesh/io/io_pyvista/test_from_pyvista_1d.py | 1 -
 test/mesh/io/io_pyvista/test_round_trip.py | 3 ---
 test/mesh/io/io_pyvista/test_to_pyvista.py | 4 ----
 test/mesh/visualization/test_visualization.py | 17 -----------------
 7 files changed, 36 deletions(-)

diff --git a/test/mesh/io/io_pyvista/test_data_array_shapes.py b/test/mesh/io/io_pyvista/test_data_array_shapes.py
index 504f8b9a52..937b3a2336 100644
--- a/test/mesh/io/io_pyvista/test_data_array_shapes.py
+++ b/test/mesh/io/io_pyvista/test_data_array_shapes.py
@@ -30,7 +30,6 @@ class TestDataArrayShapes:
 
     def test_scalar_data(self):
         """Test scalar data (1D array per point/cell)."""
-        np.random.seed(0)
         pv_mesh = pv.Sphere(radius=1.0, theta_resolution=10, phi_resolution=10)
 
         # Add scalar data
@@ -53,7 +52,6 @@ def test_scalar_data(self):
 
     def test_vector_data(self):
         """Test vector data (Nx3 arrays)."""
-        np.random.seed(0)
         pv_mesh = pv.Sphere(radius=1.0, theta_resolution=10, phi_resolution=10)
 
         # Add vector data
@@ -81,7 +79,6 @@ def test_matrix_data(self):
         For higher-dimensional data like 3x3 stress tensors, you must
         flatten them to (n, 9) before adding to PyVista.
         """
-        np.random.seed(0)
         pv_mesh = pv.Sphere(radius=1.0, theta_resolution=10, phi_resolution=10)
 
         # For tensor data, must be pre-flattened to 2D
@@ -113,7 +110,6 @@ def test_large_2d_array_data(self):
         NOTE: PyVista only accepts arrays with dimensionality ≤ 2.
         Higher-order tensors must be pre-flattened before adding to PyVista.
         """
-        np.random.seed(0)
         pv_mesh = pv.Sphere(radius=1.0, theta_resolution=10, phi_resolution=10)
 
         # For higher-dimensional data, flatten to 2D before adding to PyVista
@@ -137,7 +133,6 @@ def test_large_2d_array_data(self):
 
     def test_mixed_data_types(self):
         """Test mesh with multiple data arrays of different shapes and types."""
-        np.random.seed(0)
         pv_mesh = pv.Sphere(radius=1.0, theta_resolution=10, phi_resolution=10)
 
         # Clear default data to have a clean slate
diff --git a/test/mesh/io/io_pyvista/test_data_preservation.py b/test/mesh/io/io_pyvista/test_data_preservation.py
index 8c0d1f8b6c..996c38fb83 100644
--- a/test/mesh/io/io_pyvista/test_data_preservation.py
+++ b/test/mesh/io/io_pyvista/test_data_preservation.py
@@ -49,7 +49,6 @@ class TestDataPreservation:
 
     def test_point_data_preserved(self):
         """Test that point_data is preserved during conversion."""
-        np.random.seed(0)
         pv_mesh = pv.Sphere()
 
         # Explicitly create point data
@@ -78,7 +77,6 @@ def test_point_data_preserved(self):
 
     def test_cell_data_preserved(self):
         """Test that cell_data is preserved as cell_data."""
-        np.random.seed(0)
         pv_mesh = pv.Sphere()
 
         # Explicitly create cell data
@@ -161,7 +159,6 @@ class TestDataPreservationParametrized:
 
     def test_data_preservation_with_device_transfer(self, device):
         """Test that data is preserved when transferring to different device."""
-        np.random.seed(42)
         pv_mesh = pv.Sphere(theta_resolution=5, phi_resolution=5)
         pv_mesh.point_data["temp"] = np.random.rand(pv_mesh.n_points).astype(np.float32)
         pv_mesh.cell_data["pressure"] = np.random.rand(pv_mesh.n_cells).astype(
diff --git a/test/mesh/io/io_pyvista/test_from_pyvista_0d.py b/test/mesh/io/io_pyvista/test_from_pyvista_0d.py
index f49d5fa575..ed37d7de7f 100644
--- a/test/mesh/io/io_pyvista/test_from_pyvista_0d.py
+++ b/test/mesh/io/io_pyvista/test_from_pyvista_0d.py
@@ -30,7 +30,6 @@ class TestFromPyvista0D:
 
     def test_pointset_auto_detection(self):
         """Test automatic detection of 0D manifold from PointSet."""
-        np.random.seed(0)
         points = np.random.rand(100, 3).astype(np.float32)
         pv_mesh = pv.PointSet(points)
 
@@ -50,7 +49,6 @@ def test_pointset_auto_detection(self):
 
     def test_pointset_explicit_dim(self):
         """Test explicit manifold_dim specification for point cloud."""
-        np.random.seed(0)
         points = np.random.rand(50, 3).astype(np.float32)
         pv_mesh = pv.PointSet(points)
 
@@ -65,7 +63,6 @@ def test_polydata_points_only(self):
 
         PolyData can represent point clouds using vertex cells.
         """
-        np.random.seed(0)
         points = np.random.rand(25, 3).astype(np.float32)
         pv_mesh = pv.PolyData(points)
 
diff --git a/test/mesh/io/io_pyvista/test_from_pyvista_1d.py b/test/mesh/io/io_pyvista/test_from_pyvista_1d.py
index 94fd5881d0..bdba5f5999 100644
--- a/test/mesh/io/io_pyvista/test_from_pyvista_1d.py
+++ b/test/mesh/io/io_pyvista/test_from_pyvista_1d.py
@@ -105,7 +105,6 @@ def test_spline_constructed(self):
 
         Create a spline through specific points and verify it converts correctly.
         """
-        np.random.seed(0)
         # Create control points for the spline
         control_points = np.array(
             [
diff --git a/test/mesh/io/io_pyvista/test_round_trip.py b/test/mesh/io/io_pyvista/test_round_trip.py
index c8654d810c..4ec5d7bce9 100644
--- a/test/mesh/io/io_pyvista/test_round_trip.py
+++ b/test/mesh/io/io_pyvista/test_round_trip.py
@@ -94,7 +94,6 @@ def test_round_trip_1d_spline(self):
 
     def test_round_trip_0d_pointset(self):
         """Test round-trip conversion for 0D mesh."""
-        np.random.seed(0)
         points_orig = np.random.rand(25, 3).astype(np.float32)
         pv_original = pv.PointSet(points_orig)
 
@@ -108,7 +107,6 @@ def test_round_trip_0d_pointset(self):
 
     def test_round_trip_with_data(self):
         """Test round-trip conversion preserves data arrays."""
-        np.random.seed(0)
         pv_original = pv.Sphere(theta_resolution=10, phi_resolution=10)
         pv_original.clear_data()
 
@@ -225,7 +223,6 @@ def test_round_trip_spline_device_parametrized(self, device):
 
     def test_device_transfer_preserves_data(self, device):
         """Test that device transfer preserves all data."""
-        np.random.seed(42)
         # Create mesh with data
         pv_mesh = pv.Sphere(theta_resolution=5, phi_resolution=5)
         pv_mesh.point_data["temp"] = np.random.rand(pv_mesh.n_points).astype(np.float32)
diff --git a/test/mesh/io/io_pyvista/test_to_pyvista.py b/test/mesh/io/io_pyvista/test_to_pyvista.py
index a4f4784fa5..b6b66f6fb2 100644
--- a/test/mesh/io/io_pyvista/test_to_pyvista.py
+++ b/test/mesh/io/io_pyvista/test_to_pyvista.py
@@ -102,7 +102,6 @@ def test_1d_mesh_to_polydata(self):
 
     def test_0d_mesh_to_pointset(self):
         """Test converting 0D mesh to PointSet."""
-        np.random.seed(0)
         points = torch.from_numpy(np.random.rand(50, 3).astype(np.float32))
         cells = torch.empty((0, 1), dtype=torch.long)
 
@@ -116,9 +115,6 @@ def test_0d_mesh_to_pointset(self):
 
     def test_data_preservation_to_pyvista(self):
         """Test that point_data, cell_data, and global_data are preserved."""
-        np.random.seed(0)
-        torch.manual_seed(42)
-
         # Create a mesh with data
         points = torch.rand(10, 3)
         cells = torch.tensor([[0, 1, 2], [2, 3, 4]], dtype=torch.long)
diff --git a/test/mesh/visualization/test_visualization.py b/test/mesh/visualization/test_visualization.py
index eddc3cd2b4..c38ab0e504 100644
--- a/test/mesh/visualization/test_visualization.py
+++ b/test/mesh/visualization/test_visualization.py
@@ -215,7 +215,6 @@ def test_pyvista_points_padded_to_3d():
 
 def test_unsupported_spatial_dims():
     """Test that meshes with >3 spatial dimensions raise error."""
-    torch.manual_seed(42)
     # Create a 4D mesh
     points = torch.randn(10, 4)
     cells = torch.randint(0, 10, (5, 2))
@@ -241,7 +240,6 @@ def test_no_scalars():
 
 def test_point_scalars_tensor():
     """Test point scalars with direct tensor."""
-    torch.manual_seed(42)
     mesh = create_2d_triangle_mesh()
     point_scalars = torch.rand(mesh.n_points)
     ax = mesh.draw(show=False, backend="matplotlib", point_scalars=point_scalars)
@@ -251,7 +249,6 @@ def test_point_scalars_tensor():
 
 def test_cell_scalars_tensor():
     """Test cell scalars with direct tensor."""
-    torch.manual_seed(42)
     mesh = create_2d_triangle_mesh()
     cell_scalars = torch.rand(mesh.n_cells)
     ax = mesh.draw(show=False, backend="matplotlib", cell_scalars=cell_scalars)
@@ -261,7 +258,6 @@ def test_cell_scalars_tensor():
 
 def test_point_scalars_key():
     """Test point scalars with key lookup."""
-    torch.manual_seed(42)
     mesh = create_2d_triangle_mesh()
     mesh.point_data["temperature"] = torch.rand(mesh.n_points)
     ax = mesh.draw(show=False, backend="matplotlib", point_scalars="temperature")
@@ -271,7 +267,6 @@ def test_point_scalars_key():
 
 def test_cell_scalars_key():
     """Test cell scalars with key lookup."""
-    torch.manual_seed(42)
     mesh = create_2d_triangle_mesh()
     mesh.cell_data["pressure"] = torch.rand(mesh.n_cells)
     ax = mesh.draw(show=False, backend="matplotlib", cell_scalars="pressure")
@@ -283,7 +278,6 @@ def test_nested_tensordict_key():
     """Test scalar lookup with nested TensorDict key."""
     from tensordict import TensorDict
 
-    torch.manual_seed(42)
     mesh = create_2d_triangle_mesh()
 
     # Create nested structure
@@ -300,7 +294,6 @@ def test_nested_tensordict_key():
 
 def test_multidimensional_scalars_norm():
     """Test that multidimensional scalars are L2-normed."""
-    torch.manual_seed(42)
     mesh = create_2d_triangle_mesh()
 
     # Create 3D vector field
@@ -313,7 +306,6 @@ def test_multidimensional_scalars_norm():
 
 def test_mutual_exclusivity():
     """Test that point_scalars and cell_scalars are mutually exclusive."""
-    torch.manual_seed(42)
     mesh = create_2d_triangle_mesh()
 
     with pytest.raises(ValueError, match="mutually exclusive"):
@@ -326,7 +318,6 @@ def test_mutual_exclusivity():
 
 def test_scalar_wrong_shape():
     """Test that scalars with wrong shape raise error."""
-    torch.manual_seed(42)
     mesh = create_2d_triangle_mesh()
 
     with pytest.raises(ValueError, match="wrong first dimension"):
@@ -350,7 +341,6 @@ def test_scalar_key_not_found():
 
 def test_colormap():
     """Test custom colormap."""
-    torch.manual_seed(42)
     mesh = create_2d_triangle_mesh()
     mesh.cell_data["data"] = torch.rand(mesh.n_cells)
 
@@ -361,7 +351,6 @@ def test_colormap():
 
 def test_vmin_vmax():
     """Test colormap range specification."""
-    torch.manual_seed(42)
     mesh = create_2d_triangle_mesh()
     mesh.cell_data["data"] = torch.rand(mesh.n_cells)
 
@@ -447,7 +436,6 @@ def test_draw_1d_in_2d():
 
 def test_draw_empty_mesh():
    """Test drawing mesh with no cells."""
-    torch.manual_seed(42)
     points = torch.randn(10, 2)
     cells = torch.empty((0, 3), dtype=torch.long)
     mesh = Mesh(points=points, cells=cells)
@@ -459,7 +447,6 @@ def test_draw_empty_mesh():
 
 def test_pyvista_with_scalars():
     """Test PyVista backend with scalar coloring."""
-    torch.manual_seed(42)
     mesh = create_3d_surface_mesh()
     mesh.cell_data["pressure"] = torch.rand(mesh.n_cells)
 
@@ -472,7 +459,6 @@ def test_pyvista_with_scalars():
 
 def test_pyvista_with_point_scalars():
     """Test PyVista backend with point scalar coloring."""
-    torch.manual_seed(42)
     mesh = create_3d_surface_mesh()
     mesh.point_data["temperature"] = torch.rand(mesh.n_points)
 
@@ -488,7 +474,6 @@ def test_pyvista_with_point_scalars():
 
 def test_full_workflow_matplotlib():
     """Test complete workflow with matplotlib backend."""
-    torch.manual_seed(42)
     mesh = create_2d_triangle_mesh()
 
     # Add some data
@@ -512,7 +497,6 @@ def test_full_workflow_matplotlib():
 
 def test_full_workflow_pyvista():
     """Test complete workflow with PyVista backend."""
-    torch.manual_seed(42)
     mesh = create_3d_surface_mesh()
 
     # Add some data
@@ -608,7 +592,6 @@ def test_basic_visualization_parametrized(
 
     @pytest.mark.parametrize("backend", ["matplotlib", "pyvista"])
     def test_visualization_with_scalars_parametrized(self, backend):
         """Test visualization with scalar data across backends."""
-        torch.manual_seed(42)
         if backend == "pyvista":
             # Use 3D mesh for PyVista
             points = torch.tensor([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])

From 7204fdf1e00b23ee91aa7a3eecd5b20d91680fed Mon Sep 17 00:00:00 2001
From: Peter Sharpe
Date: Tue, 6 Jan 2026 16:48:41 -0500
Subject: [PATCH 11/11] Remove torch.manual_seed(0) calls from various test
 files to ensure deterministic behavior is handled globally, improving test
 reliability.

---
 test/models/domino/test_domino_encodings.py | 8 --------
 test/models/domino/test_domino_geometry_rep.py | 6 ------
 test/models/domino/test_domino_mlps.py | 4 ----
 test/models/domino/test_domino_solutions.py | 8 --------
 4 files changed, 26 deletions(-)

diff --git a/test/models/domino/test_domino_encodings.py b/test/models/domino/test_domino_encodings.py
index f3a1485993..089d2401a1 100644
--- a/test/models/domino/test_domino_encodings.py
+++ b/test/models/domino/test_domino_encodings.py
@@ -28,8 +28,6 @@ def test_fourier_mlp(device, fourier_features, num_modes):
     """Test FourierMLP with various configurations"""
     from physicsnemo.nn import FourierMLP
 
-    torch.manual_seed(0)
-
     model = FourierMLP(
         input_features=3,
         base_layer=64,
@@ -48,8 +46,6 @@ def test_fourier_encode_vectorized(device):
     """Test fourier encoding function"""
     from physicsnemo.nn import fourier_encode
 
-    torch.manual_seed(0)
-
     coords = torch.randn(4, 20, 3).to(device)
     freqs = torch.exp(torch.linspace(0, math.pi, 5)).to(device)
 
@@ -66,8 +62,6 @@ def test_local_geometry_encoding(device):
 
     BATCH_SIZE = 1
 
-    torch.manual_seed(0)
-
     N_ENCODING_CHANNELS = 3
     N_NEIGHBORS = 32
     N_MESH_POINTS = 50
@@ -99,8 +93,6 @@ def test_multi_geometry_encoding(device, geo_encoding_type):
     from physicsnemo.models.domino.encodings import MultiGeometryEncoding
     from physicsnemo.models.domino.model import get_activation
 
-    torch.manual_seed(0)
-
     BATCH_SIZE = 1
     N_MESH_POINTS = 50
     GRID_RESOLUTION = (32, 32, 32)
diff --git a/test/models/domino/test_domino_geometry_rep.py b/test/models/domino/test_domino_geometry_rep.py
index c1e2208059..c8502e9284 100644
--- a/test/models/domino/test_domino_geometry_rep.py
+++ b/test/models/domino/test_domino_geometry_rep.py
@@ -29,8 +29,6 @@ def test_geo_conv_out(device, act, fourier_features):
     """Test GeoConvOut layer"""
     from physicsnemo.models.domino.geometry_rep import GeoConvOut
 
-    torch.manual_seed(0)
-
     @dataclass
     class TestParams:
         base_neurons: int = 32
@@ -71,8 +69,6 @@ def test_geo_processor(device, act):
     """Test GeoProcessor CNN"""
     from physicsnemo.models.domino.geometry_rep import GeoProcessor
 
-    torch.manual_seed(0)
-
     @dataclass
     class TestParams:
         base_filters: int = 8
@@ -98,8 +94,6 @@ def test_geometry_rep(
     """Test GeometryRep module with different configurations"""
     from physicsnemo.models.domino.geometry_rep import GeometryRep
 
-    torch.manual_seed(0)
-
     # Modify params for this test
     params = base_model_params()
     params.geometry_encoding_type = geometry_encoding_type
diff --git a/test/models/domino/test_domino_mlps.py b/test/models/domino/test_domino_mlps.py
index 0d22d35753..a7cb2f76bc 100644
--- a/test/models/domino/test_domino_mlps.py
+++ b/test/models/domino/test_domino_mlps.py
@@ -26,8 +26,6 @@ def test_aggregation_model(device, activation):
     from physicsnemo.models.domino.mlps import AggregationModel
     from physicsnemo.models.domino.model import get_activation
 
-    torch.manual_seed(0)
-
     model = AggregationModel(
         input_features=100,
         output_features=1,
@@ -47,8 +45,6 @@ def test_local_point_conv(device, activation):
     from physicsnemo.models.domino.mlps import LocalPointConv
     from physicsnemo.models.domino.model import get_activation
 
-    torch.manual_seed(0)
-
     model = LocalPointConv(
         input_features=50,
         base_layer=128,
diff --git a/test/models/domino/test_domino_solutions.py b/test/models/domino/test_domino_solutions.py
index a6eba70a1b..2ac04281c6 100644
--- a/test/models/domino/test_domino_solutions.py
+++ b/test/models/domino/test_domino_solutions.py
@@ -32,8 +32,6 @@ def test_solution_calculator_volume(
     from physicsnemo.models.domino.solutions import SolutionCalculatorVolume
     from physicsnemo.nn import FourierMLP, get_activation
 
-    torch.manual_seed(0)
-
     activation = get_activation("relu")
 
     # Create parameter model if needed
@@ -116,8 +114,6 @@ def test_solution_calculator_surface(
     from physicsnemo.models.domino.solutions import SolutionCalculatorSurface
     from physicsnemo.nn import FourierMLP, get_activation
 
-    torch.manual_seed(0)
-
     activation = get_activation("relu")
 
     # Determine input features based on surface configuration
@@ -199,8 +195,6 @@ def test_sample_sphere(device, r, num_points):
     """Test sphere sampling function"""
     from physicsnemo.models.domino.solutions import sample_sphere
 
-    torch.manual_seed(0)
-
     center = torch.randn(2, 30, 3).to(device)
 
     output = sample_sphere(center, r, num_points)
@@ -215,8 +209,6 @@ def test_sample_sphere_shell(device):
     """Test spherical shell sampling function"""
     from physicsnemo.models.domino.solutions import sample_sphere_shell
 
-    torch.manual_seed(0)
-
     center = torch.randn(2, 30, 3).to(device)
     r_inner, r_outer = 0.5, 1.5
     num_points = 50