From 902b4d527e93f308e798fcc161c0c95c6e742592 Mon Sep 17 00:00:00 2001 From: Kimball Bighorse Date: Thu, 22 Jan 2026 11:22:57 -0800 Subject: [PATCH 01/22] Add feature spec for well data relationships Defines business requirements for: - Wells storing legacy NM_Aquifer identifiers (WellID, LocationID) - Related records (chemistry, hydraulics, stratigraphy, etc.) requiring a well - Cascade delete behavior when wells are removed Addresses #363 Co-Authored-By: Claude Opus 4.5 --- .../admin/well_data_relationships.feature | 104 ++++++++++++++++++ 1 file changed, 104 insertions(+) create mode 100644 features/admin/well_data_relationships.feature diff --git a/features/admin/well_data_relationships.feature b/features/admin/well_data_relationships.feature new file mode 100644 index 00000000..42d413ff --- /dev/null +++ b/features/admin/well_data_relationships.feature @@ -0,0 +1,104 @@ +@data-integrity +Feature: Well Data Relationships + As a NMBGMR data manager + I need well-related records to always belong to a well + So that data integrity is maintained and orphaned records are prevented + + Background: + Given the Ocotillo database is set up + + # ============================================================================ + # Wells Store Legacy Identifiers + # ============================================================================ + + @wells + Scenario: Wells store their legacy WellID + Given a well record exists + Then the well can store its original NM_Aquifer WellID + And the well can be found by its legacy WellID + + @wells + Scenario: Wells store their legacy LocationID + Given a well record exists + Then the well can store its original NM_Aquifer LocationID + And the well can be found by its legacy LocationID + + # ============================================================================ + # Related Records Require a Well + # ============================================================================ + + @chemistry + Scenario: Chemistry samples require a well + When I try to save chemistry sample information + Then a well must be specified + And orphaned chemistry records are not allowed + + @hydraulics + Scenario: Hydraulic test data requires a well + When I try to save hydraulic test data + Then a well must be specified + And orphaned hydraulic records are not allowed + + @stratigraphy + Scenario: Lithology logs require a well + When I try to save a lithology log + Then a well must be specified + And orphaned lithology records are not allowed + + @radionuclides + Scenario: Radionuclide results require a well + When I try to save radionuclide results + Then a well must be specified + And orphaned radionuclide records are not allowed + + @associated-data + Scenario: Associated data requires a well + When I try to save associated data + Then a well must be specified + And orphaned associated data records are not allowed + + @soil-rock + Scenario: Soil and rock results require a well + When I try to save soil or rock results + Then a well must be specified + And orphaned soil/rock records are not allowed + + # ============================================================================ + # Deleting a Well Removes Related Records + # ============================================================================ + + @cascade-delete + Scenario: Deleting a well removes its chemistry samples + Given a well has chemistry sample records + When the well is deleted + Then its chemistry samples are also deleted + + @cascade-delete + Scenario: Deleting a well removes its hydraulic data + Given a 
well has hydraulic test data + When the well is deleted + Then its hydraulic data is also deleted + + @cascade-delete + Scenario: Deleting a well removes its lithology logs + Given a well has lithology logs + When the well is deleted + Then its lithology logs are also deleted + + @cascade-delete + Scenario: Deleting a well removes its radionuclide results + Given a well has radionuclide results + When the well is deleted + Then its radionuclide results are also deleted + + @cascade-delete + Scenario: Deleting a well removes its associated data + Given a well has associated data + When the well is deleted + Then its associated data is also deleted + + @cascade-delete + Scenario: Deleting a well removes its soil/rock results + Given a well has soil and rock results + When the well is deleted + Then its soil/rock results are also deleted From ca6a820e0639325c1f03a6a52266b3fa4062f030 Mon Sep 17 00:00:00 2001 From: Kimball Bighorse Date: Thu, 22 Jan 2026 11:34:52 -0800 Subject: [PATCH 02/22] Add relationship navigation scenario to feature spec Adds scenario for navigating from a well to its related records through ORM relationships. Co-Authored-By: Claude Opus 4.5 --- .../admin/well_data_relationships.feature | 16 + .../features/steps/well-data-relationships.py | 596 ++++++++++++++++++ 2 files changed, 612 insertions(+) create mode 100644 tests/features/steps/well-data-relationships.py diff --git a/features/admin/well_data_relationships.feature b/features/admin/well_data_relationships.feature index 42d413ff..0eed2d6c 100644 --- a/features/admin/well_data_relationships.feature +++ b/features/admin/well_data_relationships.feature @@ -63,6 +63,22 @@ Feature: Well Data Relationships Then a well must be specified And orphaned soil/rock records are not allowed + # ============================================================================ + # Relationship Navigation + # ============================================================================ + + @relationships + Scenario: A well can access its related records through relationships + Given a well has chemistry sample records + And a well has hydraulic test data + And a well has lithology logs + And a well has radionuclide results + And a well has associated data + And a well has soil and rock results + When I access the well's relationships + Then I can navigate to all related record types + And each relationship returns the correct records + # ============================================================================ # Deleting a Well Removes Related Records # ============================================================================ diff --git a/tests/features/steps/well-data-relationships.py b/tests/features/steps/well-data-relationships.py new file mode 100644 index 00000000..6da40309 --- /dev/null +++ b/tests/features/steps/well-data-relationships.py @@ -0,0 +1,596 @@ +# =============================================================================== +# Copyright 2026 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# =============================================================================== +""" +Step definitions for Well Data Relationships feature tests. +Tests FK relationships, orphan prevention, and cascade delete behavior. +""" + +import uuid +from datetime import datetime + +from behave import given, when, then +from behave.runner import Context +from sqlalchemy.exc import IntegrityError, StatementError + +from db import Thing +from db.engine import session_ctx +from db.nma_legacy import ( + ChemistrySampleInfo, + NMAHydraulicsData, + Stratigraphy, + NMARadionuclides, + AssociatedData, + SoilRockResults, +) + + +@given("the Ocotillo database is set up") +def step_given_database_setup(context: Context): + """Ensure database is ready for testing.""" + # Database connection is handled by session_ctx + context.test_wells = [] + context.test_records = {} + + +@given("a well record exists") +def step_given_well_exists(context: Context): + """Create a test well (Thing) record.""" + with session_ctx() as session: + well = Thing( + name=f"TEST_WELL_{uuid.uuid4().hex[:8]}", + thing_type="water well", + release_status="public", + nma_pk_welldata=str(uuid.uuid4()), + nma_pk_location=str(uuid.uuid4()), + ) + session.add(well) + session.commit() + session.refresh(well) + context.test_well = well + context.test_well_id = well.id + if not hasattr(context, "test_wells"): + context.test_wells = [] + context.test_wells.append(well) + + +@then("the well can store its original NM_Aquifer WellID") +def step_then_well_stores_wellid(context: Context): + """Verify well can store legacy WellID.""" + assert context.test_well.nma_pk_welldata is not None, "Well should store legacy WellID" + assert isinstance(context.test_well.nma_pk_welldata, str), "WellID should be a string" + + +@then("the well can be found by its legacy WellID") +def step_then_find_by_wellid(context: Context): + """Verify well can be queried by legacy WellID.""" + with session_ctx() as session: + found_well = session.query(Thing).filter( + Thing.nma_pk_welldata == context.test_well.nma_pk_welldata + ).first() + assert found_well is not None, "Well should be findable by legacy WellID" + assert found_well.id == context.test_well.id, "Found well should match original" + + +@then("the well can store its original NM_Aquifer LocationID") +def step_then_well_stores_locationid(context: Context): + """Verify well can store legacy LocationID.""" + assert context.test_well.nma_pk_location is not None, "Well should store legacy LocationID" + assert isinstance(context.test_well.nma_pk_location, str), "LocationID should be a string" + + +@then("the well can be found by its legacy LocationID") +def step_then_find_by_locationid(context: Context): + """Verify well can be queried by legacy LocationID.""" + with session_ctx() as session: + found_well = session.query(Thing).filter( + Thing.nma_pk_location == context.test_well.nma_pk_location + ).first() + assert found_well is not None, "Well should be findable by legacy LocationID" + assert found_well.id == context.test_well.id, "Found well should match original" + + +# ============================================================================ +# Chemistry Sample Info +# ============================================================================ + + +@when("I try to save chemistry sample information") +def step_when_save_chemistry(context: Context): + """Attempt to save chemistry sample info without a well.""" + context.orphan_error = None + context.record_saved = False + + try: + with session_ctx() as session: + 
chemistry = ChemistrySampleInfo( + sample_pt_id=uuid.uuid4(), + sample_point_id="TEST001", + thing_id=None, # No parent well + collection_date=datetime.now(), + ) + session.add(chemistry) + session.commit() + context.record_saved = True + except (ValueError, IntegrityError, StatementError) as e: + context.orphan_error = e + context.record_saved = False + + +@then("a well must be specified") +def step_then_well_required(context: Context): + """Verify that a well (thing_id) is required.""" + assert not context.record_saved, "Record should not be saved without a well" + assert context.orphan_error is not None, "Should raise error when well is missing" + + +@then("orphaned chemistry records are not allowed") +def step_then_no_orphan_chemistry(context: Context): + """Verify no orphan chemistry records exist.""" + with session_ctx() as session: + orphan_count = session.query(ChemistrySampleInfo).filter( + ChemistrySampleInfo.thing_id.is_(None) + ).count() + assert orphan_count == 0, f"Found {orphan_count} orphan chemistry records" + + +# ============================================================================ +# Hydraulics Data +# ============================================================================ + + +@when("I try to save hydraulic test data") +def step_when_save_hydraulics(context: Context): + """Attempt to save hydraulic data without a well.""" + context.orphan_error = None + context.record_saved = False + + try: + with session_ctx() as session: + hydraulics = NMAHydraulicsData( + global_id=uuid.uuid4(), + point_id="TEST001", + thing_id=None, # No parent well + test_top=100, + test_bottom=200, + ) + session.add(hydraulics) + session.commit() + context.record_saved = True + except (ValueError, IntegrityError, StatementError) as e: + context.orphan_error = e + context.record_saved = False + + +@then("orphaned hydraulic records are not allowed") +def step_then_no_orphan_hydraulics(context: Context): + """Verify no orphan hydraulic records exist.""" + with session_ctx() as session: + orphan_count = session.query(NMAHydraulicsData).filter( + NMAHydraulicsData.thing_id.is_(None) + ).count() + assert orphan_count == 0, f"Found {orphan_count} orphan hydraulic records" + + +# ============================================================================ +# Stratigraphy (Lithology) +# ============================================================================ + + +@when("I try to save a lithology log") +def step_when_save_lithology(context: Context): + """Attempt to save lithology log without a well.""" + context.orphan_error = None + context.record_saved = False + + try: + with session_ctx() as session: + stratigraphy = Stratigraphy( + global_id=uuid.uuid4(), + point_id="TEST001", + thing_id=None, # No parent well + strat_top=100.0, + strat_bottom=200.0, + ) + session.add(stratigraphy) + session.commit() + context.record_saved = True + except (ValueError, IntegrityError, StatementError) as e: + context.orphan_error = e + context.record_saved = False + + +@then("orphaned lithology records are not allowed") +def step_then_no_orphan_lithology(context: Context): + """Verify no orphan lithology records exist.""" + with session_ctx() as session: + orphan_count = session.query(Stratigraphy).filter( + Stratigraphy.thing_id.is_(None) + ).count() + assert orphan_count == 0, f"Found {orphan_count} orphan lithology records" + + +# ============================================================================ +# Radionuclides +# ============================================================================ 
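+# Radionuclide records also reference a chemistry sample via sample_pt_id, so
+# the orphan step below first creates a ChemistrySampleInfo for the test well
+# and leaves only thing_id unset on the radionuclide row; the missing well is
+# then the only reason the save can fail.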
+ + +@when("I try to save radionuclide results") +def step_when_save_radionuclides(context: Context): + """Attempt to save radionuclide results without a well.""" + context.orphan_error = None + context.record_saved = False + + try: + with session_ctx() as session: + # First create a chemistry sample info for the radionuclide + chemistry_sample = ChemistrySampleInfo( + sample_pt_id=uuid.uuid4(), + sample_point_id="TEST001", + thing_id=context.test_well_id, + collection_date=datetime.now(), + ) + session.add(chemistry_sample) + session.flush() + + radionuclide = NMARadionuclides( + global_id=uuid.uuid4(), + thing_id=None, # No parent well + sample_pt_id=chemistry_sample.sample_pt_id, + analyte="U-238", + ) + session.add(radionuclide) + session.commit() + context.record_saved = True + except (ValueError, IntegrityError, StatementError) as e: + context.orphan_error = e + context.record_saved = False + + +@then("orphaned radionuclide records are not allowed") +def step_then_no_orphan_radionuclides(context: Context): + """Verify no orphan radionuclide records exist.""" + with session_ctx() as session: + orphan_count = session.query(NMARadionuclides).filter( + NMARadionuclides.thing_id.is_(None) + ).count() + assert orphan_count == 0, f"Found {orphan_count} orphan radionuclide records" + + +# ============================================================================ +# Associated Data +# ============================================================================ + + +@when("I try to save associated data") +def step_when_save_associated_data(context: Context): + """Attempt to save associated data without a well.""" + context.orphan_error = None + context.record_saved = False + + try: + with session_ctx() as session: + associated_data = AssociatedData( + assoc_id=uuid.uuid4(), + point_id="TEST001", + thing_id=None, # No parent well + notes="Test notes", + ) + session.add(associated_data) + session.commit() + context.record_saved = True + except (ValueError, IntegrityError, StatementError) as e: + context.orphan_error = e + context.record_saved = False + + +@then("orphaned associated data records are not allowed") +def step_then_no_orphan_associated_data(context: Context): + """Verify no orphan associated data records exist.""" + with session_ctx() as session: + orphan_count = session.query(AssociatedData).filter( + AssociatedData.thing_id.is_(None) + ).count() + assert orphan_count == 0, f"Found {orphan_count} orphan associated data records" + + +# ============================================================================ +# Soil/Rock Results +# ============================================================================ + + +@when("I try to save soil or rock results") +def step_when_save_soil_rock(context: Context): + """Attempt to save soil/rock results without a well.""" + context.orphan_error = None + context.record_saved = False + + try: + with session_ctx() as session: + soil_rock = SoilRockResults( + point_id="TEST001", + thing_id=None, # No parent well + sample_type="Soil", + date_sampled="2025-01-01", + ) + session.add(soil_rock) + session.commit() + context.record_saved = True + except (ValueError, IntegrityError, StatementError) as e: + context.orphan_error = e + context.record_saved = False + + +@then("orphaned soil/rock records are not allowed") +def step_then_no_orphan_soil_rock(context: Context): + """Verify no orphan soil/rock records exist.""" + with session_ctx() as session: + orphan_count = session.query(SoilRockResults).filter( + SoilRockResults.thing_id.is_(None) + 
).count() + assert orphan_count == 0, f"Found {orphan_count} orphan soil/rock records" + + +# ============================================================================ +# Relationship Navigation Tests +# ============================================================================ + + +@when("I access the well's relationships") +def step_when_access_relationships(context: Context): + """Access the well's relationships.""" + with session_ctx() as session: + well = session.query(Thing).filter(Thing.id == context.test_well_id).first() + context.well_relationships = { + "chemistry_samples": well.chemistry_sample_infos, + "hydraulics_data": well.hydraulics_data, + "lithology_logs": well.stratigraphy_logs, + "radionuclides": well.radionuclides, + "associated_data": well.associated_data, + "soil_rock_results": well.soil_rock_results, + } + + +@then("I can navigate to all related record types") +def step_then_navigate_relationships(context: Context): + """Verify all relationship types are accessible.""" + assert "chemistry_samples" in context.well_relationships + assert "hydraulics_data" in context.well_relationships + assert "lithology_logs" in context.well_relationships + assert "radionuclides" in context.well_relationships + assert "associated_data" in context.well_relationships + assert "soil_rock_results" in context.well_relationships + + +@then("each relationship returns the correct records") +def step_then_relationships_correct(context: Context): + """Verify each relationship returns the expected records.""" + assert len(context.well_relationships["chemistry_samples"]) >= 1 + assert len(context.well_relationships["hydraulics_data"]) >= 1 + assert len(context.well_relationships["lithology_logs"]) >= 1 + assert len(context.well_relationships["radionuclides"]) >= 1 + assert len(context.well_relationships["associated_data"]) >= 1 + assert len(context.well_relationships["soil_rock_results"]) >= 1 + + +# ============================================================================ +# Cascade Delete Tests +# ============================================================================ + + +@given("a well has chemistry sample records") +def step_given_well_has_chemistry(context: Context): + """Create chemistry samples for a well.""" + if not hasattr(context, "test_well"): + step_given_well_exists(context) + + with session_ctx() as session: + chemistry1 = ChemistrySampleInfo( + sample_pt_id=uuid.uuid4(), + sample_point_id="TEST001", + thing_id=context.test_well_id, + collection_date=datetime.now(), + ) + chemistry2 = ChemistrySampleInfo( + sample_pt_id=uuid.uuid4(), + sample_point_id="TEST002", + thing_id=context.test_well_id, + collection_date=datetime.now(), + ) + session.add_all([chemistry1, chemistry2]) + session.commit() + context.chemistry_samples = [chemistry1, chemistry2] + + +@given("a well has hydraulic test data") +def step_given_well_has_hydraulics(context: Context): + """Create hydraulic data for a well.""" + if not hasattr(context, "test_well"): + step_given_well_exists(context) + + with session_ctx() as session: + hydraulics = NMAHydraulicsData( + global_id=uuid.uuid4(), + point_id="TEST001", + thing_id=context.test_well_id, + test_top=100, + test_bottom=200, + ) + session.add(hydraulics) + session.commit() + context.hydraulics_data = hydraulics + + +@given("a well has lithology logs") +def step_given_well_has_lithology(context: Context): + """Create lithology logs for a well.""" + if not hasattr(context, "test_well"): + step_given_well_exists(context) + + with session_ctx() as 
session: + lithology1 = Stratigraphy( + global_id=uuid.uuid4(), + point_id="TEST001", + thing_id=context.test_well_id, + strat_top=0.0, + strat_bottom=100.0, + ) + lithology2 = Stratigraphy( + global_id=uuid.uuid4(), + point_id="TEST001", + thing_id=context.test_well_id, + strat_top=100.0, + strat_bottom=200.0, + ) + session.add_all([lithology1, lithology2]) + session.commit() + context.lithology_logs = [lithology1, lithology2] + + +@given("a well has radionuclide results") +def step_given_well_has_radionuclides(context: Context): + """Create radionuclide results for a well.""" + if not hasattr(context, "test_well"): + step_given_well_exists(context) + + with session_ctx() as session: + chemistry_sample = ChemistrySampleInfo( + sample_pt_id=uuid.uuid4(), + sample_point_id="TEST001", + thing_id=context.test_well_id, + collection_date=datetime.now(), + ) + session.add(chemistry_sample) + session.flush() + + radionuclide = NMARadionuclides( + global_id=uuid.uuid4(), + thing_id=context.test_well_id, + sample_pt_id=chemistry_sample.sample_pt_id, + analyte="U-238", + ) + session.add(radionuclide) + session.commit() + context.radionuclide_results = radionuclide + + +@given("a well has associated data") +def step_given_well_has_associated_data(context: Context): + """Create associated data for a well.""" + if not hasattr(context, "test_well"): + step_given_well_exists(context) + + with session_ctx() as session: + associated_data = AssociatedData( + assoc_id=uuid.uuid4(), + point_id="TEST001", + thing_id=context.test_well_id, + notes="Test associated data", + ) + session.add(associated_data) + session.commit() + context.associated_data = associated_data + + +@given("a well has soil and rock results") +def step_given_well_has_soil_rock(context: Context): + """Create soil/rock results for a well.""" + if not hasattr(context, "test_well"): + step_given_well_exists(context) + + with session_ctx() as session: + soil_rock = SoilRockResults( + point_id="TEST001", + thing_id=context.test_well_id, + sample_type="Soil", + date_sampled="2025-01-01", + ) + session.add(soil_rock) + session.commit() + context.soil_rock_results = soil_rock + + +@when("the well is deleted") +def step_when_well_deleted(context: Context): + """Delete the test well.""" + with session_ctx() as session: + well = session.query(Thing).filter(Thing.id == context.test_well_id).first() + if well: + session.delete(well) + session.commit() + context.well_deleted = True + + +@then("its chemistry samples are also deleted") +def step_then_chemistry_deleted(context: Context): + """Verify chemistry samples are cascade deleted.""" + with session_ctx() as session: + remaining = session.query(ChemistrySampleInfo).filter( + ChemistrySampleInfo.thing_id == context.test_well_id + ).count() + assert remaining == 0, f"Expected 0 chemistry samples, found {remaining}" + + +@then("its hydraulic data is also deleted") +def step_then_hydraulics_deleted(context: Context): + """Verify hydraulic data is cascade deleted.""" + with session_ctx() as session: + remaining = session.query(NMAHydraulicsData).filter( + NMAHydraulicsData.thing_id == context.test_well_id + ).count() + assert remaining == 0, f"Expected 0 hydraulic records, found {remaining}" + + +@then("its lithology logs are also deleted") +def step_then_lithology_deleted(context: Context): + """Verify lithology logs are cascade deleted.""" + with session_ctx() as session: + remaining = session.query(Stratigraphy).filter( + Stratigraphy.thing_id == context.test_well_id + ).count() + assert remaining == 0, 
f"Expected 0 lithology logs, found {remaining}" + + +@then("its radionuclide results are also deleted") +def step_then_radionuclides_deleted(context: Context): + """Verify radionuclide results are cascade deleted.""" + with session_ctx() as session: + remaining = session.query(NMARadionuclides).filter( + NMARadionuclides.thing_id == context.test_well_id + ).count() + assert remaining == 0, f"Expected 0 radionuclide records, found {remaining}" + + +@then("its associated data is also deleted") +def step_then_associated_data_deleted(context: Context): + """Verify associated data is cascade deleted.""" + with session_ctx() as session: + remaining = session.query(AssociatedData).filter( + AssociatedData.thing_id == context.test_well_id + ).count() + assert remaining == 0, f"Expected 0 associated data records, found {remaining}" + + +@then("its soil/rock results are also deleted") +def step_then_soil_rock_deleted(context: Context): + """Verify soil/rock results are cascade deleted.""" + with session_ctx() as session: + remaining = session.query(SoilRockResults).filter( + SoilRockResults.thing_id == context.test_well_id + ).count() + assert remaining == 0, f"Expected 0 soil/rock records, found {remaining}" + + +# ============= EOF ============================================= From b2ab2c5436c6e7f3212c1f7fbaea6d97ee9f8599 Mon Sep 17 00:00:00 2001 From: kbighorse Date: Thu, 22 Jan 2026 19:35:09 +0000 Subject: [PATCH 03/22] Formatting changes --- .../features/steps/well-data-relationships.py | 154 +++++++++++------- 1 file changed, 94 insertions(+), 60 deletions(-) diff --git a/tests/features/steps/well-data-relationships.py b/tests/features/steps/well-data-relationships.py index 6da40309..19fb46f4 100644 --- a/tests/features/steps/well-data-relationships.py +++ b/tests/features/steps/well-data-relationships.py @@ -69,17 +69,23 @@ def step_given_well_exists(context: Context): @then("the well can store its original NM_Aquifer WellID") def step_then_well_stores_wellid(context: Context): """Verify well can store legacy WellID.""" - assert context.test_well.nma_pk_welldata is not None, "Well should store legacy WellID" - assert isinstance(context.test_well.nma_pk_welldata, str), "WellID should be a string" + assert ( + context.test_well.nma_pk_welldata is not None + ), "Well should store legacy WellID" + assert isinstance( + context.test_well.nma_pk_welldata, str + ), "WellID should be a string" @then("the well can be found by its legacy WellID") def step_then_find_by_wellid(context: Context): """Verify well can be queried by legacy WellID.""" with session_ctx() as session: - found_well = session.query(Thing).filter( - Thing.nma_pk_welldata == context.test_well.nma_pk_welldata - ).first() + found_well = ( + session.query(Thing) + .filter(Thing.nma_pk_welldata == context.test_well.nma_pk_welldata) + .first() + ) assert found_well is not None, "Well should be findable by legacy WellID" assert found_well.id == context.test_well.id, "Found well should match original" @@ -87,17 +93,23 @@ def step_then_find_by_wellid(context: Context): @then("the well can store its original NM_Aquifer LocationID") def step_then_well_stores_locationid(context: Context): """Verify well can store legacy LocationID.""" - assert context.test_well.nma_pk_location is not None, "Well should store legacy LocationID" - assert isinstance(context.test_well.nma_pk_location, str), "LocationID should be a string" + assert ( + context.test_well.nma_pk_location is not None + ), "Well should store legacy LocationID" + assert isinstance( + 
context.test_well.nma_pk_location, str + ), "LocationID should be a string" @then("the well can be found by its legacy LocationID") def step_then_find_by_locationid(context: Context): """Verify well can be queried by legacy LocationID.""" with session_ctx() as session: - found_well = session.query(Thing).filter( - Thing.nma_pk_location == context.test_well.nma_pk_location - ).first() + found_well = ( + session.query(Thing) + .filter(Thing.nma_pk_location == context.test_well.nma_pk_location) + .first() + ) assert found_well is not None, "Well should be findable by legacy LocationID" assert found_well.id == context.test_well.id, "Found well should match original" @@ -112,7 +124,7 @@ def step_when_save_chemistry(context: Context): """Attempt to save chemistry sample info without a well.""" context.orphan_error = None context.record_saved = False - + try: with session_ctx() as session: chemistry = ChemistrySampleInfo( @@ -140,9 +152,11 @@ def step_then_well_required(context: Context): def step_then_no_orphan_chemistry(context: Context): """Verify no orphan chemistry records exist.""" with session_ctx() as session: - orphan_count = session.query(ChemistrySampleInfo).filter( - ChemistrySampleInfo.thing_id.is_(None) - ).count() + orphan_count = ( + session.query(ChemistrySampleInfo) + .filter(ChemistrySampleInfo.thing_id.is_(None)) + .count() + ) assert orphan_count == 0, f"Found {orphan_count} orphan chemistry records" @@ -156,7 +170,7 @@ def step_when_save_hydraulics(context: Context): """Attempt to save hydraulic data without a well.""" context.orphan_error = None context.record_saved = False - + try: with session_ctx() as session: hydraulics = NMAHydraulicsData( @@ -178,9 +192,11 @@ def step_when_save_hydraulics(context: Context): def step_then_no_orphan_hydraulics(context: Context): """Verify no orphan hydraulic records exist.""" with session_ctx() as session: - orphan_count = session.query(NMAHydraulicsData).filter( - NMAHydraulicsData.thing_id.is_(None) - ).count() + orphan_count = ( + session.query(NMAHydraulicsData) + .filter(NMAHydraulicsData.thing_id.is_(None)) + .count() + ) assert orphan_count == 0, f"Found {orphan_count} orphan hydraulic records" @@ -194,7 +210,7 @@ def step_when_save_lithology(context: Context): """Attempt to save lithology log without a well.""" context.orphan_error = None context.record_saved = False - + try: with session_ctx() as session: stratigraphy = Stratigraphy( @@ -216,9 +232,9 @@ def step_when_save_lithology(context: Context): def step_then_no_orphan_lithology(context: Context): """Verify no orphan lithology records exist.""" with session_ctx() as session: - orphan_count = session.query(Stratigraphy).filter( - Stratigraphy.thing_id.is_(None) - ).count() + orphan_count = ( + session.query(Stratigraphy).filter(Stratigraphy.thing_id.is_(None)).count() + ) assert orphan_count == 0, f"Found {orphan_count} orphan lithology records" @@ -232,7 +248,7 @@ def step_when_save_radionuclides(context: Context): """Attempt to save radionuclide results without a well.""" context.orphan_error = None context.record_saved = False - + try: with session_ctx() as session: # First create a chemistry sample info for the radionuclide @@ -244,7 +260,7 @@ def step_when_save_radionuclides(context: Context): ) session.add(chemistry_sample) session.flush() - + radionuclide = NMARadionuclides( global_id=uuid.uuid4(), thing_id=None, # No parent well @@ -263,9 +279,11 @@ def step_when_save_radionuclides(context: Context): def step_then_no_orphan_radionuclides(context: Context): """Verify 
no orphan radionuclide records exist.""" with session_ctx() as session: - orphan_count = session.query(NMARadionuclides).filter( - NMARadionuclides.thing_id.is_(None) - ).count() + orphan_count = ( + session.query(NMARadionuclides) + .filter(NMARadionuclides.thing_id.is_(None)) + .count() + ) assert orphan_count == 0, f"Found {orphan_count} orphan radionuclide records" @@ -279,7 +297,7 @@ def step_when_save_associated_data(context: Context): """Attempt to save associated data without a well.""" context.orphan_error = None context.record_saved = False - + try: with session_ctx() as session: associated_data = AssociatedData( @@ -300,9 +318,11 @@ def step_when_save_associated_data(context: Context): def step_then_no_orphan_associated_data(context: Context): """Verify no orphan associated data records exist.""" with session_ctx() as session: - orphan_count = session.query(AssociatedData).filter( - AssociatedData.thing_id.is_(None) - ).count() + orphan_count = ( + session.query(AssociatedData) + .filter(AssociatedData.thing_id.is_(None)) + .count() + ) assert orphan_count == 0, f"Found {orphan_count} orphan associated data records" @@ -316,7 +336,7 @@ def step_when_save_soil_rock(context: Context): """Attempt to save soil/rock results without a well.""" context.orphan_error = None context.record_saved = False - + try: with session_ctx() as session: soil_rock = SoilRockResults( @@ -337,9 +357,11 @@ def step_when_save_soil_rock(context: Context): def step_then_no_orphan_soil_rock(context: Context): """Verify no orphan soil/rock records exist.""" with session_ctx() as session: - orphan_count = session.query(SoilRockResults).filter( - SoilRockResults.thing_id.is_(None) - ).count() + orphan_count = ( + session.query(SoilRockResults) + .filter(SoilRockResults.thing_id.is_(None)) + .count() + ) assert orphan_count == 0, f"Found {orphan_count} orphan soil/rock records" @@ -395,7 +417,7 @@ def step_given_well_has_chemistry(context: Context): """Create chemistry samples for a well.""" if not hasattr(context, "test_well"): step_given_well_exists(context) - + with session_ctx() as session: chemistry1 = ChemistrySampleInfo( sample_pt_id=uuid.uuid4(), @@ -419,7 +441,7 @@ def step_given_well_has_hydraulics(context: Context): """Create hydraulic data for a well.""" if not hasattr(context, "test_well"): step_given_well_exists(context) - + with session_ctx() as session: hydraulics = NMAHydraulicsData( global_id=uuid.uuid4(), @@ -438,7 +460,7 @@ def step_given_well_has_lithology(context: Context): """Create lithology logs for a well.""" if not hasattr(context, "test_well"): step_given_well_exists(context) - + with session_ctx() as session: lithology1 = Stratigraphy( global_id=uuid.uuid4(), @@ -464,7 +486,7 @@ def step_given_well_has_radionuclides(context: Context): """Create radionuclide results for a well.""" if not hasattr(context, "test_well"): step_given_well_exists(context) - + with session_ctx() as session: chemistry_sample = ChemistrySampleInfo( sample_pt_id=uuid.uuid4(), @@ -474,7 +496,7 @@ def step_given_well_has_radionuclides(context: Context): ) session.add(chemistry_sample) session.flush() - + radionuclide = NMARadionuclides( global_id=uuid.uuid4(), thing_id=context.test_well_id, @@ -491,7 +513,7 @@ def step_given_well_has_associated_data(context: Context): """Create associated data for a well.""" if not hasattr(context, "test_well"): step_given_well_exists(context) - + with session_ctx() as session: associated_data = AssociatedData( assoc_id=uuid.uuid4(), @@ -509,7 +531,7 @@ def 
step_given_well_has_soil_rock(context: Context): """Create soil/rock results for a well.""" if not hasattr(context, "test_well"): step_given_well_exists(context) - + with session_ctx() as session: soil_rock = SoilRockResults( point_id="TEST001", @@ -537,9 +559,11 @@ def step_when_well_deleted(context: Context): def step_then_chemistry_deleted(context: Context): """Verify chemistry samples are cascade deleted.""" with session_ctx() as session: - remaining = session.query(ChemistrySampleInfo).filter( - ChemistrySampleInfo.thing_id == context.test_well_id - ).count() + remaining = ( + session.query(ChemistrySampleInfo) + .filter(ChemistrySampleInfo.thing_id == context.test_well_id) + .count() + ) assert remaining == 0, f"Expected 0 chemistry samples, found {remaining}" @@ -547,9 +571,11 @@ def step_then_chemistry_deleted(context: Context): def step_then_hydraulics_deleted(context: Context): """Verify hydraulic data is cascade deleted.""" with session_ctx() as session: - remaining = session.query(NMAHydraulicsData).filter( - NMAHydraulicsData.thing_id == context.test_well_id - ).count() + remaining = ( + session.query(NMAHydraulicsData) + .filter(NMAHydraulicsData.thing_id == context.test_well_id) + .count() + ) assert remaining == 0, f"Expected 0 hydraulic records, found {remaining}" @@ -557,9 +583,11 @@ def step_then_hydraulics_deleted(context: Context): def step_then_lithology_deleted(context: Context): """Verify lithology logs are cascade deleted.""" with session_ctx() as session: - remaining = session.query(Stratigraphy).filter( - Stratigraphy.thing_id == context.test_well_id - ).count() + remaining = ( + session.query(Stratigraphy) + .filter(Stratigraphy.thing_id == context.test_well_id) + .count() + ) assert remaining == 0, f"Expected 0 lithology logs, found {remaining}" @@ -567,9 +595,11 @@ def step_then_lithology_deleted(context: Context): def step_then_radionuclides_deleted(context: Context): """Verify radionuclide results are cascade deleted.""" with session_ctx() as session: - remaining = session.query(NMARadionuclides).filter( - NMARadionuclides.thing_id == context.test_well_id - ).count() + remaining = ( + session.query(NMARadionuclides) + .filter(NMARadionuclides.thing_id == context.test_well_id) + .count() + ) assert remaining == 0, f"Expected 0 radionuclide records, found {remaining}" @@ -577,9 +607,11 @@ def step_then_radionuclides_deleted(context: Context): def step_then_associated_data_deleted(context: Context): """Verify associated data is cascade deleted.""" with session_ctx() as session: - remaining = session.query(AssociatedData).filter( - AssociatedData.thing_id == context.test_well_id - ).count() + remaining = ( + session.query(AssociatedData) + .filter(AssociatedData.thing_id == context.test_well_id) + .count() + ) assert remaining == 0, f"Expected 0 associated data records, found {remaining}" @@ -587,9 +619,11 @@ def step_then_associated_data_deleted(context: Context): def step_then_soil_rock_deleted(context: Context): """Verify soil/rock results are cascade deleted.""" with session_ctx() as session: - remaining = session.query(SoilRockResults).filter( - SoilRockResults.thing_id == context.test_well_id - ).count() + remaining = ( + session.query(SoilRockResults) + .filter(SoilRockResults.thing_id == context.test_well_id) + .count() + ) assert remaining == 0, f"Expected 0 soil/rock records, found {remaining}" From e355b30e8395b45c8cf37932ad20e0f6bf2fedd2 Mon Sep 17 00:00:00 2001 From: Kimball Bighorse Date: Mon, 26 Jan 2026 10:17:13 -0800 Subject: [PATCH 04/22] test: 
add failing tests for Thing FK enforcement (Issue #363) Add integration and unit tests for well data relationships feature: - Integration tests (test_well_data_relationships.py): - Wells store legacy identifiers (nma_pk_welldata, nma_pk_location) - Related records require a well (thing_id cannot be None) - Relationship navigation from Thing to NMA legacy models - Cascade delete behavior - Unit tests added to existing files: - test_thing.py: Thing column and relationship assertions - test_hydraulics_data_legacy.py: validator and back_populates - test_associated_data_legacy.py: validator and back_populates - test_soil_rock_results_legacy.py: validator and back_populates - test_radionuclides_legacy.py: FK cascade and back_populates - test_stratigraphy_legacy.py (new): validator and back_populates These tests are expected to fail until the model changes are implemented. Co-Authored-By: Claude Opus 4.5 --- .../test_well_data_relationships.py | 595 ++++++++++++++++++ tests/test_associated_data_legacy.py | 48 ++ tests/test_hydraulics_data_legacy.py | 50 ++ tests/test_radionuclides_legacy.py | 41 ++ tests/test_soil_rock_results_legacy.py | 46 ++ tests/test_stratigraphy_legacy.py | 107 ++++ tests/test_thing.py | 66 ++ tests/unit/__init__.py | 1 + 8 files changed, 954 insertions(+) create mode 100644 tests/integration/test_well_data_relationships.py create mode 100644 tests/test_stratigraphy_legacy.py create mode 100644 tests/unit/__init__.py diff --git a/tests/integration/test_well_data_relationships.py b/tests/integration/test_well_data_relationships.py new file mode 100644 index 00000000..e0f68a98 --- /dev/null +++ b/tests/integration/test_well_data_relationships.py @@ -0,0 +1,595 @@ +# =============================================================================== +# Copyright 2026 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# =============================================================================== +""" +Integration tests for Well Data Relationships feature. 
+ +These tests verify the business requirements from: + features/admin/well_data_relationships.feature + +Feature: Well Data Relationships + As a NMBGMR data manager + I need well-related records to always belong to a well + So that data integrity is maintained and orphaned records are prevented +""" + +import uuid +from datetime import datetime + +import pytest + +from db.engine import session_ctx +from db.nma_legacy import ( + NMA_AssociatedData, + NMA_Chemistry_SampleInfo, + NMA_HydraulicsData, + NMA_Radionuclides, + NMA_Soil_Rock_Results, + NMA_Stratigraphy, +) +from db.thing import Thing + + +# ============================================================================= +# Fixtures +# ============================================================================= + + +@pytest.fixture +def well_for_relationships(): + """Create a well specifically for relationship testing.""" + with session_ctx() as session: + well = Thing( + name="FK Test Well", + thing_type="water well", + release_status="draft", + nma_pk_welldata="TEST-WELLDATA-GUID-12345", + nma_pk_location="TEST-LOCATION-GUID-67890", + ) + session.add(well) + session.commit() + session.refresh(well) + yield well + # Cleanup: delete the well (should cascade to children) + session.delete(well) + session.commit() + + +# ============================================================================= +# Wells Store Legacy Identifiers +# ============================================================================= + + +class TestWellsStoreLegacyIdentifiers: + """ + @wells + Scenario: Wells store their legacy WellID + Scenario: Wells store their legacy LocationID + """ + + def test_well_stores_legacy_welldata_id(self): + """Wells can store their original NM_Aquifer WellID.""" + with session_ctx() as session: + well = Thing( + name="Legacy WellID Test", + thing_type="water well", + release_status="draft", + nma_pk_welldata="LEGACY-WELLID-12345", + ) + session.add(well) + session.commit() + session.refresh(well) + + assert well.nma_pk_welldata == "LEGACY-WELLID-12345" + + # Cleanup + session.delete(well) + session.commit() + + def test_well_found_by_legacy_welldata_id(self): + """Wells can be found by their legacy WellID.""" + legacy_id = f"FINDME-WELL-{uuid.uuid4().hex[:8]}" + with session_ctx() as session: + well = Thing( + name="Findable Well", + thing_type="water well", + release_status="draft", + nma_pk_welldata=legacy_id, + ) + session.add(well) + session.commit() + + # Query by legacy ID + found = ( + session.query(Thing).filter(Thing.nma_pk_welldata == legacy_id).first() + ) + assert found is not None + assert found.name == "Findable Well" + + session.delete(well) + session.commit() + + def test_well_stores_legacy_location_id(self): + """Wells can store their original NM_Aquifer LocationID.""" + with session_ctx() as session: + well = Thing( + name="Legacy LocationID Test", + thing_type="water well", + release_status="draft", + nma_pk_location="LEGACY-LOCATIONID-67890", + ) + session.add(well) + session.commit() + session.refresh(well) + + assert well.nma_pk_location == "LEGACY-LOCATIONID-67890" + + # Cleanup + session.delete(well) + session.commit() + + def test_well_found_by_legacy_location_id(self): + """Wells can be found by their legacy LocationID.""" + legacy_id = f"FINDME-LOC-{uuid.uuid4().hex[:8]}" + with session_ctx() as session: + well = Thing( + name="Findable by Location", + thing_type="water well", + release_status="draft", + nma_pk_location=legacy_id, + ) + session.add(well) + session.commit() + + # Query by legacy ID + 
found = ( + session.query(Thing).filter(Thing.nma_pk_location == legacy_id).first() + ) + assert found is not None + assert found.name == "Findable by Location" + + session.delete(well) + session.commit() + + +# ============================================================================= +# Related Records Require a Well +# ============================================================================= + + +class TestRelatedRecordsRequireWell: + """ + @chemistry, @hydraulics, @stratigraphy, @radionuclides, @associated-data, @soil-rock + Scenarios: Various record types require a well (thing_id cannot be None) + """ + + def test_chemistry_sample_requires_well(self): + """ + @chemistry + Scenario: Chemistry samples require a well + """ + with session_ctx() as session: + with pytest.raises(ValueError, match="requires a parent Thing"): + record = NMA_Chemistry_SampleInfo( + sample_pt_id=uuid.uuid4(), + sample_point_id="ORPHAN-CHEM", + thing_id=None, # This should raise ValueError + ) + session.add(record) + session.flush() + + def test_hydraulics_data_requires_well(self): + """ + @hydraulics + Scenario: Hydraulic test data requires a well + """ + with session_ctx() as session: + with pytest.raises(ValueError, match="requires a parent Thing"): + record = NMA_HydraulicsData( + point_id="ORPHAN-HYD", + date_measured=datetime.now(), + test_bottom=100, + test_top=50, + thing_id=None, # This should raise ValueError + ) + session.add(record) + session.flush() + + def test_stratigraphy_requires_well(self): + """ + @stratigraphy + Scenario: Lithology logs require a well + """ + with session_ctx() as session: + with pytest.raises(ValueError, match="requires a parent Thing"): + record = NMA_Stratigraphy( + point_id="ORPHAN-STRAT", + thing_id=None, # This should raise ValueError + ) + session.add(record) + session.flush() + + def test_radionuclides_requires_well(self): + """ + @radionuclides + Scenario: Radionuclide results require a well + """ + with session_ctx() as session: + with pytest.raises(ValueError, match="requires a parent Thing"): + record = NMA_Radionuclides( + sample_pt_id=uuid.uuid4(), + thing_id=None, # This should raise ValueError + ) + session.add(record) + session.flush() + + def test_associated_data_requires_well(self): + """ + @associated-data + Scenario: Associated data requires a well + """ + with session_ctx() as session: + with pytest.raises(ValueError, match="requires a parent Thing"): + record = NMA_AssociatedData( + point_id="ORPHAN-ASSOC", + thing_id=None, # This should raise ValueError + ) + session.add(record) + session.flush() + + def test_soil_rock_results_requires_well(self): + """ + @soil-rock + Scenario: Soil and rock results require a well + """ + with session_ctx() as session: + with pytest.raises(ValueError, match="requires a parent Thing"): + record = NMA_Soil_Rock_Results( + point_id="ORPHAN-SOIL", + thing_id=None, # This should raise ValueError + ) + session.add(record) + session.flush() + + +# ============================================================================= +# Relationship Navigation +# ============================================================================= + + +class TestRelationshipNavigation: + """ + @relationships + Scenario: A well can access its related records through relationships + """ + + def test_well_navigates_to_chemistry_samples(self, well_for_relationships): + """Well can navigate to its chemistry sample records.""" + with session_ctx() as session: + well = session.merge(well_for_relationships) + + # Create a chemistry sample for 
this well + sample = NMA_Chemistry_SampleInfo( + sample_pt_id=uuid.uuid4(), + sample_point_id="NAV-CHEM-01", + thing_id=well.id, + ) + session.add(sample) + session.commit() + session.refresh(well) + + # Navigate through relationship + assert hasattr(well, "chemistry_sample_infos") + assert len(well.chemistry_sample_infos) >= 1 + assert any(s.sample_point_id == "NAV-CHEM-01" for s in well.chemistry_sample_infos) + + def test_well_navigates_to_hydraulics_data(self, well_for_relationships): + """Well can navigate to its hydraulic test data.""" + with session_ctx() as session: + well = session.merge(well_for_relationships) + + # Create hydraulics data for this well + hydraulics = NMA_HydraulicsData( + point_id="NAV-HYD-01", + date_measured=datetime.now(), + test_bottom=100, + test_top=50, + thing_id=well.id, + ) + session.add(hydraulics) + session.commit() + session.refresh(well) + + # Navigate through relationship + assert hasattr(well, "hydraulics_data") + assert len(well.hydraulics_data) >= 1 + assert any(h.point_id == "NAV-HYD-01" for h in well.hydraulics_data) + + def test_well_navigates_to_stratigraphy_logs(self, well_for_relationships): + """Well can navigate to its lithology logs.""" + with session_ctx() as session: + well = session.merge(well_for_relationships) + + # Create stratigraphy log for this well + strat = NMA_Stratigraphy( + point_id="NAV-STRAT-01", + thing_id=well.id, + ) + session.add(strat) + session.commit() + session.refresh(well) + + # Navigate through relationship + assert hasattr(well, "stratigraphy_logs") + assert len(well.stratigraphy_logs) >= 1 + assert any(s.point_id == "NAV-STRAT-01" for s in well.stratigraphy_logs) + + def test_well_navigates_to_radionuclides(self, well_for_relationships): + """Well can navigate to its radionuclide results.""" + with session_ctx() as session: + well = session.merge(well_for_relationships) + + # Create radionuclide record for this well + radio = NMA_Radionuclides( + sample_pt_id=uuid.uuid4(), + thing_id=well.id, + ) + session.add(radio) + session.commit() + session.refresh(well) + + # Navigate through relationship + assert hasattr(well, "radionuclides") + assert len(well.radionuclides) >= 1 + + def test_well_navigates_to_associated_data(self, well_for_relationships): + """Well can navigate to its associated data.""" + with session_ctx() as session: + well = session.merge(well_for_relationships) + + # Create associated data for this well + assoc = NMA_AssociatedData( + point_id="NAV-ASSOC-01", + thing_id=well.id, + ) + session.add(assoc) + session.commit() + session.refresh(well) + + # Navigate through relationship + assert hasattr(well, "associated_data") + assert len(well.associated_data) >= 1 + assert any(a.point_id == "NAV-ASSOC-01" for a in well.associated_data) + + def test_well_navigates_to_soil_rock_results(self, well_for_relationships): + """Well can navigate to its soil/rock results.""" + with session_ctx() as session: + well = session.merge(well_for_relationships) + + # Create soil/rock result for this well + soil = NMA_Soil_Rock_Results( + point_id="NAV-SOIL-01", + thing_id=well.id, + ) + session.add(soil) + session.commit() + session.refresh(well) + + # Navigate through relationship + assert hasattr(well, "soil_rock_results") + assert len(well.soil_rock_results) >= 1 + assert any(s.point_id == "NAV-SOIL-01" for s in well.soil_rock_results) + + +# ============================================================================= +# Deleting a Well Removes Related Records (Cascade Delete) +# 
============================================================================= + + +class TestCascadeDelete: + """ + @cascade-delete + Scenarios: Deleting a well removes its related records + """ + + def test_deleting_well_cascades_to_chemistry_samples(self): + """ + @cascade-delete + Scenario: Deleting a well removes its chemistry samples + """ + with session_ctx() as session: + # Create well with chemistry sample + well = Thing( + name="Cascade Chemistry Test", + thing_type="water well", + release_status="draft", + ) + session.add(well) + session.commit() + + sample = NMA_Chemistry_SampleInfo( + sample_pt_id=uuid.uuid4(), + sample_point_id="CASCADE-CHEM-01", + thing_id=well.id, + ) + session.add(sample) + session.commit() + sample_id = sample.id + + # Delete the well + session.delete(well) + session.commit() + + # Verify chemistry sample was also deleted + orphan = session.get(NMA_Chemistry_SampleInfo, sample_id) + assert orphan is None, "Chemistry sample should be deleted with well" + + def test_deleting_well_cascades_to_hydraulics_data(self): + """ + @cascade-delete + Scenario: Deleting a well removes its hydraulic data + """ + with session_ctx() as session: + # Create well with hydraulics data + well = Thing( + name="Cascade Hydraulics Test", + thing_type="water well", + release_status="draft", + ) + session.add(well) + session.commit() + + hydraulics = NMA_HydraulicsData( + point_id="CASCADE-HYD-01", + date_measured=datetime.now(), + test_bottom=100, + test_top=50, + thing_id=well.id, + ) + session.add(hydraulics) + session.commit() + hyd_id = hydraulics.id + + # Delete the well + session.delete(well) + session.commit() + + # Verify hydraulics data was also deleted + orphan = session.get(NMA_HydraulicsData, hyd_id) + assert orphan is None, "Hydraulics data should be deleted with well" + + def test_deleting_well_cascades_to_stratigraphy_logs(self): + """ + @cascade-delete + Scenario: Deleting a well removes its lithology logs + """ + with session_ctx() as session: + # Create well with stratigraphy log + well = Thing( + name="Cascade Stratigraphy Test", + thing_type="water well", + release_status="draft", + ) + session.add(well) + session.commit() + + strat = NMA_Stratigraphy( + point_id="CASCADE-STRAT-01", + thing_id=well.id, + ) + session.add(strat) + session.commit() + strat_id = strat.id + + # Delete the well + session.delete(well) + session.commit() + + # Verify stratigraphy was also deleted + orphan = session.get(NMA_Stratigraphy, strat_id) + assert orphan is None, "Stratigraphy log should be deleted with well" + + def test_deleting_well_cascades_to_radionuclides(self): + """ + @cascade-delete + Scenario: Deleting a well removes its radionuclide results + """ + with session_ctx() as session: + # Create well with radionuclide record + well = Thing( + name="Cascade Radionuclides Test", + thing_type="water well", + release_status="draft", + ) + session.add(well) + session.commit() + + radio = NMA_Radionuclides( + sample_pt_id=uuid.uuid4(), + thing_id=well.id, + ) + session.add(radio) + session.commit() + radio_id = radio.id + + # Delete the well + session.delete(well) + session.commit() + + # Verify radionuclide record was also deleted + orphan = session.get(NMA_Radionuclides, radio_id) + assert orphan is None, "Radionuclide record should be deleted with well" + + def test_deleting_well_cascades_to_associated_data(self): + """ + @cascade-delete + Scenario: Deleting a well removes its associated data + """ + with session_ctx() as session: + # Create well with associated data + well = 
Thing( + name="Cascade Associated Test", + thing_type="water well", + release_status="draft", + ) + session.add(well) + session.commit() + + assoc = NMA_AssociatedData( + point_id="CASCADE-ASSOC-01", + thing_id=well.id, + ) + session.add(assoc) + session.commit() + assoc_id = assoc.id + + # Delete the well + session.delete(well) + session.commit() + + # Verify associated data was also deleted + orphan = session.get(NMA_AssociatedData, assoc_id) + assert orphan is None, "Associated data should be deleted with well" + + def test_deleting_well_cascades_to_soil_rock_results(self): + """ + @cascade-delete + Scenario: Deleting a well removes its soil/rock results + """ + with session_ctx() as session: + # Create well with soil/rock results + well = Thing( + name="Cascade Soil Rock Test", + thing_type="water well", + release_status="draft", + ) + session.add(well) + session.commit() + + soil = NMA_Soil_Rock_Results( + point_id="CASCADE-SOIL-01", + thing_id=well.id, + ) + session.add(soil) + session.commit() + soil_id = soil.id + + # Delete the well + session.delete(well) + session.commit() + + # Verify soil/rock results were also deleted + orphan = session.get(NMA_Soil_Rock_Results, soil_id) + assert orphan is None, "Soil/rock results should be deleted with well" diff --git a/tests/test_associated_data_legacy.py b/tests/test_associated_data_legacy.py index 7919b049..ae47b45b 100644 --- a/tests/test_associated_data_legacy.py +++ b/tests/test_associated_data_legacy.py @@ -79,4 +79,52 @@ def test_create_associated_data_minimal(): session.commit() +# ===================== FK Enforcement tests (Issue #363) ========================== + + +def test_associated_data_validator_rejects_none_thing_id(): + """NMA_AssociatedData validator rejects None thing_id.""" + import pytest + + with pytest.raises(ValueError, match="requires a parent Thing"): + NMA_AssociatedData( + assoc_id=uuid4(), + point_id="ORPHAN-TEST", + thing_id=None, + ) + + +def test_associated_data_thing_id_not_nullable(): + """NMA_AssociatedData.thing_id column is NOT NULL.""" + col = NMA_AssociatedData.__table__.c.thing_id + assert col.nullable is False, "thing_id should be NOT NULL" + + +def test_associated_data_fk_has_cascade(): + """NMA_AssociatedData.thing_id FK has ondelete=CASCADE.""" + col = NMA_AssociatedData.__table__.c.thing_id + fk = list(col.foreign_keys)[0] + assert fk.ondelete == "CASCADE" + + +def test_associated_data_back_populates_thing(water_well_thing): + """NMA_AssociatedData.thing navigates back to Thing.""" + with session_ctx() as session: + well = session.merge(water_well_thing) + record = NMA_AssociatedData( + assoc_id=uuid4(), + point_id="BP-ASSOC-01", + thing_id=well.id, + ) + session.add(record) + session.commit() + session.refresh(record) + + assert record.thing is not None + assert record.thing.id == well.id + + session.delete(record) + session.commit() + + # ============= EOF ============================================= diff --git a/tests/test_hydraulics_data_legacy.py b/tests/test_hydraulics_data_legacy.py index a2493337..b2cef985 100644 --- a/tests/test_hydraulics_data_legacy.py +++ b/tests/test_hydraulics_data_legacy.py @@ -260,4 +260,54 @@ def test_hydraulics_data_table_name(): assert NMA_HydraulicsData.__tablename__ == "NMA_HydraulicsData" +# ===================== FK Enforcement tests (Issue #363) ========================== + + +def test_hydraulics_data_validator_rejects_none_thing_id(): + """NMA_HydraulicsData validator rejects None thing_id.""" + import pytest + + with pytest.raises(ValueError, 
match="requires a parent Thing"): + NMA_HydraulicsData( + global_id=_next_global_id(), + test_top=5, + test_bottom=15, + thing_id=None, + ) + + +def test_hydraulics_data_thing_id_not_nullable(): + """NMA_HydraulicsData.thing_id column is NOT NULL.""" + col = NMA_HydraulicsData.__table__.c.thing_id + assert col.nullable is False, "thing_id should be NOT NULL" + + +def test_hydraulics_data_fk_has_cascade(): + """NMA_HydraulicsData.thing_id FK has ondelete=CASCADE.""" + col = NMA_HydraulicsData.__table__.c.thing_id + fk = list(col.foreign_keys)[0] + assert fk.ondelete == "CASCADE" + + +def test_hydraulics_data_back_populates_thing(water_well_thing): + """NMA_HydraulicsData.thing navigates back to Thing.""" + with session_ctx() as session: + well = session.merge(water_well_thing) + record = NMA_HydraulicsData( + global_id=_next_global_id(), + test_top=5, + test_bottom=15, + thing_id=well.id, + ) + session.add(record) + session.commit() + session.refresh(record) + + assert record.thing is not None + assert record.thing.id == well.id + + session.delete(record) + session.commit() + + # ============= EOF ============================================= diff --git a/tests/test_radionuclides_legacy.py b/tests/test_radionuclides_legacy.py index 1e13e5b6..efaec941 100644 --- a/tests/test_radionuclides_legacy.py +++ b/tests/test_radionuclides_legacy.py @@ -289,4 +289,45 @@ def test_radionuclides_table_name(): assert NMA_Radionuclides.__tablename__ == "NMA_Radionuclides" +# ===================== FK Enforcement tests (Issue #363) ========================== + + +def test_radionuclides_fk_has_cascade(): + """NMA_Radionuclides.thing_id FK has ondelete=CASCADE.""" + col = NMA_Radionuclides.__table__.c.thing_id + fk = list(col.foreign_keys)[0] + assert fk.ondelete == "CASCADE" + + +def test_radionuclides_back_populates_thing(water_well_thing): + """NMA_Radionuclides.thing navigates back to Thing.""" + with session_ctx() as session: + well = session.merge(water_well_thing) + + # Radionuclides requires a chemistry_sample_info + sample_info = NMA_Chemistry_SampleInfo( + sample_pt_id=uuid4(), + sample_point_id=_next_sample_point_id(), + thing_id=well.id, + ) + session.add(sample_info) + session.commit() + + record = NMA_Radionuclides( + global_id=uuid4(), + sample_pt_id=sample_info.sample_pt_id, + thing_id=well.id, + ) + session.add(record) + session.commit() + session.refresh(record) + + assert record.thing is not None + assert record.thing.id == well.id + + session.delete(record) + session.delete(sample_info) + session.commit() + + # ============= EOF ============================================= diff --git a/tests/test_soil_rock_results_legacy.py b/tests/test_soil_rock_results_legacy.py index 72ac70df..78c9ea1b 100644 --- a/tests/test_soil_rock_results_legacy.py +++ b/tests/test_soil_rock_results_legacy.py @@ -78,4 +78,50 @@ def test_create_soil_rock_results_minimal(): session.commit() +# ===================== FK Enforcement tests (Issue #363) ========================== + + +def test_soil_rock_results_validator_rejects_none_thing_id(): + """NMA_Soil_Rock_Results validator rejects None thing_id.""" + import pytest + + with pytest.raises(ValueError, match="requires a parent Thing"): + NMA_Soil_Rock_Results( + point_id="ORPHAN-TEST", + thing_id=None, + ) + + +def test_soil_rock_results_thing_id_not_nullable(): + """NMA_Soil_Rock_Results.thing_id column is NOT NULL.""" + col = NMA_Soil_Rock_Results.__table__.c.thing_id + assert col.nullable is False, "thing_id should be NOT NULL" + + +def 
test_soil_rock_results_fk_has_cascade(): + """NMA_Soil_Rock_Results.thing_id FK has ondelete=CASCADE.""" + col = NMA_Soil_Rock_Results.__table__.c.thing_id + fk = list(col.foreign_keys)[0] + assert fk.ondelete == "CASCADE" + + +def test_soil_rock_results_back_populates_thing(water_well_thing): + """NMA_Soil_Rock_Results.thing navigates back to Thing.""" + with session_ctx() as session: + well = session.merge(water_well_thing) + record = NMA_Soil_Rock_Results( + point_id="BP-SOIL-01", + thing_id=well.id, + ) + session.add(record) + session.commit() + session.refresh(record) + + assert record.thing is not None + assert record.thing.id == well.id + + session.delete(record) + session.commit() + + # ============= EOF ============================================= diff --git a/tests/test_stratigraphy_legacy.py b/tests/test_stratigraphy_legacy.py new file mode 100644 index 00000000..ee99915e --- /dev/null +++ b/tests/test_stratigraphy_legacy.py @@ -0,0 +1,107 @@ +# =============================================================================== +# Copyright 2026 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# =============================================================================== +""" +Unit tests for NMA_Stratigraphy (lithology log) legacy model. + +These tests verify FK enforcement for Issue #363. 
+""" + +from uuid import uuid4 + +import pytest + +from db.engine import session_ctx +from db.nma_legacy import NMA_Stratigraphy + + +def _next_global_id(): + return uuid4() + + +# ===================== CREATE tests ========================== + + +def test_create_stratigraphy_with_thing(water_well_thing): + """Test creating a stratigraphy record with a parent Thing.""" + with session_ctx() as session: + well = session.merge(water_well_thing) + record = NMA_Stratigraphy( + global_id=_next_global_id(), + point_id="STRAT-01", + thing_id=well.id, + strat_top=0.0, + strat_bottom=10.0, + lithology="Sandstone", + ) + session.add(record) + session.commit() + session.refresh(record) + + assert record.global_id is not None + assert record.point_id == "STRAT-01" + assert record.thing_id == well.id + + session.delete(record) + session.commit() + + +# ===================== FK Enforcement tests (Issue #363) ========================== + + +def test_stratigraphy_validator_rejects_none_thing_id(): + """NMA_Stratigraphy validator rejects None thing_id.""" + with pytest.raises(ValueError, match="requires a parent Thing"): + NMA_Stratigraphy( + global_id=_next_global_id(), + point_id="ORPHAN-STRAT", + thing_id=None, + ) + + +def test_stratigraphy_thing_id_not_nullable(): + """NMA_Stratigraphy.thing_id column is NOT NULL.""" + col = NMA_Stratigraphy.__table__.c.thing_id + assert col.nullable is False, "thing_id should be NOT NULL" + + +def test_stratigraphy_fk_has_cascade(): + """NMA_Stratigraphy.thing_id FK has ondelete=CASCADE.""" + col = NMA_Stratigraphy.__table__.c.thing_id + fk = list(col.foreign_keys)[0] + assert fk.ondelete == "CASCADE" + + +def test_stratigraphy_back_populates_thing(water_well_thing): + """NMA_Stratigraphy.thing navigates back to Thing.""" + with session_ctx() as session: + well = session.merge(water_well_thing) + record = NMA_Stratigraphy( + global_id=_next_global_id(), + point_id="BP-STRAT-01", + thing_id=well.id, + ) + session.add(record) + session.commit() + session.refresh(record) + + assert record.thing is not None + assert record.thing.id == well.id + + session.delete(record) + session.commit() + + +# ============= EOF ============================================= diff --git a/tests/test_thing.py b/tests/test_thing.py index f60a32f7..343f24db 100644 --- a/tests/test_thing.py +++ b/tests/test_thing.py @@ -1139,3 +1139,69 @@ def test_delete_thing_id_link_404_not_found(second_thing_id_link): assert response.status_code == 404 data = response.json() assert data["detail"] == f"ThingIdLink with ID {bad_id} not found." 
+ + +# ============================================================================= +# FK Enforcement Tests - Issue #363 +# Feature: features/admin/well_data_relationships.feature +# ============================================================================= + + +class TestThingLegacyIdentifierColumns: + """Tests for Thing's legacy identifier columns (nma_pk_welldata, nma_pk_location).""" + + def test_thing_has_nma_pk_welldata_column(self): + """Thing model has nma_pk_welldata column for legacy WellID.""" + assert hasattr(Thing, "nma_pk_welldata") + + def test_thing_has_nma_pk_location_column(self): + """Thing model has nma_pk_location column for legacy LocationID.""" + assert hasattr(Thing, "nma_pk_location") + + +class TestThingNMARelationshipCollections: + """Tests for Thing's relationship collections to NMA legacy models.""" + + def test_thing_has_hydraulics_data_relationship(self): + """Thing model has hydraulics_data relationship collection.""" + assert hasattr(Thing, "hydraulics_data") + + def test_thing_has_radionuclides_relationship(self): + """Thing model has radionuclides relationship collection.""" + assert hasattr(Thing, "radionuclides") + + def test_thing_has_associated_data_relationship(self): + """Thing model has associated_data relationship collection.""" + assert hasattr(Thing, "associated_data") + + def test_thing_has_soil_rock_results_relationship(self): + """Thing model has soil_rock_results relationship collection.""" + assert hasattr(Thing, "soil_rock_results") + + +class TestThingNMACascadeDeleteConfiguration: + """Tests for cascade delete-orphan configuration on Thing relationships.""" + + def test_hydraulics_data_has_cascade_delete(self): + """hydraulics_data relationship has cascade delete configured.""" + rel = Thing.__mapper__.relationships.get("hydraulics_data") + assert rel is not None, "hydraulics_data relationship should exist" + assert "delete" in rel.cascade or "all" in rel.cascade + + def test_radionuclides_has_cascade_delete(self): + """radionuclides relationship has cascade delete configured.""" + rel = Thing.__mapper__.relationships.get("radionuclides") + assert rel is not None, "radionuclides relationship should exist" + assert "delete" in rel.cascade or "all" in rel.cascade + + def test_associated_data_has_cascade_delete(self): + """associated_data relationship has cascade delete configured.""" + rel = Thing.__mapper__.relationships.get("associated_data") + assert rel is not None, "associated_data relationship should exist" + assert "delete" in rel.cascade or "all" in rel.cascade + + def test_soil_rock_results_has_cascade_delete(self): + """soil_rock_results relationship has cascade delete configured.""" + rel = Thing.__mapper__.relationships.get("soil_rock_results") + assert rel is not None, "soil_rock_results relationship should exist" + assert "delete" in rel.cascade or "all" in rel.cascade diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py new file mode 100644 index 00000000..4a5d2636 --- /dev/null +++ b/tests/unit/__init__.py @@ -0,0 +1 @@ +# Unit tests package From 7e738054f307cc5662d00148e88e0a1586ba7245 Mon Sep 17 00:00:00 2001 From: Kimball Bighorse Date: Mon, 26 Jan 2026 12:49:24 -0800 Subject: [PATCH 05/22] test: update minimal creation tests to require thing_id Update NMA_AssociatedData and NMA_Soil_Rock_Results minimal creation tests to include a thing_id, preparing for NOT NULL constraint. 
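For illustration, the minimal-creation pattern now looks roughly like
this (a sketch only, assuming the existing water_well_thing fixture and
the session_ctx helper used throughout these tests):

    with session_ctx() as session:
        # merge the fixture well into this session, then attach the record to it
        well = session.merge(water_well_thing)
        record = NMA_AssociatedData(assoc_id=uuid4(), thing_id=well.id)
        session.add(record)
        session.commit()
        assert record.thing_id == well.id

Once the NOT NULL constraint and the thing_id validators land in the
later commits, constructing one of these records without a parent well
is expected to fail (ValueError from the validator, or an IntegrityError
at flush time).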
Co-Authored-By: Claude Opus 4.5 --- tests/test_associated_data_legacy.py | 6 ++++-- tests/test_soil_rock_results_legacy.py | 6 ++++-- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/tests/test_associated_data_legacy.py b/tests/test_associated_data_legacy.py index ae47b45b..83359052 100644 --- a/tests/test_associated_data_legacy.py +++ b/tests/test_associated_data_legacy.py @@ -60,15 +60,17 @@ def test_create_associated_data_all_fields(water_well_thing): session.commit() -def test_create_associated_data_minimal(): +def test_create_associated_data_minimal(water_well_thing): """Test creating an associated data record with required fields only.""" with session_ctx() as session: - record = NMA_AssociatedData(assoc_id=uuid4()) + well = session.merge(water_well_thing) + record = NMA_AssociatedData(assoc_id=uuid4(), thing_id=well.id) session.add(record) session.commit() session.refresh(record) assert record.assoc_id is not None + assert record.thing_id == well.id assert record.location_id is None assert record.point_id is None assert record.notes is None diff --git a/tests/test_soil_rock_results_legacy.py b/tests/test_soil_rock_results_legacy.py index 78c9ea1b..3ec2091c 100644 --- a/tests/test_soil_rock_results_legacy.py +++ b/tests/test_soil_rock_results_legacy.py @@ -59,15 +59,17 @@ def test_create_soil_rock_results_all_fields(water_well_thing): session.commit() -def test_create_soil_rock_results_minimal(): +def test_create_soil_rock_results_minimal(water_well_thing): """Test creating a soil/rock results record with required fields only.""" with session_ctx() as session: - record = NMA_Soil_Rock_Results() + well = session.merge(water_well_thing) + record = NMA_Soil_Rock_Results(thing_id=well.id) session.add(record) session.commit() session.refresh(record) assert record.id is not None + assert record.thing_id == well.id assert record.point_id is None assert record.sample_type is None assert record.date_sampled is None From 8230cd8f411b6adaa927105e9095dade2612041f Mon Sep 17 00:00:00 2001 From: Kimball Bighorse Date: Mon, 26 Jan 2026 12:49:44 -0800 Subject: [PATCH 06/22] feat: add FK enforcement to NMA legacy models (Issue #363) db/thing.py: - Add nma_pk_location column for legacy NM_Aquifer LocationID - Add relationship collections: hydraulics_data, radionuclides, associated_data, soil_rock_results - Configure cascade="all, delete-orphan" on all NMA relationships db/nma_legacy.py: - Add @validates("thing_id") to NMA_HydraulicsData, NMA_Stratigraphy, NMA_AssociatedData, NMA_Soil_Rock_Results - Add back_populates to NMA_HydraulicsData, NMA_AssociatedData, NMA_Soil_Rock_Results, NMA_Radionuclides - Change thing_id to NOT NULL on NMA_AssociatedData, NMA_Soil_Rock_Results Co-Authored-By: Claude Opus 4.5 --- db/nma_legacy.py | 55 ++++++++++++++++++++++++++++++++++++++++-------- db/thing.py | 45 ++++++++++++++++++++++++++++++++++++++- 2 files changed, 90 insertions(+), 10 deletions(-) diff --git a/db/nma_legacy.py b/db/nma_legacy.py index 72f39804..3d4f5d48 100644 --- a/db/nma_legacy.py +++ b/db/nma_legacy.py @@ -204,7 +204,16 @@ class NMA_HydraulicsData(Base): "Hydraulic Remarks", String(200) ) - thing: Mapped["Thing"] = relationship("Thing") + thing: Mapped["Thing"] = relationship("Thing", back_populates="hydraulics_data") + + @validates("thing_id") + def validate_thing_id(self, key, value): + """Prevent orphan NMA_HydraulicsData - must have a parent Thing.""" + if value is None: + raise ValueError( + "NMA_HydraulicsData requires a parent Thing (thing_id cannot be None)" + ) + return 
value class NMA_Stratigraphy(Base): @@ -237,6 +246,15 @@ class NMA_Stratigraphy(Base): thing: Mapped["Thing"] = relationship("Thing", back_populates="stratigraphy_logs") + @validates("thing_id") + def validate_thing_id(self, key, value): + """Prevent orphan NMA_Stratigraphy - must have a parent Thing.""" + if value is None: + raise ValueError( + "NMA_Stratigraphy requires a parent Thing (thing_id cannot be None)" + ) + return value + class NMA_Chemistry_SampleInfo(Base): """ @@ -351,11 +369,20 @@ class NMA_AssociatedData(Base): notes: Mapped[Optional[str]] = mapped_column("Notes", String(255)) formation: Mapped[Optional[str]] = mapped_column("Formation", String(15)) object_id: Mapped[Optional[int]] = mapped_column("OBJECTID", Integer, unique=True) - thing_id: Mapped[Optional[int]] = mapped_column( - Integer, ForeignKey("thing.id", ondelete="CASCADE") + thing_id: Mapped[int] = mapped_column( + Integer, ForeignKey("thing.id", ondelete="CASCADE"), nullable=False ) - thing: Mapped["Thing"] = relationship("Thing") + thing: Mapped["Thing"] = relationship("Thing", back_populates="associated_data") + + @validates("thing_id") + def validate_thing_id(self, key, value): + """Prevent orphan NMA_AssociatedData - must have a parent Thing.""" + if value is None: + raise ValueError( + "NMA_AssociatedData requires a parent Thing (thing_id cannot be None)" + ) + return value class NMA_SurfaceWaterData(Base): @@ -458,11 +485,20 @@ class NMA_Soil_Rock_Results(Base): d13c: Mapped[Optional[float]] = mapped_column("d13C", Float) d18o: Mapped[Optional[float]] = mapped_column("d18O", Float) sampled_by: Mapped[Optional[str]] = mapped_column("Sampled by", String(255)) - thing_id: Mapped[Optional[int]] = mapped_column( - Integer, ForeignKey("thing.id", ondelete="CASCADE") + thing_id: Mapped[int] = mapped_column( + Integer, ForeignKey("thing.id", ondelete="CASCADE"), nullable=False ) - thing: Mapped["Thing"] = relationship("Thing") + thing: Mapped["Thing"] = relationship("Thing", back_populates="soil_rock_results") + + @validates("thing_id") + def validate_thing_id(self, key, value): + """Prevent orphan NMA_Soil_Rock_Results - must have a parent Thing.""" + if value is None: + raise ValueError( + "NMA_Soil_Rock_Results requires a parent Thing (thing_id cannot be None)" + ) + return value class NMA_MinorTraceChemistry(Base): @@ -562,16 +598,17 @@ class NMA_Radionuclides(Base): analyses_agency: Mapped[Optional[str]] = mapped_column("AnalysesAgency", String(50)) wclab_id: Mapped[Optional[str]] = mapped_column("WCLab_ID", String(25)) - thing: Mapped["Thing"] = relationship("Thing") + thing: Mapped["Thing"] = relationship("Thing", back_populates="radionuclides") chemistry_sample_info: Mapped["NMA_Chemistry_SampleInfo"] = relationship( "NMA_Chemistry_SampleInfo", back_populates="radionuclides" ) @validates("thing_id") def validate_thing_id(self, key, value): + """Prevent orphan NMA_Radionuclides - must have a parent Thing.""" if value is None: raise ValueError( - "NMA_Radionuclides requires a Thing (thing_id cannot be None)" + "NMA_Radionuclides requires a parent Thing (thing_id cannot be None)" ) return value diff --git a/db/thing.py b/db/thing.py index 8c3f4d31..71134d49 100644 --- a/db/thing.py +++ b/db/thing.py @@ -47,7 +47,14 @@ from db.thing_geologic_formation_association import ( ThingGeologicFormationAssociation, ) - from db.nma_legacy import NMA_Chemistry_SampleInfo, NMA_Stratigraphy + from db.nma_legacy import ( + NMA_AssociatedData, + NMA_Chemistry_SampleInfo, + NMA_HydraulicsData, + NMA_Radionuclides, + 
NMA_Soil_Rock_Results, + NMA_Stratigraphy, + ) class Thing( @@ -71,6 +78,10 @@ class Thing( nullable=True, comment="To audit where the data came from in NM_Aquifer if it was transferred over", ) + nma_pk_location: Mapped[str] = mapped_column( + nullable=True, + comment="To audit the original NM_Aquifer LocationID if it was transferred over", + ) # TODO: should `name` be unique? name: Mapped[str] = mapped_column( @@ -319,6 +330,38 @@ class Thing( passive_deletes=True, ) + # One-To-Many: A Thing can have many NMA_HydraulicsData records (legacy NMA data). + hydraulics_data: Mapped[List["NMA_HydraulicsData"]] = relationship( + "NMA_HydraulicsData", + back_populates="thing", + cascade="all, delete-orphan", + passive_deletes=True, + ) + + # One-To-Many: A Thing can have many NMA_Radionuclides records (legacy NMA data). + radionuclides: Mapped[List["NMA_Radionuclides"]] = relationship( + "NMA_Radionuclides", + back_populates="thing", + cascade="all, delete-orphan", + passive_deletes=True, + ) + + # One-To-Many: A Thing can have many NMA_AssociatedData records (legacy NMA data). + associated_data: Mapped[List["NMA_AssociatedData"]] = relationship( + "NMA_AssociatedData", + back_populates="thing", + cascade="all, delete-orphan", + passive_deletes=True, + ) + + # One-To-Many: A Thing can have many NMA_Soil_Rock_Results records (legacy NMA data). + soil_rock_results: Mapped[List["NMA_Soil_Rock_Results"]] = relationship( + "NMA_Soil_Rock_Results", + back_populates="thing", + cascade="all, delete-orphan", + passive_deletes=True, + ) + # --- Association Proxies --- assets: AssociationProxy[list["Asset"]] = association_proxy( "asset_associations", "asset" From 6e297d550ff735c508ee94672a6a61ae23fa6b05 Mon Sep 17 00:00:00 2001 From: Kimball Bighorse Date: Mon, 26 Jan 2026 12:50:05 -0800 Subject: [PATCH 07/22] migrate: add nma_pk_location and enforce thing_id NOT NULL - Add nma_pk_location column to thing table - Delete orphan records from NMA_AssociatedData and NMA_Soil_Rock_Results - Make thing_id NOT NULL on NMA_AssociatedData and NMA_Soil_Rock_Results Co-Authored-By: Claude Opus 4.5 --- ..._enforce_thing_fk_for_nma_legacy_models.py | 87 +++++++++++++++++++ 1 file changed, 87 insertions(+) create mode 100644 alembic/versions/76e3ae8b99cb_enforce_thing_fk_for_nma_legacy_models.py diff --git a/alembic/versions/76e3ae8b99cb_enforce_thing_fk_for_nma_legacy_models.py b/alembic/versions/76e3ae8b99cb_enforce_thing_fk_for_nma_legacy_models.py new file mode 100644 index 00000000..22b7fb05 --- /dev/null +++ b/alembic/versions/76e3ae8b99cb_enforce_thing_fk_for_nma_legacy_models.py @@ -0,0 +1,87 @@ +"""enforce_thing_fk_for_nma_legacy_models + +Revision ID: 76e3ae8b99cb +Revises: c1d2e3f4a5b6 +Create Date: 2026-01-26 11:56:28.744603 + +Issue: #363 +Feature: features/admin/well_data_relationships.feature + +This migration enforces foreign key relationships between Thing and NMA legacy models: +1. Adds nma_pk_location column to Thing for storing legacy NM_Aquifer LocationID +2. Makes thing_id NOT NULL on NMA_AssociatedData (was nullable) +3. Makes thing_id NOT NULL on NMA_Soil_Rock_Results (was nullable) + +Note: Before running this migration, ensure no orphan records exist in the affected tables. +""" +from typing import Sequence, Union + +from alembic import op +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. 
+revision: str = '76e3ae8b99cb' +down_revision: Union[str, Sequence[str], None] = 'c1d2e3f4a5b6' +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + """Upgrade schema to enforce Thing FK relationships.""" + # 1. Add nma_pk_location column to thing table + op.add_column( + 'thing', + sa.Column( + 'nma_pk_location', + sa.String(), + nullable=True, + comment='To audit the original NM_Aquifer LocationID if it was transferred over' + ) + ) + + # 2. Make thing_id NOT NULL on NMA_AssociatedData + # First, delete any orphan records (records without a thing_id) + op.execute( + 'DELETE FROM "NMA_AssociatedData" WHERE thing_id IS NULL' + ) + op.alter_column( + 'NMA_AssociatedData', + 'thing_id', + existing_type=sa.Integer(), + nullable=False + ) + + # 3. Make thing_id NOT NULL on NMA_Soil_Rock_Results + # First, delete any orphan records (records without a thing_id) + op.execute( + 'DELETE FROM "NMA_Soil_Rock_Results" WHERE thing_id IS NULL' + ) + op.alter_column( + 'NMA_Soil_Rock_Results', + 'thing_id', + existing_type=sa.Integer(), + nullable=False + ) + + +def downgrade() -> None: + """Downgrade schema to allow nullable thing_id.""" + # 1. Remove nma_pk_location column from thing table + op.drop_column('thing', 'nma_pk_location') + + # 2. Make thing_id nullable on NMA_AssociatedData + op.alter_column( + 'NMA_AssociatedData', + 'thing_id', + existing_type=sa.Integer(), + nullable=True + ) + + # 3. Make thing_id nullable on NMA_Soil_Rock_Results + op.alter_column( + 'NMA_Soil_Rock_Results', + 'thing_id', + existing_type=sa.Integer(), + nullable=True + ) From d9c151c721e27215b64c54b1e8cf90dde9529810 Mon Sep 17 00:00:00 2001 From: kbighorse Date: Mon, 26 Jan 2026 20:56:26 +0000 Subject: [PATCH 08/22] Formatting changes --- ..._enforce_thing_fk_for_nma_legacy_models.py | 44 ++++++------------- .../test_well_data_relationships.py | 5 ++- 2 files changed, 17 insertions(+), 32 deletions(-) diff --git a/alembic/versions/76e3ae8b99cb_enforce_thing_fk_for_nma_legacy_models.py b/alembic/versions/76e3ae8b99cb_enforce_thing_fk_for_nma_legacy_models.py index 22b7fb05..ecaf8dd5 100644 --- a/alembic/versions/76e3ae8b99cb_enforce_thing_fk_for_nma_legacy_models.py +++ b/alembic/versions/76e3ae8b99cb_enforce_thing_fk_for_nma_legacy_models.py @@ -14,15 +14,15 @@ Note: Before running this migration, ensure no orphan records exist in the affected tables. """ + from typing import Sequence, Union from alembic import op import sqlalchemy as sa - # revision identifiers, used by Alembic. -revision: str = '76e3ae8b99cb' -down_revision: Union[str, Sequence[str], None] = 'c1d2e3f4a5b6' +revision: str = "76e3ae8b99cb" +down_revision: Union[str, Sequence[str], None] = "c1d2e3f4a5b6" branch_labels: Union[str, Sequence[str], None] = None depends_on: Union[str, Sequence[str], None] = None @@ -31,57 +31,41 @@ def upgrade() -> None: """Upgrade schema to enforce Thing FK relationships.""" # 1. Add nma_pk_location column to thing table op.add_column( - 'thing', + "thing", sa.Column( - 'nma_pk_location', + "nma_pk_location", sa.String(), nullable=True, - comment='To audit the original NM_Aquifer LocationID if it was transferred over' - ) + comment="To audit the original NM_Aquifer LocationID if it was transferred over", + ), ) # 2. 
Make thing_id NOT NULL on NMA_AssociatedData # First, delete any orphan records (records without a thing_id) - op.execute( - 'DELETE FROM "NMA_AssociatedData" WHERE thing_id IS NULL' - ) + op.execute('DELETE FROM "NMA_AssociatedData" WHERE thing_id IS NULL') op.alter_column( - 'NMA_AssociatedData', - 'thing_id', - existing_type=sa.Integer(), - nullable=False + "NMA_AssociatedData", "thing_id", existing_type=sa.Integer(), nullable=False ) # 3. Make thing_id NOT NULL on NMA_Soil_Rock_Results # First, delete any orphan records (records without a thing_id) - op.execute( - 'DELETE FROM "NMA_Soil_Rock_Results" WHERE thing_id IS NULL' - ) + op.execute('DELETE FROM "NMA_Soil_Rock_Results" WHERE thing_id IS NULL') op.alter_column( - 'NMA_Soil_Rock_Results', - 'thing_id', - existing_type=sa.Integer(), - nullable=False + "NMA_Soil_Rock_Results", "thing_id", existing_type=sa.Integer(), nullable=False ) def downgrade() -> None: """Downgrade schema to allow nullable thing_id.""" # 1. Remove nma_pk_location column from thing table - op.drop_column('thing', 'nma_pk_location') + op.drop_column("thing", "nma_pk_location") # 2. Make thing_id nullable on NMA_AssociatedData op.alter_column( - 'NMA_AssociatedData', - 'thing_id', - existing_type=sa.Integer(), - nullable=True + "NMA_AssociatedData", "thing_id", existing_type=sa.Integer(), nullable=True ) # 3. Make thing_id nullable on NMA_Soil_Rock_Results op.alter_column( - 'NMA_Soil_Rock_Results', - 'thing_id', - existing_type=sa.Integer(), - nullable=True + "NMA_Soil_Rock_Results", "thing_id", existing_type=sa.Integer(), nullable=True ) diff --git a/tests/integration/test_well_data_relationships.py b/tests/integration/test_well_data_relationships.py index e0f68a98..20d21f8b 100644 --- a/tests/integration/test_well_data_relationships.py +++ b/tests/integration/test_well_data_relationships.py @@ -41,7 +41,6 @@ ) from db.thing import Thing - # ============================================================================= # Fixtures # ============================================================================= @@ -293,7 +292,9 @@ def test_well_navigates_to_chemistry_samples(self, well_for_relationships): # Navigate through relationship assert hasattr(well, "chemistry_sample_infos") assert len(well.chemistry_sample_infos) >= 1 - assert any(s.sample_point_id == "NAV-CHEM-01" for s in well.chemistry_sample_infos) + assert any( + s.sample_point_id == "NAV-CHEM-01" for s in well.chemistry_sample_infos + ) def test_well_navigates_to_hydraulics_data(self, well_for_relationships): """Well can navigate to its hydraulic test data.""" From 81605bae91fb40cc1ffc2f61788a0ccceca6bbdd Mon Sep 17 00:00:00 2001 From: Kimball Bighorse Date: Mon, 26 Jan 2026 15:36:05 -0800 Subject: [PATCH 09/22] fix: add nma_pk_location to thing_version table SQLAlchemy-continuum creates a thing_version table that mirrors the thing table structure. The migration must add the new column to both tables for versioning to work correctly. 
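Roughly, the upgrade therefore has to add the column in two places
(sketch; the comment kwarg on the live table is elided here):

    # live table
    op.add_column(
        "thing",
        sa.Column("nma_pk_location", sa.String(), nullable=True),
    )
    # continuum history table mirroring thing
    op.add_column(
        "thing_version",
        sa.Column("nma_pk_location", sa.String(), nullable=True),
    )

and the downgrade drops the column from both "thing" and "thing_version".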
Co-Authored-By: Claude Opus 4.5 --- ...8b99cb_enforce_thing_fk_for_nma_legacy_models.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/alembic/versions/76e3ae8b99cb_enforce_thing_fk_for_nma_legacy_models.py b/alembic/versions/76e3ae8b99cb_enforce_thing_fk_for_nma_legacy_models.py index ecaf8dd5..33784c7e 100644 --- a/alembic/versions/76e3ae8b99cb_enforce_thing_fk_for_nma_legacy_models.py +++ b/alembic/versions/76e3ae8b99cb_enforce_thing_fk_for_nma_legacy_models.py @@ -29,7 +29,7 @@ def upgrade() -> None: """Upgrade schema to enforce Thing FK relationships.""" - # 1. Add nma_pk_location column to thing table + # 1. Add nma_pk_location column to thing table and its version table op.add_column( "thing", sa.Column( @@ -39,6 +39,14 @@ def upgrade() -> None: comment="To audit the original NM_Aquifer LocationID if it was transferred over", ), ) + op.add_column( + "thing_version", + sa.Column( + "nma_pk_location", + sa.String(), + nullable=True, + ), + ) # 2. Make thing_id NOT NULL on NMA_AssociatedData # First, delete any orphan records (records without a thing_id) @@ -57,8 +65,9 @@ def upgrade() -> None: def downgrade() -> None: """Downgrade schema to allow nullable thing_id.""" - # 1. Remove nma_pk_location column from thing table + # 1. Remove nma_pk_location column from thing table and its version table op.drop_column("thing", "nma_pk_location") + op.drop_column("thing_version", "nma_pk_location") # 2. Make thing_id nullable on NMA_AssociatedData op.alter_column( From 9dec60859faf93dc3b31555bf8d4ed073cbd9813 Mon Sep 17 00:00:00 2001 From: Kimball Bighorse Date: Mon, 26 Jan 2026 15:41:46 -0800 Subject: [PATCH 10/22] fix: address PR review comments - Fix import names in BDD step file (use NMA_ prefix) - Fix radionuclide tests to create chemistry sample first (satisfies sample_pt_id FK constraint) Co-Authored-By: Claude Opus 4.5 --- .../features/steps/well-data-relationships.py | 92 +++++++++---------- .../test_well_data_relationships.py | 23 ++++- 2 files changed, 66 insertions(+), 49 deletions(-) diff --git a/tests/features/steps/well-data-relationships.py b/tests/features/steps/well-data-relationships.py index 19fb46f4..89933b1c 100644 --- a/tests/features/steps/well-data-relationships.py +++ b/tests/features/steps/well-data-relationships.py @@ -28,12 +28,12 @@ from db import Thing from db.engine import session_ctx from db.nma_legacy import ( - ChemistrySampleInfo, - NMAHydraulicsData, - Stratigraphy, - NMARadionuclides, - AssociatedData, - SoilRockResults, + NMA_Chemistry_SampleInfo, + NMA_HydraulicsData, + NMA_NMA_Stratigraphy, + NMA_Radionuclides, + NMA_NMA_AssociatedData, + NMA_Soil_Rock_Results, ) @@ -127,7 +127,7 @@ def step_when_save_chemistry(context: Context): try: with session_ctx() as session: - chemistry = ChemistrySampleInfo( + chemistry = NMA_Chemistry_SampleInfo( sample_pt_id=uuid.uuid4(), sample_point_id="TEST001", thing_id=None, # No parent well @@ -153,8 +153,8 @@ def step_then_no_orphan_chemistry(context: Context): """Verify no orphan chemistry records exist.""" with session_ctx() as session: orphan_count = ( - session.query(ChemistrySampleInfo) - .filter(ChemistrySampleInfo.thing_id.is_(None)) + session.query(NMA_Chemistry_SampleInfo) + .filter(NMA_Chemistry_SampleInfo.thing_id.is_(None)) .count() ) assert orphan_count == 0, f"Found {orphan_count} orphan chemistry records" @@ -173,7 +173,7 @@ def step_when_save_hydraulics(context: Context): try: with session_ctx() as session: - hydraulics = NMAHydraulicsData( + hydraulics = 
NMA_HydraulicsData( global_id=uuid.uuid4(), point_id="TEST001", thing_id=None, # No parent well @@ -193,15 +193,15 @@ def step_then_no_orphan_hydraulics(context: Context): """Verify no orphan hydraulic records exist.""" with session_ctx() as session: orphan_count = ( - session.query(NMAHydraulicsData) - .filter(NMAHydraulicsData.thing_id.is_(None)) + session.query(NMA_HydraulicsData) + .filter(NMA_HydraulicsData.thing_id.is_(None)) .count() ) assert orphan_count == 0, f"Found {orphan_count} orphan hydraulic records" # ============================================================================ -# Stratigraphy (Lithology) +# NMA_Stratigraphy (Lithology) # ============================================================================ @@ -213,7 +213,7 @@ def step_when_save_lithology(context: Context): try: with session_ctx() as session: - stratigraphy = Stratigraphy( + stratigraphy = NMA_Stratigraphy( global_id=uuid.uuid4(), point_id="TEST001", thing_id=None, # No parent well @@ -233,7 +233,7 @@ def step_then_no_orphan_lithology(context: Context): """Verify no orphan lithology records exist.""" with session_ctx() as session: orphan_count = ( - session.query(Stratigraphy).filter(Stratigraphy.thing_id.is_(None)).count() + session.query(NMA_Stratigraphy).filter(NMA_Stratigraphy.thing_id.is_(None)).count() ) assert orphan_count == 0, f"Found {orphan_count} orphan lithology records" @@ -252,7 +252,7 @@ def step_when_save_radionuclides(context: Context): try: with session_ctx() as session: # First create a chemistry sample info for the radionuclide - chemistry_sample = ChemistrySampleInfo( + chemistry_sample = NMA_Chemistry_SampleInfo( sample_pt_id=uuid.uuid4(), sample_point_id="TEST001", thing_id=context.test_well_id, @@ -261,7 +261,7 @@ def step_when_save_radionuclides(context: Context): session.add(chemistry_sample) session.flush() - radionuclide = NMARadionuclides( + radionuclide = NMA_Radionuclides( global_id=uuid.uuid4(), thing_id=None, # No parent well sample_pt_id=chemistry_sample.sample_pt_id, @@ -280,8 +280,8 @@ def step_then_no_orphan_radionuclides(context: Context): """Verify no orphan radionuclide records exist.""" with session_ctx() as session: orphan_count = ( - session.query(NMARadionuclides) - .filter(NMARadionuclides.thing_id.is_(None)) + session.query(NMA_Radionuclides) + .filter(NMA_Radionuclides.thing_id.is_(None)) .count() ) assert orphan_count == 0, f"Found {orphan_count} orphan radionuclide records" @@ -300,7 +300,7 @@ def step_when_save_associated_data(context: Context): try: with session_ctx() as session: - associated_data = AssociatedData( + associated_data = NMA_AssociatedData( assoc_id=uuid.uuid4(), point_id="TEST001", thing_id=None, # No parent well @@ -319,8 +319,8 @@ def step_then_no_orphan_associated_data(context: Context): """Verify no orphan associated data records exist.""" with session_ctx() as session: orphan_count = ( - session.query(AssociatedData) - .filter(AssociatedData.thing_id.is_(None)) + session.query(NMA_AssociatedData) + .filter(NMA_AssociatedData.thing_id.is_(None)) .count() ) assert orphan_count == 0, f"Found {orphan_count} orphan associated data records" @@ -339,7 +339,7 @@ def step_when_save_soil_rock(context: Context): try: with session_ctx() as session: - soil_rock = SoilRockResults( + soil_rock = NMA_Soil_Rock_Results( point_id="TEST001", thing_id=None, # No parent well sample_type="Soil", @@ -358,8 +358,8 @@ def step_then_no_orphan_soil_rock(context: Context): """Verify no orphan soil/rock records exist.""" with session_ctx() as session: 
orphan_count = ( - session.query(SoilRockResults) - .filter(SoilRockResults.thing_id.is_(None)) + session.query(NMA_Soil_Rock_Results) + .filter(NMA_Soil_Rock_Results.thing_id.is_(None)) .count() ) assert orphan_count == 0, f"Found {orphan_count} orphan soil/rock records" @@ -419,13 +419,13 @@ def step_given_well_has_chemistry(context: Context): step_given_well_exists(context) with session_ctx() as session: - chemistry1 = ChemistrySampleInfo( + chemistry1 = NMA_Chemistry_SampleInfo( sample_pt_id=uuid.uuid4(), sample_point_id="TEST001", thing_id=context.test_well_id, collection_date=datetime.now(), ) - chemistry2 = ChemistrySampleInfo( + chemistry2 = NMA_Chemistry_SampleInfo( sample_pt_id=uuid.uuid4(), sample_point_id="TEST002", thing_id=context.test_well_id, @@ -443,7 +443,7 @@ def step_given_well_has_hydraulics(context: Context): step_given_well_exists(context) with session_ctx() as session: - hydraulics = NMAHydraulicsData( + hydraulics = NMA_HydraulicsData( global_id=uuid.uuid4(), point_id="TEST001", thing_id=context.test_well_id, @@ -462,14 +462,14 @@ def step_given_well_has_lithology(context: Context): step_given_well_exists(context) with session_ctx() as session: - lithology1 = Stratigraphy( + lithology1 = NMA_Stratigraphy( global_id=uuid.uuid4(), point_id="TEST001", thing_id=context.test_well_id, strat_top=0.0, strat_bottom=100.0, ) - lithology2 = Stratigraphy( + lithology2 = NMA_Stratigraphy( global_id=uuid.uuid4(), point_id="TEST001", thing_id=context.test_well_id, @@ -488,7 +488,7 @@ def step_given_well_has_radionuclides(context: Context): step_given_well_exists(context) with session_ctx() as session: - chemistry_sample = ChemistrySampleInfo( + chemistry_sample = NMA_Chemistry_SampleInfo( sample_pt_id=uuid.uuid4(), sample_point_id="TEST001", thing_id=context.test_well_id, @@ -497,7 +497,7 @@ def step_given_well_has_radionuclides(context: Context): session.add(chemistry_sample) session.flush() - radionuclide = NMARadionuclides( + radionuclide = NMA_Radionuclides( global_id=uuid.uuid4(), thing_id=context.test_well_id, sample_pt_id=chemistry_sample.sample_pt_id, @@ -515,7 +515,7 @@ def step_given_well_has_associated_data(context: Context): step_given_well_exists(context) with session_ctx() as session: - associated_data = AssociatedData( + associated_data = NMA_AssociatedData( assoc_id=uuid.uuid4(), point_id="TEST001", thing_id=context.test_well_id, @@ -533,7 +533,7 @@ def step_given_well_has_soil_rock(context: Context): step_given_well_exists(context) with session_ctx() as session: - soil_rock = SoilRockResults( + soil_rock = NMA_Soil_Rock_Results( point_id="TEST001", thing_id=context.test_well_id, sample_type="Soil", @@ -560,8 +560,8 @@ def step_then_chemistry_deleted(context: Context): """Verify chemistry samples are cascade deleted.""" with session_ctx() as session: remaining = ( - session.query(ChemistrySampleInfo) - .filter(ChemistrySampleInfo.thing_id == context.test_well_id) + session.query(NMA_Chemistry_SampleInfo) + .filter(NMA_Chemistry_SampleInfo.thing_id == context.test_well_id) .count() ) assert remaining == 0, f"Expected 0 chemistry samples, found {remaining}" @@ -572,8 +572,8 @@ def step_then_hydraulics_deleted(context: Context): """Verify hydraulic data is cascade deleted.""" with session_ctx() as session: remaining = ( - session.query(NMAHydraulicsData) - .filter(NMAHydraulicsData.thing_id == context.test_well_id) + session.query(NMA_HydraulicsData) + .filter(NMA_HydraulicsData.thing_id == context.test_well_id) .count() ) assert remaining == 0, f"Expected 0 
hydraulic records, found {remaining}" @@ -584,8 +584,8 @@ def step_then_lithology_deleted(context: Context): """Verify lithology logs are cascade deleted.""" with session_ctx() as session: remaining = ( - session.query(Stratigraphy) - .filter(Stratigraphy.thing_id == context.test_well_id) + session.query(NMA_Stratigraphy) + .filter(NMA_Stratigraphy.thing_id == context.test_well_id) .count() ) assert remaining == 0, f"Expected 0 lithology logs, found {remaining}" @@ -596,8 +596,8 @@ def step_then_radionuclides_deleted(context: Context): """Verify radionuclide results are cascade deleted.""" with session_ctx() as session: remaining = ( - session.query(NMARadionuclides) - .filter(NMARadionuclides.thing_id == context.test_well_id) + session.query(NMA_Radionuclides) + .filter(NMA_Radionuclides.thing_id == context.test_well_id) .count() ) assert remaining == 0, f"Expected 0 radionuclide records, found {remaining}" @@ -608,8 +608,8 @@ def step_then_associated_data_deleted(context: Context): """Verify associated data is cascade deleted.""" with session_ctx() as session: remaining = ( - session.query(AssociatedData) - .filter(AssociatedData.thing_id == context.test_well_id) + session.query(NMA_AssociatedData) + .filter(NMA_AssociatedData.thing_id == context.test_well_id) .count() ) assert remaining == 0, f"Expected 0 associated data records, found {remaining}" @@ -620,8 +620,8 @@ def step_then_soil_rock_deleted(context: Context): """Verify soil/rock results are cascade deleted.""" with session_ctx() as session: remaining = ( - session.query(SoilRockResults) - .filter(SoilRockResults.thing_id == context.test_well_id) + session.query(NMA_Soil_Rock_Results) + .filter(NMA_Soil_Rock_Results.thing_id == context.test_well_id) .count() ) assert remaining == 0, f"Expected 0 soil/rock records, found {remaining}" diff --git a/tests/integration/test_well_data_relationships.py b/tests/integration/test_well_data_relationships.py index 20d21f8b..b4c6dcfb 100644 --- a/tests/integration/test_well_data_relationships.py +++ b/tests/integration/test_well_data_relationships.py @@ -342,11 +342,19 @@ def test_well_navigates_to_radionuclides(self, well_for_relationships): with session_ctx() as session: well = session.merge(well_for_relationships) - # Create radionuclide record for this well - radio = NMA_Radionuclides( + # Create a chemistry sample for this well to satisfy the FK + chem_sample = NMA_Chemistry_SampleInfo( sample_pt_id=uuid.uuid4(), thing_id=well.id, ) + session.add(chem_sample) + session.flush() + + # Create radionuclide record for this well using the same sample_pt_id + radio = NMA_Radionuclides( + sample_pt_id=chem_sample.sample_pt_id, + thing_id=well.id, + ) session.add(radio) session.commit() session.refresh(well) @@ -517,10 +525,19 @@ def test_deleting_well_cascades_to_radionuclides(self): session.add(well) session.commit() - radio = NMA_Radionuclides( + # Create a chemistry sample for this well to satisfy the FK + chem_sample = NMA_Chemistry_SampleInfo( sample_pt_id=uuid.uuid4(), thing_id=well.id, ) + session.add(chem_sample) + session.flush() + + # Create radionuclide record using the chemistry sample's sample_pt_id + radio = NMA_Radionuclides( + sample_pt_id=chem_sample.sample_pt_id, + thing_id=well.id, + ) session.add(radio) session.commit() radio_id = radio.id From ea6926c5eb09e3042bdf2819274b70f59a9f30ae Mon Sep 17 00:00:00 2001 From: kbighorse Date: Mon, 26 Jan 2026 23:41:23 +0000 Subject: [PATCH 11/22] Formatting changes --- tests/features/steps/well-data-relationships.py | 4 +++- 1 file 
changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/features/steps/well-data-relationships.py b/tests/features/steps/well-data-relationships.py index 89933b1c..7685728d 100644 --- a/tests/features/steps/well-data-relationships.py +++ b/tests/features/steps/well-data-relationships.py @@ -233,7 +233,9 @@ def step_then_no_orphan_lithology(context: Context): """Verify no orphan lithology records exist.""" with session_ctx() as session: orphan_count = ( - session.query(NMA_Stratigraphy).filter(NMA_Stratigraphy.thing_id.is_(None)).count() + session.query(NMA_Stratigraphy) + .filter(NMA_Stratigraphy.thing_id.is_(None)) + .count() ) assert orphan_count == 0, f"Found {orphan_count} orphan lithology records" From cf5dbd92f17c8dca33eb1eaf105e1551f3991c01 Mon Sep 17 00:00:00 2001 From: Kimball Bighorse Date: Mon, 26 Jan 2026 15:45:12 -0800 Subject: [PATCH 12/22] fix: correct duplicate NMA_ prefix in BDD step imports Co-Authored-By: Claude Opus 4.5 --- tests/features/steps/well-data-relationships.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/features/steps/well-data-relationships.py b/tests/features/steps/well-data-relationships.py index 7685728d..83678809 100644 --- a/tests/features/steps/well-data-relationships.py +++ b/tests/features/steps/well-data-relationships.py @@ -30,9 +30,9 @@ from db.nma_legacy import ( NMA_Chemistry_SampleInfo, NMA_HydraulicsData, - NMA_NMA_Stratigraphy, + NMA_Stratigraphy, NMA_Radionuclides, - NMA_NMA_AssociatedData, + NMA_AssociatedData, NMA_Soil_Rock_Results, ) From 464a6cf72b23143a0a64f3390062f307a8be0111 Mon Sep 17 00:00:00 2001 From: Kimball Bighorse Date: Tue, 27 Jan 2026 17:26:27 -0800 Subject: [PATCH 13/22] fix(tests): restore test database configuration Restore POSTGRES_DB and POSTGRES_PORT settings that were accidentally removed in commit 62ecda1a during the NMA_ prefix refactoring. Without these settings, tests would connect to ocotilloapi_dev instead of ocotilloapi_test because load_dotenv(override=True) would overwrite the POSTGRES_DB set by pytest_configure(). Co-Authored-By: Claude Opus 4.5 --- tests/__init__.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tests/__init__.py b/tests/__init__.py index 32b5d145..e351586a 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -13,6 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# =============================================================================== +import os from functools import lru_cache from dotenv import load_dotenv @@ -21,6 +22,10 @@ # Use override=True to override conflicting shell environment variables load_dotenv(override=True) +# for safety dont test on the production database port +os.environ["POSTGRES_PORT"] = "5432" +# Always use test database, never dev +os.environ["POSTGRES_DB"] = "ocotilloapi_test" from fastapi.testclient import TestClient from fastapi_pagination import add_pagination From 8e84e35823844b453f4773b8fad12a3e1ee53df0 Mon Sep 17 00:00:00 2001 From: Kimball Bighorse Date: Tue, 27 Jan 2026 17:59:26 -0800 Subject: [PATCH 14/22] fix(tests): add required fields and fix cascade delete tests - Add test_top/test_bottom to NMA_HydraulicsData test fixtures - Add global_id to NMA_Radionuclides test fixtures - Add session.expire_all() before cascade delete assertions to clear SQLAlchemy's identity map cache (passive_deletes relies on DB cascade) - Fix point_id values to respect max 10 char constraint Co-Authored-By: Claude Opus 4.5 --- .../test_well_data_relationships.py | 90 ++++++++++++------- tests/test_associated_data_legacy.py | 2 +- tests/test_stratigraphy_legacy.py | 2 +- 3 files changed, 59 insertions(+), 35 deletions(-) diff --git a/tests/integration/test_well_data_relationships.py b/tests/integration/test_well_data_relationships.py index b4c6dcfb..549e7081 100644 --- a/tests/integration/test_well_data_relationships.py +++ b/tests/integration/test_well_data_relationships.py @@ -26,7 +26,6 @@ """ import uuid -from datetime import datetime import pytest @@ -197,10 +196,8 @@ def test_hydraulics_data_requires_well(self): with session_ctx() as session: with pytest.raises(ValueError, match="requires a parent Thing"): record = NMA_HydraulicsData( - point_id="ORPHAN-HYD", - date_measured=datetime.now(), - test_bottom=100, - test_top=50, + global_id=uuid.uuid4(), + point_id="ORPHANHYD", thing_id=None, # This should raise ValueError ) session.add(record) @@ -214,7 +211,8 @@ def test_stratigraphy_requires_well(self): with session_ctx() as session: with pytest.raises(ValueError, match="requires a parent Thing"): record = NMA_Stratigraphy( - point_id="ORPHAN-STRAT", + global_id=uuid.uuid4(), + point_id="ORPHSTRAT", thing_id=None, # This should raise ValueError ) session.add(record) @@ -282,7 +280,7 @@ def test_well_navigates_to_chemistry_samples(self, well_for_relationships): # Create a chemistry sample for this well sample = NMA_Chemistry_SampleInfo( sample_pt_id=uuid.uuid4(), - sample_point_id="NAV-CHEM-01", + sample_point_id="NAVCHEM01", # Max 10 chars thing_id=well.id, ) session.add(sample) @@ -293,7 +291,7 @@ def test_well_navigates_to_chemistry_samples(self, well_for_relationships): assert hasattr(well, "chemistry_sample_infos") assert len(well.chemistry_sample_infos) >= 1 assert any( - s.sample_point_id == "NAV-CHEM-01" for s in well.chemistry_sample_infos + s.sample_point_id == "NAVCHEM01" for s in well.chemistry_sample_infos ) def test_well_navigates_to_hydraulics_data(self, well_for_relationships): @@ -303,11 +301,11 @@ def test_well_navigates_to_hydraulics_data(self, well_for_relationships): # Create hydraulics data for this well hydraulics = NMA_HydraulicsData( - point_id="NAV-HYD-01", - date_measured=datetime.now(), - test_bottom=100, - test_top=50, + global_id=uuid.uuid4(), + point_id="NAVHYD01", # Max 10 chars thing_id=well.id, + test_top=0, + test_bottom=100, ) session.add(hydraulics) session.commit() @@ -316,7 +314,7 @@ def 
test_well_navigates_to_hydraulics_data(self, well_for_relationships): # Navigate through relationship assert hasattr(well, "hydraulics_data") assert len(well.hydraulics_data) >= 1 - assert any(h.point_id == "NAV-HYD-01" for h in well.hydraulics_data) + assert any(h.point_id == "NAVHYD01" for h in well.hydraulics_data) def test_well_navigates_to_stratigraphy_logs(self, well_for_relationships): """Well can navigate to its lithology logs.""" @@ -325,7 +323,8 @@ def test_well_navigates_to_stratigraphy_logs(self, well_for_relationships): # Create stratigraphy log for this well strat = NMA_Stratigraphy( - point_id="NAV-STRAT-01", + global_id=uuid.uuid4(), + point_id="NAVSTRAT1", # Max 10 chars thing_id=well.id, ) session.add(strat) @@ -335,7 +334,7 @@ def test_well_navigates_to_stratigraphy_logs(self, well_for_relationships): # Navigate through relationship assert hasattr(well, "stratigraphy_logs") assert len(well.stratigraphy_logs) >= 1 - assert any(s.point_id == "NAV-STRAT-01" for s in well.stratigraphy_logs) + assert any(s.point_id == "NAVSTRAT1" for s in well.stratigraphy_logs) def test_well_navigates_to_radionuclides(self, well_for_relationships): """Well can navigate to its radionuclide results.""" @@ -345,6 +344,7 @@ def test_well_navigates_to_radionuclides(self, well_for_relationships): # Create a chemistry sample for this well to satisfy the FK chem_sample = NMA_Chemistry_SampleInfo( sample_pt_id=uuid.uuid4(), + sample_point_id="NAVRAD01", # Required, max 10 chars thing_id=well.id, ) session.add(chem_sample) @@ -352,6 +352,7 @@ def test_well_navigates_to_radionuclides(self, well_for_relationships): # Create radionuclide record for this well using the same sample_pt_id radio = NMA_Radionuclides( + global_id=uuid.uuid4(), sample_pt_id=chem_sample.sample_pt_id, thing_id=well.id, ) @@ -370,7 +371,8 @@ def test_well_navigates_to_associated_data(self, well_for_relationships): # Create associated data for this well assoc = NMA_AssociatedData( - point_id="NAV-ASSOC-01", + assoc_id=uuid.uuid4(), + point_id="NAVASSOC1", # Max 10 chars thing_id=well.id, ) session.add(assoc) @@ -380,7 +382,7 @@ def test_well_navigates_to_associated_data(self, well_for_relationships): # Navigate through relationship assert hasattr(well, "associated_data") assert len(well.associated_data) >= 1 - assert any(a.point_id == "NAV-ASSOC-01" for a in well.associated_data) + assert any(a.point_id == "NAVASSOC1" for a in well.associated_data) def test_well_navigates_to_soil_rock_results(self, well_for_relationships): """Well can navigate to its soil/rock results.""" @@ -430,17 +432,20 @@ def test_deleting_well_cascades_to_chemistry_samples(self): sample = NMA_Chemistry_SampleInfo( sample_pt_id=uuid.uuid4(), - sample_point_id="CASCADE-CHEM-01", + sample_point_id="CASCCHEM1", # Max 10 chars thing_id=well.id, ) session.add(sample) session.commit() - sample_id = sample.id + sample_id = sample.sample_pt_id # PK is sample_pt_id # Delete the well session.delete(well) session.commit() + # Clear session cache to ensure fresh DB query + session.expire_all() + # Verify chemistry sample was also deleted orphan = session.get(NMA_Chemistry_SampleInfo, sample_id) assert orphan is None, "Chemistry sample should be deleted with well" @@ -460,23 +465,26 @@ def test_deleting_well_cascades_to_hydraulics_data(self): session.add(well) session.commit() + hyd_global_id = uuid.uuid4() hydraulics = NMA_HydraulicsData( - point_id="CASCADE-HYD-01", - date_measured=datetime.now(), - test_bottom=100, - test_top=50, + global_id=hyd_global_id, + 
point_id="CASCHYD01", # Max 10 chars thing_id=well.id, + test_top=0, + test_bottom=100, ) session.add(hydraulics) session.commit() - hyd_id = hydraulics.id # Delete the well session.delete(well) session.commit() + # Clear session cache to ensure fresh DB query + session.expire_all() + # Verify hydraulics data was also deleted - orphan = session.get(NMA_HydraulicsData, hyd_id) + orphan = session.get(NMA_HydraulicsData, hyd_global_id) assert orphan is None, "Hydraulics data should be deleted with well" def test_deleting_well_cascades_to_stratigraphy_logs(self): @@ -494,20 +502,24 @@ def test_deleting_well_cascades_to_stratigraphy_logs(self): session.add(well) session.commit() + strat_global_id = uuid.uuid4() strat = NMA_Stratigraphy( - point_id="CASCADE-STRAT-01", + global_id=strat_global_id, + point_id="CASCSTRAT", # Max 10 chars thing_id=well.id, ) session.add(strat) session.commit() - strat_id = strat.id # Delete the well session.delete(well) session.commit() + # Clear session cache to ensure fresh DB query + session.expire_all() + # Verify stratigraphy was also deleted - orphan = session.get(NMA_Stratigraphy, strat_id) + orphan = session.get(NMA_Stratigraphy, strat_global_id) assert orphan is None, "Stratigraphy log should be deleted with well" def test_deleting_well_cascades_to_radionuclides(self): @@ -528,6 +540,7 @@ def test_deleting_well_cascades_to_radionuclides(self): # Create a chemistry sample for this well to satisfy the FK chem_sample = NMA_Chemistry_SampleInfo( sample_pt_id=uuid.uuid4(), + sample_point_id="CASCRAD01", # Required, max 10 chars thing_id=well.id, ) session.add(chem_sample) @@ -535,17 +548,21 @@ def test_deleting_well_cascades_to_radionuclides(self): # Create radionuclide record using the chemistry sample's sample_pt_id radio = NMA_Radionuclides( + global_id=uuid.uuid4(), sample_pt_id=chem_sample.sample_pt_id, thing_id=well.id, ) session.add(radio) session.commit() - radio_id = radio.id + radio_id = radio.global_id # PK is global_id # Delete the well session.delete(well) session.commit() + # Clear session cache to ensure fresh DB query + session.expire_all() + # Verify radionuclide record was also deleted orphan = session.get(NMA_Radionuclides, radio_id) assert orphan is None, "Radionuclide record should be deleted with well" @@ -565,20 +582,24 @@ def test_deleting_well_cascades_to_associated_data(self): session.add(well) session.commit() + assoc_uuid = uuid.uuid4() assoc = NMA_AssociatedData( - point_id="CASCADE-ASSOC-01", + assoc_id=assoc_uuid, + point_id="CASCASSOC", # Max 10 chars thing_id=well.id, ) session.add(assoc) session.commit() - assoc_id = assoc.id # Delete the well session.delete(well) session.commit() + # Clear session cache to ensure fresh DB query + session.expire_all() + # Verify associated data was also deleted - orphan = session.get(NMA_AssociatedData, assoc_id) + orphan = session.get(NMA_AssociatedData, assoc_uuid) assert orphan is None, "Associated data should be deleted with well" def test_deleting_well_cascades_to_soil_rock_results(self): @@ -597,7 +618,7 @@ def test_deleting_well_cascades_to_soil_rock_results(self): session.commit() soil = NMA_Soil_Rock_Results( - point_id="CASCADE-SOIL-01", + point_id="CASCSOIL1", thing_id=well.id, ) session.add(soil) @@ -608,6 +629,9 @@ def test_deleting_well_cascades_to_soil_rock_results(self): session.delete(well) session.commit() + # Clear session cache to ensure fresh DB query + session.expire_all() + # Verify soil/rock results were also deleted orphan = session.get(NMA_Soil_Rock_Results, soil_id) 
assert orphan is None, "Soil/rock results should be deleted with well" diff --git a/tests/test_associated_data_legacy.py b/tests/test_associated_data_legacy.py index 83359052..4b32615a 100644 --- a/tests/test_associated_data_legacy.py +++ b/tests/test_associated_data_legacy.py @@ -115,7 +115,7 @@ def test_associated_data_back_populates_thing(water_well_thing): well = session.merge(water_well_thing) record = NMA_AssociatedData( assoc_id=uuid4(), - point_id="BP-ASSOC-01", + point_id="BPASSOC01", # Max 10 chars thing_id=well.id, ) session.add(record) diff --git a/tests/test_stratigraphy_legacy.py b/tests/test_stratigraphy_legacy.py index ee99915e..54faf8e5 100644 --- a/tests/test_stratigraphy_legacy.py +++ b/tests/test_stratigraphy_legacy.py @@ -90,7 +90,7 @@ def test_stratigraphy_back_populates_thing(water_well_thing): well = session.merge(water_well_thing) record = NMA_Stratigraphy( global_id=_next_global_id(), - point_id="BP-STRAT-01", + point_id="BPSTRAT01", # Max 10 chars thing_id=well.id, ) session.add(record) From f24ad391b4828036978d246af3c810d05bfd6f30 Mon Sep 17 00:00:00 2001 From: Kimball Bighorse Date: Wed, 28 Jan 2026 01:26:39 -0800 Subject: [PATCH 15/22] refactor(models): migrate NMA tables to Integer PKs with nma_ prefix Update all NMA legacy models to use Integer autoincrement primary keys instead of UUID PKs. Legacy columns are renamed with nma_ prefix for audit/traceability. Changes per table: - NMA_HydraulicsData: id (Integer PK), nma_global_id, nma_well_id, nma_point_id, nma_object_id - NMA_Stratigraphy: id (Integer PK), nma_global_id, nma_well_id, nma_point_id, nma_object_id - NMA_Chemistry_SampleInfo: id (Integer PK), nma_sample_pt_id, nma_sample_point_id, nma_wclab_id, nma_location_id, nma_object_id - NMA_AssociatedData: id (Integer PK), nma_assoc_id, nma_location_id, nma_point_id, nma_object_id - NMA_Radionuclides: id (Integer PK), nma_global_id, chemistry_sample_info_id (Integer FK), nma_sample_pt_id, nma_sample_point_id, nma_object_id, nma_wclab_id - NMA_MinorTraceChemistry: id (Integer PK), nma_global_id, chemistry_sample_info_id (Integer FK), nma_chemistry_sample_info_uuid - NMA_MajorChemistry: id (Integer PK), nma_global_id, chemistry_sample_info_id (Integer FK), nma_sample_pt_id, nma_sample_point_id, nma_object_id, nma_wclab_id - NMA_FieldParameters: id (Integer PK), nma_global_id, chemistry_sample_info_id (Integer FK), nma_sample_pt_id, nma_sample_point_id, nma_object_id, nma_wclab_id - NMA_Soil_Rock_Results: nma_point_id (rename only, already had Integer PK) Chemistry chain children now use Integer FK (chemistry_sample_info_id) pointing to NMA_Chemistry_SampleInfo.id instead of UUID FK. Co-Authored-By: Claude Opus 4.5 --- db/nma_legacy.py | 375 ++++++++++++++++++++++++++++++++++++----------- 1 file changed, 286 insertions(+), 89 deletions(-) diff --git a/db/nma_legacy.py b/db/nma_legacy.py index 3d4f5d48..dbe66740 100644 --- a/db/nma_legacy.py +++ b/db/nma_legacy.py @@ -14,7 +14,34 @@ # limitations under the License. # =============================================================================== -"""Legacy NM Aquifer models copied from AMPAPI.""" +"""Legacy NM Aquifer models copied from AMPAPI. + +This module contains models for NMA legacy tables that have been refactored to use +Integer primary keys. The original UUID PKs have been renamed with 'nma_' prefix +for audit/traceability purposes. 
+ +Refactoring Summary (UUID -> Integer PK): +- NMA_HydraulicsData: global_id -> nma_global_id, new id PK +- NMA_Stratigraphy: global_id -> nma_global_id, new id PK +- NMA_Chemistry_SampleInfo: sample_pt_id -> nma_sample_pt_id, new id PK +- NMA_AssociatedData: assoc_id -> nma_assoc_id, new id PK +- NMA_Radionuclides: global_id -> nma_global_id, new id PK +- NMA_MinorTraceChemistry: global_id -> nma_global_id, new id PK +- NMA_MajorChemistry: global_id -> nma_global_id, new id PK +- NMA_FieldParameters: global_id -> nma_global_id, new id PK + +FK Standardization: +- Chemistry children now use chemistry_sample_info_id (Integer FK) +- Legacy UUID FKs stored as nma_sample_pt_id for audit + +Legacy ID Columns Renamed (nma_ prefix): +- well_id -> nma_well_id +- point_id -> nma_point_id +- location_id -> nma_location_id +- object_id -> nma_object_id +- sample_point_id -> nma_sample_point_id +- wclab_id -> nma_wclab_id +""" import uuid from datetime import date, datetime @@ -51,6 +78,9 @@ class NMA_WaterLevelsContinuous_Pressure_Daily(Base): This model is used for read-only migration/interop with the legacy NM Aquifer data and mirrors the original column names/types closely so transfer scripts can operate without further schema mapping. + + Note: This table is OUT OF SCOPE for the UUID->Integer PK refactoring since + it's not a Thing child table. """ __tablename__ = "NMA_WaterLevelsContinuous_Pressure_Daily" @@ -96,6 +126,8 @@ class NMA_view_NGWMN_WellConstruction(Base): A surrogate primary key is used so rows with missing depth values can still be represented faithfully from the legacy view. + + Note: This table is OUT OF SCOPE for refactoring (view table). """ __tablename__ = "NMA_view_NGWMN_WellConstruction" @@ -123,6 +155,8 @@ class NMA_view_NGWMN_WellConstruction(Base): class NMA_view_NGWMN_WaterLevels(Base): """ Legacy NGWMN water levels view. + + Note: This table is OUT OF SCOPE for refactoring (view table). """ __tablename__ = "NMA_view_NGWMN_WaterLevels" @@ -143,6 +177,8 @@ class NMA_view_NGWMN_WaterLevels(Base): class NMA_view_NGWMN_Lithology(Base): """ Legacy NGWMN lithology view. + + Note: This table is OUT OF SCOPE for refactoring (view table). """ __tablename__ = "NMA_view_NGWMN_Lithology" @@ -163,20 +199,39 @@ class NMA_view_NGWMN_Lithology(Base): class NMA_HydraulicsData(Base): """ Legacy HydraulicsData table from AMPAPI. 
+ + Refactored from UUID PK to Integer PK: + - id: Integer PK (autoincrement) + - nma_global_id: Original UUID PK, now UNIQUE for audit + - nma_well_id: Legacy WellID UUID + - nma_point_id: Legacy PointID string + - nma_object_id: Legacy OBJECTID, UNIQUE """ __tablename__ = "NMA_HydraulicsData" - global_id: Mapped[uuid.UUID] = mapped_column( - "GlobalID", UUID(as_uuid=True), primary_key=True + # New Integer PK + id: Mapped[int] = mapped_column(Integer, primary_key=True, autoincrement=True) + + # Legacy UUID PK (now audit column) + nma_global_id: Mapped[Optional[uuid.UUID]] = mapped_column( + "nma_GlobalID", UUID(as_uuid=True), unique=True, nullable=True ) - well_id: Mapped[Optional[uuid.UUID]] = mapped_column("WellID", UUID(as_uuid=True)) - point_id: Mapped[Optional[str]] = mapped_column("PointID", String(50)) + + # Legacy ID columns (renamed with nma_ prefix) + nma_well_id: Mapped[Optional[uuid.UUID]] = mapped_column( + "nma_WellID", UUID(as_uuid=True) + ) + nma_point_id: Mapped[Optional[str]] = mapped_column("nma_PointID", String(50)) + nma_object_id: Mapped[Optional[int]] = mapped_column( + "nma_OBJECTID", Integer, unique=True + ) + + # Data columns data_source: Mapped[Optional[str]] = mapped_column("Data Source", String(255)) thing_id: Mapped[int] = mapped_column( Integer, ForeignKey("thing.id", ondelete="CASCADE"), nullable=False ) - object_id: Mapped[Optional[int]] = mapped_column("OBJECTID", Integer, unique=True) cs_gal_d_ft: Mapped[Optional[float]] = mapped_column("Cs (gal/d/ft)", Float) hd_ft2_d: Mapped[Optional[float]] = mapped_column("HD (ft2/d)", Float) @@ -217,15 +272,37 @@ def validate_thing_id(self, key, value): class NMA_Stratigraphy(Base): - """Legacy stratigraphy (lithology log) data from AMPAPI.""" + """ + Legacy stratigraphy (lithology log) data from AMPAPI. 
+ + Refactored from UUID PK to Integer PK: + - id: Integer PK (autoincrement) + - nma_global_id: Original UUID PK, now UNIQUE for audit + - nma_well_id: Legacy WellID UUID + - nma_point_id: Legacy PointID string + - nma_object_id: Legacy OBJECTID, UNIQUE + """ __tablename__ = "NMA_Stratigraphy" - global_id: Mapped[uuid.UUID] = mapped_column( - "GlobalID", UUID(as_uuid=True), primary_key=True + # New Integer PK + id: Mapped[int] = mapped_column(Integer, primary_key=True, autoincrement=True) + + # Legacy UUID PK (now audit column) + nma_global_id: Mapped[Optional[uuid.UUID]] = mapped_column( + "nma_GlobalID", UUID(as_uuid=True), unique=True, nullable=True + ) + + # Legacy ID columns (renamed with nma_ prefix) + nma_well_id: Mapped[Optional[uuid.UUID]] = mapped_column( + "nma_WellID", UUID(as_uuid=True) + ) + nma_point_id: Mapped[str] = mapped_column("nma_PointID", String(10), nullable=False) + nma_object_id: Mapped[Optional[int]] = mapped_column( + "nma_OBJECTID", Integer, unique=True ) - well_id: Mapped[Optional[uuid.UUID]] = mapped_column("WellID", UUID(as_uuid=True)) - point_id: Mapped[str] = mapped_column("PointID", String(10), nullable=False) + + # FK to Thing thing_id: Mapped[int] = mapped_column( Integer, ForeignKey("thing.id", ondelete="CASCADE"), nullable=False ) @@ -242,7 +319,6 @@ class NMA_Stratigraphy(Base): ) strat_source: Mapped[Optional[str]] = mapped_column("StratSource", Text) strat_notes: Mapped[Optional[str]] = mapped_column("StratNotes", Text) - object_id: Mapped[Optional[int]] = mapped_column("OBJECTID", Integer, unique=True) thing: Mapped["Thing"] = relationship("Thing", back_populates="stratigraphy_logs") @@ -259,16 +335,36 @@ def validate_thing_id(self, key, value): class NMA_Chemistry_SampleInfo(Base): """ Legacy Chemistry SampleInfo table from AMPAPI. 
+ + Refactored from UUID PK to Integer PK: + - id: Integer PK (autoincrement) + - nma_sample_pt_id: Original UUID PK (SamplePtID), now UNIQUE for audit + - nma_wclab_id: Legacy WCLab_ID + - nma_sample_point_id: Legacy SamplePointID + - nma_object_id: Legacy OBJECTID, UNIQUE + - nma_location_id: Legacy LocationId UUID """ __tablename__ = "NMA_Chemistry_SampleInfo" - sample_pt_id: Mapped[uuid.UUID] = mapped_column( - "SamplePtID", UUID(as_uuid=True), primary_key=True + # New Integer PK + id: Mapped[int] = mapped_column(Integer, primary_key=True, autoincrement=True) + + # Legacy UUID PK (now audit column) + nma_sample_pt_id: Mapped[Optional[uuid.UUID]] = mapped_column( + "nma_SamplePtID", UUID(as_uuid=True), unique=True, nullable=True + ) + + # Legacy ID columns (renamed with nma_ prefix) + nma_wclab_id: Mapped[Optional[str]] = mapped_column("nma_WCLab_ID", String(18)) + nma_sample_point_id: Mapped[str] = mapped_column( + "nma_SamplePointID", String(10), nullable=False ) - wclab_id: Mapped[Optional[str]] = mapped_column("WCLab_ID", String(18)) - sample_point_id: Mapped[str] = mapped_column( - "SamplePointID", String(10), nullable=False + nma_object_id: Mapped[Optional[int]] = mapped_column( + "nma_OBJECTID", Integer, unique=True + ) + nma_location_id: Mapped[Optional[uuid.UUID]] = mapped_column( + "nma_LocationId", UUID(as_uuid=True) ) # FK to Thing - required for all ChemistrySampleInfo records @@ -304,11 +400,6 @@ class NMA_Chemistry_SampleInfo(Base): ) sample_notes: Mapped[Optional[str]] = mapped_column("SampleNotes", Text) - object_id: Mapped[Optional[int]] = mapped_column("OBJECTID", Integer, unique=True) - location_id: Mapped[Optional[uuid.UUID]] = mapped_column( - "LocationId", UUID(as_uuid=True) - ) - # --- Relationships --- thing: Mapped["Thing"] = relationship( "Thing", back_populates="chemistry_sample_infos" @@ -355,20 +446,36 @@ def validate_thing_id(self, key, value): class NMA_AssociatedData(Base): """ Legacy AssociatedData table from NM_Aquifer. 
+ + Refactored from UUID PK to Integer PK: + - id: Integer PK (autoincrement) + - nma_assoc_id: Original UUID PK (AssocID), now UNIQUE for audit + - nma_location_id: Legacy LocationId UUID, UNIQUE + - nma_point_id: Legacy PointID string + - nma_object_id: Legacy OBJECTID, UNIQUE """ __tablename__ = "NMA_AssociatedData" - location_id: Mapped[Optional[uuid.UUID]] = mapped_column( - "LocationId", UUID(as_uuid=True), unique=True + # New Integer PK + id: Mapped[int] = mapped_column(Integer, primary_key=True, autoincrement=True) + + # Legacy UUID PK (now audit column) + nma_assoc_id: Mapped[Optional[uuid.UUID]] = mapped_column( + "nma_AssocID", UUID(as_uuid=True), unique=True, nullable=True + ) + + # Legacy ID columns (renamed with nma_ prefix) + nma_location_id: Mapped[Optional[uuid.UUID]] = mapped_column( + "nma_LocationId", UUID(as_uuid=True), unique=True ) - point_id: Mapped[Optional[str]] = mapped_column("PointID", String(10)) - assoc_id: Mapped[uuid.UUID] = mapped_column( - "AssocID", UUID(as_uuid=True), primary_key=True + nma_point_id: Mapped[Optional[str]] = mapped_column("nma_PointID", String(10)) + nma_object_id: Mapped[Optional[int]] = mapped_column( + "nma_OBJECTID", Integer, unique=True ) + notes: Mapped[Optional[str]] = mapped_column("Notes", String(255)) formation: Mapped[Optional[str]] = mapped_column("Formation", String(15)) - object_id: Mapped[Optional[int]] = mapped_column("OBJECTID", Integer, unique=True) thing_id: Mapped[int] = mapped_column( Integer, ForeignKey("thing.id", ondelete="CASCADE"), nullable=False ) @@ -388,6 +495,8 @@ def validate_thing_id(self, key, value): class NMA_SurfaceWaterData(Base): """ Legacy SurfaceWaterData table from AMPAPI. + + Note: This table is OUT OF SCOPE for refactoring (not a Thing child). """ __tablename__ = "NMA_SurfaceWaterData" @@ -421,6 +530,8 @@ class NMA_SurfaceWaterData(Base): class NMA_SurfaceWaterPhotos(Base): """ Legacy SurfaceWaterPhotos table from NM_Aquifer. + + Note: This table is OUT OF SCOPE for refactoring (not a Thing child). """ __tablename__ = "NMA_SurfaceWaterPhotos" @@ -439,6 +550,8 @@ class NMA_SurfaceWaterPhotos(Base): class NMA_WeatherData(Base): """ Legacy WeatherData table from AMPAPI. + + Note: This table is OUT OF SCOPE for refactoring (not a Thing child). """ __tablename__ = "NMA_WeatherData" @@ -456,6 +569,8 @@ class NMA_WeatherData(Base): class NMA_WeatherPhotos(Base): """ Legacy WeatherPhotos table from NM_Aquifer. + + Note: This table is OUT OF SCOPE for refactoring (not a Thing child). """ __tablename__ = "NMA_WeatherPhotos" @@ -474,12 +589,15 @@ class NMA_WeatherPhotos(Base): class NMA_Soil_Rock_Results(Base): """ Legacy Soil_Rock_Results table from NM_Aquifer. + + Already has Integer PK. Only legacy column renames needed: + - point_id -> nma_point_id """ __tablename__ = "NMA_Soil_Rock_Results" id: Mapped[int] = mapped_column(Integer, primary_key=True, autoincrement=True) - point_id: Mapped[Optional[str]] = mapped_column("Point_ID", String(255)) + nma_point_id: Mapped[Optional[str]] = mapped_column("nma_Point_ID", String(255)) sample_type: Mapped[Optional[str]] = mapped_column("Sample Type", String(255)) date_sampled: Mapped[Optional[str]] = mapped_column("Date Sampled", String(255)) d13c: Mapped[Optional[float]] = mapped_column("d13C", Float) @@ -506,6 +624,12 @@ class NMA_MinorTraceChemistry(Base): Legacy MinorandTraceChemistry table from AMPAPI. Stores minor and trace element chemistry results linked to a ChemistrySampleInfo. 
+ + Refactored from UUID PK to Integer PK: + - id: Integer PK (autoincrement) + - nma_global_id: Original UUID PK, now UNIQUE for audit + - chemistry_sample_info_id: Integer FK to NMA_Chemistry_SampleInfo.id + - nma_chemistry_sample_info_uuid: Legacy UUID FK for audit """ __tablename__ = "NMA_MinorTraceChemistry" @@ -517,17 +641,26 @@ class NMA_MinorTraceChemistry(Base): ), ) - global_id: Mapped[uuid.UUID] = mapped_column( - "GlobalID", UUID(as_uuid=True), primary_key=True + # New Integer PK + id: Mapped[int] = mapped_column(Integer, primary_key=True, autoincrement=True) + + # Legacy UUID PK (now audit column) + nma_global_id: Mapped[Optional[uuid.UUID]] = mapped_column( + "nma_GlobalID", UUID(as_uuid=True), unique=True, nullable=True ) - # FK to ChemistrySampleInfo - required (no orphans) - chemistry_sample_info_id: Mapped[uuid.UUID] = mapped_column( - UUID(as_uuid=True), - ForeignKey("NMA_Chemistry_SampleInfo.SamplePtID", ondelete="CASCADE"), + # New Integer FK to ChemistrySampleInfo + chemistry_sample_info_id: Mapped[int] = mapped_column( + Integer, + ForeignKey("NMA_Chemistry_SampleInfo.id", ondelete="CASCADE"), nullable=False, ) + # Legacy UUID FK (for audit) + nma_chemistry_sample_info_uuid: Mapped[Optional[uuid.UUID]] = mapped_column( + "nma_chemistry_sample_info_uuid", UUID(as_uuid=True), nullable=True + ) + # Legacy columns analyte: Mapped[Optional[str]] = mapped_column(String(50)) sample_value: Mapped[Optional[float]] = mapped_column(Float) @@ -559,23 +692,52 @@ def validate_chemistry_sample_info_id(self, key, value): class NMA_Radionuclides(Base): """ Legacy Radionuclides table from NM_Aquifer_Dev_DB. + + Refactored from UUID PK to Integer PK: + - id: Integer PK (autoincrement) + - nma_global_id: Original UUID PK, now UNIQUE for audit + - chemistry_sample_info_id: Integer FK to NMA_Chemistry_SampleInfo.id + - nma_sample_pt_id: Legacy UUID FK (SamplePtID) for audit + - nma_sample_point_id: Legacy SamplePointID string + - nma_object_id: Legacy OBJECTID, UNIQUE + - nma_wclab_id: Legacy WCLab_ID """ __tablename__ = "NMA_Radionuclides" - global_id: Mapped[uuid.UUID] = mapped_column( - "GlobalID", UUID(as_uuid=True), primary_key=True + # New Integer PK + id: Mapped[int] = mapped_column(Integer, primary_key=True, autoincrement=True) + + # Legacy UUID PK (now audit column) + nma_global_id: Mapped[Optional[uuid.UUID]] = mapped_column( + "nma_GlobalID", UUID(as_uuid=True), unique=True, nullable=True ) + + # FK to Thing thing_id: Mapped[int] = mapped_column( Integer, ForeignKey("thing.id", ondelete="CASCADE"), nullable=False ) - sample_pt_id: Mapped[uuid.UUID] = mapped_column( - "SamplePtID", - UUID(as_uuid=True), - ForeignKey("NMA_Chemistry_SampleInfo.SamplePtID", ondelete="CASCADE"), + + # New Integer FK to ChemistrySampleInfo + chemistry_sample_info_id: Mapped[int] = mapped_column( + Integer, + ForeignKey("NMA_Chemistry_SampleInfo.id", ondelete="CASCADE"), nullable=False, ) - sample_point_id: Mapped[Optional[str]] = mapped_column("SamplePointID", String(10)) + + # Legacy ID columns (renamed with nma_ prefix) + nma_sample_pt_id: Mapped[Optional[uuid.UUID]] = mapped_column( + "nma_SamplePtID", UUID(as_uuid=True), nullable=True + ) + nma_sample_point_id: Mapped[Optional[str]] = mapped_column( + "nma_SamplePointID", String(10) + ) + nma_object_id: Mapped[Optional[int]] = mapped_column( + "nma_OBJECTID", Integer, unique=True + ) + nma_wclab_id: Mapped[Optional[str]] = mapped_column("nma_WCLab_ID", String(25)) + + # Data columns analyte: Mapped[Optional[str]] = mapped_column("Analyte", 
String(50)) symbol: Mapped[Optional[str]] = mapped_column("Symbol", String(50)) sample_value: Mapped[Optional[float]] = mapped_column( @@ -594,9 +756,7 @@ class NMA_Radionuclides(Base): "Volume", Integer, server_default=text("0") ) volume_unit: Mapped[Optional[str]] = mapped_column("VolumeUnit", String(50)) - object_id: Mapped[Optional[int]] = mapped_column("OBJECTID", Integer, unique=True) analyses_agency: Mapped[Optional[str]] = mapped_column("AnalysesAgency", String(50)) - wclab_id: Mapped[Optional[str]] = mapped_column("WCLab_ID", String(25)) thing: Mapped["Thing"] = relationship("Thing", back_populates="radionuclides") chemistry_sample_info: Mapped["NMA_Chemistry_SampleInfo"] = relationship( @@ -612,30 +772,57 @@ def validate_thing_id(self, key, value): ) return value - @validates("sample_pt_id") - def validate_sample_pt_id(self, key, value): + @validates("chemistry_sample_info_id") + def validate_chemistry_sample_info_id(self, key, value): if value is None: - raise ValueError("NMA_Radionuclides requires a SamplePtID") + raise ValueError("NMA_Radionuclides requires a chemistry_sample_info_id") return value class NMA_MajorChemistry(Base): """ Legacy MajorChemistry table from NM_Aquifer_Dev_DB. + + Refactored from UUID PK to Integer PK: + - id: Integer PK (autoincrement) + - nma_global_id: Original UUID PK, now UNIQUE for audit + - chemistry_sample_info_id: Integer FK to NMA_Chemistry_SampleInfo.id + - nma_sample_pt_id: Legacy UUID FK (SamplePtID) for audit + - nma_sample_point_id: Legacy SamplePointID string + - nma_object_id: Legacy OBJECTID, UNIQUE + - nma_wclab_id: Legacy WCLab_ID """ __tablename__ = "NMA_MajorChemistry" - global_id: Mapped[uuid.UUID] = mapped_column( - "GlobalID", UUID(as_uuid=True), primary_key=True + # New Integer PK + id: Mapped[int] = mapped_column(Integer, primary_key=True, autoincrement=True) + + # Legacy UUID PK (now audit column) + nma_global_id: Mapped[Optional[uuid.UUID]] = mapped_column( + "nma_GlobalID", UUID(as_uuid=True), unique=True, nullable=True ) - sample_pt_id: Mapped[uuid.UUID] = mapped_column( - "SamplePtID", - UUID(as_uuid=True), - ForeignKey("NMA_Chemistry_SampleInfo.SamplePtID", ondelete="CASCADE"), + + # New Integer FK to ChemistrySampleInfo + chemistry_sample_info_id: Mapped[int] = mapped_column( + Integer, + ForeignKey("NMA_Chemistry_SampleInfo.id", ondelete="CASCADE"), nullable=False, ) - sample_point_id: Mapped[Optional[str]] = mapped_column("SamplePointID", String(10)) + + # Legacy ID columns (renamed with nma_ prefix) + nma_sample_pt_id: Mapped[Optional[uuid.UUID]] = mapped_column( + "nma_SamplePtID", UUID(as_uuid=True), nullable=True + ) + nma_sample_point_id: Mapped[Optional[str]] = mapped_column( + "nma_SamplePointID", String(10) + ) + nma_object_id: Mapped[Optional[int]] = mapped_column( + "nma_OBJECTID", Integer, unique=True + ) + nma_wclab_id: Mapped[Optional[str]] = mapped_column("nma_WCLab_ID", String(25)) + + # Data columns analyte: Mapped[Optional[str]] = mapped_column("Analyte", String(50)) symbol: Mapped[Optional[str]] = mapped_column("Symbol", String(50)) sample_value: Mapped[Optional[float]] = mapped_column( @@ -652,18 +839,16 @@ class NMA_MajorChemistry(Base): "Volume", Integer, server_default=text("0") ) volume_unit: Mapped[Optional[str]] = mapped_column("VolumeUnit", String(50)) - object_id: Mapped[Optional[int]] = mapped_column("OBJECTID", Integer, unique=True) analyses_agency: Mapped[Optional[str]] = mapped_column("AnalysesAgency", String(50)) - wclab_id: Mapped[Optional[str]] = mapped_column("WCLab_ID", String(25)) 
chemistry_sample_info: Mapped["NMA_Chemistry_SampleInfo"] = relationship( "NMA_Chemistry_SampleInfo", back_populates="major_chemistries" ) - @validates("sample_pt_id") - def validate_sample_pt_id(self, key, value): + @validates("chemistry_sample_info_id") + def validate_chemistry_sample_info_id(self, key, value): if value is None: - raise ValueError("NMA_MajorChemistry requires a SamplePtID") + raise ValueError("NMA_MajorChemistry requires a chemistry_sample_info_id") return value @@ -671,69 +856,81 @@ class NMA_FieldParameters(Base): """ Legacy FieldParameters table from AMPAPI. Stores field measurements (pH, Temp, etc.) linked to ChemistrySampleInfo. + + Refactored from UUID PK to Integer PK: + - id: Integer PK (autoincrement) + - nma_global_id: Original UUID PK, now UNIQUE for audit + - chemistry_sample_info_id: Integer FK to NMA_Chemistry_SampleInfo.id + - nma_sample_pt_id: Legacy UUID FK (SamplePtID) for audit + - nma_sample_point_id: Legacy SamplePointID string + - nma_object_id: Legacy OBJECTID, UNIQUE + - nma_wclab_id: Legacy WCLab_ID """ __tablename__ = "NMA_FieldParameters" __table_args__ = ( - # Explicit Indexes from DDL + # Explicit Indexes (updated for new column names) Index("FieldParameters$AnalysesAgency", "AnalysesAgency"), - Index("FieldParameters$ChemistrySampleInfoFieldParameters", "SamplePtID"), + Index("FieldParameters$ChemistrySampleInfoFieldParameters", "chemistry_sample_info_id"), Index("FieldParameters$FieldParameter", "FieldParameter"), - Index("FieldParameters$SamplePointID", "SamplePointID"), - Index( - "FieldParameters$SamplePtID", "SamplePtID" - ), # Note: DDL had two indexes on this col - Index("FieldParameters$WCLab_ID", "WCLab_ID"), - # Unique Indexes (Explicitly named to match DDL) - Index("FieldParameters$GlobalID", "GlobalID", unique=True), - Index("FieldParameters$OBJECTID", "OBJECTID", unique=True), + Index("FieldParameters$nma_SamplePointID", "nma_SamplePointID"), + Index("FieldParameters$nma_WCLab_ID", "nma_WCLab_ID"), + # Unique Indexes + Index("FieldParameters$nma_GlobalID", "nma_GlobalID", unique=True), + Index("FieldParameters$nma_OBJECTID", "nma_OBJECTID", unique=True), ) - # Primary Key - global_id: Mapped[uuid.UUID] = mapped_column( - "GlobalID", UUID(as_uuid=True), primary_key=True, default=uuid.uuid4 + # New Integer PK + id: Mapped[int] = mapped_column(Integer, primary_key=True, autoincrement=True) + + # Legacy UUID PK (now audit column) + nma_global_id: Mapped[Optional[uuid.UUID]] = mapped_column( + "nma_GlobalID", UUID(as_uuid=True), unique=True, nullable=True ) - # Foreign Key - sample_pt_id: Mapped[uuid.UUID] = mapped_column( - "SamplePtID", - UUID(as_uuid=True), + # New Integer FK to ChemistrySampleInfo + chemistry_sample_info_id: Mapped[int] = mapped_column( + Integer, ForeignKey( - "NMA_Chemistry_SampleInfo.SamplePtID", + "NMA_Chemistry_SampleInfo.id", onupdate="CASCADE", ondelete="CASCADE", ), nullable=False, ) - # Legacy Columns - sample_point_id: Mapped[Optional[str]] = mapped_column("SamplePointID", String(10)) + # Legacy ID columns (renamed with nma_ prefix) + nma_sample_pt_id: Mapped[Optional[uuid.UUID]] = mapped_column( + "nma_SamplePtID", UUID(as_uuid=True), nullable=True + ) + nma_sample_point_id: Mapped[Optional[str]] = mapped_column( + "nma_SamplePointID", String(10) + ) + nma_object_id: Mapped[int] = mapped_column( + "nma_OBJECTID", Integer, Identity(start=1), nullable=False + ) + nma_wclab_id: Mapped[Optional[str]] = mapped_column("nma_WCLab_ID", String(25)) + + # Data columns field_parameter: Mapped[Optional[str]] = 
mapped_column("FieldParameter", String(50)) sample_value: Mapped[Optional[float]] = mapped_column( "SampleValue", Float, nullable=True ) units: Mapped[Optional[str]] = mapped_column("Units", String(50)) notes: Mapped[Optional[str]] = mapped_column("Notes", String(255)) - - # Identity Column - object_id: Mapped[int] = mapped_column( - "OBJECTID", Integer, Identity(start=1), nullable=False - ) - analyses_agency: Mapped[Optional[str]] = mapped_column("AnalysesAgency", String(50)) - wc_lab_id: Mapped[Optional[str]] = mapped_column("WCLab_ID", String(25)) # Relationships chemistry_sample_info: Mapped["NMA_Chemistry_SampleInfo"] = relationship( "NMA_Chemistry_SampleInfo", back_populates="field_parameters" ) - @validates("sample_pt_id") - def validate_sample_pt_id(self, key, value): + @validates("chemistry_sample_info_id") + def validate_chemistry_sample_info_id(self, key, value): if value is None: raise ValueError( - "FieldParameter requires a parent ChemistrySampleInfo (SamplePtID)" + "FieldParameter requires a parent ChemistrySampleInfo (chemistry_sample_info_id)" ) return value From a9f002b5f679568dc68157bc791f3bb2e5eae407 Mon Sep 17 00:00:00 2001 From: Kimball Bighorse Date: Wed, 28 Jan 2026 01:27:00 -0800 Subject: [PATCH 16/22] refactor(transfers): update column mappings for Integer PK schema Update all transfer scripts to use nma_ prefixed column names and Integer FK relationships for chemistry chain. Changes: - chemistry_sampleinfo.py: Map to nma_sample_pt_id, nma_sample_point_id, nma_wclab_id, nma_location_id, nma_object_id - minor_trace_chemistry_transfer.py: Use Integer FK via chemistry_sample_info_id lookup, store legacy UUID in nma_chemistry_sample_info_uuid - radionuclides.py: Use Integer FK via chemistry_sample_info_id lookup, map to nma_* columns - field_parameters_transfer.py: Use Integer FK via chemistry_sample_info_id lookup, map to nma_* columns - major_chemistry.py: Use Integer FK via chemistry_sample_info_id lookup, map to nma_* columns - stratigraphy_legacy.py: Map to nma_global_id, nma_well_id, nma_point_id, nma_object_id - associated_data.py: Map to nma_assoc_id, nma_location_id, nma_point_id, nma_object_id - hydraulicsdata.py: Map to nma_global_id, nma_well_id, nma_point_id, nma_object_id - soil_rock_results.py: Map to nma_point_id Co-Authored-By: Claude Opus 4.5 --- transfers/associated_data.py | 37 +++++-- transfers/chemistry_sampleinfo.py | 54 +++++---- transfers/field_parameters_transfer.py | 86 +++++++++----- transfers/hydraulicsdata.py | 44 +++++--- transfers/major_chemistry.py | 96 +++++++++++----- transfers/minor_trace_chemistry_transfer.py | 73 +++++++----- transfers/radionuclides.py | 117 +++++++++++--------- transfers/soil_rock_results.py | 21 +++- transfers/stratigraphy_legacy.py | 35 ++++-- 9 files changed, 373 insertions(+), 190 deletions(-) diff --git a/transfers/associated_data.py b/transfers/associated_data.py index be29a2c7..ca9195b0 100644 --- a/transfers/associated_data.py +++ b/transfers/associated_data.py @@ -13,6 +13,16 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== +""" +Transfer AssociatedData from NM_Aquifer to NMA_AssociatedData. 
+ +Updated for Integer PK schema: +- id: Integer PK (autoincrement, generated by DB) +- nma_assoc_id: Legacy UUID PK (AssocID), UNIQUE for audit +- nma_location_id: Legacy LocationId UUID, UNIQUE +- nma_point_id: Legacy PointID string +- nma_object_id: Legacy OBJECTID, UNIQUE +""" from __future__ import annotations @@ -54,7 +64,7 @@ def _get_dfs(self) -> tuple[pd.DataFrame, pd.DataFrame]: def _transfer_hook(self, session: Session) -> None: rows = [self._row_dict(row) for row in self.cleaned_df.to_dict("records")] - rows = self._dedupe_rows(rows, key="AssocID") + rows = self._dedupe_rows(rows, key="nma_AssocID") if not rows: logger.info("No AssociatedData rows to transfer") @@ -71,28 +81,35 @@ def _transfer_hook(self, session: Session) -> None: i + len(chunk) - 1, len(chunk), ) + # Upsert on nma_AssocID (legacy UUID PK, now UNIQUE) stmt = insert_stmt.values(chunk).on_conflict_do_update( - index_elements=["AssocID"], + index_elements=["nma_AssocID"], set_={ - "LocationId": excluded["LocationId"], - "PointID": excluded["PointID"], + "nma_LocationId": excluded["nma_LocationId"], + "nma_PointID": excluded["nma_PointID"], "Notes": excluded["Notes"], "Formation": excluded["Formation"], - "OBJECTID": excluded["OBJECTID"], + "nma_OBJECTID": excluded["nma_OBJECTID"], + "thing_id": excluded["thing_id"], }, ) session.execute(stmt) session.commit() def _row_dict(self, row: dict[str, Any]) -> dict[str, Any]: + point_id = row.get("PointID") return { - "LocationId": self._uuid_val(row.get("LocationId")), - "PointID": row.get("PointID"), - "AssocID": self._uuid_val(row.get("AssocID")), + # Legacy UUID PK -> nma_assoc_id (unique audit column) + "nma_AssocID": self._uuid_val(row.get("AssocID")), + # Legacy ID columns (renamed with nma_ prefix) + "nma_LocationId": self._uuid_val(row.get("LocationId")), + "nma_PointID": point_id, + "nma_OBJECTID": row.get("OBJECTID"), + # Data columns "Notes": row.get("Notes"), "Formation": row.get("Formation"), - "OBJECTID": row.get("OBJECTID"), - "thing_id": self._thing_id_cache.get(row.get("PointID")), + # FK to Thing + "thing_id": self._thing_id_cache.get(point_id), } def _dedupe_rows( diff --git a/transfers/chemistry_sampleinfo.py b/transfers/chemistry_sampleinfo.py index 3c4fd444..88a8c6d2 100644 --- a/transfers/chemistry_sampleinfo.py +++ b/transfers/chemistry_sampleinfo.py @@ -36,6 +36,14 @@ class ChemistrySampleInfoTransferer(Transferer): Transfer for the legacy Chemistry_SampleInfo table. Loads the CSV and upserts into the legacy table. 
+ + Updated for Integer PK schema: + - id: Integer PK (autoincrement, generated by DB) + - nma_sample_pt_id: Legacy UUID PK (SamplePtID), UNIQUE for audit + - nma_wclab_id: Legacy WCLab_ID + - nma_sample_point_id: Legacy SamplePointID + - nma_object_id: Legacy OBJECTID, UNIQUE + - nma_location_id: Legacy LocationId UUID """ source_table = "Chemistry_SampleInfo" @@ -168,13 +176,13 @@ def _transfer_hook(self, session: Session) -> None: lookup_miss_count = 0 for row in self.cleaned_df.to_dict("records"): row_dict = self._row_dict(row) - if row_dict.get("SamplePtID") is None: + if row_dict.get("nma_SamplePtID") is None: skipped_sample_pt_id_count += 1 logger.warning( - "Skipping ChemistrySampleInfo OBJECTID=%s SamplePointID=%s - " - "SamplePtID missing or invalid", - row_dict.get("OBJECTID"), - row_dict.get("SamplePointID"), + "Skipping ChemistrySampleInfo nma_OBJECTID=%s nma_SamplePointID=%s - " + "nma_SamplePtID missing or invalid", + row_dict.get("nma_OBJECTID"), + row_dict.get("nma_SamplePointID"), ) continue # Skip rows without valid thing_id (orphan prevention) @@ -182,15 +190,15 @@ def _transfer_hook(self, session: Session) -> None: skipped_orphan_count += 1 lookup_miss_count += 1 logger.warning( - f"Skipping ChemistrySampleInfo OBJECTID={row_dict.get('OBJECTID')} " - f"SamplePointID={row_dict.get('SamplePointID')} - Thing not found" + f"Skipping ChemistrySampleInfo nma_OBJECTID={row_dict.get('nma_OBJECTID')} " + f"nma_SamplePointID={row_dict.get('nma_SamplePointID')} - Thing not found" ) continue row_dicts.append(row_dict) if skipped_sample_pt_id_count > 0: logger.warning( - "Skipped %s ChemistrySampleInfo records without valid SamplePtID", + "Skipped %s ChemistrySampleInfo records without valid nma_SamplePtID", skipped_sample_pt_id_count, ) if skipped_orphan_count > 0: @@ -203,7 +211,7 @@ def _transfer_hook(self, session: Session) -> None: "ChemistrySampleInfo Thing lookup misses: %s", lookup_miss_count ) - rows = self._dedupe_rows(row_dicts, key="OBJECTID") + rows = self._dedupe_rows(row_dicts, key="nma_OBJECTID") insert_stmt = insert(NMA_Chemistry_SampleInfo) excluded = insert_stmt.excluded @@ -213,12 +221,13 @@ def _transfer_hook(self, session: Session) -> None: logger.info( f"Upserting batch {i}-{i+len(chunk)-1} ({len(chunk)} rows) into Chemistry_SampleInfo" ) + # Upsert on nma_SamplePtID (the legacy UUID PK, now UNIQUE) stmt = insert_stmt.values(chunk).on_conflict_do_update( - index_elements=["SamplePtID"], + index_elements=["nma_SamplePtID"], set_={ "thing_id": excluded.thing_id, # Required FK - prevent orphans - "SamplePointID": excluded.SamplePointID, - "WCLab_ID": excluded.WCLab_ID, + "nma_SamplePointID": excluded.nma_SamplePointID, + "nma_WCLab_ID": excluded.nma_WCLab_ID, "CollectionDate": excluded.CollectionDate, "CollectionMethod": excluded.CollectionMethod, "CollectedBy": excluded.CollectedBy, @@ -232,8 +241,8 @@ def _transfer_hook(self, session: Session) -> None: "PublicRelease": excluded.PublicRelease, "AddedDaytoDate": excluded.AddedDaytoDate, "AddedMonthDaytoDate": excluded.AddedMonthDaytoDate, - "LocationId": excluded.LocationId, - "OBJECTID": excluded.OBJECTID, + "nma_LocationId": excluded.nma_LocationId, + "nma_OBJECTID": excluded.nma_OBJECTID, "SampleNotes": excluded.SampleNotes, }, ) @@ -307,10 +316,18 @@ def bool_val(key: str) -> Optional[bool]: normalized_sample_point_id, ) + # Map to new column names (nma_ prefix for legacy columns) return { - "SamplePtID": uuid_val("SamplePtID"), - "WCLab_ID": str_val("WCLab_ID"), - "SamplePointID": str_val("SamplePointID"), 
+ # Legacy UUID PK -> nma_sample_pt_id (unique audit column) + "nma_SamplePtID": uuid_val("SamplePtID"), + # Legacy ID columns (renamed with nma_ prefix) + "nma_WCLab_ID": str_val("WCLab_ID"), + "nma_SamplePointID": str_val("SamplePointID"), + "nma_LocationId": uuid_val("LocationId"), + "nma_OBJECTID": val("OBJECTID"), + # FK to Thing + "thing_id": thing_id, + # Data columns (unchanged names) "CollectionDate": collection_date, "CollectionMethod": str_val("CollectionMethod"), "CollectedBy": str_val("CollectedBy"), @@ -325,9 +342,6 @@ def bool_val(key: str) -> Optional[bool]: "AddedDaytoDate": bool_val("AddedDaytoDate"), "AddedMonthDaytoDate": bool_val("AddedMonthDaytoDate"), "SampleNotes": str_val("SampleNotes"), - "LocationId": uuid_val("LocationId"), - "OBJECTID": val("OBJECTID"), - "thing_id": thing_id, } def _dedupe_rows( diff --git a/transfers/field_parameters_transfer.py b/transfers/field_parameters_transfer.py index b9a4fe6c..e1780df5 100644 --- a/transfers/field_parameters_transfer.py +++ b/transfers/field_parameters_transfer.py @@ -16,7 +16,16 @@ """Transfer FieldParameters data from NM_Aquifer to NMA_FieldParameters. This transfer requires ChemistrySampleInfo to be backfilled first. Each -FieldParameters record links to a ChemistrySampleInfo record via SamplePtID. +FieldParameters record links to a ChemistrySampleInfo record via chemistry_sample_info_id. + +Updated for Integer PK schema: +- id: Integer PK (autoincrement, generated by DB) +- nma_global_id: Legacy UUID PK (GlobalID), UNIQUE for audit +- chemistry_sample_info_id: Integer FK to NMA_Chemistry_SampleInfo.id +- nma_sample_pt_id: Legacy UUID FK (SamplePtID) for audit +- nma_sample_point_id: Legacy SamplePointID string +- nma_object_id: Legacy OBJECTID (Identity) +- nma_wclab_id: Legacy WCLab_ID """ from __future__ import annotations @@ -39,8 +48,8 @@ class FieldParametersTransferer(Transferer): """ Transfer FieldParameters records to NMA_FieldParameters. - Looks up ChemistrySampleInfo by SamplePtID and creates linked - FieldParameters records. Uses upsert for idempotent transfers. + Looks up ChemistrySampleInfo by nma_sample_pt_id (legacy UUID) and creates linked + FieldParameters records with Integer FK. Uses upsert for idempotent transfers. 
""" source_table = "FieldParameters" @@ -48,16 +57,23 @@ class FieldParametersTransferer(Transferer): def __init__(self, *args, batch_size: int = 1000, **kwargs): super().__init__(*args, **kwargs) self.batch_size = batch_size - self._sample_pt_ids: set[UUID] = set() - self._build_sample_pt_id_cache() + # Cache: legacy UUID -> Integer id + self._sample_info_cache: dict[UUID, int] = {} + self._build_sample_info_cache() - def _build_sample_pt_id_cache(self) -> None: - """Build cache of ChemistrySampleInfo.SamplePtID values.""" + def _build_sample_info_cache(self) -> None: + """Build cache of nma_sample_pt_id -> id for FK lookups.""" with session_ctx() as session: - sample_infos = session.query(NMA_Chemistry_SampleInfo.sample_pt_id).all() - self._sample_pt_ids = {sample_pt_id for (sample_pt_id,) in sample_infos} + sample_infos = session.query( + NMA_Chemistry_SampleInfo.nma_sample_pt_id, + NMA_Chemistry_SampleInfo.id + ).filter(NMA_Chemistry_SampleInfo.nma_sample_pt_id.isnot(None)).all() + self._sample_info_cache = { + nma_sample_pt_id: csi_id + for nma_sample_pt_id, csi_id in sample_infos + } logger.info( - f"Built ChemistrySampleInfo cache with {len(self._sample_pt_ids)} entries" + f"Built ChemistrySampleInfo cache with {len(self._sample_info_cache)} entries" ) def _get_dfs(self) -> tuple[pd.DataFrame, pd.DataFrame]: @@ -71,7 +87,7 @@ def _filter_to_valid_sample_infos(self, df: pd.DataFrame) -> pd.DataFrame: This prevents orphan records and ensures the FK constraint will be satisfied. """ - valid_sample_pt_ids = self._sample_pt_ids + valid_sample_pt_ids = set(self._sample_info_cache.keys()) before_count = len(df) mask = df["SamplePtID"].apply( lambda value: self._uuid_val(value) in valid_sample_pt_ids @@ -92,7 +108,7 @@ def _transfer_hook(self, session: Session) -> None: """ Override transfer hook to use batch upsert for idempotent transfers. - Uses ON CONFLICT DO UPDATE on GlobalID. + Uses ON CONFLICT DO UPDATE on nma_GlobalID (legacy UUID PK, now UNIQUE). 
""" limit = self.flags.get("LIMIT", 0) df = self.cleaned_df @@ -118,18 +134,20 @@ def _transfer_hook(self, session: Session) -> None: for i in range(0, len(rows), self.batch_size): chunk = rows[i : i + self.batch_size] logger.info(f"Upserting batch {i}-{i+len(chunk)-1} ({len(chunk)} rows)") + # Upsert on nma_GlobalID (legacy UUID PK, now UNIQUE) stmt = insert_stmt.values(chunk).on_conflict_do_update( - index_elements=["GlobalID"], + index_elements=["nma_GlobalID"], set_={ - "SamplePtID": excluded.SamplePtID, - "SamplePointID": excluded.SamplePointID, + "chemistry_sample_info_id": excluded.chemistry_sample_info_id, + "nma_SamplePtID": excluded.nma_SamplePtID, + "nma_SamplePointID": excluded.nma_SamplePointID, "FieldParameter": excluded.FieldParameter, "SampleValue": excluded.SampleValue, "Units": excluded.Units, "Notes": excluded.Notes, - "OBJECTID": excluded.OBJECTID, + "nma_OBJECTID": excluded.nma_OBJECTID, "AnalysesAgency": excluded.AnalysesAgency, - "WCLab_ID": excluded.WCLab_ID, + "nma_WCLab_ID": excluded.nma_WCLab_ID, }, ) session.execute(stmt) @@ -138,8 +156,9 @@ def _transfer_hook(self, session: Session) -> None: def _row_to_dict(self, row) -> Optional[dict[str, Any]]: """Convert a DataFrame row to a dict for upsert.""" - sample_pt_id = self._uuid_val(getattr(row, "SamplePtID", None)) - if sample_pt_id is None: + # Get legacy UUID FK + legacy_sample_pt_id = self._uuid_val(getattr(row, "SamplePtID", None)) + if legacy_sample_pt_id is None: self._capture_error( getattr(row, "SamplePtID", None), f"Invalid SamplePtID: {getattr(row, 'SamplePtID', None)}", @@ -147,16 +166,18 @@ def _row_to_dict(self, row) -> Optional[dict[str, Any]]: ) return None - if sample_pt_id not in self._sample_pt_ids: + # Look up Integer FK from cache + chemistry_sample_info_id = self._sample_info_cache.get(legacy_sample_pt_id) + if chemistry_sample_info_id is None: self._capture_error( - sample_pt_id, - f"ChemistrySampleInfo not found for SamplePtID: {sample_pt_id}", + legacy_sample_pt_id, + f"ChemistrySampleInfo not found for SamplePtID: {legacy_sample_pt_id}", "SamplePtID", ) return None - global_id = self._uuid_val(getattr(row, "GlobalID", None)) - if global_id is None: + nma_global_id = self._uuid_val(getattr(row, "GlobalID", None)) + if nma_global_id is None: self._capture_error( getattr(row, "GlobalID", None), f"Invalid GlobalID: {getattr(row, 'GlobalID', None)}", @@ -165,23 +186,28 @@ def _row_to_dict(self, row) -> Optional[dict[str, Any]]: return None return { - "GlobalID": global_id, - "SamplePtID": sample_pt_id, - "SamplePointID": self._safe_str(row, "SamplePointID"), + # Legacy UUID PK -> nma_global_id (unique audit column) + "nma_GlobalID": nma_global_id, + # New Integer FK to ChemistrySampleInfo + "chemistry_sample_info_id": chemistry_sample_info_id, + # Legacy ID columns (renamed with nma_ prefix) + "nma_SamplePtID": legacy_sample_pt_id, + "nma_SamplePointID": self._safe_str(row, "SamplePointID"), + "nma_OBJECTID": self._safe_int(row, "OBJECTID"), + "nma_WCLab_ID": self._safe_str(row, "WCLab_ID"), + # Data columns "FieldParameter": self._safe_str(row, "FieldParameter"), "SampleValue": self._safe_float(row, "SampleValue"), "Units": self._safe_str(row, "Units"), "Notes": self._safe_str(row, "Notes"), - "OBJECTID": self._safe_int(row, "OBJECTID"), "AnalysesAgency": self._safe_str(row, "AnalysesAgency"), - "WCLab_ID": self._safe_str(row, "WCLab_ID"), } def _dedupe_rows(self, rows: list[dict[str, Any]]) -> list[dict[str, Any]]: """Dedupe rows by unique key to avoid ON CONFLICT loops. 
Later rows win.""" deduped = {} for row in rows: - key = row.get("GlobalID") + key = row.get("nma_GlobalID") if key is None: continue deduped[key] = row diff --git a/transfers/hydraulicsdata.py b/transfers/hydraulicsdata.py index a1e1b7f4..bfaee00f 100644 --- a/transfers/hydraulicsdata.py +++ b/transfers/hydraulicsdata.py @@ -13,6 +13,16 @@ # See the License for the specific language governing permissions and # limitations under the License. # =============================================================================== +""" +Transfer HydraulicsData from NM_Aquifer to NMA_HydraulicsData. + +Updated for Integer PK schema: +- id: Integer PK (autoincrement, generated by DB) +- nma_global_id: Legacy UUID PK (GlobalID), UNIQUE for audit +- nma_well_id: Legacy WellID UUID +- nma_point_id: Legacy PointID string +- nma_object_id: Legacy OBJECTID, UNIQUE +""" from __future__ import annotations @@ -33,6 +43,8 @@ class HydraulicsDataTransferer(Transferer): """ Transfer for the legacy NMA_HydraulicsData table. + + Uses Integer PK with legacy UUID stored in nma_global_id for audit. """ source_table = "HydraulicsData" @@ -75,9 +87,9 @@ def _transfer_hook(self, session: Session) -> None: if row_dict.get("thing_id") is None: skipped_count += 1 logger.warning( - "Skipping HydraulicsData GlobalID=%s PointID=%s - Thing not found", - row_dict.get("GlobalID"), - row_dict.get("PointID"), + "Skipping HydraulicsData nma_GlobalID=%s nma_PointID=%s - Thing not found", + row_dict.get("nma_GlobalID"), + row_dict.get("nma_PointID"), ) continue row_dicts.append(row_dict) @@ -88,7 +100,7 @@ def _transfer_hook(self, session: Session) -> None: f"(orphan prevention)" ) - rows = self._dedupe_rows(row_dicts, key="GlobalID") + rows = self._dedupe_rows(row_dicts, key="nma_GlobalID") insert_stmt = insert(NMA_HydraulicsData) excluded = insert_stmt.excluded @@ -98,11 +110,12 @@ def _transfer_hook(self, session: Session) -> None: logger.info( f"Upserting batch {i}-{i+len(chunk)-1} ({len(chunk)} rows) into NMA_HydraulicsData" ) + # Upsert on nma_GlobalID (legacy UUID PK, now UNIQUE) stmt = insert_stmt.values(chunk).on_conflict_do_update( - index_elements=["GlobalID"], + index_elements=["nma_GlobalID"], set_={ - "WellID": excluded["WellID"], - "PointID": excluded["PointID"], + "nma_WellID": excluded["nma_WellID"], + "nma_PointID": excluded["nma_PointID"], "HydraulicUnit": excluded["HydraulicUnit"], "thing_id": excluded["thing_id"], "TestTop": excluded["TestTop"], @@ -121,7 +134,7 @@ def _transfer_hook(self, session: Session) -> None: "P (decimal fraction)": excluded["P (decimal fraction)"], "k (darcy)": excluded["k (darcy)"], "Data Source": excluded["Data Source"], - "OBJECTID": excluded["OBJECTID"], + "nma_OBJECTID": excluded["nma_OBJECTID"], }, ) session.execute(stmt) @@ -155,12 +168,18 @@ def as_int(key: str) -> Optional[int]: except (TypeError, ValueError): return None + point_id = val("PointID") return { - "GlobalID": as_uuid("GlobalID"), - "WellID": as_uuid("WellID"), - "PointID": val("PointID"), + # Legacy UUID PK -> nma_global_id (unique audit column) + "nma_GlobalID": as_uuid("GlobalID"), + # Legacy ID columns (renamed with nma_ prefix) + "nma_WellID": as_uuid("WellID"), + "nma_PointID": point_id, + "nma_OBJECTID": as_int("OBJECTID"), + # FK to Thing + "thing_id": self._thing_id_cache.get(point_id), + # Data columns "HydraulicUnit": val("HydraulicUnit"), - "thing_id": self._thing_id_cache.get(val("PointID")), "TestTop": as_int("TestTop"), "TestBottom": as_int("TestBottom"), "HydraulicUnitType": val("HydraulicUnitType"), 
@@ -177,7 +196,6 @@ def as_int(key: str) -> Optional[int]: "P (decimal fraction)": val("P (decimal fraction)"), "k (darcy)": val("k (darcy)"), "Data Source": val("Data Source"), - "OBJECTID": as_int("OBJECTID"), } def _dedupe_rows( diff --git a/transfers/major_chemistry.py b/transfers/major_chemistry.py index d222fb0c..175e7d4d 100644 --- a/transfers/major_chemistry.py +++ b/transfers/major_chemistry.py @@ -13,6 +13,18 @@ # See the License for the specific language governing permissions and # limitations under the License. # =============================================================================== +""" +Transfer MajorChemistry data from NM_Aquifer to NMA_MajorChemistry. + +Updated for Integer PK schema: +- id: Integer PK (autoincrement, generated by DB) +- nma_global_id: Legacy UUID PK (GlobalID), UNIQUE for audit +- chemistry_sample_info_id: Integer FK to NMA_Chemistry_SampleInfo.id +- nma_sample_pt_id: Legacy UUID FK (SamplePtID) for audit +- nma_sample_point_id: Legacy SamplePointID string +- nma_object_id: Legacy OBJECTID, UNIQUE +- nma_wclab_id: Legacy WCLab_ID +""" from __future__ import annotations @@ -34,6 +46,8 @@ class MajorChemistryTransferer(Transferer): """ Transfer for the legacy MajorChemistry table. + + Uses Integer FK to ChemistrySampleInfo via chemistry_sample_info_id. """ source_table = "MajorChemistry" @@ -41,15 +55,23 @@ class MajorChemistryTransferer(Transferer): def __init__(self, *args, batch_size: int = 1000, **kwargs): super().__init__(*args, **kwargs) self.batch_size = batch_size - self._sample_pt_ids: set[UUID] = set() - self._build_sample_pt_id_cache() + # Cache: legacy UUID -> Integer id + self._sample_info_cache: dict[UUID, int] = {} + self._build_sample_info_cache() - def _build_sample_pt_id_cache(self) -> None: + def _build_sample_info_cache(self) -> None: + """Build cache of nma_sample_pt_id -> id for FK lookups.""" with session_ctx() as session: - sample_infos = session.query(NMA_Chemistry_SampleInfo.sample_pt_id).all() - self._sample_pt_ids = {sample_pt_id for (sample_pt_id,) in sample_infos} + sample_infos = session.query( + NMA_Chemistry_SampleInfo.nma_sample_pt_id, + NMA_Chemistry_SampleInfo.id + ).filter(NMA_Chemistry_SampleInfo.nma_sample_pt_id.isnot(None)).all() + self._sample_info_cache = { + nma_sample_pt_id: csi_id + for nma_sample_pt_id, csi_id in sample_infos + } logger.info( - f"Built ChemistrySampleInfo cache with {len(self._sample_pt_ids)} entries" + f"Built ChemistrySampleInfo cache with {len(self._sample_info_cache)} entries" ) def _get_dfs(self) -> tuple[pd.DataFrame, pd.DataFrame]: @@ -58,7 +80,7 @@ def _get_dfs(self) -> tuple[pd.DataFrame, pd.DataFrame]: return input_df, cleaned_df def _filter_to_valid_sample_infos(self, df: pd.DataFrame) -> pd.DataFrame: - valid_sample_pt_ids = self._sample_pt_ids + valid_sample_pt_ids = set(self._sample_info_cache.keys()) mask = df["SamplePtID"].apply( lambda value: self._uuid_val(value) in valid_sample_pt_ids ) @@ -78,26 +100,39 @@ def _filter_to_valid_sample_infos(self, df: pd.DataFrame) -> pd.DataFrame: def _transfer_hook(self, session: Session) -> None: row_dicts = [] skipped_global_id = 0 + skipped_csi_id = 0 for row in self.cleaned_df.to_dict("records"): row_dict = self._row_dict(row) if row_dict is None: continue - if row_dict.get("GlobalID") is None: + if row_dict.get("nma_GlobalID") is None: skipped_global_id += 1 logger.warning( - "Skipping MajorChemistry SamplePtID=%s - GlobalID missing or invalid", - row_dict.get("SamplePtID"), + "Skipping MajorChemistry nma_SamplePtID=%s - 
nma_GlobalID missing or invalid", + row_dict.get("nma_SamplePtID"), + ) + continue + if row_dict.get("chemistry_sample_info_id") is None: + skipped_csi_id += 1 + logger.warning( + "Skipping MajorChemistry nma_SamplePtID=%s - chemistry_sample_info_id not found", + row_dict.get("nma_SamplePtID"), ) continue row_dicts.append(row_dict) if skipped_global_id > 0: logger.warning( - "Skipped %s MajorChemistry records without valid GlobalID", + "Skipped %s MajorChemistry records without valid nma_GlobalID", skipped_global_id, ) + if skipped_csi_id > 0: + logger.warning( + "Skipped %s MajorChemistry records without valid chemistry_sample_info_id", + skipped_csi_id, + ) - rows = self._dedupe_rows(row_dicts, key="GlobalID") + rows = self._dedupe_rows(row_dicts, key="nma_GlobalID") insert_stmt = insert(NMA_MajorChemistry) excluded = insert_stmt.excluded @@ -106,11 +141,13 @@ def _transfer_hook(self, session: Session) -> None: logger.info( f"Upserting batch {i}-{i+len(chunk)-1} ({len(chunk)} rows) into MajorChemistry" ) + # Upsert on nma_GlobalID (legacy UUID PK, now UNIQUE) stmt = insert_stmt.values(chunk).on_conflict_do_update( - index_elements=["GlobalID"], + index_elements=["nma_GlobalID"], set_={ - "SamplePtID": excluded.SamplePtID, - "SamplePointID": excluded.SamplePointID, + "chemistry_sample_info_id": excluded.chemistry_sample_info_id, + "nma_SamplePtID": excluded.nma_SamplePtID, + "nma_SamplePointID": excluded.nma_SamplePointID, "Analyte": excluded.Analyte, "Symbol": excluded.Symbol, "SampleValue": excluded.SampleValue, @@ -121,9 +158,9 @@ def _transfer_hook(self, session: Session) -> None: "Notes": excluded.Notes, "Volume": excluded.Volume, "VolumeUnit": excluded.VolumeUnit, - "OBJECTID": excluded.OBJECTID, + "nma_OBJECTID": excluded.nma_OBJECTID, "AnalysesAgency": excluded.AnalysesAgency, - "WCLab_ID": excluded.WCLab_ID, + "nma_WCLab_ID": excluded.nma_WCLab_ID, }, ) session.execute(stmt) @@ -161,8 +198,9 @@ def int_val(key: str) -> Optional[int]: if isinstance(analysis_date, datetime): analysis_date = analysis_date.replace(tzinfo=None) - sample_pt_id = self._uuid_val(val("SamplePtID")) - if sample_pt_id is None: + # Get legacy UUID FK + legacy_sample_pt_id = self._uuid_val(val("SamplePtID")) + if legacy_sample_pt_id is None: self._capture_error( val("SamplePtID"), f"Invalid SamplePtID: {val('SamplePtID')}", @@ -170,11 +208,22 @@ def int_val(key: str) -> Optional[int]: ) return None - global_id = self._uuid_val(val("GlobalID")) + # Look up Integer FK from cache + chemistry_sample_info_id = self._sample_info_cache.get(legacy_sample_pt_id) + + nma_global_id = self._uuid_val(val("GlobalID")) return { - "SamplePtID": sample_pt_id, - "SamplePointID": val("SamplePointID"), + # Legacy UUID PK -> nma_global_id (unique audit column) + "nma_GlobalID": nma_global_id, + # New Integer FK to ChemistrySampleInfo + "chemistry_sample_info_id": chemistry_sample_info_id, + # Legacy ID columns (renamed with nma_ prefix) + "nma_SamplePtID": legacy_sample_pt_id, + "nma_SamplePointID": val("SamplePointID"), + "nma_OBJECTID": val("OBJECTID"), + "nma_WCLab_ID": val("WCLab_ID"), + # Data columns "Analyte": val("Analyte"), "Symbol": val("Symbol"), "SampleValue": float_val("SampleValue"), @@ -185,10 +234,7 @@ def int_val(key: str) -> Optional[int]: "Notes": val("Notes"), "Volume": int_val("Volume"), "VolumeUnit": val("VolumeUnit"), - "OBJECTID": val("OBJECTID"), - "GlobalID": global_id, "AnalysesAgency": val("AnalysesAgency"), - "WCLab_ID": val("WCLab_ID"), } def _dedupe_rows( diff --git 
a/transfers/minor_trace_chemistry_transfer.py b/transfers/minor_trace_chemistry_transfer.py index ee9c314e..9cbd7218 100644 --- a/transfers/minor_trace_chemistry_transfer.py +++ b/transfers/minor_trace_chemistry_transfer.py @@ -18,7 +18,13 @@ This transfer requires ChemistrySampleInfo to be backfilled first (which links to Thing via thing_id). Each MinorTraceChemistry record links to a ChemistrySampleInfo -record via chemistry_sample_info_id. +record via chemistry_sample_info_id (Integer FK). + +Updated for Integer PK schema: +- id: Integer PK (autoincrement, generated by DB) +- nma_global_id: Legacy UUID PK (GlobalID), UNIQUE for audit +- chemistry_sample_info_id: Integer FK to NMA_Chemistry_SampleInfo.id +- nma_chemistry_sample_info_uuid: Legacy UUID FK for audit """ from __future__ import annotations @@ -42,8 +48,8 @@ class MinorTraceChemistryTransferer(Transferer): """ Transfer MinorandTraceChemistry records to NMA_MinorTraceChemistry. - Looks up ChemistrySampleInfo by SamplePtID and creates linked - NMA_MinorTraceChemistry records. Uses upsert for idempotent transfers. + Looks up ChemistrySampleInfo by nma_sample_pt_id (legacy UUID) and creates linked + NMA_MinorTraceChemistry records with Integer FK. Uses upsert for idempotent transfers. """ source_table = "MinorandTraceChemistry" @@ -51,17 +57,23 @@ class MinorTraceChemistryTransferer(Transferer): def __init__(self, *args, batch_size: int = 1000, **kwargs): super().__init__(*args, **kwargs) self.batch_size = batch_size - # Cache ChemistrySampleInfo SamplePtIDs for FK validation - self._sample_pt_ids: set[UUID] = set() - self._build_sample_pt_id_cache() + # Cache ChemistrySampleInfo: legacy UUID -> Integer id + self._sample_info_cache: dict[UUID, int] = {} + self._build_sample_info_cache() - def _build_sample_pt_id_cache(self): - """Build cache of ChemistrySampleInfo.SamplePtID values.""" + def _build_sample_info_cache(self): + """Build cache of ChemistrySampleInfo.nma_sample_pt_id -> ChemistrySampleInfo.id.""" with session_ctx() as session: - sample_infos = session.query(NMA_Chemistry_SampleInfo.sample_pt_id).all() - self._sample_pt_ids = {sample_pt_id for (sample_pt_id,) in sample_infos} + sample_infos = session.query( + NMA_Chemistry_SampleInfo.nma_sample_pt_id, + NMA_Chemistry_SampleInfo.id + ).filter(NMA_Chemistry_SampleInfo.nma_sample_pt_id.isnot(None)).all() + self._sample_info_cache = { + nma_sample_pt_id: csi_id + for nma_sample_pt_id, csi_id in sample_infos + } logger.info( - f"Built ChemistrySampleInfo cache with {len(self._sample_pt_ids)} entries" + f"Built ChemistrySampleInfo cache with {len(self._sample_info_cache)} entries" ) def _get_dfs(self) -> tuple[pd.DataFrame, pd.DataFrame]: @@ -76,7 +88,7 @@ def _filter_to_valid_sample_infos(self, df: pd.DataFrame) -> pd.DataFrame: This prevents orphan records and ensures the FK constraint will be satisfied. """ - valid_sample_pt_ids = self._sample_pt_ids + valid_sample_pt_ids = set(self._sample_info_cache.keys()) before_count = len(df) mask = df["SamplePtID"].apply( @@ -98,7 +110,7 @@ def _transfer_hook(self, session: Session) -> None: """ Override transfer hook to use batch upsert for idempotent transfers. - Uses ON CONFLICT DO UPDATE on (chemistry_sample_info_id, analyte). + Uses ON CONFLICT DO UPDATE on nma_GlobalID (the legacy UUID PK, now UNIQUE). """ limit = self.flags.get("LIMIT", 0) df = self.cleaned_df @@ -116,7 +128,7 @@ def _transfer_hook(self, session: Session) -> None: logger.warning("No valid rows to transfer") return - # Dedupe by GlobalID to avoid PK conflicts. 
+ # Dedupe by nma_GlobalID to avoid PK conflicts. rows = self._dedupe_rows(row_dicts) logger.info(f"Upserting {len(rows)} MinorTraceChemistry records") @@ -126,9 +138,12 @@ def _transfer_hook(self, session: Session) -> None: for i in range(0, len(rows), self.batch_size): chunk = rows[i : i + self.batch_size] logger.info(f"Upserting batch {i}-{i+len(chunk)-1} ({len(chunk)} rows)") + # Upsert on nma_GlobalID (legacy UUID PK, now UNIQUE) stmt = insert_stmt.values(chunk).on_conflict_do_update( - index_elements=["GlobalID"], + index_elements=["nma_GlobalID"], set_={ + "chemistry_sample_info_id": excluded.chemistry_sample_info_id, + "nma_chemistry_sample_info_uuid": excluded.nma_chemistry_sample_info_uuid, "sample_value": excluded.sample_value, "units": excluded.units, "symbol": excluded.symbol, @@ -147,8 +162,9 @@ def _transfer_hook(self, session: Session) -> None: def _row_to_dict(self, row) -> Optional[dict[str, Any]]: """Convert a DataFrame row to a dict for upsert.""" - sample_pt_id = self._uuid_val(row.SamplePtID) - if sample_pt_id is None: + # Get legacy UUID FK + legacy_sample_pt_id = self._uuid_val(row.SamplePtID) + if legacy_sample_pt_id is None: self._capture_error( getattr(row, "SamplePtID", None), f"Invalid SamplePtID: {getattr(row, 'SamplePtID', None)}", @@ -156,16 +172,18 @@ def _row_to_dict(self, row) -> Optional[dict[str, Any]]: ) return None - if sample_pt_id not in self._sample_pt_ids: + # Look up Integer FK from cache + chemistry_sample_info_id = self._sample_info_cache.get(legacy_sample_pt_id) + if chemistry_sample_info_id is None: self._capture_error( - sample_pt_id, - f"ChemistrySampleInfo not found for SamplePtID: {sample_pt_id}", + legacy_sample_pt_id, + f"ChemistrySampleInfo not found for SamplePtID: {legacy_sample_pt_id}", "SamplePtID", ) return None - global_id = self._uuid_val(getattr(row, "GlobalID", None)) - if global_id is None: + nma_global_id = self._uuid_val(getattr(row, "GlobalID", None)) + if nma_global_id is None: self._capture_error( getattr(row, "GlobalID", None), f"Invalid GlobalID: {getattr(row, 'GlobalID', None)}", @@ -174,8 +192,13 @@ def _row_to_dict(self, row) -> Optional[dict[str, Any]]: return None return { - "global_id": global_id, - "chemistry_sample_info_id": sample_pt_id, + # Legacy UUID PK -> nma_global_id (unique audit column) + "nma_GlobalID": nma_global_id, + # New Integer FK to ChemistrySampleInfo + "chemistry_sample_info_id": chemistry_sample_info_id, + # Legacy UUID FK for audit + "nma_chemistry_sample_info_uuid": legacy_sample_pt_id, + # Data columns "analyte": self._safe_str(row, "Analyte"), "sample_value": self._safe_float(row, "SampleValue"), "units": self._safe_str(row, "Units"), @@ -193,7 +216,7 @@ def _dedupe_rows(self, rows: list[dict[str, Any]]) -> list[dict[str, Any]]: """Dedupe rows by unique key to avoid ON CONFLICT loops. Later rows win.""" deduped = {} for row in rows: - key = row.get("global_id") + key = row.get("nma_GlobalID") if key is None: continue deduped[key] = row diff --git a/transfers/radionuclides.py b/transfers/radionuclides.py index 70575e03..ba17f038 100644 --- a/transfers/radionuclides.py +++ b/transfers/radionuclides.py @@ -13,6 +13,18 @@ # See the License for the specific language governing permissions and # limitations under the License. # =============================================================================== +""" +Transfer Radionuclides data from NM_Aquifer to NMA_Radionuclides. 
+ +Updated for Integer PK schema: +- id: Integer PK (autoincrement, generated by DB) +- nma_global_id: Legacy UUID PK (GlobalID), UNIQUE for audit +- chemistry_sample_info_id: Integer FK to NMA_Chemistry_SampleInfo.id +- nma_sample_pt_id: Legacy UUID FK (SamplePtID) for audit +- nma_sample_point_id: Legacy SamplePointID string +- nma_object_id: Legacy OBJECTID, UNIQUE +- nma_wclab_id: Legacy WCLab_ID +""" from __future__ import annotations @@ -34,6 +46,8 @@ class RadionuclidesTransferer(Transferer): """ Transfer for the legacy Radionuclides table. + + Uses Integer FK to ChemistrySampleInfo via chemistry_sample_info_id. """ source_table = "Radionuclides" @@ -41,21 +55,24 @@ class RadionuclidesTransferer(Transferer): def __init__(self, *args, batch_size: int = 1000, **kwargs): super().__init__(*args, **kwargs) self.batch_size = batch_size - self._sample_pt_ids: set[UUID] = set() - self._thing_id_by_sample_pt_id: dict[UUID, int] = {} + # Cache: legacy UUID -> (Integer id, thing_id) + self._sample_info_cache: dict[UUID, tuple[int, int]] = {} self._build_sample_info_cache() def _build_sample_info_cache(self) -> None: + """Build cache of nma_sample_pt_id -> (id, thing_id) for FK lookups.""" with session_ctx() as session: sample_infos = session.query( - NMA_Chemistry_SampleInfo.sample_pt_id, NMA_Chemistry_SampleInfo.thing_id - ).all() - self._sample_pt_ids = {sample_pt_id for sample_pt_id, _ in sample_infos} - self._thing_id_by_sample_pt_id = { - sample_pt_id: thing_id for sample_pt_id, thing_id in sample_infos + NMA_Chemistry_SampleInfo.nma_sample_pt_id, + NMA_Chemistry_SampleInfo.id, + NMA_Chemistry_SampleInfo.thing_id, + ).filter(NMA_Chemistry_SampleInfo.nma_sample_pt_id.isnot(None)).all() + self._sample_info_cache = { + nma_sample_pt_id: (csi_id, thing_id) + for nma_sample_pt_id, csi_id, thing_id in sample_infos } logger.info( - f"Built ChemistrySampleInfo cache with {len(self._sample_pt_ids)} entries" + f"Built ChemistrySampleInfo cache with {len(self._sample_info_cache)} entries" ) def _get_dfs(self) -> tuple[pd.DataFrame, pd.DataFrame]: @@ -64,7 +81,7 @@ def _get_dfs(self) -> tuple[pd.DataFrame, pd.DataFrame]: return input_df, cleaned_df def _filter_to_valid_sample_infos(self, df: pd.DataFrame) -> pd.DataFrame: - valid_sample_pt_ids = self._sample_pt_ids + valid_sample_pt_ids = set(self._sample_info_cache.keys()) mask = df["SamplePtID"].apply( lambda value: self._uuid_val(value) in valid_sample_pt_ids ) @@ -89,25 +106,31 @@ def _transfer_hook(self, session: Session) -> None: row_dict = self._row_dict(row) if row_dict is None: continue - if row_dict.get("GlobalID") is None: + if row_dict.get("nma_GlobalID") is None: skipped_global_id += 1 logger.warning( - "Skipping Radionuclides SamplePtID=%s - GlobalID missing or invalid", - row_dict.get("SamplePtID"), + "Skipping Radionuclides nma_SamplePtID=%s - nma_GlobalID missing or invalid", + row_dict.get("nma_SamplePtID"), ) continue if row_dict.get("thing_id") is None: skipped_thing_id += 1 logger.warning( - "Skipping Radionuclides SamplePtID=%s - Thing not found", - row_dict.get("SamplePtID"), + "Skipping Radionuclides nma_SamplePtID=%s - Thing not found", + row_dict.get("nma_SamplePtID"), + ) + continue + if row_dict.get("chemistry_sample_info_id") is None: + logger.warning( + "Skipping Radionuclides nma_SamplePtID=%s - chemistry_sample_info_id not found", + row_dict.get("nma_SamplePtID"), ) continue row_dicts.append(row_dict) if skipped_global_id > 0: logger.warning( - "Skipped %s Radionuclides records without valid GlobalID", + "Skipped %s 
Radionuclides records without valid nma_GlobalID", skipped_global_id, ) if skipped_thing_id > 0: @@ -116,7 +139,7 @@ def _transfer_hook(self, session: Session) -> None: skipped_thing_id, ) - rows = self._dedupe_rows(row_dicts, key="GlobalID") + rows = self._dedupe_rows(row_dicts, key="nma_GlobalID") insert_stmt = insert(NMA_Radionuclides) excluded = insert_stmt.excluded @@ -125,12 +148,14 @@ def _transfer_hook(self, session: Session) -> None: logger.info( f"Upserting batch {i}-{i+len(chunk)-1} ({len(chunk)} rows) into Radionuclides" ) + # Upsert on nma_GlobalID (legacy UUID PK, now UNIQUE) stmt = insert_stmt.values(chunk).on_conflict_do_update( - index_elements=["GlobalID"], + index_elements=["nma_GlobalID"], set_={ "thing_id": excluded.thing_id, - "SamplePtID": excluded.SamplePtID, - "SamplePointID": excluded.SamplePointID, + "chemistry_sample_info_id": excluded.chemistry_sample_info_id, + "nma_SamplePtID": excluded.nma_SamplePtID, + "nma_SamplePointID": excluded.nma_SamplePointID, "Analyte": excluded.Analyte, "Symbol": excluded.Symbol, "SampleValue": excluded.SampleValue, @@ -141,9 +166,9 @@ def _transfer_hook(self, session: Session) -> None: "Notes": excluded.Notes, "Volume": excluded.Volume, "VolumeUnit": excluded.VolumeUnit, - "OBJECTID": excluded.OBJECTID, + "nma_OBJECTID": excluded.nma_OBJECTID, "AnalysesAgency": excluded.AnalysesAgency, - "WCLab_ID": excluded.WCLab_ID, + "nma_WCLab_ID": excluded.nma_WCLab_ID, }, ) session.execute(stmt) @@ -181,8 +206,9 @@ def int_val(key: str) -> Optional[int]: if isinstance(analysis_date, datetime): analysis_date = analysis_date.replace(tzinfo=None) - sample_pt_id = self._uuid_val(val("SamplePtID")) - if sample_pt_id is None: + # Get legacy UUID FK + legacy_sample_pt_id = self._uuid_val(val("SamplePtID")) + if legacy_sample_pt_id is None: self._capture_error( val("SamplePtID"), f"Invalid SamplePtID: {val('SamplePtID')}", @@ -190,13 +216,25 @@ def int_val(key: str) -> Optional[int]: ) return None - global_id = self._uuid_val(val("GlobalID")) - thing_id = self._thing_id_by_sample_pt_id.get(sample_pt_id) + # Look up Integer FK and thing_id from cache + cache_entry = self._sample_info_cache.get(legacy_sample_pt_id) + chemistry_sample_info_id = cache_entry[0] if cache_entry else None + thing_id = cache_entry[1] if cache_entry else None + + nma_global_id = self._uuid_val(val("GlobalID")) return { + # Legacy UUID PK -> nma_global_id (unique audit column) + "nma_GlobalID": nma_global_id, + # FKs "thing_id": thing_id, - "SamplePtID": sample_pt_id, - "SamplePointID": val("SamplePointID"), + "chemistry_sample_info_id": chemistry_sample_info_id, + # Legacy ID columns (renamed with nma_ prefix) + "nma_SamplePtID": legacy_sample_pt_id, + "nma_SamplePointID": val("SamplePointID"), + "nma_OBJECTID": val("OBJECTID"), + "nma_WCLab_ID": val("WCLab_ID"), + # Data columns "Analyte": val("Analyte"), "Symbol": val("Symbol"), "SampleValue": float_val("SampleValue"), @@ -207,10 +245,7 @@ def int_val(key: str) -> Optional[int]: "Notes": val("Notes"), "Volume": int_val("Volume"), "VolumeUnit": val("VolumeUnit"), - "OBJECTID": val("OBJECTID"), - "GlobalID": global_id, "AnalysesAgency": val("AnalysesAgency"), - "WCLab_ID": val("WCLab_ID"), } def _uuid_val(self, value: Any) -> Optional[UUID]: @@ -229,26 +264,8 @@ def _dedupe_rows( self, rows: list[dict[str, Any]], key: str ) -> list[dict[str, Any]]: """ - Deduplicate rows within a batch by the given key to avoid ON CONFLICT loops - when inserting into the database. 
- - For any given ``key`` value, only a single row is kept in the returned list. - If multiple rows share the same ``key`` value, the *last* occurrence in - ``rows`` overwrites earlier ones (i.e. "later rows win"), because the - internal mapping is updated on each encounter of that key. - - This behavior is appropriate when: - * The input batch is ordered such that later rows represent the most - recent or authoritative data for a given key, and - * Only one row per key should be written in a single batch to prevent - repeated ON CONFLICT handling for the same key. - - Callers should be aware that this can silently drop earlier rows with the - same key. If preserving all conflicting rows or applying a custom conflict - resolution strategy is important, the caller should: - * Pre-process and consolidate rows before passing them to this method, or - * Implement a different deduplication/merge strategy tailored to their - needs. + Deduplicate rows within a batch by the given key to avoid ON CONFLICT loops. + Later rows win. """ deduped = {} for row in rows: diff --git a/transfers/soil_rock_results.py b/transfers/soil_rock_results.py index 35fa4866..cb13531d 100644 --- a/transfers/soil_rock_results.py +++ b/transfers/soil_rock_results.py @@ -13,6 +13,12 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== +""" +Transfer Soil_Rock_Results from NM_Aquifer to NMA_Soil_Rock_Results. + +Already has Integer PK. Updated for legacy column rename: +- point_id -> nma_point_id +""" from __future__ import annotations @@ -71,12 +77,15 @@ def _transfer_hook(self, session: Session) -> None: def _row_dict(self, row: dict[str, Any]) -> dict[str, Any]: point_id = row.get("Point_ID") return { - "point_id": point_id, - "sample_type": row.get("Sample Type"), - "date_sampled": row.get("Date Sampled"), - "d13c": self._float_val(row.get("d13C")), - "d18o": self._float_val(row.get("d18O")), - "sampled_by": row.get("Sampled by"), + # Legacy ID column (renamed with nma_ prefix) + "nma_Point_ID": point_id, + # Data columns + "Sample Type": row.get("Sample Type"), + "Date Sampled": row.get("Date Sampled"), + "d13C": self._float_val(row.get("d13C")), + "d18O": self._float_val(row.get("d18O")), + "Sampled by": row.get("Sampled by"), + # FK to Thing "thing_id": self._thing_id_cache.get(point_id), } diff --git a/transfers/stratigraphy_legacy.py b/transfers/stratigraphy_legacy.py index 326f6434..82bf8a3a 100644 --- a/transfers/stratigraphy_legacy.py +++ b/transfers/stratigraphy_legacy.py @@ -1,4 +1,12 @@ -"""Transfer Stratigraphy.csv into the NMA_Stratigraphy legacy table.""" +"""Transfer Stratigraphy.csv into the NMA_Stratigraphy legacy table. 
+ +Updated for Integer PK schema: +- id: Integer PK (autoincrement, generated by DB) +- nma_global_id: Legacy UUID PK (GlobalID), UNIQUE for audit +- nma_well_id: Legacy WellID UUID +- nma_point_id: Legacy PointID string +- nma_object_id: Legacy OBJECTID, UNIQUE +""" from __future__ import annotations @@ -63,11 +71,12 @@ def _transfer_hook(self, session: Session) -> None: # type: ignore[override] start + len(chunk) - 1, len(chunk), ) + # Upsert on nma_GlobalID (legacy UUID PK, now UNIQUE) stmt = insert_stmt.values(chunk).on_conflict_do_update( - index_elements=["GlobalID"], + index_elements=["nma_GlobalID"], set_={ - "WellID": excluded.WellID, - "PointID": excluded.PointID, + "nma_WellID": excluded.nma_WellID, + "nma_PointID": excluded.nma_PointID, "thing_id": excluded.thing_id, "StratTop": excluded.StratTop, "StratBottom": excluded.StratBottom, @@ -77,7 +86,7 @@ def _transfer_hook(self, session: Session) -> None: # type: ignore[override] "ContributingUnit": excluded.ContributingUnit, "StratSource": excluded.StratSource, "StratNotes": excluded.StratNotes, - "OBJECTID": excluded.OBJECTID, + "nma_OBJECTID": excluded.nma_OBJECTID, }, ) session.execute(stmt) @@ -104,16 +113,21 @@ def _row_dict(self, row: pd.Series) -> Dict[str, Any] | None: self._capture_error(point_id, "No Thing found for PointID", "thing_id") return None - global_id = self._uuid_value(getattr(row, "GlobalID", None)) - if global_id is None: + nma_global_id = self._uuid_value(getattr(row, "GlobalID", None)) + if nma_global_id is None: self._capture_error(point_id, "Invalid GlobalID", "GlobalID") return None return { - "GlobalID": global_id, - "WellID": self._uuid_value(getattr(row, "WellID", None)), - "PointID": point_id, + # Legacy UUID PK -> nma_global_id (unique audit column) + "nma_GlobalID": nma_global_id, + # Legacy ID columns (renamed with nma_ prefix) + "nma_WellID": self._uuid_value(getattr(row, "WellID", None)), + "nma_PointID": point_id, + "nma_OBJECTID": self._int_value(getattr(row, "OBJECTID", None)), + # FK to Thing "thing_id": thing_id, + # Data columns "StratTop": self._float_value(getattr(row, "StratTop", None)), "StratBottom": self._float_value(getattr(row, "StratBottom", None)), "UnitIdentifier": self._string_value(getattr(row, "UnitIdentifier", None)), @@ -126,7 +140,6 @@ def _row_dict(self, row: pd.Series) -> Dict[str, Any] | None: ), "StratSource": self._string_value(getattr(row, "StratSource", None)), "StratNotes": self._string_value(getattr(row, "StratNotes", None)), - "OBJECTID": self._int_value(getattr(row, "OBJECTID", None)), } def _uuid_value(self, value: Any) -> UUID | None: From 2587dc6b8c350209df8eb999c7eb5af466c51242 Mon Sep 17 00:00:00 2001 From: Kimball Bighorse Date: Wed, 28 Jan 2026 01:27:27 -0800 Subject: [PATCH 17/22] refactor(admin): update views for Integer PK schema Update all NMA admin views to use Integer primary keys and nma_ prefixed field names for display. 
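As an illustrative sketch only (the class name and the field list below are placeholders, not any specific view), each view now declares the Integer PK and labels the legacy columns roughly like this:

    from admin.views.base import OcotilloModelView

    class ExampleLegacyAdmin(OcotilloModelView):
        # Integer surrogate key replaces the legacy UUID primary key
        pk_attr = "id"
        pk_type = int

        # Legacy identifiers keep the nma_ prefix and a "(Legacy)" label
        list_fields = ["id", "nma_global_id", "analyte", "sample_value"]
        field_labels = {
            "id": "ID",
            "nma_global_id": "NMA GlobalID (Legacy)",
        }
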
Changes to all views: - Set pk_attr = "id" and pk_type = int - Update list_fields, fields, sortable_fields, searchable_fields with nma_ prefix - Update field_labels with "(Legacy)" suffix for audit columns Files updated: - chemistry_sampleinfo.py - hydraulicsdata.py - stratigraphy.py - radionuclides.py - minor_trace_chemistry.py - field_parameters.py - soil_rock_results.py Co-Authored-By: Claude Opus 4.5 --- admin/views/chemistry_sampleinfo.py | 68 +++++++++++++++++++++----- admin/views/field_parameters.py | 71 ++++++++++++++++++---------- admin/views/hydraulicsdata.py | 51 +++++++++++++------- admin/views/minor_trace_chemistry.py | 33 +++++++++---- admin/views/radionuclides.py | 65 ++++++++++++++++--------- admin/views/soil_rock_results.py | 19 +++++--- admin/views/stratigraphy.py | 48 ++++++++++++------- 7 files changed, 246 insertions(+), 109 deletions(-) diff --git a/admin/views/chemistry_sampleinfo.py b/admin/views/chemistry_sampleinfo.py index f791e26e..5675beb8 100644 --- a/admin/views/chemistry_sampleinfo.py +++ b/admin/views/chemistry_sampleinfo.py @@ -15,6 +15,14 @@ # =============================================================================== """ ChemistrySampleInfoAdmin view for legacy Chemistry_SampleInfo. + +Updated for Integer PK schema: +- id: Integer PK (autoincrement) +- nma_sample_pt_id: Legacy UUID PK (SamplePtID), UNIQUE for audit +- nma_wclab_id: Legacy WCLab_ID +- nma_sample_point_id: Legacy SamplePointID +- nma_object_id: Legacy OBJECTID, UNIQUE +- nma_location_id: Legacy LocationId UUID """ from admin.views.base import OcotilloModelView @@ -31,13 +39,18 @@ class ChemistrySampleInfoAdmin(OcotilloModelView): label = "Chemistry Sample Info" icon = "fa fa-flask" + # Integer PK + pk_attr = "id" + pk_type = int + # ========== List View ========== sortable_fields = [ - "sample_pt_id", - "object_id", - "sample_point_id", - "wclab_id", + "id", + "nma_sample_pt_id", + "nma_object_id", + "nma_sample_point_id", + "nma_wclab_id", "collection_date", "sample_type", "data_source", @@ -48,9 +61,9 @@ class ChemistrySampleInfoAdmin(OcotilloModelView): fields_default_sort = [("collection_date", True)] searchable_fields = [ - "sample_point_id", - "sample_pt_id", - "wclab_id", + "nma_sample_point_id", + "nma_sample_pt_id", + "nma_wclab_id", "collected_by", "analyses_agency", "sample_notes", @@ -70,10 +83,13 @@ class ChemistrySampleInfoAdmin(OcotilloModelView): # ========== Form View ========== fields = [ - "sample_pt_id", - "sample_point_id", - "object_id", - "wclab_id", + "id", + "nma_sample_pt_id", + "nma_sample_point_id", + "nma_object_id", + "nma_wclab_id", + "nma_location_id", + "thing_id", "collection_date", "collection_method", "collected_by", @@ -91,12 +107,38 @@ class ChemistrySampleInfoAdmin(OcotilloModelView): ] exclude_fields_from_create = [ - "object_id", + "id", + "nma_object_id", ] exclude_fields_from_edit = [ - "object_id", + "id", + "nma_object_id", ] + field_labels = { + "id": "ID", + "nma_sample_pt_id": "NMA SamplePtID (Legacy)", + "nma_sample_point_id": "NMA SamplePointID (Legacy)", + "nma_object_id": "NMA OBJECTID (Legacy)", + "nma_wclab_id": "NMA WCLab_ID (Legacy)", + "nma_location_id": "NMA LocationId (Legacy)", + "thing_id": "Thing ID", + "collection_date": "Collection Date", + "collection_method": "Collection Method", + "collected_by": "Collected By", + "analyses_agency": "Analyses Agency", + "sample_type": "Sample Type", + "sample_material_not_h2o": "Sample Material (Not H2O)", + "water_type": "Water Type", + "study_sample": "Study Sample", + "data_source": 
"Data Source", + "data_quality": "Data Quality", + "public_release": "Public Release", + "added_day_to_date": "Added Day to Date", + "added_month_day_to_date": "Added Month/Day to Date", + "sample_notes": "Sample Notes", + } + # ============= EOF ============================================= diff --git a/admin/views/field_parameters.py b/admin/views/field_parameters.py index c21542fd..ac23f76b 100644 --- a/admin/views/field_parameters.py +++ b/admin/views/field_parameters.py @@ -15,6 +15,15 @@ # =============================================================================== """ FieldParametersAdmin view for legacy NMA_FieldParameters. + +Updated for Integer PK schema: +- id: Integer PK (autoincrement) +- nma_global_id: Legacy UUID PK (GlobalID), UNIQUE for audit +- chemistry_sample_info_id: Integer FK to NMA_Chemistry_SampleInfo.id +- nma_sample_pt_id: Legacy UUID FK (SamplePtID) for audit +- nma_sample_point_id: Legacy SamplePointID string +- nma_object_id: Legacy OBJECTID +- nma_wclab_id: Legacy WCLab_ID """ from admin.views.base import OcotilloModelView @@ -31,6 +40,10 @@ class FieldParametersAdmin(OcotilloModelView): label = "Field Parameters" icon = "fa fa-tachometer" + # Integer PK + pk_attr = "id" + pk_type = int + can_create = False can_edit = False can_delete = False @@ -38,41 +51,45 @@ class FieldParametersAdmin(OcotilloModelView): # ========== List View ========== list_fields = [ - "global_id", - "sample_pt_id", - "sample_point_id", + "id", + "nma_global_id", + "chemistry_sample_info_id", + "nma_sample_pt_id", + "nma_sample_point_id", "field_parameter", "sample_value", "units", "analyses_agency", - "wc_lab_id", - "object_id", + "nma_wclab_id", + "nma_object_id", ] sortable_fields = [ - "global_id", - "sample_pt_id", - "sample_point_id", + "id", + "nma_global_id", + "chemistry_sample_info_id", + "nma_sample_pt_id", + "nma_sample_point_id", "field_parameter", "sample_value", "units", "notes", "analyses_agency", - "wc_lab_id", - "object_id", + "nma_wclab_id", + "nma_object_id", ] - fields_default_sort = [("sample_point_id", True)] + fields_default_sort = [("nma_sample_point_id", True)] searchable_fields = [ - "global_id", - "sample_pt_id", - "sample_point_id", + "nma_global_id", + "nma_sample_pt_id", + "nma_sample_point_id", "field_parameter", "units", "notes", "analyses_agency", - "wc_lab_id", + "nma_wclab_id", ] page_size = 50 @@ -81,29 +98,33 @@ class FieldParametersAdmin(OcotilloModelView): # ========== Form View ========== fields = [ - "global_id", - "sample_pt_id", - "sample_point_id", + "id", + "nma_global_id", + "chemistry_sample_info_id", + "nma_sample_pt_id", + "nma_sample_point_id", "field_parameter", "sample_value", "units", "notes", - "object_id", + "nma_object_id", "analyses_agency", - "wc_lab_id", + "nma_wclab_id", ] field_labels = { - "global_id": "GlobalID", - "sample_pt_id": "SamplePtID", - "sample_point_id": "SamplePointID", + "id": "ID", + "nma_global_id": "NMA GlobalID (Legacy)", + "chemistry_sample_info_id": "Chemistry Sample Info ID", + "nma_sample_pt_id": "NMA SamplePtID (Legacy)", + "nma_sample_point_id": "NMA SamplePointID (Legacy)", "field_parameter": "FieldParameter", "sample_value": "SampleValue", "units": "Units", "notes": "Notes", - "object_id": "OBJECTID", + "nma_object_id": "NMA OBJECTID (Legacy)", "analyses_agency": "AnalysesAgency", - "wc_lab_id": "WCLab_ID", + "nma_wclab_id": "NMA WCLab_ID (Legacy)", } diff --git a/admin/views/hydraulicsdata.py b/admin/views/hydraulicsdata.py index d081dbce..9723cbb3 100644 --- a/admin/views/hydraulicsdata.py +++ 
b/admin/views/hydraulicsdata.py @@ -15,6 +15,13 @@ # =============================================================================== """ HydraulicsDataAdmin view for legacy NMA_HydraulicsData. + +Updated for Integer PK schema: +- id: Integer PK (autoincrement) +- nma_global_id: Legacy UUID PK (GlobalID), UNIQUE for audit +- nma_well_id: Legacy WellID UUID +- nma_point_id: Legacy PointID string +- nma_object_id: Legacy OBJECTID, UNIQUE """ from admin.views.base import OcotilloModelView @@ -31,6 +38,10 @@ class HydraulicsDataAdmin(OcotilloModelView): label = "Hydraulics Data" icon = "fa fa-tint" + # Integer PK + pk_attr = "id" + pk_type = int + can_create = False can_edit = False can_delete = False @@ -38,9 +49,10 @@ class HydraulicsDataAdmin(OcotilloModelView): # ========== List View ========== list_fields = [ - "global_id", - "well_id", - "point_id", + "id", + "nma_global_id", + "nma_well_id", + "nma_point_id", "thing_id", "hydraulic_unit", "hydraulic_unit_type", @@ -49,13 +61,14 @@ class HydraulicsDataAdmin(OcotilloModelView): "t_ft2_d", "k_darcy", "data_source", - "object_id", + "nma_object_id", ] sortable_fields = [ - "global_id", - "well_id", - "point_id", + "id", + "nma_global_id", + "nma_well_id", + "nma_point_id", "thing_id", "hydraulic_unit", "hydraulic_unit_type", @@ -64,12 +77,12 @@ class HydraulicsDataAdmin(OcotilloModelView): "t_ft2_d", "k_darcy", "data_source", - "object_id", + "nma_object_id", ] searchable_fields = [ - "global_id", - "point_id", + "nma_global_id", + "nma_point_id", "hydraulic_unit", "hydraulic_remarks", "data_source", @@ -81,9 +94,10 @@ class HydraulicsDataAdmin(OcotilloModelView): # ========== Form View ========== fields = [ - "global_id", - "well_id", - "point_id", + "id", + "nma_global_id", + "nma_well_id", + "nma_point_id", "thing_id", "hydraulic_unit", "hydraulic_unit_type", @@ -102,13 +116,14 @@ class HydraulicsDataAdmin(OcotilloModelView): "p_decimal_fraction", "k_darcy", "data_source", - "object_id", + "nma_object_id", ] field_labels = { - "global_id": "GlobalID", - "well_id": "WellID", - "point_id": "PointID", + "id": "ID", + "nma_global_id": "NMA GlobalID (Legacy)", + "nma_well_id": "NMA WellID (Legacy)", + "nma_point_id": "NMA PointID (Legacy)", "thing_id": "Thing ID", "hydraulic_unit": "HydraulicUnit", "hydraulic_unit_type": "HydraulicUnitType", @@ -127,7 +142,7 @@ class HydraulicsDataAdmin(OcotilloModelView): "p_decimal_fraction": "P (decimal fraction)", "k_darcy": "k (darcy)", "data_source": "Data Source", - "object_id": "OBJECTID", + "nma_object_id": "NMA OBJECTID (Legacy)", } diff --git a/admin/views/minor_trace_chemistry.py b/admin/views/minor_trace_chemistry.py index 3db6e8a0..0c51e609 100644 --- a/admin/views/minor_trace_chemistry.py +++ b/admin/views/minor_trace_chemistry.py @@ -15,9 +15,13 @@ # =============================================================================== """ MinorTraceChemistryAdmin view for legacy NMA_MinorTraceChemistry. 
-""" -import uuid +Updated for Integer PK schema: +- id: Integer PK (autoincrement) +- nma_global_id: Legacy UUID PK (GlobalID), UNIQUE for audit +- chemistry_sample_info_id: Integer FK to NMA_Chemistry_SampleInfo.id +- nma_chemistry_sample_info_uuid: Legacy UUID FK for audit +""" from starlette.requests import Request from starlette_admin.fields import HasOne @@ -36,8 +40,10 @@ class MinorTraceChemistryAdmin(OcotilloModelView): name = "Minor Trace Chemistry" label = "Minor Trace Chemistry" icon = "fa fa-flask" - pk_attr = "global_id" - pk_type = uuid.UUID + + # Integer PK + pk_attr = "id" + pk_type = int def can_create(self, request: Request) -> bool: return False @@ -51,8 +57,10 @@ def can_delete(self, request: Request) -> bool: # ========== List View ========== list_fields = [ - "global_id", + "id", + "nma_global_id", HasOne("chemistry_sample_info", identity="n-m-a_-chemistry_-sample-info"), + "nma_chemistry_sample_info_uuid", "analyte", "sample_value", "units", @@ -62,7 +70,9 @@ def can_delete(self, request: Request) -> bool: ] sortable_fields = [ - "global_id", + "id", + "nma_global_id", + "chemistry_sample_info_id", "analyte", "sample_value", "units", @@ -74,7 +84,7 @@ def can_delete(self, request: Request) -> bool: fields_default_sort = [("analysis_date", True)] searchable_fields = [ - "global_id", + "nma_global_id", "analyte", "symbol", "analysis_method", @@ -88,8 +98,10 @@ def can_delete(self, request: Request) -> bool: # ========== Form View ========== fields = [ - "global_id", + "id", + "nma_global_id", HasOne("chemistry_sample_info", identity="n-m-a_-chemistry_-sample-info"), + "nma_chemistry_sample_info_uuid", "analyte", "symbol", "sample_value", @@ -104,8 +116,11 @@ def can_delete(self, request: Request) -> bool: ] field_labels = { - "global_id": "GlobalID", + "id": "ID", + "nma_global_id": "NMA GlobalID (Legacy)", "chemistry_sample_info": "Chemistry Sample Info", + "chemistry_sample_info_id": "Chemistry Sample Info ID", + "nma_chemistry_sample_info_uuid": "NMA Chemistry Sample Info UUID (Legacy)", "analyte": "Analyte", "symbol": "Symbol", "sample_value": "Sample Value", diff --git a/admin/views/radionuclides.py b/admin/views/radionuclides.py index be990c42..9c76b036 100644 --- a/admin/views/radionuclides.py +++ b/admin/views/radionuclides.py @@ -15,6 +15,15 @@ # =============================================================================== """ RadionuclidesAdmin view for legacy NMA_Radionuclides. 
+ +Updated for Integer PK schema: +- id: Integer PK (autoincrement) +- nma_global_id: Legacy UUID PK (GlobalID), UNIQUE for audit +- chemistry_sample_info_id: Integer FK to NMA_Chemistry_SampleInfo.id +- nma_sample_pt_id: Legacy UUID FK (SamplePtID) for audit +- nma_sample_point_id: Legacy SamplePointID string +- nma_object_id: Legacy OBJECTID, UNIQUE +- nma_wclab_id: Legacy WCLab_ID """ from admin.views.base import OcotilloModelView @@ -31,6 +40,10 @@ class RadionuclidesAdmin(OcotilloModelView): label = "Radionuclides" icon = "fa fa-radiation" + # Integer PK + pk_attr = "id" + pk_type = int + can_create = False can_edit = False can_delete = False @@ -38,9 +51,11 @@ class RadionuclidesAdmin(OcotilloModelView): # ========== List View ========== list_fields = [ - "global_id", - "sample_pt_id", - "sample_point_id", + "id", + "nma_global_id", + "chemistry_sample_info_id", + "nma_sample_pt_id", + "nma_sample_point_id", "thing_id", "analyte", "sample_value", @@ -50,32 +65,34 @@ class RadionuclidesAdmin(OcotilloModelView): ] sortable_fields = [ - "global_id", - "sample_pt_id", - "sample_point_id", + "id", + "nma_global_id", + "chemistry_sample_info_id", + "nma_sample_pt_id", + "nma_sample_point_id", "thing_id", "analyte", "sample_value", "units", "analysis_date", "analyses_agency", - "wclab_id", - "object_id", + "nma_wclab_id", + "nma_object_id", ] fields_default_sort = [("analysis_date", True)] searchable_fields = [ - "global_id", - "sample_pt_id", - "sample_point_id", + "nma_global_id", + "nma_sample_pt_id", + "nma_sample_point_id", "analyte", "symbol", "analysis_method", "analysis_date", "notes", "analyses_agency", - "wclab_id", + "nma_wclab_id", ] page_size = 50 @@ -84,9 +101,11 @@ class RadionuclidesAdmin(OcotilloModelView): # ========== Form View ========== fields = [ - "global_id", - "sample_pt_id", - "sample_point_id", + "id", + "nma_global_id", + "chemistry_sample_info_id", + "nma_sample_pt_id", + "nma_sample_point_id", "thing_id", "analyte", "symbol", @@ -98,15 +117,17 @@ class RadionuclidesAdmin(OcotilloModelView): "notes", "volume", "volume_unit", - "object_id", + "nma_object_id", "analyses_agency", - "wclab_id", + "nma_wclab_id", ] field_labels = { - "global_id": "GlobalID", - "sample_pt_id": "SamplePtID", - "sample_point_id": "SamplePointID", + "id": "ID", + "nma_global_id": "NMA GlobalID (Legacy)", + "chemistry_sample_info_id": "Chemistry Sample Info ID", + "nma_sample_pt_id": "NMA SamplePtID (Legacy)", + "nma_sample_point_id": "NMA SamplePointID (Legacy)", "thing_id": "Thing ID", "analyte": "Analyte", "symbol": "Symbol", @@ -118,9 +139,9 @@ class RadionuclidesAdmin(OcotilloModelView): "notes": "Notes", "volume": "Volume", "volume_unit": "VolumeUnit", - "object_id": "OBJECTID", + "nma_object_id": "NMA OBJECTID (Legacy)", "analyses_agency": "AnalysesAgency", - "wclab_id": "WCLab_ID", + "nma_wclab_id": "NMA WCLab_ID (Legacy)", } diff --git a/admin/views/soil_rock_results.py b/admin/views/soil_rock_results.py index 00786058..94780498 100644 --- a/admin/views/soil_rock_results.py +++ b/admin/views/soil_rock_results.py @@ -1,5 +1,8 @@ """ SoilRockResultsAdmin view for legacy NMA_Soil_Rock_Results. + +Already has Integer PK. 
Updated for legacy column rename: +- point_id -> nma_point_id """ from admin.views.base import OcotilloModelView @@ -15,6 +18,10 @@ class SoilRockResultsAdmin(OcotilloModelView): label = "NMA Soil Rock Results" icon = "fa fa-mountain" + # Integer PK (already correct) + pk_attr = "id" + pk_type = int + # Pagination page_size = 50 page_size_options = [25, 50, 100, 200] @@ -22,7 +29,7 @@ class SoilRockResultsAdmin(OcotilloModelView): # ========== List View ========== list_fields = [ "id", - "point_id", + "nma_point_id", "sample_type", "date_sampled", "d13c", @@ -33,11 +40,11 @@ class SoilRockResultsAdmin(OcotilloModelView): sortable_fields = [ "id", - "point_id", + "nma_point_id", ] searchable_fields = [ - "point_id", + "nma_point_id", "sample_type", "date_sampled", "sampled_by", @@ -48,7 +55,7 @@ class SoilRockResultsAdmin(OcotilloModelView): # ========== Detail View ========== fields = [ "id", - "point_id", + "nma_point_id", "sample_type", "date_sampled", "d13c", @@ -59,8 +66,8 @@ class SoilRockResultsAdmin(OcotilloModelView): # ========== Legacy Field Labels ========== field_labels = { - "id": "id", - "point_id": "Point_ID", + "id": "ID", + "nma_point_id": "NMA Point_ID (Legacy)", "sample_type": "Sample Type", "date_sampled": "Date Sampled", "d13c": "d13C", diff --git a/admin/views/stratigraphy.py b/admin/views/stratigraphy.py index 9f2526f0..0bbd3223 100644 --- a/admin/views/stratigraphy.py +++ b/admin/views/stratigraphy.py @@ -1,5 +1,12 @@ """ StratigraphyAdmin view for legacy stratigraphy. + +Updated for Integer PK schema: +- id: Integer PK (autoincrement) +- nma_global_id: Legacy UUID PK (GlobalID), UNIQUE for audit +- nma_well_id: Legacy WellID UUID +- nma_point_id: Legacy PointID string +- nma_object_id: Legacy OBJECTID, UNIQUE """ from admin.views.base import OcotilloModelView @@ -15,6 +22,10 @@ class StratigraphyAdmin(OcotilloModelView): label = "NMA Stratigraphy" icon = "fa fa-layer-group" + # Integer PK + pk_attr = "id" + pk_type = int + # Pagination page_size = 50 page_size_options = [25, 50, 100, 200] @@ -22,16 +33,17 @@ class StratigraphyAdmin(OcotilloModelView): # ========== List View ========== sortable_fields = [ - "global_id", - "object_id", - "point_id", + "id", + "nma_global_id", + "nma_object_id", + "nma_point_id", ] - fields_default_sort = [("point_id", False), ("strat_top", False)] + fields_default_sort = [("nma_point_id", False), ("strat_top", False)] searchable_fields = [ - "point_id", - "global_id", + "nma_point_id", + "nma_global_id", "unit_identifier", "lithology", "lithologic_modifier", @@ -43,9 +55,10 @@ class StratigraphyAdmin(OcotilloModelView): # ========== Form View ========== fields = [ - "global_id", - "well_id", - "point_id", + "id", + "nma_global_id", + "nma_well_id", + "nma_point_id", "thing_id", "strat_top", "strat_bottom", @@ -55,22 +68,25 @@ class StratigraphyAdmin(OcotilloModelView): "contributing_unit", "strat_source", "strat_notes", - "object_id", + "nma_object_id", ] exclude_fields_from_create = [ - "object_id", + "id", + "nma_object_id", ] exclude_fields_from_edit = [ - "object_id", + "id", + "nma_object_id", ] # ========== Legacy Field Labels ========== field_labels = { - "global_id": "GlobalID", - "well_id": "WellID", - "point_id": "PointID", + "id": "ID", + "nma_global_id": "NMA GlobalID (Legacy)", + "nma_well_id": "NMA WellID (Legacy)", + "nma_point_id": "NMA PointID (Legacy)", "thing_id": "ThingID", "strat_top": "StratTop", "strat_bottom": "StratBottom", @@ -80,5 +96,5 @@ class StratigraphyAdmin(OcotilloModelView): "contributing_unit": 
"ContributingUnit", "strat_source": "StratSource", "strat_notes": "StratNotes", - "object_id": "OBJECTID", + "nma_object_id": "NMA OBJECTID (Legacy)", } From 620afd044e7d86ee9aa0de1c22a61147d8933918 Mon Sep 17 00:00:00 2001 From: Kimball Bighorse Date: Wed, 28 Jan 2026 01:27:45 -0800 Subject: [PATCH 18/22] test(unit): update NMA legacy model tests for Integer PK schema Update all unit tests to use Integer PK (id) and nma_ prefixed columns. Add new tests for Integer PK validation and unique constraints. Changes: - Replace global_id, sample_pt_id, etc. with nma_global_id, nma_sample_pt_id - Replace UUID PK assertions with Integer PK assertions - Use chemistry_sample_info_id (Integer FK) instead of sample_pt_id (UUID FK) - Add tests for Integer PK column type and unique constraints - Update admin view tests for new field names and labels Files updated: - test_stratigraphy_legacy.py - test_associated_data_legacy.py - test_radionuclides_legacy.py - test_field_parameters_legacy.py - test_major_chemistry_legacy.py - test_chemistry_sampleinfo_legacy.py - test_hydraulics_data_legacy.py - test_soil_rock_results_legacy.py - test_admin_minor_trace_chemistry.py Co-Authored-By: Claude Opus 4.5 --- tests/test_admin_minor_trace_chemistry.py | 34 ++++- tests/test_associated_data_legacy.py | 68 ++++++--- tests/test_chemistry_sampleinfo_legacy.py | 104 +++++++------ tests/test_field_parameters_legacy.py | 174 ++++++++++++--------- tests/test_hydraulics_data_legacy.py | 125 +++++++-------- tests/test_major_chemistry_legacy.py | 168 ++++++++++++-------- tests/test_radionuclides_legacy.py | 177 +++++++++++++--------- tests/test_soil_rock_results_legacy.py | 34 +++-- tests/test_stratigraphy_legacy.py | 42 ++++- 9 files changed, 569 insertions(+), 357 deletions(-) diff --git a/tests/test_admin_minor_trace_chemistry.py b/tests/test_admin_minor_trace_chemistry.py index 9777d0c8..4ec1705d 100644 --- a/tests/test_admin_minor_trace_chemistry.py +++ b/tests/test_admin_minor_trace_chemistry.py @@ -18,6 +18,12 @@ These tests verify the admin view is properly configured without requiring a running server or database. 
+ +Updated for Integer PK schema: +- id: Integer PK (autoincrement) +- nma_global_id: Legacy GlobalID UUID (UNIQUE) +- chemistry_sample_info_id: Integer FK to NMA_Chemistry_SampleInfo.id +- nma_chemistry_sample_info_uuid: Legacy UUID FK (for audit) """ import pytest @@ -106,7 +112,8 @@ def test_list_fields_include_required_columns(self, view): field_names.append(getattr(f, "name", str(f))) required_columns = [ - "global_id", + "id", # Integer PK + "nma_global_id", # Legacy UUID "chemistry_sample_info", # HasOne relationship to parent "analyte", "sample_value", @@ -145,7 +152,9 @@ def test_form_includes_all_chemistry_fields(self): # Check the class-level configuration # Note: chemistry_sample_info is a HasOne field, not a string expected_string_fields = [ - "global_id", + "id", # Integer PK + "nma_global_id", # Legacy GlobalID + "nma_chemistry_sample_info_uuid", # Legacy UUID FK "analyte", "symbol", "sample_value", @@ -175,15 +184,34 @@ def test_form_includes_all_chemistry_fields(self): def test_field_labels_are_human_readable(self, view): """Field labels should be human-readable.""" - assert view.field_labels.get("global_id") == "GlobalID" + assert view.field_labels.get("id") == "ID" + assert view.field_labels.get("nma_global_id") == "NMA GlobalID (Legacy)" assert view.field_labels.get("sample_value") == "Sample Value" assert view.field_labels.get("analysis_date") == "Analysis Date" def test_searchable_fields_include_key_fields(self, view): """Searchable fields should include commonly searched columns.""" + assert "nma_global_id" in view.searchable_fields assert "analyte" in view.searchable_fields assert "symbol" in view.searchable_fields assert "analyses_agency" in view.searchable_fields +class TestMinorTraceChemistryAdminIntegerPK: + """Tests for Integer PK configuration.""" + + @pytest.fixture + def view(self): + """Create a MinorTraceChemistryAdmin instance for testing.""" + return MinorTraceChemistryAdmin(NMA_MinorTraceChemistry) + + def test_pk_attr_is_id(self, view): + """Primary key attribute should be 'id'.""" + assert view.pk_attr == "id" + + def test_pk_type_is_int(self, view): + """Primary key type should be int.""" + assert view.pk_type == int + + # ============= EOF ============================================= diff --git a/tests/test_associated_data_legacy.py b/tests/test_associated_data_legacy.py index 4b32615a..6448feca 100644 --- a/tests/test_associated_data_legacy.py +++ b/tests/test_associated_data_legacy.py @@ -17,13 +17,13 @@ Unit tests for NMA_AssociatedData legacy model. These tests verify the migration of columns from the legacy NMA_AssociatedData table. 
-Migrated columns: -- LocationId -> location_id -- PointID -> point_id -- AssocID -> assoc_id -- Notes -> notes -- Formation -> formation -- OBJECTID -> object_id + +Updated for Integer PK schema: +- id: Integer PK (autoincrement) +- nma_assoc_id: Legacy AssocID UUID (UNIQUE) +- nma_location_id: Legacy LocationId UUID (UNIQUE) +- nma_point_id: Legacy PointID string +- nma_object_id: Legacy OBJECTID (UNIQUE) """ from uuid import uuid4 @@ -36,24 +36,25 @@ def test_create_associated_data_all_fields(water_well_thing): """Test creating an associated data record with all fields.""" with session_ctx() as session: record = NMA_AssociatedData( - location_id=uuid4(), - point_id="AA-0001", - assoc_id=uuid4(), + nma_location_id=uuid4(), + nma_point_id="AA-0001", + nma_assoc_id=uuid4(), notes="Legacy notes", formation="TEST", - object_id=42, + nma_object_id=42, thing_id=water_well_thing.id, ) session.add(record) session.commit() session.refresh(record) - assert record.assoc_id is not None - assert record.location_id is not None - assert record.point_id == "AA-0001" + assert record.id is not None # Integer PK auto-generated + assert record.nma_assoc_id is not None + assert record.nma_location_id is not None + assert record.nma_point_id == "AA-0001" assert record.notes == "Legacy notes" assert record.formation == "TEST" - assert record.object_id == 42 + assert record.nma_object_id == 42 assert record.thing_id == water_well_thing.id session.delete(record) @@ -64,18 +65,19 @@ def test_create_associated_data_minimal(water_well_thing): """Test creating an associated data record with required fields only.""" with session_ctx() as session: well = session.merge(water_well_thing) - record = NMA_AssociatedData(assoc_id=uuid4(), thing_id=well.id) + record = NMA_AssociatedData(nma_assoc_id=uuid4(), thing_id=well.id) session.add(record) session.commit() session.refresh(record) - assert record.assoc_id is not None + assert record.id is not None # Integer PK auto-generated + assert record.nma_assoc_id is not None assert record.thing_id == well.id - assert record.location_id is None - assert record.point_id is None + assert record.nma_location_id is None + assert record.nma_point_id is None assert record.notes is None assert record.formation is None - assert record.object_id is None + assert record.nma_object_id is None session.delete(record) session.commit() @@ -90,8 +92,8 @@ def test_associated_data_validator_rejects_none_thing_id(): with pytest.raises(ValueError, match="requires a parent Thing"): NMA_AssociatedData( - assoc_id=uuid4(), - point_id="ORPHAN-TEST", + nma_assoc_id=uuid4(), + nma_point_id="ORPHAN-TEST", thing_id=None, ) @@ -114,8 +116,8 @@ def test_associated_data_back_populates_thing(water_well_thing): with session_ctx() as session: well = session.merge(water_well_thing) record = NMA_AssociatedData( - assoc_id=uuid4(), - point_id="BPASSOC01", # Max 10 chars + nma_assoc_id=uuid4(), + nma_point_id="BPASSOC01", # Max 10 chars thing_id=well.id, ) session.add(record) @@ -129,4 +131,22 @@ def test_associated_data_back_populates_thing(water_well_thing): session.commit() +# ===================== Integer PK tests ========================== + + +def test_associated_data_has_integer_pk(): + """NMA_AssociatedData.id is Integer PK.""" + from sqlalchemy import Integer + + col = NMA_AssociatedData.__table__.c.id + assert col.primary_key is True + assert isinstance(col.type, Integer) + + +def test_associated_data_nma_assoc_id_is_unique(): + """NMA_AssociatedData.nma_assoc_id is UNIQUE.""" + col = 
NMA_AssociatedData.__table__.c.nma_assoc_id + assert col.unique is True + + # ============= EOF ============================================= diff --git a/tests/test_chemistry_sampleinfo_legacy.py b/tests/test_chemistry_sampleinfo_legacy.py index 2648befc..b48a2b5c 100644 --- a/tests/test_chemistry_sampleinfo_legacy.py +++ b/tests/test_chemistry_sampleinfo_legacy.py @@ -17,25 +17,14 @@ Unit tests for NMA_Chemistry_SampleInfo legacy model. These tests verify the migration of columns from the legacy Chemistry_SampleInfo table. -Migrated columns: -- OBJECTID -> object_id -- SamplePointID -> sample_point_id -- SamplePtID -> sample_pt_id -- WCLab_ID -> wclab_id -- CollectionDate -> collection_date -- CollectionMethod -> collection_method -- CollectedBy -> collected_by -- AnalysesAgency -> analyses_agency -- SampleType -> sample_type -- SampleMaterialNotH2O -> sample_material_not_h2o -- WaterType -> water_type -- StudySample -> study_sample -- DataSource -> data_source -- DataQuality -> data_quality -- PublicRelease -> public_release -- AddedDaytoDate -> added_day_to_date -- AddedMonthDaytoDate -> added_month_day_to_date -- SampleNotes -> sample_notes + +Updated for Integer PK schema: +- id: Integer PK (autoincrement) +- nma_sample_pt_id: Legacy SamplePtID UUID (UNIQUE) +- nma_sample_point_id: Legacy SamplePointID string +- nma_wclab_id: Legacy WCLab_ID string +- nma_location_id: Legacy LocationId UUID +- nma_object_id: Legacy OBJECTID (UNIQUE) """ from datetime import datetime @@ -58,10 +47,10 @@ def test_create_chemistry_sampleinfo_all_fields(water_well_thing): """Test creating a chemistry sample info record with all fields.""" with session_ctx() as session: record = NMA_Chemistry_SampleInfo( - sample_pt_id=_next_sample_pt_id(), - sample_point_id=_next_sample_point_id(), + nma_sample_pt_id=_next_sample_pt_id(), + nma_sample_point_id=_next_sample_point_id(), thing_id=water_well_thing.id, - wclab_id="LAB-123", + nma_wclab_id="LAB-123", collection_date=datetime(2024, 1, 1, 10, 30, 0), collection_method="Grab", collected_by="Tech", @@ -81,9 +70,10 @@ def test_create_chemistry_sampleinfo_all_fields(water_well_thing): session.commit() session.refresh(record) - assert record.sample_pt_id is not None - assert record.sample_point_id is not None - assert record.wclab_id == "LAB-123" + assert record.id is not None # Integer PK auto-generated + assert record.nma_sample_pt_id is not None + assert record.nma_sample_point_id is not None + assert record.nma_wclab_id == "LAB-123" assert record.collection_date == datetime(2024, 1, 1, 10, 30, 0) assert record.sample_material_not_h2o == "Yes" assert record.study_sample == "Yes" @@ -96,16 +86,17 @@ def test_create_chemistry_sampleinfo_minimal(water_well_thing): """Test creating a chemistry sample info record with minimal fields.""" with session_ctx() as session: record = NMA_Chemistry_SampleInfo( - sample_pt_id=_next_sample_pt_id(), - sample_point_id=_next_sample_point_id(), + nma_sample_pt_id=_next_sample_pt_id(), + nma_sample_point_id=_next_sample_point_id(), thing_id=water_well_thing.id, ) session.add(record) session.commit() session.refresh(record) - assert record.sample_pt_id is not None - assert record.sample_point_id is not None + assert record.id is not None # Integer PK auto-generated + assert record.nma_sample_pt_id is not None + assert record.nma_sample_point_id is not None assert record.collection_date is None session.delete(record) @@ -113,21 +104,22 @@ def test_create_chemistry_sampleinfo_minimal(water_well_thing): # ===================== READ tests 
========================== -def test_read_chemistry_sampleinfo_by_object_id(water_well_thing): - """Test reading a chemistry sample info record by OBJECTID.""" +def test_read_chemistry_sampleinfo_by_id(water_well_thing): + """Test reading a chemistry sample info record by Integer ID.""" with session_ctx() as session: record = NMA_Chemistry_SampleInfo( - sample_pt_id=_next_sample_pt_id(), - sample_point_id=_next_sample_point_id(), + nma_sample_pt_id=_next_sample_pt_id(), + nma_sample_point_id=_next_sample_point_id(), thing_id=water_well_thing.id, ) session.add(record) session.commit() - fetched = session.get(NMA_Chemistry_SampleInfo, record.sample_pt_id) + fetched = session.get(NMA_Chemistry_SampleInfo, record.id) assert fetched is not None - assert fetched.sample_pt_id == record.sample_pt_id - assert fetched.sample_point_id == record.sample_point_id + assert fetched.id == record.id + assert fetched.nma_sample_pt_id == record.nma_sample_pt_id + assert fetched.nma_sample_point_id == record.nma_sample_point_id session.delete(record) session.commit() @@ -138,8 +130,8 @@ def test_update_chemistry_sampleinfo(water_well_thing): """Test updating a chemistry sample info record.""" with session_ctx() as session: record = NMA_Chemistry_SampleInfo( - sample_pt_id=_next_sample_pt_id(), - sample_point_id=_next_sample_point_id(), + nma_sample_pt_id=_next_sample_pt_id(), + nma_sample_point_id=_next_sample_point_id(), thing_id=water_well_thing.id, ) session.add(record) @@ -162,17 +154,18 @@ def test_delete_chemistry_sampleinfo(water_well_thing): """Test deleting a chemistry sample info record.""" with session_ctx() as session: record = NMA_Chemistry_SampleInfo( - sample_pt_id=_next_sample_pt_id(), - sample_point_id=_next_sample_point_id(), + nma_sample_pt_id=_next_sample_pt_id(), + nma_sample_point_id=_next_sample_point_id(), thing_id=water_well_thing.id, ) session.add(record) session.commit() + record_id = record.id session.delete(record) session.commit() - fetched = session.get(NMA_Chemistry_SampleInfo, record.sample_pt_id) + fetched = session.get(NMA_Chemistry_SampleInfo, record_id) assert fetched is None @@ -180,9 +173,10 @@ def test_delete_chemistry_sampleinfo(water_well_thing): def test_chemistry_sampleinfo_has_all_migrated_columns(): """Test that the model has all expected columns.""" expected_columns = [ - "sample_point_id", - "sample_pt_id", - "wclab_id", + "id", + "nma_sample_point_id", + "nma_sample_pt_id", + "nma_wclab_id", "thing_id", "collection_date", "collection_method", @@ -198,8 +192,8 @@ def test_chemistry_sampleinfo_has_all_migrated_columns(): "added_day_to_date", "added_month_day_to_date", "sample_notes", - "object_id", - "location_id", + "nma_object_id", + "nma_location_id", ] for column in expected_columns: @@ -213,4 +207,22 @@ def test_chemistry_sampleinfo_table_name(): assert NMA_Chemistry_SampleInfo.__tablename__ == "NMA_Chemistry_SampleInfo" +# ===================== Integer PK tests ========================== + + +def test_chemistry_sampleinfo_has_integer_pk(): + """NMA_Chemistry_SampleInfo.id is Integer PK.""" + from sqlalchemy import Integer + + col = NMA_Chemistry_SampleInfo.__table__.c.id + assert col.primary_key is True + assert isinstance(col.type, Integer) + + +def test_chemistry_sampleinfo_nma_sample_pt_id_is_unique(): + """NMA_Chemistry_SampleInfo.nma_sample_pt_id is UNIQUE.""" + col = NMA_Chemistry_SampleInfo.__table__.c.nma_sample_pt_id + assert col.unique is True + + # ============= EOF ============================================= diff --git 
a/tests/test_field_parameters_legacy.py b/tests/test_field_parameters_legacy.py index aa04174d..2ad3f9ea 100644 --- a/tests/test_field_parameters_legacy.py +++ b/tests/test_field_parameters_legacy.py @@ -2,17 +2,15 @@ Unit tests for NMA_FieldParameters legacy model. These tests verify the migration of columns from the legacy NMA_FieldParameters table. -Migrated columns (excluding SSMA_TimeStamp): -- SamplePtID -> sample_pt_id -- SamplePointID -> sample_point_id -- FieldParameter -> field_parameter -- SampleValue -> sample_value -- Units -> units -- Notes -> notes -- OBJECTID -> object_id -- GlobalID -> global_id -- AnalysesAgency -> analyses_agency -- WCLab_ID -> wc_lab_id + +Updated for Integer PK schema: +- id: Integer PK (autoincrement) +- nma_global_id: Legacy GlobalID UUID (UNIQUE) +- chemistry_sample_info_id: Integer FK to NMA_Chemistry_SampleInfo.id +- nma_sample_pt_id: Legacy SamplePtID UUID (for audit) +- nma_sample_point_id: Legacy SamplePointID string +- nma_object_id: Legacy OBJECTID (UNIQUE) +- nma_wclab_id: Legacy WCLab_ID string """ from uuid import uuid4 @@ -31,12 +29,13 @@ def _next_sample_point_id() -> str: def _create_sample_info(session, water_well_thing) -> NMA_Chemistry_SampleInfo: sample = NMA_Chemistry_SampleInfo( - sample_pt_id=uuid4(), - sample_point_id=_next_sample_point_id(), + nma_sample_pt_id=uuid4(), + nma_sample_point_id=_next_sample_point_id(), thing_id=water_well_thing.id, ) session.add(sample) session.commit() + session.refresh(sample) return sample @@ -52,16 +51,18 @@ def test_field_parameters_has_all_migrated_columns(): actual_columns = [column.key for column in mapper.attrs] expected_columns = [ - "global_id", - "sample_pt_id", - "sample_point_id", + "id", + "nma_global_id", + "chemistry_sample_info_id", + "nma_sample_pt_id", + "nma_sample_point_id", "field_parameter", "sample_value", "units", "notes", - "object_id", + "nma_object_id", "analyses_agency", - "wc_lab_id", + "nma_wclab_id", ] for column in expected_columns: @@ -85,22 +86,23 @@ def test_field_parameters_persistence(water_well_thing): sample_info = _create_sample_info(session, water_well_thing) test_global_id = uuid4() new_fp = NMA_FieldParameters( - global_id=test_global_id, - sample_pt_id=sample_info.sample_pt_id, - sample_point_id="PT-123", + nma_global_id=test_global_id, + chemistry_sample_info_id=sample_info.id, + nma_sample_pt_id=sample_info.nma_sample_pt_id, + nma_sample_point_id="PT-123", field_parameter="pH", sample_value=7.4, units="SU", notes="Legacy migration verification", analyses_agency="NMA Agency", - wc_lab_id="WCLAB-01", + nma_wclab_id="WCLAB-01", ) session.add(new_fp) session.commit() session.expire_all() - retrieved = session.get(NMA_FieldParameters, test_global_id) + retrieved = session.get(NMA_FieldParameters, new_fp.id) assert retrieved.sample_value == 7.4 assert retrieved.field_parameter == "pH" assert retrieved.units == "SU" @@ -111,19 +113,21 @@ def test_field_parameters_persistence(water_well_thing): session.commit() -def test_object_id_auto_generation(water_well_thing): - """Verifies that the OBJECTID (Identity) column auto-increments in Postgres.""" +def test_object_id_column_exists(water_well_thing): + """Verifies that the nma_object_id column exists.""" with session_ctx() as session: sample_info = _create_sample_info(session, water_well_thing) fp1 = NMA_FieldParameters( - sample_pt_id=sample_info.sample_pt_id, + chemistry_sample_info_id=sample_info.id, field_parameter="Temp", ) session.add(fp1) session.commit() session.refresh(fp1) - assert fp1.object_id is not 
None + # nma_object_id is nullable + assert fp1.id is not None # Integer PK auto-generated + assert hasattr(fp1, "nma_object_id") session.delete(fp1) session.delete(sample_info) @@ -136,23 +140,26 @@ def test_create_field_parameters_all_fields(water_well_thing): with session_ctx() as session: sample_info = _create_sample_info(session, water_well_thing) record = NMA_FieldParameters( - global_id=uuid4(), - sample_pt_id=sample_info.sample_pt_id, - sample_point_id=sample_info.sample_point_id, + nma_global_id=uuid4(), + chemistry_sample_info_id=sample_info.id, + nma_sample_pt_id=sample_info.nma_sample_pt_id, + nma_sample_point_id=sample_info.nma_sample_point_id, field_parameter="pH", sample_value=7.4, units="SU", notes="Test notes", analyses_agency="NMBGMR", - wc_lab_id="LAB-202", + nma_wclab_id="LAB-202", ) session.add(record) session.commit() session.refresh(record) - assert record.global_id is not None - assert record.sample_pt_id == sample_info.sample_pt_id - assert record.sample_point_id == sample_info.sample_point_id + assert record.id is not None # Integer PK auto-generated + assert record.nma_global_id is not None + assert record.chemistry_sample_info_id == sample_info.id + assert record.nma_sample_pt_id == sample_info.nma_sample_pt_id + assert record.nma_sample_point_id == sample_info.nma_sample_point_id assert record.field_parameter == "pH" assert record.sample_value == 7.4 @@ -166,15 +173,16 @@ def test_create_field_parameters_minimal(water_well_thing): with session_ctx() as session: sample_info = _create_sample_info(session, water_well_thing) record = NMA_FieldParameters( - global_id=uuid4(), - sample_pt_id=sample_info.sample_pt_id, + nma_global_id=uuid4(), + chemistry_sample_info_id=sample_info.id, ) session.add(record) session.commit() session.refresh(record) - assert record.global_id is not None - assert record.sample_pt_id == sample_info.sample_pt_id + assert record.id is not None # Integer PK auto-generated + assert record.nma_global_id is not None + assert record.chemistry_sample_info_id == sample_info.id assert record.field_parameter is None assert record.units is None assert record.sample_value is None @@ -185,50 +193,53 @@ def test_create_field_parameters_minimal(water_well_thing): # ===================== READ tests ========================== -def test_read_field_parameters_by_global_id(water_well_thing): - """Test reading a field parameters record by GlobalID.""" +def test_read_field_parameters_by_id(water_well_thing): + """Test reading a field parameters record by Integer ID.""" with session_ctx() as session: sample_info = _create_sample_info(session, water_well_thing) record = NMA_FieldParameters( - global_id=uuid4(), - sample_pt_id=sample_info.sample_pt_id, + nma_global_id=uuid4(), + chemistry_sample_info_id=sample_info.id, ) session.add(record) session.commit() - fetched = session.get(NMA_FieldParameters, record.global_id) + fetched = session.get(NMA_FieldParameters, record.id) assert fetched is not None - assert fetched.global_id == record.global_id + assert fetched.id == record.id + assert fetched.nma_global_id == record.nma_global_id session.delete(record) session.delete(sample_info) session.commit() -def test_query_field_parameters_by_sample_point_id(water_well_thing): - """Test querying field parameters by sample_point_id.""" +def test_query_field_parameters_by_nma_sample_point_id(water_well_thing): + """Test querying field parameters by nma_sample_point_id.""" with session_ctx() as session: sample_info = _create_sample_info(session, water_well_thing) record1 = 
NMA_FieldParameters( - global_id=uuid4(), - sample_pt_id=sample_info.sample_pt_id, - sample_point_id=sample_info.sample_point_id, + nma_global_id=uuid4(), + chemistry_sample_info_id=sample_info.id, + nma_sample_point_id=sample_info.nma_sample_point_id, ) record2 = NMA_FieldParameters( - global_id=uuid4(), - sample_pt_id=sample_info.sample_pt_id, - sample_point_id="OTHER-PT", + nma_global_id=uuid4(), + chemistry_sample_info_id=sample_info.id, + nma_sample_point_id="OTHER-PT", ) session.add_all([record1, record2]) session.commit() # Use SQLAlchemy 2.0 style select/execute for ORM queries. stmt = select(NMA_FieldParameters).filter( - NMA_FieldParameters.sample_point_id == sample_info.sample_point_id + NMA_FieldParameters.nma_sample_point_id == sample_info.nma_sample_point_id ) results = session.execute(stmt).scalars().all() assert len(results) >= 1 - assert all(r.sample_point_id == sample_info.sample_point_id for r in results) + assert all( + r.nma_sample_point_id == sample_info.nma_sample_point_id for r in results + ) session.delete(record1) session.delete(record2) @@ -242,8 +253,8 @@ def test_update_field_parameters(water_well_thing): with session_ctx() as session: sample_info = _create_sample_info(session, water_well_thing) record = NMA_FieldParameters( - global_id=uuid4(), - sample_pt_id=sample_info.sample_pt_id, + nma_global_id=uuid4(), + chemistry_sample_info_id=sample_info.id, ) session.add(record) session.commit() @@ -267,16 +278,17 @@ def test_delete_field_parameters(water_well_thing): with session_ctx() as session: sample_info = _create_sample_info(session, water_well_thing) record = NMA_FieldParameters( - global_id=uuid4(), - sample_pt_id=sample_info.sample_pt_id, + nma_global_id=uuid4(), + chemistry_sample_info_id=sample_info.id, ) session.add(record) session.commit() + record_id = record.id session.delete(record) session.commit() - fetched = session.get(NMA_FieldParameters, record.global_id) + fetched = session.get(NMA_FieldParameters, record_id) assert fetched is None session.delete(sample_info) @@ -288,7 +300,7 @@ def test_delete_field_parameters(water_well_thing): def test_orphan_prevention_constraint(): """ - VERIFIES: 'SamplePtID IS NOT NULL' and Foreign Key presence. + VERIFIES: 'chemistry_sample_info_id IS NOT NULL' and Foreign Key presence. Ensures the DB rejects records that aren't linked to a NMA_Chemistry_SampleInfo. """ with session_ctx() as session: @@ -311,13 +323,13 @@ def test_cascade_delete_behavior(water_well_thing): with session_ctx() as session: sample_info = _create_sample_info(session, water_well_thing) fp = NMA_FieldParameters( - sample_pt_id=sample_info.sample_pt_id, + chemistry_sample_info_id=sample_info.id, field_parameter="Temperature", ) session.add(fp) session.commit() session.refresh(fp) - fp_id = fp.global_id + fp_id = fp.id # Delete parent and check child session.delete(sample_info) @@ -331,22 +343,22 @@ def test_cascade_delete_behavior(water_well_thing): def test_update_cascade_propagation(water_well_thing): """ - VERIFIES: foreign key integrity on SamplePtID. - Ensures the DB rejects updates to a non-existent parent SamplePtID. + VERIFIES: foreign key integrity on chemistry_sample_info_id. + Ensures the DB rejects updates to a non-existent parent. 
""" with session_ctx() as session: sample_info = _create_sample_info(session, water_well_thing) fp = NMA_FieldParameters( - global_id=uuid4(), - sample_pt_id=sample_info.sample_pt_id, + nma_global_id=uuid4(), + chemistry_sample_info_id=sample_info.id, field_parameter="Dissolved Oxygen", ) session.add(fp) session.commit() - fp_id = fp.global_id + fp_id = fp.id with pytest.raises((IntegrityError, ProgrammingError)): - fp.sample_pt_id = uuid4() + fp.chemistry_sample_info_id = 999999 # Non-existent ID session.flush() session.rollback() @@ -355,3 +367,29 @@ def test_update_cascade_propagation(water_well_thing): session.delete(fetched) session.delete(sample_info) session.commit() + + +# ===================== Integer PK tests ========================== + + +def test_field_parameters_has_integer_pk(): + """NMA_FieldParameters.id is Integer PK.""" + from sqlalchemy import Integer + + col = NMA_FieldParameters.__table__.c.id + assert col.primary_key is True + assert isinstance(col.type, Integer) + + +def test_field_parameters_nma_global_id_is_unique(): + """NMA_FieldParameters.nma_global_id is UNIQUE.""" + col = NMA_FieldParameters.__table__.c.nma_global_id + assert col.unique is True + + +def test_field_parameters_chemistry_sample_info_fk(): + """NMA_FieldParameters.chemistry_sample_info_id is Integer FK.""" + col = NMA_FieldParameters.__table__.c.chemistry_sample_info_id + fks = list(col.foreign_keys) + assert len(fks) == 1 + assert "NMA_Chemistry_SampleInfo.id" in str(fks[0].target_fullname) diff --git a/tests/test_hydraulics_data_legacy.py b/tests/test_hydraulics_data_legacy.py index b2cef985..4097195f 100644 --- a/tests/test_hydraulics_data_legacy.py +++ b/tests/test_hydraulics_data_legacy.py @@ -17,29 +17,13 @@ Unit tests for HydraulicsData legacy model. These tests verify the migration of columns from the legacy HydraulicsData table. 
-Migrated columns: -- GlobalID -> global_id -- WellID -> well_id -- PointID -> point_id -- Data Source -> data_source -- Cs (gal/d/ft) -> cs_gal_d_ft -- HD (ft2/d) -> hd_ft2_d -- HL (day-1) -> hl_day_1 -- KH (ft/d) -> kh_ft_d -- KV (ft/d) -> kv_ft_d -- P (decimal fraction) -> p_decimal_fraction -- S (dimensionless) -> s_dimensionless -- Ss (ft-1) -> ss_ft_1 -- Sy (decimalfractn) -> sy_decimalfractn -- T (ft2/d) -> t_ft2_d -- k (darcy) -> k_darcy -- TestBottom -> test_bottom -- TestTop -> test_top -- HydraulicUnit -> hydraulic_unit -- HydraulicUnitType -> hydraulic_unit_type -- Hydraulic Remarks -> hydraulic_remarks -- OBJECTID -> object_id -- thing_id -> thing_id + +Updated for Integer PK schema: +- id: Integer PK (autoincrement) +- nma_global_id: Legacy GlobalID UUID (UNIQUE) +- nma_well_id: Legacy WellID UUID +- nma_point_id: Legacy PointID string +- nma_object_id: Legacy OBJECTID (UNIQUE) """ from uuid import uuid4 @@ -57,9 +41,9 @@ def test_create_hydraulics_data_all_fields(water_well_thing): """Test creating a hydraulics data record with all fields.""" with session_ctx() as session: record = NMA_HydraulicsData( - global_id=_next_global_id(), - well_id=uuid4(), - point_id=water_well_thing.name, + nma_global_id=_next_global_id(), + nma_well_id=uuid4(), + nma_point_id=water_well_thing.name, data_source="Legacy Source", cs_gal_d_ft=1.2, hd_ft2_d=3.4, @@ -77,20 +61,21 @@ def test_create_hydraulics_data_all_fields(water_well_thing): hydraulic_unit="Unit A", hydraulic_unit_type="U", hydraulic_remarks="Test remarks", - object_id=101, + nma_object_id=101, thing_id=water_well_thing.id, ) session.add(record) session.commit() session.refresh(record) - assert record.global_id is not None - assert record.well_id is not None - assert record.point_id == water_well_thing.name + assert record.id is not None # Integer PK auto-generated + assert record.nma_global_id is not None + assert record.nma_well_id is not None + assert record.nma_point_id == water_well_thing.name assert record.data_source == "Legacy Source" assert record.test_top == 30 assert record.test_bottom == 120 - assert record.object_id == 101 + assert record.nma_object_id == 101 assert record.thing_id == water_well_thing.id session.delete(record) @@ -101,7 +86,7 @@ def test_create_hydraulics_data_minimal(water_well_thing): """Test creating a hydraulics data record with minimal fields.""" with session_ctx() as session: record = NMA_HydraulicsData( - global_id=_next_global_id(), + nma_global_id=_next_global_id(), test_top=10, test_bottom=20, thing_id=water_well_thing.id, @@ -110,11 +95,12 @@ def test_create_hydraulics_data_minimal(water_well_thing): session.commit() session.refresh(record) - assert record.global_id is not None - assert record.well_id is None - assert record.point_id is None + assert record.id is not None # Integer PK auto-generated + assert record.nma_global_id is not None + assert record.nma_well_id is None + assert record.nma_point_id is None assert record.data_source is None - assert record.object_id is None + assert record.nma_object_id is None assert record.thing_id == water_well_thing.id session.delete(record) @@ -122,11 +108,11 @@ def test_create_hydraulics_data_minimal(water_well_thing): # ===================== READ tests ========================== -def test_read_hydraulics_data_by_global_id(water_well_thing): - """Test reading a hydraulics data record by GlobalID.""" +def test_read_hydraulics_data_by_id(water_well_thing): + """Test reading a hydraulics data record by Integer ID.""" with session_ctx() as session: 
record = NMA_HydraulicsData( - global_id=_next_global_id(), + nma_global_id=_next_global_id(), test_top=5, test_bottom=15, thing_id=water_well_thing.id, @@ -134,28 +120,29 @@ def test_read_hydraulics_data_by_global_id(water_well_thing): session.add(record) session.commit() - fetched = session.get(NMA_HydraulicsData, record.global_id) + fetched = session.get(NMA_HydraulicsData, record.id) assert fetched is not None - assert fetched.global_id == record.global_id + assert fetched.id == record.id + assert fetched.nma_global_id == record.nma_global_id session.delete(record) session.commit() -def test_query_hydraulics_data_by_point_id(water_well_thing): - """Test querying hydraulics data by point_id.""" +def test_query_hydraulics_data_by_nma_point_id(water_well_thing): + """Test querying hydraulics data by nma_point_id.""" with session_ctx() as session: record1 = NMA_HydraulicsData( - global_id=_next_global_id(), - well_id=uuid4(), - point_id=water_well_thing.name, + nma_global_id=_next_global_id(), + nma_well_id=uuid4(), + nma_point_id=water_well_thing.name, test_top=10, test_bottom=20, thing_id=water_well_thing.id, ) record2 = NMA_HydraulicsData( - global_id=_next_global_id(), - point_id="OTHER-POINT", + nma_global_id=_next_global_id(), + nma_point_id="OTHER-POINT", test_top=30, test_bottom=40, thing_id=water_well_thing.id, @@ -165,11 +152,11 @@ def test_query_hydraulics_data_by_point_id(water_well_thing): results = ( session.query(NMA_HydraulicsData) - .filter(NMA_HydraulicsData.point_id == water_well_thing.name) + .filter(NMA_HydraulicsData.nma_point_id == water_well_thing.name) .all() ) assert len(results) >= 1 - assert all(r.point_id == water_well_thing.name for r in results) + assert all(r.nma_point_id == water_well_thing.name for r in results) session.delete(record1) session.delete(record2) @@ -181,7 +168,7 @@ def test_update_hydraulics_data(water_well_thing): """Test updating a hydraulics data record.""" with session_ctx() as session: record = NMA_HydraulicsData( - global_id=_next_global_id(), + nma_global_id=_next_global_id(), test_top=5, test_bottom=15, thing_id=water_well_thing.id, @@ -206,18 +193,19 @@ def test_delete_hydraulics_data(water_well_thing): """Test deleting a hydraulics data record.""" with session_ctx() as session: record = NMA_HydraulicsData( - global_id=_next_global_id(), + nma_global_id=_next_global_id(), test_top=5, test_bottom=15, thing_id=water_well_thing.id, ) session.add(record) session.commit() + record_id = record.id session.delete(record) session.commit() - fetched = session.get(NMA_HydraulicsData, record.global_id) + fetched = session.get(NMA_HydraulicsData, record_id) assert fetched is None @@ -225,9 +213,10 @@ def test_delete_hydraulics_data(water_well_thing): def test_hydraulics_data_has_all_migrated_columns(): """Test that the model has all expected columns.""" expected_columns = [ - "global_id", - "well_id", - "point_id", + "id", + "nma_global_id", + "nma_well_id", + "nma_point_id", "data_source", "cs_gal_d_ft", "hd_ft2_d", @@ -245,7 +234,7 @@ def test_hydraulics_data_has_all_migrated_columns(): "hydraulic_unit", "hydraulic_unit_type", "hydraulic_remarks", - "object_id", + "nma_object_id", "thing_id", ] @@ -269,7 +258,7 @@ def test_hydraulics_data_validator_rejects_none_thing_id(): with pytest.raises(ValueError, match="requires a parent Thing"): NMA_HydraulicsData( - global_id=_next_global_id(), + nma_global_id=_next_global_id(), test_top=5, test_bottom=15, thing_id=None, @@ -294,7 +283,7 @@ def 
test_hydraulics_data_back_populates_thing(water_well_thing): with session_ctx() as session: well = session.merge(water_well_thing) record = NMA_HydraulicsData( - global_id=_next_global_id(), + nma_global_id=_next_global_id(), test_top=5, test_bottom=15, thing_id=well.id, @@ -310,4 +299,22 @@ def test_hydraulics_data_back_populates_thing(water_well_thing): session.commit() +# ===================== Integer PK tests ========================== + + +def test_hydraulics_data_has_integer_pk(): + """NMA_HydraulicsData.id is Integer PK.""" + from sqlalchemy import Integer + + col = NMA_HydraulicsData.__table__.c.id + assert col.primary_key is True + assert isinstance(col.type, Integer) + + +def test_hydraulics_data_nma_global_id_is_unique(): + """NMA_HydraulicsData.nma_global_id is UNIQUE.""" + col = NMA_HydraulicsData.__table__.c.nma_global_id + assert col.unique is True + + # ============= EOF ============================================= diff --git a/tests/test_major_chemistry_legacy.py b/tests/test_major_chemistry_legacy.py index 7161ec74..94d5f037 100644 --- a/tests/test_major_chemistry_legacy.py +++ b/tests/test_major_chemistry_legacy.py @@ -17,23 +17,15 @@ Unit tests for MajorChemistry legacy model. These tests verify the migration of columns from the legacy MajorChemistry table. -Migrated columns (excluding SSMA_TimeStamp): -- SamplePtID -> sample_pt_id -- SamplePointID -> sample_point_id -- Analyte -> analyte -- Symbol -> symbol -- SampleValue -> sample_value -- Units -> units -- Uncertainty -> uncertainty -- AnalysisMethod -> analysis_method -- AnalysisDate -> analysis_date -- Notes -> notes -- Volume -> volume -- VolumeUnit -> volume_unit -- OBJECTID -> object_id -- GlobalID -> global_id -- AnalysesAgency -> analyses_agency -- WCLab_ID -> wclab_id + +Updated for Integer PK schema: +- id: Integer PK (autoincrement) +- nma_global_id: Legacy GlobalID UUID (UNIQUE) +- chemistry_sample_info_id: Integer FK to NMA_Chemistry_SampleInfo.id +- nma_sample_pt_id: Legacy SamplePtID UUID (for audit) +- nma_sample_point_id: Legacy SamplePointID string +- nma_object_id: Legacy OBJECTID (UNIQUE) +- nma_wclab_id: Legacy WCLab_ID string """ from datetime import datetime @@ -52,17 +44,19 @@ def test_create_major_chemistry_all_fields(water_well_thing): """Test creating a major chemistry record with all fields.""" with session_ctx() as session: sample_info = NMA_Chemistry_SampleInfo( - sample_pt_id=uuid4(), - sample_point_id=_next_sample_point_id(), + nma_sample_pt_id=uuid4(), + nma_sample_point_id=_next_sample_point_id(), thing_id=water_well_thing.id, ) session.add(sample_info) session.commit() + session.refresh(sample_info) record = NMA_MajorChemistry( - global_id=uuid4(), - sample_pt_id=sample_info.sample_pt_id, - sample_point_id=sample_info.sample_point_id, + nma_global_id=uuid4(), + chemistry_sample_info_id=sample_info.id, + nma_sample_pt_id=sample_info.nma_sample_pt_id, + nma_sample_point_id=sample_info.nma_sample_point_id, analyte="Ca", symbol="<", sample_value=12.3, @@ -74,15 +68,17 @@ def test_create_major_chemistry_all_fields(water_well_thing): volume=250, volume_unit="mL", analyses_agency="NMBGMR", - wclab_id="LAB-101", + nma_wclab_id="LAB-101", ) session.add(record) session.commit() session.refresh(record) - assert record.global_id is not None - assert record.sample_pt_id == sample_info.sample_pt_id - assert record.sample_point_id == sample_info.sample_point_id + assert record.id is not None # Integer PK auto-generated + assert record.nma_global_id is not None + assert 
record.chemistry_sample_info_id == sample_info.id + assert record.nma_sample_pt_id == sample_info.nma_sample_pt_id + assert record.nma_sample_point_id == sample_info.nma_sample_point_id assert record.analyte == "Ca" assert record.sample_value == 12.3 assert record.uncertainty == 0.1 @@ -96,23 +92,25 @@ def test_create_major_chemistry_minimal(water_well_thing): """Test creating a major chemistry record with minimal fields.""" with session_ctx() as session: sample_info = NMA_Chemistry_SampleInfo( - sample_pt_id=uuid4(), - sample_point_id=_next_sample_point_id(), + nma_sample_pt_id=uuid4(), + nma_sample_point_id=_next_sample_point_id(), thing_id=water_well_thing.id, ) session.add(sample_info) session.commit() + session.refresh(sample_info) record = NMA_MajorChemistry( - global_id=uuid4(), - sample_pt_id=sample_info.sample_pt_id, + nma_global_id=uuid4(), + chemistry_sample_info_id=sample_info.id, ) session.add(record) session.commit() session.refresh(record) - assert record.global_id is not None - assert record.sample_pt_id == sample_info.sample_pt_id + assert record.id is not None # Integer PK auto-generated + assert record.nma_global_id is not None + assert record.chemistry_sample_info_id == sample_info.id assert record.analyte is None assert record.units is None @@ -122,64 +120,71 @@ def test_create_major_chemistry_minimal(water_well_thing): # ===================== READ tests ========================== -def test_read_major_chemistry_by_global_id(water_well_thing): - """Test reading a major chemistry record by GlobalID.""" +def test_read_major_chemistry_by_id(water_well_thing): + """Test reading a major chemistry record by Integer ID.""" with session_ctx() as session: sample_info = NMA_Chemistry_SampleInfo( - sample_pt_id=uuid4(), - sample_point_id=_next_sample_point_id(), + nma_sample_pt_id=uuid4(), + nma_sample_point_id=_next_sample_point_id(), thing_id=water_well_thing.id, ) session.add(sample_info) session.commit() + session.refresh(sample_info) record = NMA_MajorChemistry( - global_id=uuid4(), - sample_pt_id=sample_info.sample_pt_id, + nma_global_id=uuid4(), + chemistry_sample_info_id=sample_info.id, ) session.add(record) session.commit() - fetched = session.get(NMA_MajorChemistry, record.global_id) + fetched = session.get(NMA_MajorChemistry, record.id) assert fetched is not None - assert fetched.global_id == record.global_id + assert fetched.id == record.id + assert fetched.nma_global_id == record.nma_global_id session.delete(record) session.delete(sample_info) session.commit() -def test_query_major_chemistry_by_sample_point_id(water_well_thing): - """Test querying major chemistry by sample_point_id.""" +def test_query_major_chemistry_by_nma_sample_point_id(water_well_thing): + """Test querying major chemistry by nma_sample_point_id.""" with session_ctx() as session: sample_info = NMA_Chemistry_SampleInfo( - sample_pt_id=uuid4(), - sample_point_id=_next_sample_point_id(), + nma_sample_pt_id=uuid4(), + nma_sample_point_id=_next_sample_point_id(), thing_id=water_well_thing.id, ) session.add(sample_info) session.commit() + session.refresh(sample_info) record1 = NMA_MajorChemistry( - global_id=uuid4(), - sample_pt_id=sample_info.sample_pt_id, - sample_point_id=sample_info.sample_point_id, + nma_global_id=uuid4(), + chemistry_sample_info_id=sample_info.id, + nma_sample_point_id=sample_info.nma_sample_point_id, ) record2 = NMA_MajorChemistry( - global_id=uuid4(), - sample_pt_id=sample_info.sample_pt_id, - sample_point_id="OTHER-PT", + nma_global_id=uuid4(), + 
chemistry_sample_info_id=sample_info.id, + nma_sample_point_id="OTHER-PT", ) session.add_all([record1, record2]) session.commit() results = ( session.query(NMA_MajorChemistry) - .filter(NMA_MajorChemistry.sample_point_id == sample_info.sample_point_id) + .filter( + NMA_MajorChemistry.nma_sample_point_id == sample_info.nma_sample_point_id + ) .all() ) assert len(results) >= 1 - assert all(r.sample_point_id == sample_info.sample_point_id for r in results) + assert all( + r.nma_sample_point_id == sample_info.nma_sample_point_id for r in results + ) session.delete(record1) session.delete(record2) @@ -192,16 +197,17 @@ def test_update_major_chemistry(water_well_thing): """Test updating a major chemistry record.""" with session_ctx() as session: sample_info = NMA_Chemistry_SampleInfo( - sample_pt_id=uuid4(), - sample_point_id=_next_sample_point_id(), + nma_sample_pt_id=uuid4(), + nma_sample_point_id=_next_sample_point_id(), thing_id=water_well_thing.id, ) session.add(sample_info) session.commit() + session.refresh(sample_info) record = NMA_MajorChemistry( - global_id=uuid4(), - sample_pt_id=sample_info.sample_pt_id, + nma_global_id=uuid4(), + chemistry_sample_info_id=sample_info.id, ) session.add(record) session.commit() @@ -224,24 +230,26 @@ def test_delete_major_chemistry(water_well_thing): """Test deleting a major chemistry record.""" with session_ctx() as session: sample_info = NMA_Chemistry_SampleInfo( - sample_pt_id=uuid4(), - sample_point_id=_next_sample_point_id(), + nma_sample_pt_id=uuid4(), + nma_sample_point_id=_next_sample_point_id(), thing_id=water_well_thing.id, ) session.add(sample_info) session.commit() + session.refresh(sample_info) record = NMA_MajorChemistry( - global_id=uuid4(), - sample_pt_id=sample_info.sample_pt_id, + nma_global_id=uuid4(), + chemistry_sample_info_id=sample_info.id, ) session.add(record) session.commit() + record_id = record.id session.delete(record) session.commit() - fetched = session.get(NMA_MajorChemistry, record.global_id) + fetched = session.get(NMA_MajorChemistry, record_id) assert fetched is None session.delete(sample_info) @@ -252,9 +260,11 @@ def test_delete_major_chemistry(water_well_thing): def test_major_chemistry_has_all_migrated_columns(): """Test that the model has all expected columns.""" expected_columns = [ - "global_id", - "sample_pt_id", - "sample_point_id", + "id", + "nma_global_id", + "chemistry_sample_info_id", + "nma_sample_pt_id", + "nma_sample_point_id", "analyte", "symbol", "sample_value", @@ -265,9 +275,9 @@ def test_major_chemistry_has_all_migrated_columns(): "notes", "volume", "volume_unit", - "object_id", + "nma_object_id", "analyses_agency", - "wclab_id", + "nma_wclab_id", ] for column in expected_columns: @@ -281,4 +291,30 @@ def test_major_chemistry_table_name(): assert NMA_MajorChemistry.__tablename__ == "NMA_MajorChemistry" +# ===================== Integer PK tests ========================== + + +def test_major_chemistry_has_integer_pk(): + """NMA_MajorChemistry.id is Integer PK.""" + from sqlalchemy import Integer + + col = NMA_MajorChemistry.__table__.c.id + assert col.primary_key is True + assert isinstance(col.type, Integer) + + +def test_major_chemistry_nma_global_id_is_unique(): + """NMA_MajorChemistry.nma_global_id is UNIQUE.""" + col = NMA_MajorChemistry.__table__.c.nma_global_id + assert col.unique is True + + +def test_major_chemistry_chemistry_sample_info_fk(): + """NMA_MajorChemistry.chemistry_sample_info_id is Integer FK.""" + col = NMA_MajorChemistry.__table__.c.chemistry_sample_info_id + fks = 
list(col.foreign_keys) + assert len(fks) == 1 + assert "NMA_Chemistry_SampleInfo.id" in str(fks[0].target_fullname) + + # ============= EOF ============================================= diff --git a/tests/test_radionuclides_legacy.py b/tests/test_radionuclides_legacy.py index efaec941..74fdf6ca 100644 --- a/tests/test_radionuclides_legacy.py +++ b/tests/test_radionuclides_legacy.py @@ -17,23 +17,15 @@ Unit tests for Radionuclides legacy model. These tests verify the migration of columns from the legacy Radionuclides table. -Migrated columns (excluding SSMA_TimeStamp): -- SamplePtID -> sample_pt_id -- SamplePointID -> sample_point_id -- Analyte -> analyte -- Symbol -> symbol -- SampleValue -> sample_value -- Units -> units -- Uncertainty -> uncertainty -- AnalysisMethod -> analysis_method -- AnalysisDate -> analysis_date -- Notes -> notes -- Volume -> volume -- VolumeUnit -> volume_unit -- OBJECTID -> object_id -- GlobalID -> global_id -- AnalysesAgency -> analyses_agency -- WCLab_ID -> wclab_id + +Updated for Integer PK schema: +- id: Integer PK (autoincrement) +- nma_global_id: Legacy GlobalID UUID (UNIQUE) +- chemistry_sample_info_id: Integer FK to NMA_Chemistry_SampleInfo.id +- nma_sample_pt_id: Legacy SamplePtID UUID (for audit) +- nma_sample_point_id: Legacy SamplePointID string +- nma_object_id: Legacy OBJECTID (UNIQUE) +- nma_wclab_id: Legacy WCLab_ID string """ from datetime import datetime @@ -52,18 +44,20 @@ def test_create_radionuclides_all_fields(water_well_thing): """Test creating a radionuclides record with all fields.""" with session_ctx() as session: sample_info = NMA_Chemistry_SampleInfo( - sample_pt_id=uuid4(), - sample_point_id=_next_sample_point_id(), + nma_sample_pt_id=uuid4(), + nma_sample_point_id=_next_sample_point_id(), thing_id=water_well_thing.id, ) session.add(sample_info) session.commit() + session.refresh(sample_info) record = NMA_Radionuclides( - global_id=uuid4(), + nma_global_id=uuid4(), thing_id=water_well_thing.id, - sample_pt_id=sample_info.sample_pt_id, - sample_point_id=sample_info.sample_point_id, + chemistry_sample_info_id=sample_info.id, + nma_sample_pt_id=sample_info.nma_sample_pt_id, + nma_sample_point_id=sample_info.nma_sample_point_id, analyte="U-238", symbol="<", sample_value=0.12, @@ -75,15 +69,17 @@ def test_create_radionuclides_all_fields(water_well_thing): volume=250, volume_unit="mL", analyses_agency="NMBGMR", - wclab_id="LAB-001", + nma_wclab_id="LAB-001", ) session.add(record) session.commit() session.refresh(record) - assert record.global_id is not None - assert record.sample_pt_id == sample_info.sample_pt_id - assert record.sample_point_id == sample_info.sample_point_id + assert record.id is not None # Integer PK auto-generated + assert record.nma_global_id is not None + assert record.chemistry_sample_info_id == sample_info.id + assert record.nma_sample_pt_id == sample_info.nma_sample_pt_id + assert record.nma_sample_point_id == sample_info.nma_sample_point_id assert record.analyte == "U-238" assert record.sample_value == 0.12 assert record.uncertainty == 0.01 @@ -97,24 +93,26 @@ def test_create_radionuclides_minimal(water_well_thing): """Test creating a radionuclides record with minimal fields.""" with session_ctx() as session: sample_info = NMA_Chemistry_SampleInfo( - sample_pt_id=uuid4(), - sample_point_id=_next_sample_point_id(), + nma_sample_pt_id=uuid4(), + nma_sample_point_id=_next_sample_point_id(), thing_id=water_well_thing.id, ) session.add(sample_info) session.commit() + session.refresh(sample_info) record = 
NMA_Radionuclides( - global_id=uuid4(), + nma_global_id=uuid4(), thing_id=water_well_thing.id, - sample_pt_id=sample_info.sample_pt_id, + chemistry_sample_info_id=sample_info.id, ) session.add(record) session.commit() session.refresh(record) - assert record.global_id is not None - assert record.sample_pt_id == sample_info.sample_pt_id + assert record.id is not None # Integer PK auto-generated + assert record.nma_global_id is not None + assert record.chemistry_sample_info_id == sample_info.id assert record.analyte is None assert record.units is None @@ -124,67 +122,74 @@ def test_create_radionuclides_minimal(water_well_thing): # ===================== READ tests ========================== -def test_read_radionuclides_by_global_id(water_well_thing): - """Test reading a radionuclides record by GlobalID.""" +def test_read_radionuclides_by_id(water_well_thing): + """Test reading a radionuclides record by Integer ID.""" with session_ctx() as session: sample_info = NMA_Chemistry_SampleInfo( - sample_pt_id=uuid4(), - sample_point_id=_next_sample_point_id(), + nma_sample_pt_id=uuid4(), + nma_sample_point_id=_next_sample_point_id(), thing_id=water_well_thing.id, ) session.add(sample_info) session.commit() + session.refresh(sample_info) record = NMA_Radionuclides( - global_id=uuid4(), + nma_global_id=uuid4(), thing_id=water_well_thing.id, - sample_pt_id=sample_info.sample_pt_id, + chemistry_sample_info_id=sample_info.id, ) session.add(record) session.commit() - fetched = session.get(NMA_Radionuclides, record.global_id) + fetched = session.get(NMA_Radionuclides, record.id) assert fetched is not None - assert fetched.global_id == record.global_id + assert fetched.id == record.id + assert fetched.nma_global_id == record.nma_global_id session.delete(record) session.delete(sample_info) session.commit() -def test_query_radionuclides_by_sample_point_id(water_well_thing): - """Test querying radionuclides by sample_point_id.""" +def test_query_radionuclides_by_nma_sample_point_id(water_well_thing): + """Test querying radionuclides by nma_sample_point_id.""" with session_ctx() as session: sample_info = NMA_Chemistry_SampleInfo( - sample_pt_id=uuid4(), - sample_point_id=_next_sample_point_id(), + nma_sample_pt_id=uuid4(), + nma_sample_point_id=_next_sample_point_id(), thing_id=water_well_thing.id, ) session.add(sample_info) session.commit() + session.refresh(sample_info) record1 = NMA_Radionuclides( - global_id=uuid4(), + nma_global_id=uuid4(), thing_id=water_well_thing.id, - sample_pt_id=sample_info.sample_pt_id, - sample_point_id=sample_info.sample_point_id, + chemistry_sample_info_id=sample_info.id, + nma_sample_point_id=sample_info.nma_sample_point_id, ) record2 = NMA_Radionuclides( - global_id=uuid4(), + nma_global_id=uuid4(), thing_id=water_well_thing.id, - sample_pt_id=sample_info.sample_pt_id, - sample_point_id="OTHER-PT", + chemistry_sample_info_id=sample_info.id, + nma_sample_point_id="OTHER-PT", ) session.add_all([record1, record2]) session.commit() results = ( session.query(NMA_Radionuclides) - .filter(NMA_Radionuclides.sample_point_id == sample_info.sample_point_id) + .filter( + NMA_Radionuclides.nma_sample_point_id == sample_info.nma_sample_point_id + ) .all() ) assert len(results) >= 1 - assert all(r.sample_point_id == sample_info.sample_point_id for r in results) + assert all( + r.nma_sample_point_id == sample_info.nma_sample_point_id for r in results + ) session.delete(record1) session.delete(record2) @@ -197,17 +202,18 @@ def test_update_radionuclides(water_well_thing): """Test updating a 
radionuclides record.""" with session_ctx() as session: sample_info = NMA_Chemistry_SampleInfo( - sample_pt_id=uuid4(), - sample_point_id=_next_sample_point_id(), + nma_sample_pt_id=uuid4(), + nma_sample_point_id=_next_sample_point_id(), thing_id=water_well_thing.id, ) session.add(sample_info) session.commit() + session.refresh(sample_info) record = NMA_Radionuclides( - global_id=uuid4(), + nma_global_id=uuid4(), thing_id=water_well_thing.id, - sample_pt_id=sample_info.sample_pt_id, + chemistry_sample_info_id=sample_info.id, ) session.add(record) session.commit() @@ -230,25 +236,27 @@ def test_delete_radionuclides(water_well_thing): """Test deleting a radionuclides record.""" with session_ctx() as session: sample_info = NMA_Chemistry_SampleInfo( - sample_pt_id=uuid4(), - sample_point_id=_next_sample_point_id(), + nma_sample_pt_id=uuid4(), + nma_sample_point_id=_next_sample_point_id(), thing_id=water_well_thing.id, ) session.add(sample_info) session.commit() + session.refresh(sample_info) record = NMA_Radionuclides( - global_id=uuid4(), + nma_global_id=uuid4(), thing_id=water_well_thing.id, - sample_pt_id=sample_info.sample_pt_id, + chemistry_sample_info_id=sample_info.id, ) session.add(record) session.commit() + record_id = record.id session.delete(record) session.commit() - fetched = session.get(NMA_Radionuclides, record.global_id) + fetched = session.get(NMA_Radionuclides, record_id) assert fetched is None session.delete(sample_info) @@ -259,9 +267,12 @@ def test_delete_radionuclides(water_well_thing): def test_radionuclides_has_all_migrated_columns(): """Test that the model has all expected columns.""" expected_columns = [ + "id", + "nma_global_id", "thing_id", - "sample_pt_id", - "sample_point_id", + "chemistry_sample_info_id", + "nma_sample_pt_id", + "nma_sample_point_id", "analyte", "symbol", "sample_value", @@ -272,10 +283,9 @@ def test_radionuclides_has_all_migrated_columns(): "notes", "volume", "volume_unit", - "object_id", - "global_id", + "nma_object_id", "analyses_agency", - "wclab_id", + "nma_wclab_id", ] for column in expected_columns: @@ -306,16 +316,17 @@ def test_radionuclides_back_populates_thing(water_well_thing): # Radionuclides requires a chemistry_sample_info sample_info = NMA_Chemistry_SampleInfo( - sample_pt_id=uuid4(), - sample_point_id=_next_sample_point_id(), + nma_sample_pt_id=uuid4(), + nma_sample_point_id=_next_sample_point_id(), thing_id=well.id, ) session.add(sample_info) session.commit() + session.refresh(sample_info) record = NMA_Radionuclides( - global_id=uuid4(), - sample_pt_id=sample_info.sample_pt_id, + nma_global_id=uuid4(), + chemistry_sample_info_id=sample_info.id, thing_id=well.id, ) session.add(record) @@ -330,4 +341,30 @@ def test_radionuclides_back_populates_thing(water_well_thing): session.commit() +# ===================== Integer PK tests ========================== + + +def test_radionuclides_has_integer_pk(): + """NMA_Radionuclides.id is Integer PK.""" + from sqlalchemy import Integer + + col = NMA_Radionuclides.__table__.c.id + assert col.primary_key is True + assert isinstance(col.type, Integer) + + +def test_radionuclides_nma_global_id_is_unique(): + """NMA_Radionuclides.nma_global_id is UNIQUE.""" + col = NMA_Radionuclides.__table__.c.nma_global_id + assert col.unique is True + + +def test_radionuclides_chemistry_sample_info_fk(): + """NMA_Radionuclides.chemistry_sample_info_id is Integer FK.""" + col = NMA_Radionuclides.__table__.c.chemistry_sample_info_id + fks = list(col.foreign_keys) + assert len(fks) == 1 + assert 
"NMA_Chemistry_SampleInfo.id" in str(fks[0].target_fullname) + + # ============= EOF ============================================= diff --git a/tests/test_soil_rock_results_legacy.py b/tests/test_soil_rock_results_legacy.py index 3ec2091c..0df8cf9a 100644 --- a/tests/test_soil_rock_results_legacy.py +++ b/tests/test_soil_rock_results_legacy.py @@ -17,14 +17,10 @@ Unit tests for Soil_Rock_Results legacy model. These tests verify the migration of columns from the legacy Soil_Rock_Results table. -Migrated columns: -- Point_ID -> point_id -- Sample Type -> sample_type -- Date Sampled -> date_sampled -- d13C -> d13c -- d18O -> d18o -- Sampled by -> sampled_by -- SSMA_TimeStamp -> ssma_timestamp + +Updated for Integer PK schema (already had Integer PK): +- id: Integer PK (autoincrement) [unchanged] +- nma_point_id: Legacy Point_ID string (renamed from point_id) """ from db.engine import session_ctx @@ -35,7 +31,7 @@ def test_create_soil_rock_results_all_fields(water_well_thing): """Test creating a soil/rock results record with all fields.""" with session_ctx() as session: record = NMA_Soil_Rock_Results( - point_id="SR-0001", + nma_point_id="SR-0001", sample_type="Soil", date_sampled="2026-01-01", d13c=-5.5, @@ -48,7 +44,7 @@ def test_create_soil_rock_results_all_fields(water_well_thing): session.refresh(record) assert record.id is not None - assert record.point_id == "SR-0001" + assert record.nma_point_id == "SR-0001" assert record.sample_type == "Soil" assert record.date_sampled == "2026-01-01" assert record.d13c == -5.5 @@ -70,7 +66,7 @@ def test_create_soil_rock_results_minimal(water_well_thing): assert record.id is not None assert record.thing_id == well.id - assert record.point_id is None + assert record.nma_point_id is None assert record.sample_type is None assert record.date_sampled is None assert record.d13c is None @@ -89,7 +85,7 @@ def test_soil_rock_results_validator_rejects_none_thing_id(): with pytest.raises(ValueError, match="requires a parent Thing"): NMA_Soil_Rock_Results( - point_id="ORPHAN-TEST", + nma_point_id="ORPHAN-TEST", thing_id=None, ) @@ -112,7 +108,7 @@ def test_soil_rock_results_back_populates_thing(water_well_thing): with session_ctx() as session: well = session.merge(water_well_thing) record = NMA_Soil_Rock_Results( - point_id="BP-SOIL-01", + nma_point_id="BP-SOIL-01", thing_id=well.id, ) session.add(record) @@ -126,4 +122,16 @@ def test_soil_rock_results_back_populates_thing(water_well_thing): session.commit() +# ===================== Integer PK tests ========================== + + +def test_soil_rock_results_has_integer_pk(): + """NMA_Soil_Rock_Results.id is Integer PK.""" + from sqlalchemy import Integer + + col = NMA_Soil_Rock_Results.__table__.c.id + assert col.primary_key is True + assert isinstance(col.type, Integer) + + # ============= EOF ============================================= diff --git a/tests/test_stratigraphy_legacy.py b/tests/test_stratigraphy_legacy.py index 54faf8e5..0e4e6966 100644 --- a/tests/test_stratigraphy_legacy.py +++ b/tests/test_stratigraphy_legacy.py @@ -17,6 +17,13 @@ Unit tests for NMA_Stratigraphy (lithology log) legacy model. These tests verify FK enforcement for Issue #363. 
+ +Updated for Integer PK schema: +- id: Integer PK (autoincrement) +- nma_global_id: Legacy UUID (UNIQUE) +- nma_well_id: Legacy WellID UUID +- nma_point_id: Legacy PointID string +- nma_object_id: Legacy OBJECTID (UNIQUE) """ from uuid import uuid4 @@ -39,8 +46,8 @@ def test_create_stratigraphy_with_thing(water_well_thing): with session_ctx() as session: well = session.merge(water_well_thing) record = NMA_Stratigraphy( - global_id=_next_global_id(), - point_id="STRAT-01", + nma_global_id=_next_global_id(), + nma_point_id="STRAT-01", thing_id=well.id, strat_top=0.0, strat_bottom=10.0, @@ -50,8 +57,9 @@ def test_create_stratigraphy_with_thing(water_well_thing): session.commit() session.refresh(record) - assert record.global_id is not None - assert record.point_id == "STRAT-01" + assert record.id is not None # Integer PK auto-generated + assert record.nma_global_id is not None + assert record.nma_point_id == "STRAT-01" assert record.thing_id == well.id session.delete(record) @@ -65,8 +73,8 @@ def test_stratigraphy_validator_rejects_none_thing_id(): """NMA_Stratigraphy validator rejects None thing_id.""" with pytest.raises(ValueError, match="requires a parent Thing"): NMA_Stratigraphy( - global_id=_next_global_id(), - point_id="ORPHAN-STRAT", + nma_global_id=_next_global_id(), + nma_point_id="ORPHAN-STRAT", thing_id=None, ) @@ -89,8 +97,8 @@ def test_stratigraphy_back_populates_thing(water_well_thing): with session_ctx() as session: well = session.merge(water_well_thing) record = NMA_Stratigraphy( - global_id=_next_global_id(), - point_id="BPSTRAT01", # Max 10 chars + nma_global_id=_next_global_id(), + nma_point_id="BPSTRAT01", # Max 10 chars thing_id=well.id, ) session.add(record) @@ -104,4 +112,22 @@ def test_stratigraphy_back_populates_thing(water_well_thing): session.commit() +# ===================== Integer PK tests ========================== + + +def test_stratigraphy_has_integer_pk(): + """NMA_Stratigraphy.id is Integer PK.""" + from sqlalchemy import Integer + + col = NMA_Stratigraphy.__table__.c.id + assert col.primary_key is True + assert isinstance(col.type, Integer) + + +def test_stratigraphy_nma_global_id_is_unique(): + """NMA_Stratigraphy.nma_global_id is UNIQUE.""" + col = NMA_Stratigraphy.__table__.c.nma_global_id + assert col.unique is True + + # ============= EOF ============================================= From 9302064cd54359d4af2d91f34d8aeb2ce9ebce7d Mon Sep 17 00:00:00 2001 From: Kimball Bighorse Date: Wed, 28 Jan 2026 01:28:23 -0800 Subject: [PATCH 19/22] test(integration): update relationship tests for Integer PK schema Update integration and BDD tests to use Integer PK (id) and nma_ prefixed columns for all NMA legacy models. Changes: - Replace global_id, sample_pt_id, point_id, etc. 
with nma_ prefixed versions - Use chemistry_sample_info_id (Integer FK) for radionuclides relationship - Update cascade delete tests to use Integer PK for record lookup - Update relationship navigation tests to check nma_ prefixed columns Files updated: - tests/integration/test_well_data_relationships.py - tests/features/steps/well-data-relationships.py Co-Authored-By: Claude Opus 4.5 --- .../features/steps/well-data-relationships.py | 74 +++++++----- .../test_well_data_relationships.py | 114 ++++++++++-------- 2 files changed, 104 insertions(+), 84 deletions(-) diff --git a/tests/features/steps/well-data-relationships.py b/tests/features/steps/well-data-relationships.py index 83678809..97e2e223 100644 --- a/tests/features/steps/well-data-relationships.py +++ b/tests/features/steps/well-data-relationships.py @@ -16,6 +16,12 @@ """ Step definitions for Well Data Relationships feature tests. Tests FK relationships, orphan prevention, and cascade delete behavior. + +Updated for Integer PK schema: +- All models now use `id` (Integer, autoincrement) as PK +- Legacy UUID columns renamed with `nma_` prefix (e.g., `nma_global_id`) +- Legacy string columns renamed with `nma_` prefix (e.g., `nma_point_id`) +- Chemistry children use `chemistry_sample_info_id` (Integer FK) """ import uuid @@ -128,8 +134,8 @@ def step_when_save_chemistry(context: Context): try: with session_ctx() as session: chemistry = NMA_Chemistry_SampleInfo( - sample_pt_id=uuid.uuid4(), - sample_point_id="TEST001", + nma_sample_pt_id=uuid.uuid4(), + nma_sample_point_id="TEST001", thing_id=None, # No parent well collection_date=datetime.now(), ) @@ -174,8 +180,8 @@ def step_when_save_hydraulics(context: Context): try: with session_ctx() as session: hydraulics = NMA_HydraulicsData( - global_id=uuid.uuid4(), - point_id="TEST001", + nma_global_id=uuid.uuid4(), + nma_point_id="TEST001", thing_id=None, # No parent well test_top=100, test_bottom=200, @@ -214,8 +220,8 @@ def step_when_save_lithology(context: Context): try: with session_ctx() as session: stratigraphy = NMA_Stratigraphy( - global_id=uuid.uuid4(), - point_id="TEST001", + nma_global_id=uuid.uuid4(), + nma_point_id="TEST001", thing_id=None, # No parent well strat_top=100.0, strat_bottom=200.0, @@ -255,18 +261,20 @@ def step_when_save_radionuclides(context: Context): with session_ctx() as session: # First create a chemistry sample info for the radionuclide chemistry_sample = NMA_Chemistry_SampleInfo( - sample_pt_id=uuid.uuid4(), - sample_point_id="TEST001", + nma_sample_pt_id=uuid.uuid4(), + nma_sample_point_id="TEST001", thing_id=context.test_well_id, collection_date=datetime.now(), ) session.add(chemistry_sample) - session.flush() + session.commit() + session.refresh(chemistry_sample) radionuclide = NMA_Radionuclides( - global_id=uuid.uuid4(), + nma_global_id=uuid.uuid4(), thing_id=None, # No parent well - sample_pt_id=chemistry_sample.sample_pt_id, + chemistry_sample_info_id=chemistry_sample.id, + nma_sample_pt_id=chemistry_sample.nma_sample_pt_id, analyte="U-238", ) session.add(radionuclide) @@ -303,8 +311,8 @@ def step_when_save_associated_data(context: Context): try: with session_ctx() as session: associated_data = NMA_AssociatedData( - assoc_id=uuid.uuid4(), - point_id="TEST001", + nma_assoc_id=uuid.uuid4(), + nma_point_id="TEST001", thing_id=None, # No parent well notes="Test notes", ) @@ -342,7 +350,7 @@ def step_when_save_soil_rock(context: Context): try: with session_ctx() as session: soil_rock = NMA_Soil_Rock_Results( - point_id="TEST001", + nma_point_id="TEST001", 
thing_id=None, # No parent well sample_type="Soil", date_sampled="2025-01-01", @@ -422,14 +430,14 @@ def step_given_well_has_chemistry(context: Context): with session_ctx() as session: chemistry1 = NMA_Chemistry_SampleInfo( - sample_pt_id=uuid.uuid4(), - sample_point_id="TEST001", + nma_sample_pt_id=uuid.uuid4(), + nma_sample_point_id="TEST001", thing_id=context.test_well_id, collection_date=datetime.now(), ) chemistry2 = NMA_Chemistry_SampleInfo( - sample_pt_id=uuid.uuid4(), - sample_point_id="TEST002", + nma_sample_pt_id=uuid.uuid4(), + nma_sample_point_id="TEST002", thing_id=context.test_well_id, collection_date=datetime.now(), ) @@ -446,8 +454,8 @@ def step_given_well_has_hydraulics(context: Context): with session_ctx() as session: hydraulics = NMA_HydraulicsData( - global_id=uuid.uuid4(), - point_id="TEST001", + nma_global_id=uuid.uuid4(), + nma_point_id="TEST001", thing_id=context.test_well_id, test_top=100, test_bottom=200, @@ -465,15 +473,15 @@ def step_given_well_has_lithology(context: Context): with session_ctx() as session: lithology1 = NMA_Stratigraphy( - global_id=uuid.uuid4(), - point_id="TEST001", + nma_global_id=uuid.uuid4(), + nma_point_id="TEST001", thing_id=context.test_well_id, strat_top=0.0, strat_bottom=100.0, ) lithology2 = NMA_Stratigraphy( - global_id=uuid.uuid4(), - point_id="TEST001", + nma_global_id=uuid.uuid4(), + nma_point_id="TEST001", thing_id=context.test_well_id, strat_top=100.0, strat_bottom=200.0, @@ -491,18 +499,20 @@ def step_given_well_has_radionuclides(context: Context): with session_ctx() as session: chemistry_sample = NMA_Chemistry_SampleInfo( - sample_pt_id=uuid.uuid4(), - sample_point_id="TEST001", + nma_sample_pt_id=uuid.uuid4(), + nma_sample_point_id="TEST001", thing_id=context.test_well_id, collection_date=datetime.now(), ) session.add(chemistry_sample) - session.flush() + session.commit() + session.refresh(chemistry_sample) radionuclide = NMA_Radionuclides( - global_id=uuid.uuid4(), + nma_global_id=uuid.uuid4(), thing_id=context.test_well_id, - sample_pt_id=chemistry_sample.sample_pt_id, + chemistry_sample_info_id=chemistry_sample.id, + nma_sample_pt_id=chemistry_sample.nma_sample_pt_id, analyte="U-238", ) session.add(radionuclide) @@ -518,8 +528,8 @@ def step_given_well_has_associated_data(context: Context): with session_ctx() as session: associated_data = NMA_AssociatedData( - assoc_id=uuid.uuid4(), - point_id="TEST001", + nma_assoc_id=uuid.uuid4(), + nma_point_id="TEST001", thing_id=context.test_well_id, notes="Test associated data", ) @@ -536,7 +546,7 @@ def step_given_well_has_soil_rock(context: Context): with session_ctx() as session: soil_rock = NMA_Soil_Rock_Results( - point_id="TEST001", + nma_point_id="TEST001", thing_id=context.test_well_id, sample_type="Soil", date_sampled="2025-01-01", diff --git a/tests/integration/test_well_data_relationships.py b/tests/integration/test_well_data_relationships.py index 549e7081..b1ae4878 100644 --- a/tests/integration/test_well_data_relationships.py +++ b/tests/integration/test_well_data_relationships.py @@ -23,6 +23,12 @@ As a NMBGMR data manager I need well-related records to always belong to a well So that data integrity is maintained and orphaned records are prevented + +Updated for Integer PK schema: +- All models now use `id` (Integer, autoincrement) as PK +- Legacy UUID columns renamed with `nma_` prefix (e.g., `nma_global_id`) +- Legacy string columns renamed with `nma_` prefix (e.g., `nma_point_id`) +- Chemistry children use `chemistry_sample_info_id` (Integer FK) """ import uuid @@ 
-181,8 +187,8 @@ def test_chemistry_sample_requires_well(self): with session_ctx() as session: with pytest.raises(ValueError, match="requires a parent Thing"): record = NMA_Chemistry_SampleInfo( - sample_pt_id=uuid.uuid4(), - sample_point_id="ORPHAN-CHEM", + nma_sample_pt_id=uuid.uuid4(), + nma_sample_point_id="ORPHAN-CHEM", thing_id=None, # This should raise ValueError ) session.add(record) @@ -196,8 +202,8 @@ def test_hydraulics_data_requires_well(self): with session_ctx() as session: with pytest.raises(ValueError, match="requires a parent Thing"): record = NMA_HydraulicsData( - global_id=uuid.uuid4(), - point_id="ORPHANHYD", + nma_global_id=uuid.uuid4(), + nma_point_id="ORPHANHYD", thing_id=None, # This should raise ValueError ) session.add(record) @@ -211,8 +217,8 @@ def test_stratigraphy_requires_well(self): with session_ctx() as session: with pytest.raises(ValueError, match="requires a parent Thing"): record = NMA_Stratigraphy( - global_id=uuid.uuid4(), - point_id="ORPHSTRAT", + nma_global_id=uuid.uuid4(), + nma_point_id="ORPHSTRAT", thing_id=None, # This should raise ValueError ) session.add(record) @@ -226,7 +232,7 @@ def test_radionuclides_requires_well(self): with session_ctx() as session: with pytest.raises(ValueError, match="requires a parent Thing"): record = NMA_Radionuclides( - sample_pt_id=uuid.uuid4(), + nma_sample_pt_id=uuid.uuid4(), thing_id=None, # This should raise ValueError ) session.add(record) @@ -240,7 +246,7 @@ def test_associated_data_requires_well(self): with session_ctx() as session: with pytest.raises(ValueError, match="requires a parent Thing"): record = NMA_AssociatedData( - point_id="ORPHAN-ASSOC", + nma_point_id="ORPHAN-ASSOC", thing_id=None, # This should raise ValueError ) session.add(record) @@ -254,7 +260,7 @@ def test_soil_rock_results_requires_well(self): with session_ctx() as session: with pytest.raises(ValueError, match="requires a parent Thing"): record = NMA_Soil_Rock_Results( - point_id="ORPHAN-SOIL", + nma_point_id="ORPHAN-SOIL", thing_id=None, # This should raise ValueError ) session.add(record) @@ -279,8 +285,8 @@ def test_well_navigates_to_chemistry_samples(self, well_for_relationships): # Create a chemistry sample for this well sample = NMA_Chemistry_SampleInfo( - sample_pt_id=uuid.uuid4(), - sample_point_id="NAVCHEM01", # Max 10 chars + nma_sample_pt_id=uuid.uuid4(), + nma_sample_point_id="NAVCHEM01", # Max 10 chars thing_id=well.id, ) session.add(sample) @@ -291,7 +297,7 @@ def test_well_navigates_to_chemistry_samples(self, well_for_relationships): assert hasattr(well, "chemistry_sample_infos") assert len(well.chemistry_sample_infos) >= 1 assert any( - s.sample_point_id == "NAVCHEM01" for s in well.chemistry_sample_infos + s.nma_sample_point_id == "NAVCHEM01" for s in well.chemistry_sample_infos ) def test_well_navigates_to_hydraulics_data(self, well_for_relationships): @@ -301,8 +307,8 @@ def test_well_navigates_to_hydraulics_data(self, well_for_relationships): # Create hydraulics data for this well hydraulics = NMA_HydraulicsData( - global_id=uuid.uuid4(), - point_id="NAVHYD01", # Max 10 chars + nma_global_id=uuid.uuid4(), + nma_point_id="NAVHYD01", # Max 10 chars thing_id=well.id, test_top=0, test_bottom=100, @@ -314,7 +320,7 @@ def test_well_navigates_to_hydraulics_data(self, well_for_relationships): # Navigate through relationship assert hasattr(well, "hydraulics_data") assert len(well.hydraulics_data) >= 1 - assert any(h.point_id == "NAVHYD01" for h in well.hydraulics_data) + assert any(h.nma_point_id == "NAVHYD01" for h in 
well.hydraulics_data) def test_well_navigates_to_stratigraphy_logs(self, well_for_relationships): """Well can navigate to its lithology logs.""" @@ -323,8 +329,8 @@ def test_well_navigates_to_stratigraphy_logs(self, well_for_relationships): # Create stratigraphy log for this well strat = NMA_Stratigraphy( - global_id=uuid.uuid4(), - point_id="NAVSTRAT1", # Max 10 chars + nma_global_id=uuid.uuid4(), + nma_point_id="NAVSTRAT1", # Max 10 chars thing_id=well.id, ) session.add(strat) @@ -334,7 +340,7 @@ def test_well_navigates_to_stratigraphy_logs(self, well_for_relationships): # Navigate through relationship assert hasattr(well, "stratigraphy_logs") assert len(well.stratigraphy_logs) >= 1 - assert any(s.point_id == "NAVSTRAT1" for s in well.stratigraphy_logs) + assert any(s.nma_point_id == "NAVSTRAT1" for s in well.stratigraphy_logs) def test_well_navigates_to_radionuclides(self, well_for_relationships): """Well can navigate to its radionuclide results.""" @@ -343,17 +349,19 @@ def test_well_navigates_to_radionuclides(self, well_for_relationships): # Create a chemistry sample for this well to satisfy the FK chem_sample = NMA_Chemistry_SampleInfo( - sample_pt_id=uuid.uuid4(), - sample_point_id="NAVRAD01", # Required, max 10 chars + nma_sample_pt_id=uuid.uuid4(), + nma_sample_point_id="NAVRAD01", # Required, max 10 chars thing_id=well.id, ) session.add(chem_sample) - session.flush() + session.commit() + session.refresh(chem_sample) - # Create radionuclide record for this well using the same sample_pt_id + # Create radionuclide record for this well using the chemistry_sample_info_id radio = NMA_Radionuclides( - global_id=uuid.uuid4(), - sample_pt_id=chem_sample.sample_pt_id, + nma_global_id=uuid.uuid4(), + chemistry_sample_info_id=chem_sample.id, + nma_sample_pt_id=chem_sample.nma_sample_pt_id, thing_id=well.id, ) session.add(radio) @@ -371,8 +379,8 @@ def test_well_navigates_to_associated_data(self, well_for_relationships): # Create associated data for this well assoc = NMA_AssociatedData( - assoc_id=uuid.uuid4(), - point_id="NAVASSOC1", # Max 10 chars + nma_assoc_id=uuid.uuid4(), + nma_point_id="NAVASSOC1", # Max 10 chars thing_id=well.id, ) session.add(assoc) @@ -382,7 +390,7 @@ def test_well_navigates_to_associated_data(self, well_for_relationships): # Navigate through relationship assert hasattr(well, "associated_data") assert len(well.associated_data) >= 1 - assert any(a.point_id == "NAVASSOC1" for a in well.associated_data) + assert any(a.nma_point_id == "NAVASSOC1" for a in well.associated_data) def test_well_navigates_to_soil_rock_results(self, well_for_relationships): """Well can navigate to its soil/rock results.""" @@ -391,7 +399,7 @@ def test_well_navigates_to_soil_rock_results(self, well_for_relationships): # Create soil/rock result for this well soil = NMA_Soil_Rock_Results( - point_id="NAV-SOIL-01", + nma_point_id="NAV-SOIL-01", thing_id=well.id, ) session.add(soil) @@ -401,7 +409,7 @@ def test_well_navigates_to_soil_rock_results(self, well_for_relationships): # Navigate through relationship assert hasattr(well, "soil_rock_results") assert len(well.soil_rock_results) >= 1 - assert any(s.point_id == "NAV-SOIL-01" for s in well.soil_rock_results) + assert any(s.nma_point_id == "NAV-SOIL-01" for s in well.soil_rock_results) # ============================================================================= @@ -431,13 +439,13 @@ def test_deleting_well_cascades_to_chemistry_samples(self): session.commit() sample = NMA_Chemistry_SampleInfo( - sample_pt_id=uuid.uuid4(), - 
sample_point_id="CASCCHEM1", # Max 10 chars + nma_sample_pt_id=uuid.uuid4(), + nma_sample_point_id="CASCCHEM1", # Max 10 chars thing_id=well.id, ) session.add(sample) session.commit() - sample_id = sample.sample_pt_id # PK is sample_pt_id + sample_id = sample.id # Integer PK # Delete the well session.delete(well) @@ -465,16 +473,16 @@ def test_deleting_well_cascades_to_hydraulics_data(self): session.add(well) session.commit() - hyd_global_id = uuid.uuid4() hydraulics = NMA_HydraulicsData( - global_id=hyd_global_id, - point_id="CASCHYD01", # Max 10 chars + nma_global_id=uuid.uuid4(), + nma_point_id="CASCHYD01", # Max 10 chars thing_id=well.id, test_top=0, test_bottom=100, ) session.add(hydraulics) session.commit() + hyd_id = hydraulics.id # Integer PK # Delete the well session.delete(well) @@ -484,7 +492,7 @@ def test_deleting_well_cascades_to_hydraulics_data(self): session.expire_all() # Verify hydraulics data was also deleted - orphan = session.get(NMA_HydraulicsData, hyd_global_id) + orphan = session.get(NMA_HydraulicsData, hyd_id) assert orphan is None, "Hydraulics data should be deleted with well" def test_deleting_well_cascades_to_stratigraphy_logs(self): @@ -502,14 +510,14 @@ def test_deleting_well_cascades_to_stratigraphy_logs(self): session.add(well) session.commit() - strat_global_id = uuid.uuid4() strat = NMA_Stratigraphy( - global_id=strat_global_id, - point_id="CASCSTRAT", # Max 10 chars + nma_global_id=uuid.uuid4(), + nma_point_id="CASCSTRAT", # Max 10 chars thing_id=well.id, ) session.add(strat) session.commit() + strat_id = strat.id # Integer PK # Delete the well session.delete(well) @@ -519,7 +527,7 @@ def test_deleting_well_cascades_to_stratigraphy_logs(self): session.expire_all() # Verify stratigraphy was also deleted - orphan = session.get(NMA_Stratigraphy, strat_global_id) + orphan = session.get(NMA_Stratigraphy, strat_id) assert orphan is None, "Stratigraphy log should be deleted with well" def test_deleting_well_cascades_to_radionuclides(self): @@ -539,22 +547,24 @@ def test_deleting_well_cascades_to_radionuclides(self): # Create a chemistry sample for this well to satisfy the FK chem_sample = NMA_Chemistry_SampleInfo( - sample_pt_id=uuid.uuid4(), - sample_point_id="CASCRAD01", # Required, max 10 chars + nma_sample_pt_id=uuid.uuid4(), + nma_sample_point_id="CASCRAD01", # Required, max 10 chars thing_id=well.id, ) session.add(chem_sample) - session.flush() + session.commit() + session.refresh(chem_sample) - # Create radionuclide record using the chemistry sample's sample_pt_id + # Create radionuclide record using the chemistry_sample_info_id radio = NMA_Radionuclides( - global_id=uuid.uuid4(), - sample_pt_id=chem_sample.sample_pt_id, + nma_global_id=uuid.uuid4(), + chemistry_sample_info_id=chem_sample.id, + nma_sample_pt_id=chem_sample.nma_sample_pt_id, thing_id=well.id, ) session.add(radio) session.commit() - radio_id = radio.global_id # PK is global_id + radio_id = radio.id # Integer PK # Delete the well session.delete(well) @@ -582,14 +592,14 @@ def test_deleting_well_cascades_to_associated_data(self): session.add(well) session.commit() - assoc_uuid = uuid.uuid4() assoc = NMA_AssociatedData( - assoc_id=assoc_uuid, - point_id="CASCASSOC", # Max 10 chars + nma_assoc_id=uuid.uuid4(), + nma_point_id="CASCASSOC", # Max 10 chars thing_id=well.id, ) session.add(assoc) session.commit() + assoc_id = assoc.id # Integer PK # Delete the well session.delete(well) @@ -599,7 +609,7 @@ def test_deleting_well_cascades_to_associated_data(self): session.expire_all() # Verify 
associated data was also deleted - orphan = session.get(NMA_AssociatedData, assoc_uuid) + orphan = session.get(NMA_AssociatedData, assoc_id) assert orphan is None, "Associated data should be deleted with well" def test_deleting_well_cascades_to_soil_rock_results(self): @@ -618,7 +628,7 @@ def test_deleting_well_cascades_to_soil_rock_results(self): session.commit() soil = NMA_Soil_Rock_Results( - point_id="CASCSOIL1", + nma_point_id="CASCSOIL1", thing_id=well.id, ) session.add(soil) From 68455355eee97af77418a9d253b2c441027fe8ca Mon Sep 17 00:00:00 2001 From: Kimball Bighorse Date: Wed, 28 Jan 2026 01:56:48 -0800 Subject: [PATCH 20/22] feat(alembic): add Integer PK migration for NMA legacy tables Add migration to refactor NMA tables from UUID to Integer primary keys: - Add `id` (Integer PK with IDENTITY) to 8 NMA tables - Rename UUID columns with `nma_` prefix for audit/traceability - Convert FK references from UUID to Integer - Make `chemistry_sample_info_id` NOT NULL for chemistry child tables Also fixes alembic/env.py to handle None names for unnamed constraints, and updates test files to use correct DB column names via bracket notation (e.g., `__table__.c["nma_GlobalID"]` instead of `__table__.c.nma_global_id`). Co-Authored-By: Claude Opus 4.5 --- alembic/env.py | 3 + ...51fd_refactor_nma_tables_to_integer_pks.py | 435 ++++++++++++++++++ .../test_admin_minor_trace_chemistry.py | 20 +- tests/test_associated_data_legacy.py | 3 +- tests/test_chemistry_sampleinfo_legacy.py | 3 +- tests/test_field_parameters_legacy.py | 3 +- tests/test_hydraulics_data_legacy.py | 2 +- tests/test_major_chemistry_legacy.py | 3 +- tests/test_nma_chemistry_lineage.py | 116 ++--- tests/test_radionuclides_legacy.py | 3 +- tests/test_stratigraphy_legacy.py | 3 +- 11 files changed, 521 insertions(+), 73 deletions(-) create mode 100644 alembic/versions/3cb924ca51fd_refactor_nma_tables_to_integer_pks.py diff --git a/alembic/env.py b/alembic/env.py index 089144e8..526711ae 100644 --- a/alembic/env.py +++ b/alembic/env.py @@ -71,6 +71,9 @@ def build_database_url(): def include_object(object, name, type_, reflected, compare_to): # only include tables in sql alchemy model, not auto-generated tables from PostGIS or TIGER + # Handle None names for unnamed constraints + if name is None: + return True if type_ == "table" or name.endswith("_version") or name == "transaction": return name in model_tables return True diff --git a/alembic/versions/3cb924ca51fd_refactor_nma_tables_to_integer_pks.py b/alembic/versions/3cb924ca51fd_refactor_nma_tables_to_integer_pks.py new file mode 100644 index 00000000..e188d634 --- /dev/null +++ b/alembic/versions/3cb924ca51fd_refactor_nma_tables_to_integer_pks.py @@ -0,0 +1,435 @@ +"""refactor_nma_tables_to_integer_pks + +Revision ID: 3cb924ca51fd +Revises: 76e3ae8b99cb +Create Date: 2026-01-28 01:37:56.509497 + +""" +from typing import Sequence, Union + +from alembic import op +import geoalchemy2 +import sqlalchemy as sa +import sqlalchemy_utils +from sqlalchemy.dialects import postgresql + +# revision identifiers, used by Alembic. +revision: str = '3cb924ca51fd' +down_revision: Union[str, Sequence[str], None] = '76e3ae8b99cb' +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + """Upgrade schema. 
+ + Refactor NMA legacy tables from UUID to Integer primary keys: + - Add id (Integer PK with IDENTITY) to 8 NMA tables + - Rename UUID columns with nma_ prefix for audit + - Convert FK references from UUID to Integer + - Make chemistry_sample_info_id NOT NULL for chemistry child tables + """ + # ========================================================================== + # PHASE 1: Drop ALL foreign keys that reference NMA_Chemistry_SampleInfo.SamplePtID + # This must happen BEFORE we can modify NMA_Chemistry_SampleInfo + # ========================================================================== + op.drop_constraint(op.f('NMA_MinorTraceChemistry_chemistry_sample_info_id_fkey'), 'NMA_MinorTraceChemistry', type_='foreignkey') + op.drop_constraint(op.f('NMA_Radionuclides_SamplePtID_fkey'), 'NMA_Radionuclides', type_='foreignkey') + op.drop_constraint(op.f('NMA_MajorChemistry_SamplePtID_fkey'), 'NMA_MajorChemistry', type_='foreignkey') + op.drop_constraint(op.f('NMA_FieldParameters_SamplePtID_fkey'), 'NMA_FieldParameters', type_='foreignkey') + + # ========================================================================== + # PHASE 2: Modify NMA_Chemistry_SampleInfo (parent table) + # ========================================================================== + # Add new columns first + op.add_column('NMA_Chemistry_SampleInfo', sa.Column('id', sa.Integer(), sa.Identity(always=False, start=1), nullable=False)) + op.add_column('NMA_Chemistry_SampleInfo', sa.Column('nma_SamplePtID', sa.UUID(), nullable=True)) + op.add_column('NMA_Chemistry_SampleInfo', sa.Column('nma_WCLab_ID', sa.String(length=18), nullable=True)) + op.add_column('NMA_Chemistry_SampleInfo', sa.Column('nma_SamplePointID', sa.String(length=10), nullable=False)) + op.add_column('NMA_Chemistry_SampleInfo', sa.Column('nma_OBJECTID', sa.Integer(), nullable=True)) + op.add_column('NMA_Chemistry_SampleInfo', sa.Column('nma_LocationId', sa.UUID(), nullable=True)) + + # Drop old PK and create new PK on id + op.drop_constraint('NMA_Chemistry_SampleInfo_pkey', 'NMA_Chemistry_SampleInfo', type_='primary') + op.create_primary_key('NMA_Chemistry_SampleInfo_pkey', 'NMA_Chemistry_SampleInfo', ['id']) + + op.drop_constraint(op.f('NMA_Chemistry_SampleInfo_OBJECTID_key'), 'NMA_Chemistry_SampleInfo', type_='unique') + op.create_unique_constraint(None, 'NMA_Chemistry_SampleInfo', ['nma_SamplePtID']) + op.create_unique_constraint(None, 'NMA_Chemistry_SampleInfo', ['nma_OBJECTID']) + op.drop_column('NMA_Chemistry_SampleInfo', 'SamplePointID') + op.drop_column('NMA_Chemistry_SampleInfo', 'SamplePtID') + op.drop_column('NMA_Chemistry_SampleInfo', 'WCLab_ID') + op.drop_column('NMA_Chemistry_SampleInfo', 'OBJECTID') + op.drop_column('NMA_Chemistry_SampleInfo', 'LocationId') + + # ========================================================================== + # PHASE 3: Modify child tables and create new FKs pointing to NMA_Chemistry_SampleInfo.id + # ========================================================================== + + # --- NMA_FieldParameters --- + op.add_column('NMA_FieldParameters', sa.Column('id', sa.Integer(), sa.Identity(always=False, start=1), nullable=False)) + op.add_column('NMA_FieldParameters', sa.Column('nma_GlobalID', sa.UUID(), nullable=True)) + op.add_column('NMA_FieldParameters', sa.Column('chemistry_sample_info_id', sa.Integer(), nullable=False)) + op.add_column('NMA_FieldParameters', sa.Column('nma_SamplePtID', sa.UUID(), nullable=True)) + op.add_column('NMA_FieldParameters', sa.Column('nma_SamplePointID', sa.String(length=10), 
nullable=True)) + op.add_column('NMA_FieldParameters', sa.Column('nma_OBJECTID', sa.Integer(), nullable=True)) + op.add_column('NMA_FieldParameters', sa.Column('nma_WCLab_ID', sa.String(length=25), nullable=True)) + op.drop_index(op.f('FieldParameters$GlobalID'), table_name='NMA_FieldParameters') + op.drop_index(op.f('FieldParameters$OBJECTID'), table_name='NMA_FieldParameters') + op.drop_index(op.f('FieldParameters$SamplePointID'), table_name='NMA_FieldParameters') + op.drop_index(op.f('FieldParameters$SamplePtID'), table_name='NMA_FieldParameters') + op.drop_index(op.f('FieldParameters$WCLab_ID'), table_name='NMA_FieldParameters') + op.drop_index(op.f('FieldParameters$ChemistrySampleInfoFieldParameters'), table_name='NMA_FieldParameters') + op.create_index('FieldParameters$ChemistrySampleInfoFieldParameters', 'NMA_FieldParameters', ['chemistry_sample_info_id'], unique=False) + op.create_index('FieldParameters$nma_GlobalID', 'NMA_FieldParameters', ['nma_GlobalID'], unique=True) + op.create_index('FieldParameters$nma_OBJECTID', 'NMA_FieldParameters', ['nma_OBJECTID'], unique=True) + op.create_index('FieldParameters$nma_SamplePointID', 'NMA_FieldParameters', ['nma_SamplePointID'], unique=False) + op.create_index('FieldParameters$nma_WCLab_ID', 'NMA_FieldParameters', ['nma_WCLab_ID'], unique=False) + op.create_unique_constraint(None, 'NMA_FieldParameters', ['nma_GlobalID']) + op.create_foreign_key(None, 'NMA_FieldParameters', 'NMA_Chemistry_SampleInfo', ['chemistry_sample_info_id'], ['id'], onupdate='CASCADE', ondelete='CASCADE') + op.drop_column('NMA_FieldParameters', 'SamplePointID') + op.drop_column('NMA_FieldParameters', 'SamplePtID') + op.drop_column('NMA_FieldParameters', 'WCLab_ID') + op.drop_column('NMA_FieldParameters', 'OBJECTID') + op.drop_column('NMA_FieldParameters', 'GlobalID') + + # --- NMA_AssociatedData --- + op.add_column('NMA_AssociatedData', sa.Column('id', sa.Integer(), sa.Identity(always=False, start=1), nullable=False)) + op.add_column('NMA_AssociatedData', sa.Column('nma_AssocID', sa.UUID(), nullable=True)) + op.add_column('NMA_AssociatedData', sa.Column('nma_LocationId', sa.UUID(), nullable=True)) + op.add_column('NMA_AssociatedData', sa.Column('nma_PointID', sa.String(length=10), nullable=True)) + op.add_column('NMA_AssociatedData', sa.Column('nma_OBJECTID', sa.Integer(), nullable=True)) + op.drop_constraint(op.f('AssociatedData$LocationId'), 'NMA_AssociatedData', type_='unique') + op.drop_index(op.f('AssociatedData$PointID'), table_name='NMA_AssociatedData') + op.drop_constraint(op.f('NMA_AssociatedData_OBJECTID_key'), 'NMA_AssociatedData', type_='unique') + op.create_unique_constraint(None, 'NMA_AssociatedData', ['nma_LocationId']) + op.create_unique_constraint(None, 'NMA_AssociatedData', ['nma_AssocID']) + op.create_unique_constraint(None, 'NMA_AssociatedData', ['nma_OBJECTID']) + op.drop_column('NMA_AssociatedData', 'OBJECTID') + op.drop_column('NMA_AssociatedData', 'LocationId') + op.drop_column('NMA_AssociatedData', 'AssocID') + op.drop_column('NMA_AssociatedData', 'PointID') + + # --- NMA_HydraulicsData --- + op.add_column('NMA_HydraulicsData', sa.Column('id', sa.Integer(), sa.Identity(always=False, start=1), nullable=False)) + op.add_column('NMA_HydraulicsData', sa.Column('nma_GlobalID', sa.UUID(), nullable=True)) + op.add_column('NMA_HydraulicsData', sa.Column('nma_WellID', sa.UUID(), nullable=True)) + op.add_column('NMA_HydraulicsData', sa.Column('nma_PointID', sa.String(length=50), nullable=True)) + op.add_column('NMA_HydraulicsData', 
sa.Column('nma_OBJECTID', sa.Integer(), nullable=True)) + op.drop_index(op.f('ix_nma_hydraulicsdata_objectid'), table_name='NMA_HydraulicsData') + op.drop_index(op.f('ix_nma_hydraulicsdata_pointid'), table_name='NMA_HydraulicsData') + op.drop_index(op.f('ix_nma_hydraulicsdata_wellid'), table_name='NMA_HydraulicsData') + op.create_unique_constraint(None, 'NMA_HydraulicsData', ['nma_GlobalID']) + op.create_unique_constraint(None, 'NMA_HydraulicsData', ['nma_OBJECTID']) + op.drop_column('NMA_HydraulicsData', 'WellID') + op.drop_column('NMA_HydraulicsData', 'OBJECTID') + op.drop_column('NMA_HydraulicsData', 'PointID') + op.drop_column('NMA_HydraulicsData', 'GlobalID') + + # --- NMA_MajorChemistry --- + op.add_column('NMA_MajorChemistry', sa.Column('id', sa.Integer(), sa.Identity(always=False, start=1), nullable=False)) + op.add_column('NMA_MajorChemistry', sa.Column('nma_GlobalID', sa.UUID(), nullable=True)) + op.add_column('NMA_MajorChemistry', sa.Column('chemistry_sample_info_id', sa.Integer(), nullable=False)) + op.add_column('NMA_MajorChemistry', sa.Column('nma_SamplePtID', sa.UUID(), nullable=True)) + op.add_column('NMA_MajorChemistry', sa.Column('nma_SamplePointID', sa.String(length=10), nullable=True)) + op.add_column('NMA_MajorChemistry', sa.Column('nma_OBJECTID', sa.Integer(), nullable=True)) + op.add_column('NMA_MajorChemistry', sa.Column('nma_WCLab_ID', sa.String(length=25), nullable=True)) + op.drop_index(op.f('MajorChemistry$AnalysesAgency'), table_name='NMA_MajorChemistry') + op.drop_index(op.f('MajorChemistry$Analyte'), table_name='NMA_MajorChemistry') + op.drop_index(op.f('MajorChemistry$Chemistry SampleInfoMajorChemistry'), table_name='NMA_MajorChemistry') + op.drop_index(op.f('MajorChemistry$SamplePointID'), table_name='NMA_MajorChemistry') + op.drop_index(op.f('MajorChemistry$SamplePointIDAnalyte'), table_name='NMA_MajorChemistry') + op.drop_index(op.f('MajorChemistry$SamplePtID'), table_name='NMA_MajorChemistry') + op.drop_index(op.f('MajorChemistry$WCLab_ID'), table_name='NMA_MajorChemistry') + op.drop_constraint(op.f('NMA_MajorChemistry_OBJECTID_key'), 'NMA_MajorChemistry', type_='unique') + op.create_unique_constraint(None, 'NMA_MajorChemistry', ['nma_GlobalID']) + op.create_unique_constraint(None, 'NMA_MajorChemistry', ['nma_OBJECTID']) + op.create_foreign_key(None, 'NMA_MajorChemistry', 'NMA_Chemistry_SampleInfo', ['chemistry_sample_info_id'], ['id'], ondelete='CASCADE') + op.drop_column('NMA_MajorChemistry', 'SamplePointID') + op.drop_column('NMA_MajorChemistry', 'SamplePtID') + op.drop_column('NMA_MajorChemistry', 'WCLab_ID') + op.drop_column('NMA_MajorChemistry', 'OBJECTID') + op.drop_column('NMA_MajorChemistry', 'GlobalID') + + # --- NMA_MinorTraceChemistry --- + op.add_column('NMA_MinorTraceChemistry', sa.Column('id', sa.Integer(), sa.Identity(always=False, start=1), nullable=False)) + op.add_column('NMA_MinorTraceChemistry', sa.Column('nma_GlobalID', sa.UUID(), nullable=True)) + op.add_column('NMA_MinorTraceChemistry', sa.Column('nma_chemistry_sample_info_uuid', sa.UUID(), nullable=True)) + op.alter_column('NMA_MinorTraceChemistry', 'chemistry_sample_info_id', + existing_type=sa.UUID(), + type_=sa.Integer(), + nullable=False, + postgresql_using='NULL') + op.create_unique_constraint(None, 'NMA_MinorTraceChemistry', ['nma_GlobalID']) + op.create_foreign_key(None, 'NMA_MinorTraceChemistry', 'NMA_Chemistry_SampleInfo', ['chemistry_sample_info_id'], ['id'], ondelete='CASCADE') + op.drop_column('NMA_MinorTraceChemistry', 'GlobalID') + + # --- NMA_Radionuclides --- + 
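+    # NOTE: as with NMA_FieldParameters and NMA_MajorChemistry above, chemistry_sample_info_id
+    # is added NOT NULL with no server default, so this step assumes the table is empty at
+    # migration time (or is reloaded afterwards, presumably by the transfers/ scripts).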
op.add_column('NMA_Radionuclides', sa.Column('id', sa.Integer(), sa.Identity(always=False, start=1), nullable=False)) + op.add_column('NMA_Radionuclides', sa.Column('nma_GlobalID', sa.UUID(), nullable=True)) + op.add_column('NMA_Radionuclides', sa.Column('chemistry_sample_info_id', sa.Integer(), nullable=False)) + op.add_column('NMA_Radionuclides', sa.Column('nma_SamplePtID', sa.UUID(), nullable=True)) + op.add_column('NMA_Radionuclides', sa.Column('nma_SamplePointID', sa.String(length=10), nullable=True)) + op.add_column('NMA_Radionuclides', sa.Column('nma_OBJECTID', sa.Integer(), nullable=True)) + op.add_column('NMA_Radionuclides', sa.Column('nma_WCLab_ID', sa.String(length=25), nullable=True)) + op.drop_constraint(op.f('NMA_Radionuclides_OBJECTID_key'), 'NMA_Radionuclides', type_='unique') + op.drop_index(op.f('Radionuclides$AnalysesAgency'), table_name='NMA_Radionuclides') + op.drop_index(op.f('Radionuclides$Analyte'), table_name='NMA_Radionuclides') + op.drop_index(op.f('Radionuclides$Chemistry SampleInfoRadionuclides'), table_name='NMA_Radionuclides') + op.drop_index(op.f('Radionuclides$SamplePointID'), table_name='NMA_Radionuclides') + op.drop_index(op.f('Radionuclides$SamplePtID'), table_name='NMA_Radionuclides') + op.drop_index(op.f('Radionuclides$WCLab_ID'), table_name='NMA_Radionuclides') + op.create_unique_constraint(None, 'NMA_Radionuclides', ['nma_GlobalID']) + op.create_unique_constraint(None, 'NMA_Radionuclides', ['nma_OBJECTID']) + op.create_foreign_key(None, 'NMA_Radionuclides', 'NMA_Chemistry_SampleInfo', ['chemistry_sample_info_id'], ['id'], ondelete='CASCADE') + op.drop_column('NMA_Radionuclides', 'SamplePointID') + op.drop_column('NMA_Radionuclides', 'SamplePtID') + op.drop_column('NMA_Radionuclides', 'WCLab_ID') + op.drop_column('NMA_Radionuclides', 'OBJECTID') + op.drop_column('NMA_Radionuclides', 'GlobalID') + + # --- NMA_Soil_Rock_Results --- + op.add_column('NMA_Soil_Rock_Results', sa.Column('nma_Point_ID', sa.String(length=255), nullable=True)) + op.drop_index(op.f('Soil_Rock_Results$Point_ID'), table_name='NMA_Soil_Rock_Results') + op.drop_column('NMA_Soil_Rock_Results', 'Point_ID') + + # --- NMA_Stratigraphy --- + op.add_column('NMA_Stratigraphy', sa.Column('id', sa.Integer(), sa.Identity(always=False, start=1), nullable=False)) + op.add_column('NMA_Stratigraphy', sa.Column('nma_GlobalID', sa.UUID(), nullable=True)) + op.add_column('NMA_Stratigraphy', sa.Column('nma_WellID', sa.UUID(), nullable=True)) + op.add_column('NMA_Stratigraphy', sa.Column('nma_PointID', sa.String(length=10), nullable=False)) + op.add_column('NMA_Stratigraphy', sa.Column('nma_OBJECTID', sa.Integer(), nullable=True)) + op.drop_constraint(op.f('NMA_Stratigraphy_OBJECTID_key'), 'NMA_Stratigraphy', type_='unique') + op.drop_index(op.f('ix_nma_stratigraphy_point_id'), table_name='NMA_Stratigraphy') + op.drop_index(op.f('ix_nma_stratigraphy_thing_id'), table_name='NMA_Stratigraphy') + op.create_unique_constraint(None, 'NMA_Stratigraphy', ['nma_GlobalID']) + op.create_unique_constraint(None, 'NMA_Stratigraphy', ['nma_OBJECTID']) + op.drop_column('NMA_Stratigraphy', 'OBJECTID') + op.drop_column('NMA_Stratigraphy', 'WellID') + op.drop_column('NMA_Stratigraphy', 'PointID') + op.drop_column('NMA_Stratigraphy', 'GlobalID') + + # --- Other tables (index/constraint cleanup from autogenerate) --- + op.drop_index(op.f('SurfaceWaterPhotos$PointID'), table_name='NMA_SurfaceWaterPhotos') + op.drop_index(op.f('SurfaceWaterPhotos$SurfaceID'), table_name='NMA_SurfaceWaterPhotos') + 
op.drop_constraint(op.f('uq_nma_pressure_daily_globalid'), 'NMA_WaterLevelsContinuous_Pressure_Daily', type_='unique') + op.drop_index(op.f('WeatherPhotos$PointID'), table_name='NMA_WeatherPhotos') + op.drop_index(op.f('WeatherPhotos$WeatherID'), table_name='NMA_WeatherPhotos') + op.alter_column('NMA_view_NGWMN_Lithology', 'PointID', + existing_type=sa.VARCHAR(length=50), + nullable=False) + op.drop_constraint(op.f('uq_nma_view_ngwmn_lithology_objectid'), 'NMA_view_NGWMN_Lithology', type_='unique') + op.drop_constraint(op.f('uq_nma_view_ngwmn_waterlevels_point_date'), 'NMA_view_NGWMN_WaterLevels', type_='unique') + op.alter_column('NMA_view_NGWMN_WellConstruction', 'PointID', + existing_type=sa.VARCHAR(length=50), + nullable=False) + op.drop_constraint(op.f('uq_nma_view_ngwmn_wellconstruction_point_casing_screen'), 'NMA_view_NGWMN_WellConstruction', type_='unique') + op.alter_column('thing', 'nma_formation_zone', + existing_type=sa.VARCHAR(length=25), + comment='Raw FormationZone value from legacy WellData (NM_Aquifer).', + existing_nullable=True) + op.alter_column('thing_version', 'nma_pk_location', + existing_type=sa.VARCHAR(), + comment='To audit the original NM_Aquifer LocationID if it was transferred over', + existing_nullable=True, + autoincrement=False) + op.alter_column('thing_version', 'nma_formation_zone', + existing_type=sa.VARCHAR(length=25), + comment='Raw FormationZone value from legacy WellData (NM_Aquifer).', + existing_nullable=True, + autoincrement=False) + op.alter_column('transducer_observation', 'nma_waterlevelscontinuous_pressure_created', + existing_type=postgresql.TIMESTAMP(), + type_=sa.DateTime(timezone=True), + existing_nullable=True) + op.alter_column('transducer_observation', 'nma_waterlevelscontinuous_pressure_updated', + existing_type=postgresql.TIMESTAMP(), + type_=sa.DateTime(timezone=True), + existing_nullable=True) + + +def downgrade() -> None: + """Downgrade schema.""" + op.alter_column('transducer_observation', 'nma_waterlevelscontinuous_pressure_updated', + existing_type=sa.DateTime(timezone=True), + type_=postgresql.TIMESTAMP(), + existing_nullable=True) + op.alter_column('transducer_observation', 'nma_waterlevelscontinuous_pressure_created', + existing_type=sa.DateTime(timezone=True), + type_=postgresql.TIMESTAMP(), + existing_nullable=True) + op.alter_column('thing_version', 'nma_formation_zone', + existing_type=sa.VARCHAR(length=25), + comment=None, + existing_comment='Raw FormationZone value from legacy WellData (NM_Aquifer).', + existing_nullable=True, + autoincrement=False) + op.alter_column('thing_version', 'nma_pk_location', + existing_type=sa.VARCHAR(), + comment=None, + existing_comment='To audit the original NM_Aquifer LocationID if it was transferred over', + existing_nullable=True, + autoincrement=False) + op.alter_column('thing', 'nma_formation_zone', + existing_type=sa.VARCHAR(length=25), + comment=None, + existing_comment='Raw FormationZone value from legacy WellData (NM_Aquifer).', + existing_nullable=True) + op.create_unique_constraint(op.f('uq_nma_view_ngwmn_wellconstruction_point_casing_screen'), 'NMA_view_NGWMN_WellConstruction', ['PointID', 'CasingTop', 'ScreenTop'], postgresql_nulls_not_distinct=False) + op.alter_column('NMA_view_NGWMN_WellConstruction', 'PointID', + existing_type=sa.VARCHAR(length=50), + nullable=True) + op.create_unique_constraint(op.f('uq_nma_view_ngwmn_waterlevels_point_date'), 'NMA_view_NGWMN_WaterLevels', ['PointID', 'DateMeasured'], postgresql_nulls_not_distinct=False) + 
op.create_unique_constraint(op.f('uq_nma_view_ngwmn_lithology_objectid'), 'NMA_view_NGWMN_Lithology', ['OBJECTID'], postgresql_nulls_not_distinct=False) + op.alter_column('NMA_view_NGWMN_Lithology', 'PointID', + existing_type=sa.VARCHAR(length=50), + nullable=True) + op.create_index(op.f('WeatherPhotos$WeatherID'), 'NMA_WeatherPhotos', ['WeatherID'], unique=False) + op.create_index(op.f('WeatherPhotos$PointID'), 'NMA_WeatherPhotos', ['PointID'], unique=False) + op.create_unique_constraint(op.f('uq_nma_pressure_daily_globalid'), 'NMA_WaterLevelsContinuous_Pressure_Daily', ['GlobalID'], postgresql_nulls_not_distinct=False) + op.create_index(op.f('SurfaceWaterPhotos$SurfaceID'), 'NMA_SurfaceWaterPhotos', ['SurfaceID'], unique=False) + op.create_index(op.f('SurfaceWaterPhotos$PointID'), 'NMA_SurfaceWaterPhotos', ['PointID'], unique=False) + op.add_column('NMA_Stratigraphy', sa.Column('GlobalID', sa.UUID(), autoincrement=False, nullable=False)) + op.add_column('NMA_Stratigraphy', sa.Column('PointID', sa.VARCHAR(length=10), autoincrement=False, nullable=False)) + op.add_column('NMA_Stratigraphy', sa.Column('WellID', sa.UUID(), autoincrement=False, nullable=True)) + op.add_column('NMA_Stratigraphy', sa.Column('OBJECTID', sa.INTEGER(), autoincrement=False, nullable=True)) + op.drop_constraint(None, 'NMA_Stratigraphy', type_='unique') + op.drop_constraint(None, 'NMA_Stratigraphy', type_='unique') + op.create_index(op.f('ix_nma_stratigraphy_thing_id'), 'NMA_Stratigraphy', ['thing_id'], unique=False) + op.create_index(op.f('ix_nma_stratigraphy_point_id'), 'NMA_Stratigraphy', ['PointID'], unique=False) + op.create_unique_constraint(op.f('NMA_Stratigraphy_OBJECTID_key'), 'NMA_Stratigraphy', ['OBJECTID'], postgresql_nulls_not_distinct=False) + op.drop_column('NMA_Stratigraphy', 'nma_OBJECTID') + op.drop_column('NMA_Stratigraphy', 'nma_PointID') + op.drop_column('NMA_Stratigraphy', 'nma_WellID') + op.drop_column('NMA_Stratigraphy', 'nma_GlobalID') + op.drop_column('NMA_Stratigraphy', 'id') + op.add_column('NMA_Soil_Rock_Results', sa.Column('Point_ID', sa.VARCHAR(length=255), autoincrement=False, nullable=True)) + op.create_index(op.f('Soil_Rock_Results$Point_ID'), 'NMA_Soil_Rock_Results', ['Point_ID'], unique=False) + op.drop_column('NMA_Soil_Rock_Results', 'nma_Point_ID') + op.add_column('NMA_Radionuclides', sa.Column('GlobalID', sa.UUID(), autoincrement=False, nullable=False)) + op.add_column('NMA_Radionuclides', sa.Column('OBJECTID', sa.INTEGER(), autoincrement=False, nullable=True)) + op.add_column('NMA_Radionuclides', sa.Column('WCLab_ID', sa.VARCHAR(length=25), autoincrement=False, nullable=True)) + op.add_column('NMA_Radionuclides', sa.Column('SamplePtID', sa.UUID(), autoincrement=False, nullable=False)) + op.add_column('NMA_Radionuclides', sa.Column('SamplePointID', sa.VARCHAR(length=10), autoincrement=False, nullable=True)) + op.drop_constraint(None, 'NMA_Radionuclides', type_='foreignkey') + op.create_foreign_key(op.f('NMA_Radionuclides_SamplePtID_fkey'), 'NMA_Radionuclides', 'NMA_Chemistry_SampleInfo', ['SamplePtID'], ['SamplePtID'], ondelete='CASCADE') + op.drop_constraint(None, 'NMA_Radionuclides', type_='unique') + op.drop_constraint(None, 'NMA_Radionuclides', type_='unique') + op.create_index(op.f('Radionuclides$WCLab_ID'), 'NMA_Radionuclides', ['WCLab_ID'], unique=False) + op.create_index(op.f('Radionuclides$SamplePtID'), 'NMA_Radionuclides', ['SamplePtID'], unique=False) + op.create_index(op.f('Radionuclides$SamplePointID'), 'NMA_Radionuclides', ['SamplePointID'], unique=False) + 
op.create_index(op.f('Radionuclides$Chemistry SampleInfoRadionuclides'), 'NMA_Radionuclides', ['SamplePtID'], unique=False) + op.create_index(op.f('Radionuclides$Analyte'), 'NMA_Radionuclides', ['Analyte'], unique=False) + op.create_index(op.f('Radionuclides$AnalysesAgency'), 'NMA_Radionuclides', ['AnalysesAgency'], unique=False) + op.create_unique_constraint(op.f('NMA_Radionuclides_OBJECTID_key'), 'NMA_Radionuclides', ['OBJECTID'], postgresql_nulls_not_distinct=False) + op.drop_column('NMA_Radionuclides', 'nma_WCLab_ID') + op.drop_column('NMA_Radionuclides', 'nma_OBJECTID') + op.drop_column('NMA_Radionuclides', 'nma_SamplePointID') + op.drop_column('NMA_Radionuclides', 'nma_SamplePtID') + op.drop_column('NMA_Radionuclides', 'chemistry_sample_info_id') + op.drop_column('NMA_Radionuclides', 'nma_GlobalID') + op.drop_column('NMA_Radionuclides', 'id') + op.add_column('NMA_MinorTraceChemistry', sa.Column('GlobalID', sa.UUID(), autoincrement=False, nullable=False)) + op.drop_constraint(None, 'NMA_MinorTraceChemistry', type_='foreignkey') + op.create_foreign_key(op.f('NMA_MinorTraceChemistry_chemistry_sample_info_id_fkey'), 'NMA_MinorTraceChemistry', 'NMA_Chemistry_SampleInfo', ['chemistry_sample_info_id'], ['SamplePtID'], ondelete='CASCADE') + op.drop_constraint(None, 'NMA_MinorTraceChemistry', type_='unique') + op.alter_column('NMA_MinorTraceChemistry', 'chemistry_sample_info_id', + existing_type=sa.Integer(), + type_=sa.UUID(), + existing_nullable=False) + op.drop_column('NMA_MinorTraceChemistry', 'nma_chemistry_sample_info_uuid') + op.drop_column('NMA_MinorTraceChemistry', 'nma_GlobalID') + op.drop_column('NMA_MinorTraceChemistry', 'id') + op.add_column('NMA_MajorChemistry', sa.Column('GlobalID', sa.UUID(), autoincrement=False, nullable=False)) + op.add_column('NMA_MajorChemistry', sa.Column('OBJECTID', sa.INTEGER(), autoincrement=False, nullable=True)) + op.add_column('NMA_MajorChemistry', sa.Column('WCLab_ID', sa.VARCHAR(length=25), autoincrement=False, nullable=True)) + op.add_column('NMA_MajorChemistry', sa.Column('SamplePtID', sa.UUID(), autoincrement=False, nullable=False)) + op.add_column('NMA_MajorChemistry', sa.Column('SamplePointID', sa.VARCHAR(length=10), autoincrement=False, nullable=True)) + op.drop_constraint(None, 'NMA_MajorChemistry', type_='foreignkey') + op.create_foreign_key(op.f('NMA_MajorChemistry_SamplePtID_fkey'), 'NMA_MajorChemistry', 'NMA_Chemistry_SampleInfo', ['SamplePtID'], ['SamplePtID'], ondelete='CASCADE') + op.drop_constraint(None, 'NMA_MajorChemistry', type_='unique') + op.drop_constraint(None, 'NMA_MajorChemistry', type_='unique') + op.create_unique_constraint(op.f('NMA_MajorChemistry_OBJECTID_key'), 'NMA_MajorChemistry', ['OBJECTID'], postgresql_nulls_not_distinct=False) + op.create_index(op.f('MajorChemistry$WCLab_ID'), 'NMA_MajorChemistry', ['WCLab_ID'], unique=False) + op.create_index(op.f('MajorChemistry$SamplePtID'), 'NMA_MajorChemistry', ['SamplePtID'], unique=False) + op.create_index(op.f('MajorChemistry$SamplePointIDAnalyte'), 'NMA_MajorChemistry', ['SamplePointID', 'Analyte'], unique=False) + op.create_index(op.f('MajorChemistry$SamplePointID'), 'NMA_MajorChemistry', ['SamplePointID'], unique=False) + op.create_index(op.f('MajorChemistry$Chemistry SampleInfoMajorChemistry'), 'NMA_MajorChemistry', ['SamplePtID'], unique=False) + op.create_index(op.f('MajorChemistry$Analyte'), 'NMA_MajorChemistry', ['Analyte'], unique=False) + op.create_index(op.f('MajorChemistry$AnalysesAgency'), 'NMA_MajorChemistry', ['AnalysesAgency'], unique=False) + 
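+    # With the legacy NMA_MajorChemistry columns, constraints, and indexes restored above,
+    # drop the Integer-PK-era nma_* audit columns, the Integer FK, and the surrogate id.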
op.drop_column('NMA_MajorChemistry', 'nma_WCLab_ID') + op.drop_column('NMA_MajorChemistry', 'nma_OBJECTID') + op.drop_column('NMA_MajorChemistry', 'nma_SamplePointID') + op.drop_column('NMA_MajorChemistry', 'nma_SamplePtID') + op.drop_column('NMA_MajorChemistry', 'chemistry_sample_info_id') + op.drop_column('NMA_MajorChemistry', 'nma_GlobalID') + op.drop_column('NMA_MajorChemistry', 'id') + op.add_column('NMA_HydraulicsData', sa.Column('GlobalID', sa.UUID(), autoincrement=False, nullable=False)) + op.add_column('NMA_HydraulicsData', sa.Column('PointID', sa.VARCHAR(length=50), autoincrement=False, nullable=True)) + op.add_column('NMA_HydraulicsData', sa.Column('OBJECTID', sa.INTEGER(), autoincrement=False, nullable=True)) + op.add_column('NMA_HydraulicsData', sa.Column('WellID', sa.UUID(), autoincrement=False, nullable=True)) + op.drop_constraint(None, 'NMA_HydraulicsData', type_='unique') + op.drop_constraint(None, 'NMA_HydraulicsData', type_='unique') + op.create_index(op.f('ix_nma_hydraulicsdata_wellid'), 'NMA_HydraulicsData', ['WellID'], unique=False) + op.create_index(op.f('ix_nma_hydraulicsdata_pointid'), 'NMA_HydraulicsData', ['PointID'], unique=False) + op.create_index(op.f('ix_nma_hydraulicsdata_objectid'), 'NMA_HydraulicsData', ['OBJECTID'], unique=True) + op.drop_column('NMA_HydraulicsData', 'nma_OBJECTID') + op.drop_column('NMA_HydraulicsData', 'nma_PointID') + op.drop_column('NMA_HydraulicsData', 'nma_WellID') + op.drop_column('NMA_HydraulicsData', 'nma_GlobalID') + op.drop_column('NMA_HydraulicsData', 'id') + op.add_column('NMA_FieldParameters', sa.Column('GlobalID', sa.UUID(), autoincrement=False, nullable=False)) + op.add_column('NMA_FieldParameters', sa.Column('OBJECTID', sa.INTEGER(), sa.Identity(always=False, start=1, increment=1, minvalue=1, maxvalue=2147483647, cycle=False, cache=1), autoincrement=True, nullable=False)) + op.add_column('NMA_FieldParameters', sa.Column('WCLab_ID', sa.VARCHAR(length=25), autoincrement=False, nullable=True)) + op.add_column('NMA_FieldParameters', sa.Column('SamplePtID', sa.UUID(), autoincrement=False, nullable=False)) + op.add_column('NMA_FieldParameters', sa.Column('SamplePointID', sa.VARCHAR(length=10), autoincrement=False, nullable=True)) + op.drop_constraint(None, 'NMA_FieldParameters', type_='foreignkey') + op.create_foreign_key(op.f('NMA_FieldParameters_SamplePtID_fkey'), 'NMA_FieldParameters', 'NMA_Chemistry_SampleInfo', ['SamplePtID'], ['SamplePtID'], onupdate='CASCADE', ondelete='CASCADE') + op.drop_constraint(None, 'NMA_FieldParameters', type_='unique') + op.drop_index('FieldParameters$nma_WCLab_ID', table_name='NMA_FieldParameters') + op.drop_index('FieldParameters$nma_SamplePointID', table_name='NMA_FieldParameters') + op.drop_index('FieldParameters$nma_OBJECTID', table_name='NMA_FieldParameters') + op.drop_index('FieldParameters$nma_GlobalID', table_name='NMA_FieldParameters') + op.drop_index('FieldParameters$ChemistrySampleInfoFieldParameters', table_name='NMA_FieldParameters') + op.create_index(op.f('FieldParameters$ChemistrySampleInfoFieldParameters'), 'NMA_FieldParameters', ['SamplePtID'], unique=False) + op.create_index(op.f('FieldParameters$WCLab_ID'), 'NMA_FieldParameters', ['WCLab_ID'], unique=False) + op.create_index(op.f('FieldParameters$SamplePtID'), 'NMA_FieldParameters', ['SamplePtID'], unique=False) + op.create_index(op.f('FieldParameters$SamplePointID'), 'NMA_FieldParameters', ['SamplePointID'], unique=False) + op.create_index(op.f('FieldParameters$OBJECTID'), 'NMA_FieldParameters', ['OBJECTID'], unique=True) + 
op.create_index(op.f('FieldParameters$GlobalID'), 'NMA_FieldParameters', ['GlobalID'], unique=True) + op.drop_column('NMA_FieldParameters', 'nma_WCLab_ID') + op.drop_column('NMA_FieldParameters', 'nma_OBJECTID') + op.drop_column('NMA_FieldParameters', 'nma_SamplePointID') + op.drop_column('NMA_FieldParameters', 'nma_SamplePtID') + op.drop_column('NMA_FieldParameters', 'chemistry_sample_info_id') + op.drop_column('NMA_FieldParameters', 'nma_GlobalID') + op.drop_column('NMA_FieldParameters', 'id') + op.add_column('NMA_Chemistry_SampleInfo', sa.Column('LocationId', sa.UUID(), autoincrement=False, nullable=True)) + op.add_column('NMA_Chemistry_SampleInfo', sa.Column('OBJECTID', sa.INTEGER(), autoincrement=False, nullable=True)) + op.add_column('NMA_Chemistry_SampleInfo', sa.Column('WCLab_ID', sa.VARCHAR(length=18), autoincrement=False, nullable=True)) + op.add_column('NMA_Chemistry_SampleInfo', sa.Column('SamplePtID', sa.UUID(), autoincrement=False, nullable=False)) + op.add_column('NMA_Chemistry_SampleInfo', sa.Column('SamplePointID', sa.VARCHAR(length=10), autoincrement=False, nullable=False)) + op.drop_constraint(None, 'NMA_Chemistry_SampleInfo', type_='unique') + op.drop_constraint(None, 'NMA_Chemistry_SampleInfo', type_='unique') + op.create_unique_constraint(op.f('NMA_Chemistry_SampleInfo_OBJECTID_key'), 'NMA_Chemistry_SampleInfo', ['OBJECTID'], postgresql_nulls_not_distinct=False) + op.drop_column('NMA_Chemistry_SampleInfo', 'nma_LocationId') + op.drop_column('NMA_Chemistry_SampleInfo', 'nma_OBJECTID') + op.drop_column('NMA_Chemistry_SampleInfo', 'nma_SamplePointID') + op.drop_column('NMA_Chemistry_SampleInfo', 'nma_WCLab_ID') + op.drop_column('NMA_Chemistry_SampleInfo', 'nma_SamplePtID') + op.drop_column('NMA_Chemistry_SampleInfo', 'id') + op.add_column('NMA_AssociatedData', sa.Column('PointID', sa.VARCHAR(length=10), autoincrement=False, nullable=True)) + op.add_column('NMA_AssociatedData', sa.Column('AssocID', sa.UUID(), autoincrement=False, nullable=False)) + op.add_column('NMA_AssociatedData', sa.Column('LocationId', sa.UUID(), autoincrement=False, nullable=True)) + op.add_column('NMA_AssociatedData', sa.Column('OBJECTID', sa.INTEGER(), autoincrement=False, nullable=True)) + op.drop_constraint(None, 'NMA_AssociatedData', type_='unique') + op.drop_constraint(None, 'NMA_AssociatedData', type_='unique') + op.drop_constraint(None, 'NMA_AssociatedData', type_='unique') + op.create_unique_constraint(op.f('NMA_AssociatedData_OBJECTID_key'), 'NMA_AssociatedData', ['OBJECTID'], postgresql_nulls_not_distinct=False) + op.create_index(op.f('AssociatedData$PointID'), 'NMA_AssociatedData', ['PointID'], unique=False) + op.create_unique_constraint(op.f('AssociatedData$LocationId'), 'NMA_AssociatedData', ['LocationId'], postgresql_nulls_not_distinct=False) + op.drop_column('NMA_AssociatedData', 'nma_OBJECTID') + op.drop_column('NMA_AssociatedData', 'nma_PointID') + op.drop_column('NMA_AssociatedData', 'nma_LocationId') + op.drop_column('NMA_AssociatedData', 'nma_AssocID') + op.drop_column('NMA_AssociatedData', 'id') diff --git a/tests/integration/test_admin_minor_trace_chemistry.py b/tests/integration/test_admin_minor_trace_chemistry.py index 272256e5..683dd054 100644 --- a/tests/integration/test_admin_minor_trace_chemistry.py +++ b/tests/integration/test_admin_minor_trace_chemistry.py @@ -73,8 +73,8 @@ def minor_trace_chemistry_record(): # Create parent NMA_Chemistry_SampleInfo sample_info = NMA_Chemistry_SampleInfo( - sample_pt_id=uuid.uuid4(), - sample_point_id="INTTEST01", + 
nma_sample_pt_id=uuid.uuid4(), + nma_sample_point_id="INTTEST01", thing_id=thing.id, ) session.add(sample_info) @@ -83,8 +83,8 @@ def minor_trace_chemistry_record(): # Create MinorTraceChemistry record chemistry = NMA_MinorTraceChemistry( - global_id=uuid.uuid4(), - chemistry_sample_info_id=sample_info.sample_pt_id, + nma_global_id=uuid.uuid4(), + chemistry_sample_info_id=sample_info.id, # Integer FK analyte="Arsenic", symbol="As", sample_value=0.005, @@ -135,7 +135,7 @@ class TestMinorTraceChemistryDetailView: def test_detail_view_returns_200(self, admin_client, minor_trace_chemistry_record): """Detail view should return 200 OK for existing record.""" - pk = str(minor_trace_chemistry_record.global_id) + pk = str(minor_trace_chemistry_record.id) # Integer PK response = admin_client.get(f"{ADMIN_BASE_URL}/detail/{pk}") assert response.status_code == 200, ( f"Expected 200, got {response.status_code}. " @@ -146,7 +146,7 @@ def test_detail_view_shows_analyte( self, admin_client, minor_trace_chemistry_record ): """Detail view should display the analyte.""" - pk = str(minor_trace_chemistry_record.global_id) + pk = str(minor_trace_chemistry_record.id) # Integer PK response = admin_client.get(f"{ADMIN_BASE_URL}/detail/{pk}") assert response.status_code == 200 assert "Arsenic" in response.text @@ -155,7 +155,7 @@ def test_detail_view_shows_parent_relationship( self, admin_client, minor_trace_chemistry_record ): """Detail view should display the parent NMA_Chemistry_SampleInfo.""" - pk = str(minor_trace_chemistry_record.global_id) + pk = str(minor_trace_chemistry_record.id) # Integer PK response = admin_client.get(f"{ADMIN_BASE_URL}/detail/{pk}") assert response.status_code == 200 # The parent relationship should be displayed somehow @@ -164,7 +164,7 @@ def test_detail_view_shows_parent_relationship( def test_detail_view_404_for_nonexistent_record(self, admin_client): """Detail view should return 404 for non-existent record.""" - fake_pk = str(uuid.uuid4()) + fake_pk = "999999999" # Integer PK that doesn't exist response = admin_client.get(f"{ADMIN_BASE_URL}/detail/{fake_pk}") assert response.status_code == 404 @@ -184,7 +184,7 @@ def test_create_endpoint_forbidden(self, admin_client): def test_edit_endpoint_forbidden(self, admin_client, minor_trace_chemistry_record): """Edit endpoint should be forbidden for read-only view.""" - pk = str(minor_trace_chemistry_record.global_id) + pk = str(minor_trace_chemistry_record.id) # Integer PK response = admin_client.get(f"{ADMIN_BASE_URL}/edit/{pk}") # Should be 403 or redirect, not 200 assert response.status_code in ( @@ -197,7 +197,7 @@ def test_delete_endpoint_forbidden( self, admin_client, minor_trace_chemistry_record ): """Delete endpoint should be forbidden for read-only view.""" - pk = str(minor_trace_chemistry_record.global_id) + pk = str(minor_trace_chemistry_record.id) # Integer PK response = admin_client.post( f"{ADMIN_BASE_URL}/delete", data={"pks": [pk]}, diff --git a/tests/test_associated_data_legacy.py b/tests/test_associated_data_legacy.py index 6448feca..78a5eb1e 100644 --- a/tests/test_associated_data_legacy.py +++ b/tests/test_associated_data_legacy.py @@ -145,7 +145,8 @@ def test_associated_data_has_integer_pk(): def test_associated_data_nma_assoc_id_is_unique(): """NMA_AssociatedData.nma_assoc_id is UNIQUE.""" - col = NMA_AssociatedData.__table__.c.nma_assoc_id + # Use database column name (nma_AssocID), not Python attribute name (nma_assoc_id) + col = NMA_AssociatedData.__table__.c["nma_AssocID"] assert col.unique is True diff --git 
a/tests/test_chemistry_sampleinfo_legacy.py b/tests/test_chemistry_sampleinfo_legacy.py index b48a2b5c..2b46b352 100644 --- a/tests/test_chemistry_sampleinfo_legacy.py +++ b/tests/test_chemistry_sampleinfo_legacy.py @@ -221,7 +221,8 @@ def test_chemistry_sampleinfo_has_integer_pk(): def test_chemistry_sampleinfo_nma_sample_pt_id_is_unique(): """NMA_Chemistry_SampleInfo.nma_sample_pt_id is UNIQUE.""" - col = NMA_Chemistry_SampleInfo.__table__.c.nma_sample_pt_id + # Use database column name (nma_SamplePtID), not Python attribute name + col = NMA_Chemistry_SampleInfo.__table__.c["nma_SamplePtID"] assert col.unique is True diff --git a/tests/test_field_parameters_legacy.py b/tests/test_field_parameters_legacy.py index 2ad3f9ea..5795a610 100644 --- a/tests/test_field_parameters_legacy.py +++ b/tests/test_field_parameters_legacy.py @@ -383,7 +383,8 @@ def test_field_parameters_has_integer_pk(): def test_field_parameters_nma_global_id_is_unique(): """NMA_FieldParameters.nma_global_id is UNIQUE.""" - col = NMA_FieldParameters.__table__.c.nma_global_id + # Use database column name (nma_GlobalID), not Python attribute name + col = NMA_FieldParameters.__table__.c["nma_GlobalID"] assert col.unique is True diff --git a/tests/test_hydraulics_data_legacy.py b/tests/test_hydraulics_data_legacy.py index 4097195f..37586764 100644 --- a/tests/test_hydraulics_data_legacy.py +++ b/tests/test_hydraulics_data_legacy.py @@ -313,7 +313,7 @@ def test_hydraulics_data_has_integer_pk(): def test_hydraulics_data_nma_global_id_is_unique(): """NMA_HydraulicsData.nma_global_id is UNIQUE.""" - col = NMA_HydraulicsData.__table__.c.nma_global_id + col = NMA_HydraulicsData.__table__.c["nma_GlobalID"] assert col.unique is True diff --git a/tests/test_major_chemistry_legacy.py b/tests/test_major_chemistry_legacy.py index 94d5f037..536d3a23 100644 --- a/tests/test_major_chemistry_legacy.py +++ b/tests/test_major_chemistry_legacy.py @@ -305,7 +305,8 @@ def test_major_chemistry_has_integer_pk(): def test_major_chemistry_nma_global_id_is_unique(): """NMA_MajorChemistry.nma_global_id is UNIQUE.""" - col = NMA_MajorChemistry.__table__.c.nma_global_id + # Use database column name (nma_GlobalID), not Python attribute name + col = NMA_MajorChemistry.__table__.c["nma_GlobalID"] assert col.unique is True diff --git a/tests/test_nma_chemistry_lineage.py b/tests/test_nma_chemistry_lineage.py index 3cef600f..b828fb47 100644 --- a/tests/test_nma_chemistry_lineage.py +++ b/tests/test_nma_chemistry_lineage.py @@ -99,14 +99,17 @@ def test_nma_minor_trace_chemistry_columns(): """ NMA_MinorTraceChemistry should have required columns. 
- Omitted legacy columns: globalid, objectid, ssma_timestamp, - samplepointid, sampleptid, wclab_id + Updated for Integer PK schema: + - id: Integer PK (autoincrement) + - nma_global_id: Legacy GlobalID UUID (UNIQUE) + - chemistry_sample_info_id: Integer FK to NMA_Chemistry_SampleInfo.id """ from db.nma_legacy import NMA_MinorTraceChemistry expected_columns = [ - "global_id", # PK - "chemistry_sample_info_id", # new FK (UUID, not string) + "id", # Integer PK + "nma_global_id", # Legacy UUID + "chemistry_sample_info_id", # Integer FK # from legacy "analyte", "sample_value", @@ -135,16 +138,16 @@ def test_nma_minor_trace_chemistry_save_all_columns(shared_well): well = session.get(Thing, shared_well) sample_info = NMA_Chemistry_SampleInfo( - object_id=_next_object_id(), - sample_pt_id=_next_sample_pt_id(), - sample_point_id=_next_sample_point_id(), + nma_object_id=_next_object_id(), + nma_sample_pt_id=_next_sample_pt_id(), + nma_sample_point_id=_next_sample_point_id(), thing=well, ) session.add(sample_info) session.commit() mtc = NMA_MinorTraceChemistry( - global_id=_next_global_id(), + nma_global_id=_next_global_id(), chemistry_sample_info=sample_info, analyte="As", sample_value=0.015, @@ -163,8 +166,9 @@ def test_nma_minor_trace_chemistry_save_all_columns(shared_well): session.refresh(mtc) # Verify all columns saved - assert mtc.global_id is not None - assert mtc.chemistry_sample_info_id == sample_info.sample_pt_id + assert mtc.id is not None # Integer PK + assert mtc.nma_global_id is not None # Legacy UUID + assert mtc.chemistry_sample_info_id == sample_info.id # Integer FK assert mtc.analyte == "As" assert mtc.sample_value == 0.015 assert mtc.units == "mg/L" @@ -223,9 +227,9 @@ def test_assign_thing_to_sample_info(shared_well): well = session.get(Thing, shared_well) sample_info = NMA_Chemistry_SampleInfo( - object_id=_next_object_id(), - sample_pt_id=_next_sample_pt_id(), - sample_point_id=_next_sample_point_id(), + nma_object_id=_next_object_id(), + nma_sample_pt_id=_next_sample_pt_id(), + nma_sample_point_id=_next_sample_point_id(), thing=well, # OO: assign object ) session.add(sample_info) @@ -248,9 +252,9 @@ def test_append_sample_info_to_thing(shared_well): well = session.get(Thing, shared_well) sample_info = NMA_Chemistry_SampleInfo( - object_id=_next_object_id(), - sample_pt_id=_next_sample_pt_id(), - sample_point_id=_next_sample_point_id(), + nma_object_id=_next_object_id(), + nma_sample_pt_id=_next_sample_pt_id(), + nma_sample_point_id=_next_sample_point_id(), ) well.chemistry_sample_infos.append(sample_info) session.commit() @@ -280,9 +284,9 @@ def test_sample_info_requires_thing(): # Validator raises ValueError before database is even touched with pytest.raises(ValueError, match="requires a parent Thing"): NMA_Chemistry_SampleInfo( - object_id=_next_object_id(), - sample_pt_id=_next_sample_pt_id(), - sample_point_id=_next_sample_point_id(), + nma_object_id=_next_object_id(), + nma_sample_pt_id=_next_sample_pt_id(), + nma_sample_point_id=_next_sample_point_id(), thing_id=None, # Explicit None triggers validator ) @@ -306,9 +310,9 @@ def test_sample_info_minor_trace_chemistries_empty_by_default(shared_well): well = session.get(Thing, shared_well) sample_info = NMA_Chemistry_SampleInfo( - object_id=_next_object_id(), - sample_pt_id=_next_sample_pt_id(), - sample_point_id=_next_sample_point_id(), + nma_object_id=_next_object_id(), + nma_sample_pt_id=_next_sample_pt_id(), + nma_sample_point_id=_next_sample_point_id(), thing=well, ) session.add(sample_info) @@ -330,16 +334,16 @@ def 
test_assign_sample_info_to_mtc(shared_well): well = session.get(Thing, shared_well) sample_info = NMA_Chemistry_SampleInfo( - object_id=_next_object_id(), - sample_pt_id=_next_sample_pt_id(), - sample_point_id=_next_sample_point_id(), + nma_object_id=_next_object_id(), + nma_sample_pt_id=_next_sample_pt_id(), + nma_sample_point_id=_next_sample_point_id(), thing=well, ) session.add(sample_info) session.commit() mtc = NMA_MinorTraceChemistry( - global_id=_next_global_id(), + nma_global_id=_next_global_id(), analyte="As", sample_value=0.01, units="mg/L", @@ -365,16 +369,16 @@ def test_append_mtc_to_sample_info(shared_well): well = session.get(Thing, shared_well) sample_info = NMA_Chemistry_SampleInfo( - object_id=_next_object_id(), - sample_pt_id=_next_sample_pt_id(), - sample_point_id=_next_sample_point_id(), + nma_object_id=_next_object_id(), + nma_sample_pt_id=_next_sample_pt_id(), + nma_sample_point_id=_next_sample_point_id(), thing=well, ) session.add(sample_info) session.commit() mtc = NMA_MinorTraceChemistry( - global_id=_next_global_id(), + nma_global_id=_next_global_id(), analyte="U", sample_value=15.2, units="ug/L", @@ -384,7 +388,7 @@ def test_append_mtc_to_sample_info(shared_well): # Verify bidirectional assert mtc.chemistry_sample_info == sample_info - assert mtc.chemistry_sample_info_id == sample_info.sample_pt_id + assert mtc.chemistry_sample_info_id == sample_info.id # Integer FK session.delete(sample_info) session.commit() @@ -426,16 +430,16 @@ def test_full_lineage_navigation(shared_well): well = session.get(Thing, shared_well) sample_info = NMA_Chemistry_SampleInfo( - object_id=_next_object_id(), - sample_pt_id=_next_sample_pt_id(), - sample_point_id=_next_sample_point_id(), + nma_object_id=_next_object_id(), + nma_sample_pt_id=_next_sample_pt_id(), + nma_sample_point_id=_next_sample_point_id(), thing=well, ) session.add(sample_info) session.commit() mtc = NMA_MinorTraceChemistry( - global_id=_next_global_id(), + nma_global_id=_next_global_id(), analyte="Se", sample_value=0.005, units="mg/L", @@ -460,16 +464,16 @@ def test_reverse_lineage_navigation(shared_well): well = session.get(Thing, shared_well) sample_info = NMA_Chemistry_SampleInfo( - object_id=_next_object_id(), - sample_pt_id=_next_sample_pt_id(), - sample_point_id=_next_sample_point_id(), + nma_object_id=_next_object_id(), + nma_sample_pt_id=_next_sample_pt_id(), + nma_sample_point_id=_next_sample_point_id(), thing=well, ) session.add(sample_info) session.commit() mtc = NMA_MinorTraceChemistry( - global_id=_next_global_id(), + nma_global_id=_next_global_id(), analyte="Pb", sample_value=0.002, units="mg/L", @@ -483,7 +487,7 @@ def test_reverse_lineage_navigation(shared_well): matching = [ si for si in well.chemistry_sample_infos - if si.sample_pt_id == sample_info.sample_pt_id + if si.id == sample_info.id ] assert len(matching) == 1 assert len(matching[0].minor_trace_chemistries) == 1 @@ -505,9 +509,9 @@ def test_cascade_delete_sample_info_deletes_mtc(shared_well): well = session.get(Thing, shared_well) sample_info = NMA_Chemistry_SampleInfo( - object_id=_next_object_id(), - sample_pt_id=_next_sample_pt_id(), - sample_point_id=_next_sample_point_id(), + nma_object_id=_next_object_id(), + nma_sample_pt_id=_next_sample_pt_id(), + nma_sample_point_id=_next_sample_point_id(), thing=well, ) session.add(sample_info) @@ -517,7 +521,7 @@ def test_cascade_delete_sample_info_deletes_mtc(shared_well): for analyte in ["As", "U", "Se", "Pb"]: sample_info.minor_trace_chemistries.append( NMA_MinorTraceChemistry( - 
global_id=_next_global_id(), + nma_global_id=_next_global_id(), analyte=analyte, sample_value=0.01, units="mg/L", @@ -525,7 +529,7 @@ def test_cascade_delete_sample_info_deletes_mtc(shared_well): ) session.commit() - sample_info_id = sample_info.sample_pt_id + sample_info_id = sample_info.id # Integer PK assert ( session.query(NMA_MinorTraceChemistry) .filter_by(chemistry_sample_info_id=sample_info_id) @@ -562,16 +566,16 @@ def test_cascade_delete_thing_deletes_sample_infos(): session.commit() sample_info = NMA_Chemistry_SampleInfo( - object_id=_next_object_id(), - sample_pt_id=_next_sample_pt_id(), - sample_point_id=_next_sample_point_id(), + nma_object_id=_next_object_id(), + nma_sample_pt_id=_next_sample_pt_id(), + nma_sample_point_id=_next_sample_point_id(), thing=test_thing, ) session.add(sample_info) session.commit() # SamplePtID is the PK for NMA_Chemistry_SampleInfo. - sample_info_id = sample_info.sample_pt_id + sample_info_id = sample_info.id # Integer PK # Delete thing session.delete(test_thing) @@ -602,9 +606,9 @@ def test_multiple_sample_infos_per_thing(): for i in range(3): sample_info = NMA_Chemistry_SampleInfo( - object_id=_next_object_id(), - sample_pt_id=_next_sample_pt_id(), - sample_point_id=_next_sample_point_id(), + nma_object_id=_next_object_id(), + nma_sample_pt_id=_next_sample_pt_id(), + nma_sample_point_id=_next_sample_point_id(), thing=test_thing, ) session.add(sample_info) @@ -627,9 +631,9 @@ def test_multiple_mtc_per_sample_info(shared_well): well = session.get(Thing, shared_well) sample_info = NMA_Chemistry_SampleInfo( - object_id=_next_object_id(), - sample_pt_id=_next_sample_pt_id(), - sample_point_id=_next_sample_point_id(), + nma_object_id=_next_object_id(), + nma_sample_pt_id=_next_sample_pt_id(), + nma_sample_point_id=_next_sample_point_id(), thing=well, ) session.add(sample_info) @@ -639,7 +643,7 @@ def test_multiple_mtc_per_sample_info(shared_well): for analyte in analytes: sample_info.minor_trace_chemistries.append( NMA_MinorTraceChemistry( - global_id=_next_global_id(), + nma_global_id=_next_global_id(), analyte=analyte, sample_value=0.01, units="mg/L", diff --git a/tests/test_radionuclides_legacy.py b/tests/test_radionuclides_legacy.py index 74fdf6ca..dae929aa 100644 --- a/tests/test_radionuclides_legacy.py +++ b/tests/test_radionuclides_legacy.py @@ -355,7 +355,8 @@ def test_radionuclides_has_integer_pk(): def test_radionuclides_nma_global_id_is_unique(): """NMA_Radionuclides.nma_global_id is UNIQUE.""" - col = NMA_Radionuclides.__table__.c.nma_global_id + # Use database column name (nma_GlobalID), not Python attribute name + col = NMA_Radionuclides.__table__.c["nma_GlobalID"] assert col.unique is True diff --git a/tests/test_stratigraphy_legacy.py b/tests/test_stratigraphy_legacy.py index 0e4e6966..4a62cf20 100644 --- a/tests/test_stratigraphy_legacy.py +++ b/tests/test_stratigraphy_legacy.py @@ -126,7 +126,8 @@ def test_stratigraphy_has_integer_pk(): def test_stratigraphy_nma_global_id_is_unique(): """NMA_Stratigraphy.nma_global_id is UNIQUE.""" - col = NMA_Stratigraphy.__table__.c.nma_global_id + # Use database column name (nma_GlobalID), not Python attribute name + col = NMA_Stratigraphy.__table__.c["nma_GlobalID"] assert col.unique is True From e19cf0221a5c0dc8be90553ea7aff9f5a240f4ee Mon Sep 17 00:00:00 2001 From: kbighorse Date: Wed, 28 Jan 2026 10:21:14 +0000 Subject: [PATCH 21/22] Formatting changes --- ...51fd_refactor_nma_tables_to_integer_pks.py | 1400 ++++++++++++----- db/nma_legacy.py | 5 +- .../test_well_data_relationships.py | 3 +- 
tests/test_major_chemistry_legacy.py | 3 +- tests/test_nma_chemistry_lineage.py | 6 +- transfers/field_parameters_transfer.py | 15 +- transfers/major_chemistry.py | 15 +- transfers/minor_trace_chemistry_transfer.py | 15 +- transfers/radionuclides.py | 14 +- 9 files changed, 1077 insertions(+), 399 deletions(-) diff --git a/alembic/versions/3cb924ca51fd_refactor_nma_tables_to_integer_pks.py b/alembic/versions/3cb924ca51fd_refactor_nma_tables_to_integer_pks.py index e188d634..fdfb8c55 100644 --- a/alembic/versions/3cb924ca51fd_refactor_nma_tables_to_integer_pks.py +++ b/alembic/versions/3cb924ca51fd_refactor_nma_tables_to_integer_pks.py @@ -5,6 +5,7 @@ Create Date: 2026-01-28 01:37:56.509497 """ + from typing import Sequence, Union from alembic import op @@ -14,8 +15,8 @@ from sqlalchemy.dialects import postgresql # revision identifiers, used by Alembic. -revision: str = '3cb924ca51fd' -down_revision: Union[str, Sequence[str], None] = '76e3ae8b99cb' +revision: str = "3cb924ca51fd" +down_revision: Union[str, Sequence[str], None] = "76e3ae8b99cb" branch_labels: Union[str, Sequence[str], None] = None depends_on: Union[str, Sequence[str], None] = None @@ -33,403 +34,1066 @@ def upgrade() -> None: # PHASE 1: Drop ALL foreign keys that reference NMA_Chemistry_SampleInfo.SamplePtID # This must happen BEFORE we can modify NMA_Chemistry_SampleInfo # ========================================================================== - op.drop_constraint(op.f('NMA_MinorTraceChemistry_chemistry_sample_info_id_fkey'), 'NMA_MinorTraceChemistry', type_='foreignkey') - op.drop_constraint(op.f('NMA_Radionuclides_SamplePtID_fkey'), 'NMA_Radionuclides', type_='foreignkey') - op.drop_constraint(op.f('NMA_MajorChemistry_SamplePtID_fkey'), 'NMA_MajorChemistry', type_='foreignkey') - op.drop_constraint(op.f('NMA_FieldParameters_SamplePtID_fkey'), 'NMA_FieldParameters', type_='foreignkey') + op.drop_constraint( + op.f("NMA_MinorTraceChemistry_chemistry_sample_info_id_fkey"), + "NMA_MinorTraceChemistry", + type_="foreignkey", + ) + op.drop_constraint( + op.f("NMA_Radionuclides_SamplePtID_fkey"), + "NMA_Radionuclides", + type_="foreignkey", + ) + op.drop_constraint( + op.f("NMA_MajorChemistry_SamplePtID_fkey"), + "NMA_MajorChemistry", + type_="foreignkey", + ) + op.drop_constraint( + op.f("NMA_FieldParameters_SamplePtID_fkey"), + "NMA_FieldParameters", + type_="foreignkey", + ) # ========================================================================== # PHASE 2: Modify NMA_Chemistry_SampleInfo (parent table) # ========================================================================== # Add new columns first - op.add_column('NMA_Chemistry_SampleInfo', sa.Column('id', sa.Integer(), sa.Identity(always=False, start=1), nullable=False)) - op.add_column('NMA_Chemistry_SampleInfo', sa.Column('nma_SamplePtID', sa.UUID(), nullable=True)) - op.add_column('NMA_Chemistry_SampleInfo', sa.Column('nma_WCLab_ID', sa.String(length=18), nullable=True)) - op.add_column('NMA_Chemistry_SampleInfo', sa.Column('nma_SamplePointID', sa.String(length=10), nullable=False)) - op.add_column('NMA_Chemistry_SampleInfo', sa.Column('nma_OBJECTID', sa.Integer(), nullable=True)) - op.add_column('NMA_Chemistry_SampleInfo', sa.Column('nma_LocationId', sa.UUID(), nullable=True)) + op.add_column( + "NMA_Chemistry_SampleInfo", + sa.Column( + "id", sa.Integer(), sa.Identity(always=False, start=1), nullable=False + ), + ) + op.add_column( + "NMA_Chemistry_SampleInfo", + sa.Column("nma_SamplePtID", sa.UUID(), nullable=True), + ) + op.add_column( + 
"NMA_Chemistry_SampleInfo", + sa.Column("nma_WCLab_ID", sa.String(length=18), nullable=True), + ) + op.add_column( + "NMA_Chemistry_SampleInfo", + sa.Column("nma_SamplePointID", sa.String(length=10), nullable=False), + ) + op.add_column( + "NMA_Chemistry_SampleInfo", + sa.Column("nma_OBJECTID", sa.Integer(), nullable=True), + ) + op.add_column( + "NMA_Chemistry_SampleInfo", + sa.Column("nma_LocationId", sa.UUID(), nullable=True), + ) # Drop old PK and create new PK on id - op.drop_constraint('NMA_Chemistry_SampleInfo_pkey', 'NMA_Chemistry_SampleInfo', type_='primary') - op.create_primary_key('NMA_Chemistry_SampleInfo_pkey', 'NMA_Chemistry_SampleInfo', ['id']) + op.drop_constraint( + "NMA_Chemistry_SampleInfo_pkey", "NMA_Chemistry_SampleInfo", type_="primary" + ) + op.create_primary_key( + "NMA_Chemistry_SampleInfo_pkey", "NMA_Chemistry_SampleInfo", ["id"] + ) - op.drop_constraint(op.f('NMA_Chemistry_SampleInfo_OBJECTID_key'), 'NMA_Chemistry_SampleInfo', type_='unique') - op.create_unique_constraint(None, 'NMA_Chemistry_SampleInfo', ['nma_SamplePtID']) - op.create_unique_constraint(None, 'NMA_Chemistry_SampleInfo', ['nma_OBJECTID']) - op.drop_column('NMA_Chemistry_SampleInfo', 'SamplePointID') - op.drop_column('NMA_Chemistry_SampleInfo', 'SamplePtID') - op.drop_column('NMA_Chemistry_SampleInfo', 'WCLab_ID') - op.drop_column('NMA_Chemistry_SampleInfo', 'OBJECTID') - op.drop_column('NMA_Chemistry_SampleInfo', 'LocationId') + op.drop_constraint( + op.f("NMA_Chemistry_SampleInfo_OBJECTID_key"), + "NMA_Chemistry_SampleInfo", + type_="unique", + ) + op.create_unique_constraint(None, "NMA_Chemistry_SampleInfo", ["nma_SamplePtID"]) + op.create_unique_constraint(None, "NMA_Chemistry_SampleInfo", ["nma_OBJECTID"]) + op.drop_column("NMA_Chemistry_SampleInfo", "SamplePointID") + op.drop_column("NMA_Chemistry_SampleInfo", "SamplePtID") + op.drop_column("NMA_Chemistry_SampleInfo", "WCLab_ID") + op.drop_column("NMA_Chemistry_SampleInfo", "OBJECTID") + op.drop_column("NMA_Chemistry_SampleInfo", "LocationId") # ========================================================================== # PHASE 3: Modify child tables and create new FKs pointing to NMA_Chemistry_SampleInfo.id # ========================================================================== # --- NMA_FieldParameters --- - op.add_column('NMA_FieldParameters', sa.Column('id', sa.Integer(), sa.Identity(always=False, start=1), nullable=False)) - op.add_column('NMA_FieldParameters', sa.Column('nma_GlobalID', sa.UUID(), nullable=True)) - op.add_column('NMA_FieldParameters', sa.Column('chemistry_sample_info_id', sa.Integer(), nullable=False)) - op.add_column('NMA_FieldParameters', sa.Column('nma_SamplePtID', sa.UUID(), nullable=True)) - op.add_column('NMA_FieldParameters', sa.Column('nma_SamplePointID', sa.String(length=10), nullable=True)) - op.add_column('NMA_FieldParameters', sa.Column('nma_OBJECTID', sa.Integer(), nullable=True)) - op.add_column('NMA_FieldParameters', sa.Column('nma_WCLab_ID', sa.String(length=25), nullable=True)) - op.drop_index(op.f('FieldParameters$GlobalID'), table_name='NMA_FieldParameters') - op.drop_index(op.f('FieldParameters$OBJECTID'), table_name='NMA_FieldParameters') - op.drop_index(op.f('FieldParameters$SamplePointID'), table_name='NMA_FieldParameters') - op.drop_index(op.f('FieldParameters$SamplePtID'), table_name='NMA_FieldParameters') - op.drop_index(op.f('FieldParameters$WCLab_ID'), table_name='NMA_FieldParameters') - op.drop_index(op.f('FieldParameters$ChemistrySampleInfoFieldParameters'), 
table_name='NMA_FieldParameters') - op.create_index('FieldParameters$ChemistrySampleInfoFieldParameters', 'NMA_FieldParameters', ['chemistry_sample_info_id'], unique=False) - op.create_index('FieldParameters$nma_GlobalID', 'NMA_FieldParameters', ['nma_GlobalID'], unique=True) - op.create_index('FieldParameters$nma_OBJECTID', 'NMA_FieldParameters', ['nma_OBJECTID'], unique=True) - op.create_index('FieldParameters$nma_SamplePointID', 'NMA_FieldParameters', ['nma_SamplePointID'], unique=False) - op.create_index('FieldParameters$nma_WCLab_ID', 'NMA_FieldParameters', ['nma_WCLab_ID'], unique=False) - op.create_unique_constraint(None, 'NMA_FieldParameters', ['nma_GlobalID']) - op.create_foreign_key(None, 'NMA_FieldParameters', 'NMA_Chemistry_SampleInfo', ['chemistry_sample_info_id'], ['id'], onupdate='CASCADE', ondelete='CASCADE') - op.drop_column('NMA_FieldParameters', 'SamplePointID') - op.drop_column('NMA_FieldParameters', 'SamplePtID') - op.drop_column('NMA_FieldParameters', 'WCLab_ID') - op.drop_column('NMA_FieldParameters', 'OBJECTID') - op.drop_column('NMA_FieldParameters', 'GlobalID') + op.add_column( + "NMA_FieldParameters", + sa.Column( + "id", sa.Integer(), sa.Identity(always=False, start=1), nullable=False + ), + ) + op.add_column( + "NMA_FieldParameters", sa.Column("nma_GlobalID", sa.UUID(), nullable=True) + ) + op.add_column( + "NMA_FieldParameters", + sa.Column("chemistry_sample_info_id", sa.Integer(), nullable=False), + ) + op.add_column( + "NMA_FieldParameters", sa.Column("nma_SamplePtID", sa.UUID(), nullable=True) + ) + op.add_column( + "NMA_FieldParameters", + sa.Column("nma_SamplePointID", sa.String(length=10), nullable=True), + ) + op.add_column( + "NMA_FieldParameters", sa.Column("nma_OBJECTID", sa.Integer(), nullable=True) + ) + op.add_column( + "NMA_FieldParameters", + sa.Column("nma_WCLab_ID", sa.String(length=25), nullable=True), + ) + op.drop_index(op.f("FieldParameters$GlobalID"), table_name="NMA_FieldParameters") + op.drop_index(op.f("FieldParameters$OBJECTID"), table_name="NMA_FieldParameters") + op.drop_index( + op.f("FieldParameters$SamplePointID"), table_name="NMA_FieldParameters" + ) + op.drop_index(op.f("FieldParameters$SamplePtID"), table_name="NMA_FieldParameters") + op.drop_index(op.f("FieldParameters$WCLab_ID"), table_name="NMA_FieldParameters") + op.drop_index( + op.f("FieldParameters$ChemistrySampleInfoFieldParameters"), + table_name="NMA_FieldParameters", + ) + op.create_index( + "FieldParameters$ChemistrySampleInfoFieldParameters", + "NMA_FieldParameters", + ["chemistry_sample_info_id"], + unique=False, + ) + op.create_index( + "FieldParameters$nma_GlobalID", + "NMA_FieldParameters", + ["nma_GlobalID"], + unique=True, + ) + op.create_index( + "FieldParameters$nma_OBJECTID", + "NMA_FieldParameters", + ["nma_OBJECTID"], + unique=True, + ) + op.create_index( + "FieldParameters$nma_SamplePointID", + "NMA_FieldParameters", + ["nma_SamplePointID"], + unique=False, + ) + op.create_index( + "FieldParameters$nma_WCLab_ID", + "NMA_FieldParameters", + ["nma_WCLab_ID"], + unique=False, + ) + op.create_unique_constraint(None, "NMA_FieldParameters", ["nma_GlobalID"]) + op.create_foreign_key( + None, + "NMA_FieldParameters", + "NMA_Chemistry_SampleInfo", + ["chemistry_sample_info_id"], + ["id"], + onupdate="CASCADE", + ondelete="CASCADE", + ) + op.drop_column("NMA_FieldParameters", "SamplePointID") + op.drop_column("NMA_FieldParameters", "SamplePtID") + op.drop_column("NMA_FieldParameters", "WCLab_ID") + op.drop_column("NMA_FieldParameters", "OBJECTID") + 
op.drop_column("NMA_FieldParameters", "GlobalID") # --- NMA_AssociatedData --- - op.add_column('NMA_AssociatedData', sa.Column('id', sa.Integer(), sa.Identity(always=False, start=1), nullable=False)) - op.add_column('NMA_AssociatedData', sa.Column('nma_AssocID', sa.UUID(), nullable=True)) - op.add_column('NMA_AssociatedData', sa.Column('nma_LocationId', sa.UUID(), nullable=True)) - op.add_column('NMA_AssociatedData', sa.Column('nma_PointID', sa.String(length=10), nullable=True)) - op.add_column('NMA_AssociatedData', sa.Column('nma_OBJECTID', sa.Integer(), nullable=True)) - op.drop_constraint(op.f('AssociatedData$LocationId'), 'NMA_AssociatedData', type_='unique') - op.drop_index(op.f('AssociatedData$PointID'), table_name='NMA_AssociatedData') - op.drop_constraint(op.f('NMA_AssociatedData_OBJECTID_key'), 'NMA_AssociatedData', type_='unique') - op.create_unique_constraint(None, 'NMA_AssociatedData', ['nma_LocationId']) - op.create_unique_constraint(None, 'NMA_AssociatedData', ['nma_AssocID']) - op.create_unique_constraint(None, 'NMA_AssociatedData', ['nma_OBJECTID']) - op.drop_column('NMA_AssociatedData', 'OBJECTID') - op.drop_column('NMA_AssociatedData', 'LocationId') - op.drop_column('NMA_AssociatedData', 'AssocID') - op.drop_column('NMA_AssociatedData', 'PointID') + op.add_column( + "NMA_AssociatedData", + sa.Column( + "id", sa.Integer(), sa.Identity(always=False, start=1), nullable=False + ), + ) + op.add_column( + "NMA_AssociatedData", sa.Column("nma_AssocID", sa.UUID(), nullable=True) + ) + op.add_column( + "NMA_AssociatedData", sa.Column("nma_LocationId", sa.UUID(), nullable=True) + ) + op.add_column( + "NMA_AssociatedData", + sa.Column("nma_PointID", sa.String(length=10), nullable=True), + ) + op.add_column( + "NMA_AssociatedData", sa.Column("nma_OBJECTID", sa.Integer(), nullable=True) + ) + op.drop_constraint( + op.f("AssociatedData$LocationId"), "NMA_AssociatedData", type_="unique" + ) + op.drop_index(op.f("AssociatedData$PointID"), table_name="NMA_AssociatedData") + op.drop_constraint( + op.f("NMA_AssociatedData_OBJECTID_key"), "NMA_AssociatedData", type_="unique" + ) + op.create_unique_constraint(None, "NMA_AssociatedData", ["nma_LocationId"]) + op.create_unique_constraint(None, "NMA_AssociatedData", ["nma_AssocID"]) + op.create_unique_constraint(None, "NMA_AssociatedData", ["nma_OBJECTID"]) + op.drop_column("NMA_AssociatedData", "OBJECTID") + op.drop_column("NMA_AssociatedData", "LocationId") + op.drop_column("NMA_AssociatedData", "AssocID") + op.drop_column("NMA_AssociatedData", "PointID") # --- NMA_HydraulicsData --- - op.add_column('NMA_HydraulicsData', sa.Column('id', sa.Integer(), sa.Identity(always=False, start=1), nullable=False)) - op.add_column('NMA_HydraulicsData', sa.Column('nma_GlobalID', sa.UUID(), nullable=True)) - op.add_column('NMA_HydraulicsData', sa.Column('nma_WellID', sa.UUID(), nullable=True)) - op.add_column('NMA_HydraulicsData', sa.Column('nma_PointID', sa.String(length=50), nullable=True)) - op.add_column('NMA_HydraulicsData', sa.Column('nma_OBJECTID', sa.Integer(), nullable=True)) - op.drop_index(op.f('ix_nma_hydraulicsdata_objectid'), table_name='NMA_HydraulicsData') - op.drop_index(op.f('ix_nma_hydraulicsdata_pointid'), table_name='NMA_HydraulicsData') - op.drop_index(op.f('ix_nma_hydraulicsdata_wellid'), table_name='NMA_HydraulicsData') - op.create_unique_constraint(None, 'NMA_HydraulicsData', ['nma_GlobalID']) - op.create_unique_constraint(None, 'NMA_HydraulicsData', ['nma_OBJECTID']) - op.drop_column('NMA_HydraulicsData', 'WellID') - 
op.drop_column('NMA_HydraulicsData', 'OBJECTID') - op.drop_column('NMA_HydraulicsData', 'PointID') - op.drop_column('NMA_HydraulicsData', 'GlobalID') + op.add_column( + "NMA_HydraulicsData", + sa.Column( + "id", sa.Integer(), sa.Identity(always=False, start=1), nullable=False + ), + ) + op.add_column( + "NMA_HydraulicsData", sa.Column("nma_GlobalID", sa.UUID(), nullable=True) + ) + op.add_column( + "NMA_HydraulicsData", sa.Column("nma_WellID", sa.UUID(), nullable=True) + ) + op.add_column( + "NMA_HydraulicsData", + sa.Column("nma_PointID", sa.String(length=50), nullable=True), + ) + op.add_column( + "NMA_HydraulicsData", sa.Column("nma_OBJECTID", sa.Integer(), nullable=True) + ) + op.drop_index( + op.f("ix_nma_hydraulicsdata_objectid"), table_name="NMA_HydraulicsData" + ) + op.drop_index( + op.f("ix_nma_hydraulicsdata_pointid"), table_name="NMA_HydraulicsData" + ) + op.drop_index(op.f("ix_nma_hydraulicsdata_wellid"), table_name="NMA_HydraulicsData") + op.create_unique_constraint(None, "NMA_HydraulicsData", ["nma_GlobalID"]) + op.create_unique_constraint(None, "NMA_HydraulicsData", ["nma_OBJECTID"]) + op.drop_column("NMA_HydraulicsData", "WellID") + op.drop_column("NMA_HydraulicsData", "OBJECTID") + op.drop_column("NMA_HydraulicsData", "PointID") + op.drop_column("NMA_HydraulicsData", "GlobalID") # --- NMA_MajorChemistry --- - op.add_column('NMA_MajorChemistry', sa.Column('id', sa.Integer(), sa.Identity(always=False, start=1), nullable=False)) - op.add_column('NMA_MajorChemistry', sa.Column('nma_GlobalID', sa.UUID(), nullable=True)) - op.add_column('NMA_MajorChemistry', sa.Column('chemistry_sample_info_id', sa.Integer(), nullable=False)) - op.add_column('NMA_MajorChemistry', sa.Column('nma_SamplePtID', sa.UUID(), nullable=True)) - op.add_column('NMA_MajorChemistry', sa.Column('nma_SamplePointID', sa.String(length=10), nullable=True)) - op.add_column('NMA_MajorChemistry', sa.Column('nma_OBJECTID', sa.Integer(), nullable=True)) - op.add_column('NMA_MajorChemistry', sa.Column('nma_WCLab_ID', sa.String(length=25), nullable=True)) - op.drop_index(op.f('MajorChemistry$AnalysesAgency'), table_name='NMA_MajorChemistry') - op.drop_index(op.f('MajorChemistry$Analyte'), table_name='NMA_MajorChemistry') - op.drop_index(op.f('MajorChemistry$Chemistry SampleInfoMajorChemistry'), table_name='NMA_MajorChemistry') - op.drop_index(op.f('MajorChemistry$SamplePointID'), table_name='NMA_MajorChemistry') - op.drop_index(op.f('MajorChemistry$SamplePointIDAnalyte'), table_name='NMA_MajorChemistry') - op.drop_index(op.f('MajorChemistry$SamplePtID'), table_name='NMA_MajorChemistry') - op.drop_index(op.f('MajorChemistry$WCLab_ID'), table_name='NMA_MajorChemistry') - op.drop_constraint(op.f('NMA_MajorChemistry_OBJECTID_key'), 'NMA_MajorChemistry', type_='unique') - op.create_unique_constraint(None, 'NMA_MajorChemistry', ['nma_GlobalID']) - op.create_unique_constraint(None, 'NMA_MajorChemistry', ['nma_OBJECTID']) - op.create_foreign_key(None, 'NMA_MajorChemistry', 'NMA_Chemistry_SampleInfo', ['chemistry_sample_info_id'], ['id'], ondelete='CASCADE') - op.drop_column('NMA_MajorChemistry', 'SamplePointID') - op.drop_column('NMA_MajorChemistry', 'SamplePtID') - op.drop_column('NMA_MajorChemistry', 'WCLab_ID') - op.drop_column('NMA_MajorChemistry', 'OBJECTID') - op.drop_column('NMA_MajorChemistry', 'GlobalID') + op.add_column( + "NMA_MajorChemistry", + sa.Column( + "id", sa.Integer(), sa.Identity(always=False, start=1), nullable=False + ), + ) + op.add_column( + "NMA_MajorChemistry", sa.Column("nma_GlobalID", sa.UUID(), 
nullable=True) + ) + op.add_column( + "NMA_MajorChemistry", + sa.Column("chemistry_sample_info_id", sa.Integer(), nullable=False), + ) + op.add_column( + "NMA_MajorChemistry", sa.Column("nma_SamplePtID", sa.UUID(), nullable=True) + ) + op.add_column( + "NMA_MajorChemistry", + sa.Column("nma_SamplePointID", sa.String(length=10), nullable=True), + ) + op.add_column( + "NMA_MajorChemistry", sa.Column("nma_OBJECTID", sa.Integer(), nullable=True) + ) + op.add_column( + "NMA_MajorChemistry", + sa.Column("nma_WCLab_ID", sa.String(length=25), nullable=True), + ) + op.drop_index( + op.f("MajorChemistry$AnalysesAgency"), table_name="NMA_MajorChemistry" + ) + op.drop_index(op.f("MajorChemistry$Analyte"), table_name="NMA_MajorChemistry") + op.drop_index( + op.f("MajorChemistry$Chemistry SampleInfoMajorChemistry"), + table_name="NMA_MajorChemistry", + ) + op.drop_index(op.f("MajorChemistry$SamplePointID"), table_name="NMA_MajorChemistry") + op.drop_index( + op.f("MajorChemistry$SamplePointIDAnalyte"), table_name="NMA_MajorChemistry" + ) + op.drop_index(op.f("MajorChemistry$SamplePtID"), table_name="NMA_MajorChemistry") + op.drop_index(op.f("MajorChemistry$WCLab_ID"), table_name="NMA_MajorChemistry") + op.drop_constraint( + op.f("NMA_MajorChemistry_OBJECTID_key"), "NMA_MajorChemistry", type_="unique" + ) + op.create_unique_constraint(None, "NMA_MajorChemistry", ["nma_GlobalID"]) + op.create_unique_constraint(None, "NMA_MajorChemistry", ["nma_OBJECTID"]) + op.create_foreign_key( + None, + "NMA_MajorChemistry", + "NMA_Chemistry_SampleInfo", + ["chemistry_sample_info_id"], + ["id"], + ondelete="CASCADE", + ) + op.drop_column("NMA_MajorChemistry", "SamplePointID") + op.drop_column("NMA_MajorChemistry", "SamplePtID") + op.drop_column("NMA_MajorChemistry", "WCLab_ID") + op.drop_column("NMA_MajorChemistry", "OBJECTID") + op.drop_column("NMA_MajorChemistry", "GlobalID") # --- NMA_MinorTraceChemistry --- - op.add_column('NMA_MinorTraceChemistry', sa.Column('id', sa.Integer(), sa.Identity(always=False, start=1), nullable=False)) - op.add_column('NMA_MinorTraceChemistry', sa.Column('nma_GlobalID', sa.UUID(), nullable=True)) - op.add_column('NMA_MinorTraceChemistry', sa.Column('nma_chemistry_sample_info_uuid', sa.UUID(), nullable=True)) - op.alter_column('NMA_MinorTraceChemistry', 'chemistry_sample_info_id', - existing_type=sa.UUID(), - type_=sa.Integer(), - nullable=False, - postgresql_using='NULL') - op.create_unique_constraint(None, 'NMA_MinorTraceChemistry', ['nma_GlobalID']) - op.create_foreign_key(None, 'NMA_MinorTraceChemistry', 'NMA_Chemistry_SampleInfo', ['chemistry_sample_info_id'], ['id'], ondelete='CASCADE') - op.drop_column('NMA_MinorTraceChemistry', 'GlobalID') + op.add_column( + "NMA_MinorTraceChemistry", + sa.Column( + "id", sa.Integer(), sa.Identity(always=False, start=1), nullable=False + ), + ) + op.add_column( + "NMA_MinorTraceChemistry", sa.Column("nma_GlobalID", sa.UUID(), nullable=True) + ) + op.add_column( + "NMA_MinorTraceChemistry", + sa.Column("nma_chemistry_sample_info_uuid", sa.UUID(), nullable=True), + ) + op.alter_column( + "NMA_MinorTraceChemistry", + "chemistry_sample_info_id", + existing_type=sa.UUID(), + type_=sa.Integer(), + nullable=False, + postgresql_using="NULL", + ) + op.create_unique_constraint(None, "NMA_MinorTraceChemistry", ["nma_GlobalID"]) + op.create_foreign_key( + None, + "NMA_MinorTraceChemistry", + "NMA_Chemistry_SampleInfo", + ["chemistry_sample_info_id"], + ["id"], + ondelete="CASCADE", + ) + op.drop_column("NMA_MinorTraceChemistry", "GlobalID") # --- 
NMA_Radionuclides --- - op.add_column('NMA_Radionuclides', sa.Column('id', sa.Integer(), sa.Identity(always=False, start=1), nullable=False)) - op.add_column('NMA_Radionuclides', sa.Column('nma_GlobalID', sa.UUID(), nullable=True)) - op.add_column('NMA_Radionuclides', sa.Column('chemistry_sample_info_id', sa.Integer(), nullable=False)) - op.add_column('NMA_Radionuclides', sa.Column('nma_SamplePtID', sa.UUID(), nullable=True)) - op.add_column('NMA_Radionuclides', sa.Column('nma_SamplePointID', sa.String(length=10), nullable=True)) - op.add_column('NMA_Radionuclides', sa.Column('nma_OBJECTID', sa.Integer(), nullable=True)) - op.add_column('NMA_Radionuclides', sa.Column('nma_WCLab_ID', sa.String(length=25), nullable=True)) - op.drop_constraint(op.f('NMA_Radionuclides_OBJECTID_key'), 'NMA_Radionuclides', type_='unique') - op.drop_index(op.f('Radionuclides$AnalysesAgency'), table_name='NMA_Radionuclides') - op.drop_index(op.f('Radionuclides$Analyte'), table_name='NMA_Radionuclides') - op.drop_index(op.f('Radionuclides$Chemistry SampleInfoRadionuclides'), table_name='NMA_Radionuclides') - op.drop_index(op.f('Radionuclides$SamplePointID'), table_name='NMA_Radionuclides') - op.drop_index(op.f('Radionuclides$SamplePtID'), table_name='NMA_Radionuclides') - op.drop_index(op.f('Radionuclides$WCLab_ID'), table_name='NMA_Radionuclides') - op.create_unique_constraint(None, 'NMA_Radionuclides', ['nma_GlobalID']) - op.create_unique_constraint(None, 'NMA_Radionuclides', ['nma_OBJECTID']) - op.create_foreign_key(None, 'NMA_Radionuclides', 'NMA_Chemistry_SampleInfo', ['chemistry_sample_info_id'], ['id'], ondelete='CASCADE') - op.drop_column('NMA_Radionuclides', 'SamplePointID') - op.drop_column('NMA_Radionuclides', 'SamplePtID') - op.drop_column('NMA_Radionuclides', 'WCLab_ID') - op.drop_column('NMA_Radionuclides', 'OBJECTID') - op.drop_column('NMA_Radionuclides', 'GlobalID') + op.add_column( + "NMA_Radionuclides", + sa.Column( + "id", sa.Integer(), sa.Identity(always=False, start=1), nullable=False + ), + ) + op.add_column( + "NMA_Radionuclides", sa.Column("nma_GlobalID", sa.UUID(), nullable=True) + ) + op.add_column( + "NMA_Radionuclides", + sa.Column("chemistry_sample_info_id", sa.Integer(), nullable=False), + ) + op.add_column( + "NMA_Radionuclides", sa.Column("nma_SamplePtID", sa.UUID(), nullable=True) + ) + op.add_column( + "NMA_Radionuclides", + sa.Column("nma_SamplePointID", sa.String(length=10), nullable=True), + ) + op.add_column( + "NMA_Radionuclides", sa.Column("nma_OBJECTID", sa.Integer(), nullable=True) + ) + op.add_column( + "NMA_Radionuclides", + sa.Column("nma_WCLab_ID", sa.String(length=25), nullable=True), + ) + op.drop_constraint( + op.f("NMA_Radionuclides_OBJECTID_key"), "NMA_Radionuclides", type_="unique" + ) + op.drop_index(op.f("Radionuclides$AnalysesAgency"), table_name="NMA_Radionuclides") + op.drop_index(op.f("Radionuclides$Analyte"), table_name="NMA_Radionuclides") + op.drop_index( + op.f("Radionuclides$Chemistry SampleInfoRadionuclides"), + table_name="NMA_Radionuclides", + ) + op.drop_index(op.f("Radionuclides$SamplePointID"), table_name="NMA_Radionuclides") + op.drop_index(op.f("Radionuclides$SamplePtID"), table_name="NMA_Radionuclides") + op.drop_index(op.f("Radionuclides$WCLab_ID"), table_name="NMA_Radionuclides") + op.create_unique_constraint(None, "NMA_Radionuclides", ["nma_GlobalID"]) + op.create_unique_constraint(None, "NMA_Radionuclides", ["nma_OBJECTID"]) + op.create_foreign_key( + None, + "NMA_Radionuclides", + "NMA_Chemistry_SampleInfo", + ["chemistry_sample_info_id"], 
+ ["id"], + ondelete="CASCADE", + ) + op.drop_column("NMA_Radionuclides", "SamplePointID") + op.drop_column("NMA_Radionuclides", "SamplePtID") + op.drop_column("NMA_Radionuclides", "WCLab_ID") + op.drop_column("NMA_Radionuclides", "OBJECTID") + op.drop_column("NMA_Radionuclides", "GlobalID") # --- NMA_Soil_Rock_Results --- - op.add_column('NMA_Soil_Rock_Results', sa.Column('nma_Point_ID', sa.String(length=255), nullable=True)) - op.drop_index(op.f('Soil_Rock_Results$Point_ID'), table_name='NMA_Soil_Rock_Results') - op.drop_column('NMA_Soil_Rock_Results', 'Point_ID') + op.add_column( + "NMA_Soil_Rock_Results", + sa.Column("nma_Point_ID", sa.String(length=255), nullable=True), + ) + op.drop_index( + op.f("Soil_Rock_Results$Point_ID"), table_name="NMA_Soil_Rock_Results" + ) + op.drop_column("NMA_Soil_Rock_Results", "Point_ID") # --- NMA_Stratigraphy --- - op.add_column('NMA_Stratigraphy', sa.Column('id', sa.Integer(), sa.Identity(always=False, start=1), nullable=False)) - op.add_column('NMA_Stratigraphy', sa.Column('nma_GlobalID', sa.UUID(), nullable=True)) - op.add_column('NMA_Stratigraphy', sa.Column('nma_WellID', sa.UUID(), nullable=True)) - op.add_column('NMA_Stratigraphy', sa.Column('nma_PointID', sa.String(length=10), nullable=False)) - op.add_column('NMA_Stratigraphy', sa.Column('nma_OBJECTID', sa.Integer(), nullable=True)) - op.drop_constraint(op.f('NMA_Stratigraphy_OBJECTID_key'), 'NMA_Stratigraphy', type_='unique') - op.drop_index(op.f('ix_nma_stratigraphy_point_id'), table_name='NMA_Stratigraphy') - op.drop_index(op.f('ix_nma_stratigraphy_thing_id'), table_name='NMA_Stratigraphy') - op.create_unique_constraint(None, 'NMA_Stratigraphy', ['nma_GlobalID']) - op.create_unique_constraint(None, 'NMA_Stratigraphy', ['nma_OBJECTID']) - op.drop_column('NMA_Stratigraphy', 'OBJECTID') - op.drop_column('NMA_Stratigraphy', 'WellID') - op.drop_column('NMA_Stratigraphy', 'PointID') - op.drop_column('NMA_Stratigraphy', 'GlobalID') + op.add_column( + "NMA_Stratigraphy", + sa.Column( + "id", sa.Integer(), sa.Identity(always=False, start=1), nullable=False + ), + ) + op.add_column( + "NMA_Stratigraphy", sa.Column("nma_GlobalID", sa.UUID(), nullable=True) + ) + op.add_column("NMA_Stratigraphy", sa.Column("nma_WellID", sa.UUID(), nullable=True)) + op.add_column( + "NMA_Stratigraphy", + sa.Column("nma_PointID", sa.String(length=10), nullable=False), + ) + op.add_column( + "NMA_Stratigraphy", sa.Column("nma_OBJECTID", sa.Integer(), nullable=True) + ) + op.drop_constraint( + op.f("NMA_Stratigraphy_OBJECTID_key"), "NMA_Stratigraphy", type_="unique" + ) + op.drop_index(op.f("ix_nma_stratigraphy_point_id"), table_name="NMA_Stratigraphy") + op.drop_index(op.f("ix_nma_stratigraphy_thing_id"), table_name="NMA_Stratigraphy") + op.create_unique_constraint(None, "NMA_Stratigraphy", ["nma_GlobalID"]) + op.create_unique_constraint(None, "NMA_Stratigraphy", ["nma_OBJECTID"]) + op.drop_column("NMA_Stratigraphy", "OBJECTID") + op.drop_column("NMA_Stratigraphy", "WellID") + op.drop_column("NMA_Stratigraphy", "PointID") + op.drop_column("NMA_Stratigraphy", "GlobalID") # --- Other tables (index/constraint cleanup from autogenerate) --- - op.drop_index(op.f('SurfaceWaterPhotos$PointID'), table_name='NMA_SurfaceWaterPhotos') - op.drop_index(op.f('SurfaceWaterPhotos$SurfaceID'), table_name='NMA_SurfaceWaterPhotos') - op.drop_constraint(op.f('uq_nma_pressure_daily_globalid'), 'NMA_WaterLevelsContinuous_Pressure_Daily', type_='unique') - op.drop_index(op.f('WeatherPhotos$PointID'), table_name='NMA_WeatherPhotos') - 
op.drop_index(op.f('WeatherPhotos$WeatherID'), table_name='NMA_WeatherPhotos') - op.alter_column('NMA_view_NGWMN_Lithology', 'PointID', - existing_type=sa.VARCHAR(length=50), - nullable=False) - op.drop_constraint(op.f('uq_nma_view_ngwmn_lithology_objectid'), 'NMA_view_NGWMN_Lithology', type_='unique') - op.drop_constraint(op.f('uq_nma_view_ngwmn_waterlevels_point_date'), 'NMA_view_NGWMN_WaterLevels', type_='unique') - op.alter_column('NMA_view_NGWMN_WellConstruction', 'PointID', - existing_type=sa.VARCHAR(length=50), - nullable=False) - op.drop_constraint(op.f('uq_nma_view_ngwmn_wellconstruction_point_casing_screen'), 'NMA_view_NGWMN_WellConstruction', type_='unique') - op.alter_column('thing', 'nma_formation_zone', - existing_type=sa.VARCHAR(length=25), - comment='Raw FormationZone value from legacy WellData (NM_Aquifer).', - existing_nullable=True) - op.alter_column('thing_version', 'nma_pk_location', - existing_type=sa.VARCHAR(), - comment='To audit the original NM_Aquifer LocationID if it was transferred over', - existing_nullable=True, - autoincrement=False) - op.alter_column('thing_version', 'nma_formation_zone', - existing_type=sa.VARCHAR(length=25), - comment='Raw FormationZone value from legacy WellData (NM_Aquifer).', - existing_nullable=True, - autoincrement=False) - op.alter_column('transducer_observation', 'nma_waterlevelscontinuous_pressure_created', - existing_type=postgresql.TIMESTAMP(), - type_=sa.DateTime(timezone=True), - existing_nullable=True) - op.alter_column('transducer_observation', 'nma_waterlevelscontinuous_pressure_updated', - existing_type=postgresql.TIMESTAMP(), - type_=sa.DateTime(timezone=True), - existing_nullable=True) + op.drop_index( + op.f("SurfaceWaterPhotos$PointID"), table_name="NMA_SurfaceWaterPhotos" + ) + op.drop_index( + op.f("SurfaceWaterPhotos$SurfaceID"), table_name="NMA_SurfaceWaterPhotos" + ) + op.drop_constraint( + op.f("uq_nma_pressure_daily_globalid"), + "NMA_WaterLevelsContinuous_Pressure_Daily", + type_="unique", + ) + op.drop_index(op.f("WeatherPhotos$PointID"), table_name="NMA_WeatherPhotos") + op.drop_index(op.f("WeatherPhotos$WeatherID"), table_name="NMA_WeatherPhotos") + op.alter_column( + "NMA_view_NGWMN_Lithology", + "PointID", + existing_type=sa.VARCHAR(length=50), + nullable=False, + ) + op.drop_constraint( + op.f("uq_nma_view_ngwmn_lithology_objectid"), + "NMA_view_NGWMN_Lithology", + type_="unique", + ) + op.drop_constraint( + op.f("uq_nma_view_ngwmn_waterlevels_point_date"), + "NMA_view_NGWMN_WaterLevels", + type_="unique", + ) + op.alter_column( + "NMA_view_NGWMN_WellConstruction", + "PointID", + existing_type=sa.VARCHAR(length=50), + nullable=False, + ) + op.drop_constraint( + op.f("uq_nma_view_ngwmn_wellconstruction_point_casing_screen"), + "NMA_view_NGWMN_WellConstruction", + type_="unique", + ) + op.alter_column( + "thing", + "nma_formation_zone", + existing_type=sa.VARCHAR(length=25), + comment="Raw FormationZone value from legacy WellData (NM_Aquifer).", + existing_nullable=True, + ) + op.alter_column( + "thing_version", + "nma_pk_location", + existing_type=sa.VARCHAR(), + comment="To audit the original NM_Aquifer LocationID if it was transferred over", + existing_nullable=True, + autoincrement=False, + ) + op.alter_column( + "thing_version", + "nma_formation_zone", + existing_type=sa.VARCHAR(length=25), + comment="Raw FormationZone value from legacy WellData (NM_Aquifer).", + existing_nullable=True, + autoincrement=False, + ) + op.alter_column( + "transducer_observation", + 
"nma_waterlevelscontinuous_pressure_created", + existing_type=postgresql.TIMESTAMP(), + type_=sa.DateTime(timezone=True), + existing_nullable=True, + ) + op.alter_column( + "transducer_observation", + "nma_waterlevelscontinuous_pressure_updated", + existing_type=postgresql.TIMESTAMP(), + type_=sa.DateTime(timezone=True), + existing_nullable=True, + ) def downgrade() -> None: """Downgrade schema.""" - op.alter_column('transducer_observation', 'nma_waterlevelscontinuous_pressure_updated', - existing_type=sa.DateTime(timezone=True), - type_=postgresql.TIMESTAMP(), - existing_nullable=True) - op.alter_column('transducer_observation', 'nma_waterlevelscontinuous_pressure_created', - existing_type=sa.DateTime(timezone=True), - type_=postgresql.TIMESTAMP(), - existing_nullable=True) - op.alter_column('thing_version', 'nma_formation_zone', - existing_type=sa.VARCHAR(length=25), - comment=None, - existing_comment='Raw FormationZone value from legacy WellData (NM_Aquifer).', - existing_nullable=True, - autoincrement=False) - op.alter_column('thing_version', 'nma_pk_location', - existing_type=sa.VARCHAR(), - comment=None, - existing_comment='To audit the original NM_Aquifer LocationID if it was transferred over', - existing_nullable=True, - autoincrement=False) - op.alter_column('thing', 'nma_formation_zone', - existing_type=sa.VARCHAR(length=25), - comment=None, - existing_comment='Raw FormationZone value from legacy WellData (NM_Aquifer).', - existing_nullable=True) - op.create_unique_constraint(op.f('uq_nma_view_ngwmn_wellconstruction_point_casing_screen'), 'NMA_view_NGWMN_WellConstruction', ['PointID', 'CasingTop', 'ScreenTop'], postgresql_nulls_not_distinct=False) - op.alter_column('NMA_view_NGWMN_WellConstruction', 'PointID', - existing_type=sa.VARCHAR(length=50), - nullable=True) - op.create_unique_constraint(op.f('uq_nma_view_ngwmn_waterlevels_point_date'), 'NMA_view_NGWMN_WaterLevels', ['PointID', 'DateMeasured'], postgresql_nulls_not_distinct=False) - op.create_unique_constraint(op.f('uq_nma_view_ngwmn_lithology_objectid'), 'NMA_view_NGWMN_Lithology', ['OBJECTID'], postgresql_nulls_not_distinct=False) - op.alter_column('NMA_view_NGWMN_Lithology', 'PointID', - existing_type=sa.VARCHAR(length=50), - nullable=True) - op.create_index(op.f('WeatherPhotos$WeatherID'), 'NMA_WeatherPhotos', ['WeatherID'], unique=False) - op.create_index(op.f('WeatherPhotos$PointID'), 'NMA_WeatherPhotos', ['PointID'], unique=False) - op.create_unique_constraint(op.f('uq_nma_pressure_daily_globalid'), 'NMA_WaterLevelsContinuous_Pressure_Daily', ['GlobalID'], postgresql_nulls_not_distinct=False) - op.create_index(op.f('SurfaceWaterPhotos$SurfaceID'), 'NMA_SurfaceWaterPhotos', ['SurfaceID'], unique=False) - op.create_index(op.f('SurfaceWaterPhotos$PointID'), 'NMA_SurfaceWaterPhotos', ['PointID'], unique=False) - op.add_column('NMA_Stratigraphy', sa.Column('GlobalID', sa.UUID(), autoincrement=False, nullable=False)) - op.add_column('NMA_Stratigraphy', sa.Column('PointID', sa.VARCHAR(length=10), autoincrement=False, nullable=False)) - op.add_column('NMA_Stratigraphy', sa.Column('WellID', sa.UUID(), autoincrement=False, nullable=True)) - op.add_column('NMA_Stratigraphy', sa.Column('OBJECTID', sa.INTEGER(), autoincrement=False, nullable=True)) - op.drop_constraint(None, 'NMA_Stratigraphy', type_='unique') - op.drop_constraint(None, 'NMA_Stratigraphy', type_='unique') - op.create_index(op.f('ix_nma_stratigraphy_thing_id'), 'NMA_Stratigraphy', ['thing_id'], unique=False) - op.create_index(op.f('ix_nma_stratigraphy_point_id'), 
'NMA_Stratigraphy', ['PointID'], unique=False) - op.create_unique_constraint(op.f('NMA_Stratigraphy_OBJECTID_key'), 'NMA_Stratigraphy', ['OBJECTID'], postgresql_nulls_not_distinct=False) - op.drop_column('NMA_Stratigraphy', 'nma_OBJECTID') - op.drop_column('NMA_Stratigraphy', 'nma_PointID') - op.drop_column('NMA_Stratigraphy', 'nma_WellID') - op.drop_column('NMA_Stratigraphy', 'nma_GlobalID') - op.drop_column('NMA_Stratigraphy', 'id') - op.add_column('NMA_Soil_Rock_Results', sa.Column('Point_ID', sa.VARCHAR(length=255), autoincrement=False, nullable=True)) - op.create_index(op.f('Soil_Rock_Results$Point_ID'), 'NMA_Soil_Rock_Results', ['Point_ID'], unique=False) - op.drop_column('NMA_Soil_Rock_Results', 'nma_Point_ID') - op.add_column('NMA_Radionuclides', sa.Column('GlobalID', sa.UUID(), autoincrement=False, nullable=False)) - op.add_column('NMA_Radionuclides', sa.Column('OBJECTID', sa.INTEGER(), autoincrement=False, nullable=True)) - op.add_column('NMA_Radionuclides', sa.Column('WCLab_ID', sa.VARCHAR(length=25), autoincrement=False, nullable=True)) - op.add_column('NMA_Radionuclides', sa.Column('SamplePtID', sa.UUID(), autoincrement=False, nullable=False)) - op.add_column('NMA_Radionuclides', sa.Column('SamplePointID', sa.VARCHAR(length=10), autoincrement=False, nullable=True)) - op.drop_constraint(None, 'NMA_Radionuclides', type_='foreignkey') - op.create_foreign_key(op.f('NMA_Radionuclides_SamplePtID_fkey'), 'NMA_Radionuclides', 'NMA_Chemistry_SampleInfo', ['SamplePtID'], ['SamplePtID'], ondelete='CASCADE') - op.drop_constraint(None, 'NMA_Radionuclides', type_='unique') - op.drop_constraint(None, 'NMA_Radionuclides', type_='unique') - op.create_index(op.f('Radionuclides$WCLab_ID'), 'NMA_Radionuclides', ['WCLab_ID'], unique=False) - op.create_index(op.f('Radionuclides$SamplePtID'), 'NMA_Radionuclides', ['SamplePtID'], unique=False) - op.create_index(op.f('Radionuclides$SamplePointID'), 'NMA_Radionuclides', ['SamplePointID'], unique=False) - op.create_index(op.f('Radionuclides$Chemistry SampleInfoRadionuclides'), 'NMA_Radionuclides', ['SamplePtID'], unique=False) - op.create_index(op.f('Radionuclides$Analyte'), 'NMA_Radionuclides', ['Analyte'], unique=False) - op.create_index(op.f('Radionuclides$AnalysesAgency'), 'NMA_Radionuclides', ['AnalysesAgency'], unique=False) - op.create_unique_constraint(op.f('NMA_Radionuclides_OBJECTID_key'), 'NMA_Radionuclides', ['OBJECTID'], postgresql_nulls_not_distinct=False) - op.drop_column('NMA_Radionuclides', 'nma_WCLab_ID') - op.drop_column('NMA_Radionuclides', 'nma_OBJECTID') - op.drop_column('NMA_Radionuclides', 'nma_SamplePointID') - op.drop_column('NMA_Radionuclides', 'nma_SamplePtID') - op.drop_column('NMA_Radionuclides', 'chemistry_sample_info_id') - op.drop_column('NMA_Radionuclides', 'nma_GlobalID') - op.drop_column('NMA_Radionuclides', 'id') - op.add_column('NMA_MinorTraceChemistry', sa.Column('GlobalID', sa.UUID(), autoincrement=False, nullable=False)) - op.drop_constraint(None, 'NMA_MinorTraceChemistry', type_='foreignkey') - op.create_foreign_key(op.f('NMA_MinorTraceChemistry_chemistry_sample_info_id_fkey'), 'NMA_MinorTraceChemistry', 'NMA_Chemistry_SampleInfo', ['chemistry_sample_info_id'], ['SamplePtID'], ondelete='CASCADE') - op.drop_constraint(None, 'NMA_MinorTraceChemistry', type_='unique') - op.alter_column('NMA_MinorTraceChemistry', 'chemistry_sample_info_id', - existing_type=sa.Integer(), - type_=sa.UUID(), - existing_nullable=False) - op.drop_column('NMA_MinorTraceChemistry', 'nma_chemistry_sample_info_uuid') - 
op.drop_column('NMA_MinorTraceChemistry', 'nma_GlobalID') - op.drop_column('NMA_MinorTraceChemistry', 'id') - op.add_column('NMA_MajorChemistry', sa.Column('GlobalID', sa.UUID(), autoincrement=False, nullable=False)) - op.add_column('NMA_MajorChemistry', sa.Column('OBJECTID', sa.INTEGER(), autoincrement=False, nullable=True)) - op.add_column('NMA_MajorChemistry', sa.Column('WCLab_ID', sa.VARCHAR(length=25), autoincrement=False, nullable=True)) - op.add_column('NMA_MajorChemistry', sa.Column('SamplePtID', sa.UUID(), autoincrement=False, nullable=False)) - op.add_column('NMA_MajorChemistry', sa.Column('SamplePointID', sa.VARCHAR(length=10), autoincrement=False, nullable=True)) - op.drop_constraint(None, 'NMA_MajorChemistry', type_='foreignkey') - op.create_foreign_key(op.f('NMA_MajorChemistry_SamplePtID_fkey'), 'NMA_MajorChemistry', 'NMA_Chemistry_SampleInfo', ['SamplePtID'], ['SamplePtID'], ondelete='CASCADE') - op.drop_constraint(None, 'NMA_MajorChemistry', type_='unique') - op.drop_constraint(None, 'NMA_MajorChemistry', type_='unique') - op.create_unique_constraint(op.f('NMA_MajorChemistry_OBJECTID_key'), 'NMA_MajorChemistry', ['OBJECTID'], postgresql_nulls_not_distinct=False) - op.create_index(op.f('MajorChemistry$WCLab_ID'), 'NMA_MajorChemistry', ['WCLab_ID'], unique=False) - op.create_index(op.f('MajorChemistry$SamplePtID'), 'NMA_MajorChemistry', ['SamplePtID'], unique=False) - op.create_index(op.f('MajorChemistry$SamplePointIDAnalyte'), 'NMA_MajorChemistry', ['SamplePointID', 'Analyte'], unique=False) - op.create_index(op.f('MajorChemistry$SamplePointID'), 'NMA_MajorChemistry', ['SamplePointID'], unique=False) - op.create_index(op.f('MajorChemistry$Chemistry SampleInfoMajorChemistry'), 'NMA_MajorChemistry', ['SamplePtID'], unique=False) - op.create_index(op.f('MajorChemistry$Analyte'), 'NMA_MajorChemistry', ['Analyte'], unique=False) - op.create_index(op.f('MajorChemistry$AnalysesAgency'), 'NMA_MajorChemistry', ['AnalysesAgency'], unique=False) - op.drop_column('NMA_MajorChemistry', 'nma_WCLab_ID') - op.drop_column('NMA_MajorChemistry', 'nma_OBJECTID') - op.drop_column('NMA_MajorChemistry', 'nma_SamplePointID') - op.drop_column('NMA_MajorChemistry', 'nma_SamplePtID') - op.drop_column('NMA_MajorChemistry', 'chemistry_sample_info_id') - op.drop_column('NMA_MajorChemistry', 'nma_GlobalID') - op.drop_column('NMA_MajorChemistry', 'id') - op.add_column('NMA_HydraulicsData', sa.Column('GlobalID', sa.UUID(), autoincrement=False, nullable=False)) - op.add_column('NMA_HydraulicsData', sa.Column('PointID', sa.VARCHAR(length=50), autoincrement=False, nullable=True)) - op.add_column('NMA_HydraulicsData', sa.Column('OBJECTID', sa.INTEGER(), autoincrement=False, nullable=True)) - op.add_column('NMA_HydraulicsData', sa.Column('WellID', sa.UUID(), autoincrement=False, nullable=True)) - op.drop_constraint(None, 'NMA_HydraulicsData', type_='unique') - op.drop_constraint(None, 'NMA_HydraulicsData', type_='unique') - op.create_index(op.f('ix_nma_hydraulicsdata_wellid'), 'NMA_HydraulicsData', ['WellID'], unique=False) - op.create_index(op.f('ix_nma_hydraulicsdata_pointid'), 'NMA_HydraulicsData', ['PointID'], unique=False) - op.create_index(op.f('ix_nma_hydraulicsdata_objectid'), 'NMA_HydraulicsData', ['OBJECTID'], unique=True) - op.drop_column('NMA_HydraulicsData', 'nma_OBJECTID') - op.drop_column('NMA_HydraulicsData', 'nma_PointID') - op.drop_column('NMA_HydraulicsData', 'nma_WellID') - op.drop_column('NMA_HydraulicsData', 'nma_GlobalID') - op.drop_column('NMA_HydraulicsData', 'id') - 
op.add_column('NMA_FieldParameters', sa.Column('GlobalID', sa.UUID(), autoincrement=False, nullable=False)) - op.add_column('NMA_FieldParameters', sa.Column('OBJECTID', sa.INTEGER(), sa.Identity(always=False, start=1, increment=1, minvalue=1, maxvalue=2147483647, cycle=False, cache=1), autoincrement=True, nullable=False)) - op.add_column('NMA_FieldParameters', sa.Column('WCLab_ID', sa.VARCHAR(length=25), autoincrement=False, nullable=True)) - op.add_column('NMA_FieldParameters', sa.Column('SamplePtID', sa.UUID(), autoincrement=False, nullable=False)) - op.add_column('NMA_FieldParameters', sa.Column('SamplePointID', sa.VARCHAR(length=10), autoincrement=False, nullable=True)) - op.drop_constraint(None, 'NMA_FieldParameters', type_='foreignkey') - op.create_foreign_key(op.f('NMA_FieldParameters_SamplePtID_fkey'), 'NMA_FieldParameters', 'NMA_Chemistry_SampleInfo', ['SamplePtID'], ['SamplePtID'], onupdate='CASCADE', ondelete='CASCADE') - op.drop_constraint(None, 'NMA_FieldParameters', type_='unique') - op.drop_index('FieldParameters$nma_WCLab_ID', table_name='NMA_FieldParameters') - op.drop_index('FieldParameters$nma_SamplePointID', table_name='NMA_FieldParameters') - op.drop_index('FieldParameters$nma_OBJECTID', table_name='NMA_FieldParameters') - op.drop_index('FieldParameters$nma_GlobalID', table_name='NMA_FieldParameters') - op.drop_index('FieldParameters$ChemistrySampleInfoFieldParameters', table_name='NMA_FieldParameters') - op.create_index(op.f('FieldParameters$ChemistrySampleInfoFieldParameters'), 'NMA_FieldParameters', ['SamplePtID'], unique=False) - op.create_index(op.f('FieldParameters$WCLab_ID'), 'NMA_FieldParameters', ['WCLab_ID'], unique=False) - op.create_index(op.f('FieldParameters$SamplePtID'), 'NMA_FieldParameters', ['SamplePtID'], unique=False) - op.create_index(op.f('FieldParameters$SamplePointID'), 'NMA_FieldParameters', ['SamplePointID'], unique=False) - op.create_index(op.f('FieldParameters$OBJECTID'), 'NMA_FieldParameters', ['OBJECTID'], unique=True) - op.create_index(op.f('FieldParameters$GlobalID'), 'NMA_FieldParameters', ['GlobalID'], unique=True) - op.drop_column('NMA_FieldParameters', 'nma_WCLab_ID') - op.drop_column('NMA_FieldParameters', 'nma_OBJECTID') - op.drop_column('NMA_FieldParameters', 'nma_SamplePointID') - op.drop_column('NMA_FieldParameters', 'nma_SamplePtID') - op.drop_column('NMA_FieldParameters', 'chemistry_sample_info_id') - op.drop_column('NMA_FieldParameters', 'nma_GlobalID') - op.drop_column('NMA_FieldParameters', 'id') - op.add_column('NMA_Chemistry_SampleInfo', sa.Column('LocationId', sa.UUID(), autoincrement=False, nullable=True)) - op.add_column('NMA_Chemistry_SampleInfo', sa.Column('OBJECTID', sa.INTEGER(), autoincrement=False, nullable=True)) - op.add_column('NMA_Chemistry_SampleInfo', sa.Column('WCLab_ID', sa.VARCHAR(length=18), autoincrement=False, nullable=True)) - op.add_column('NMA_Chemistry_SampleInfo', sa.Column('SamplePtID', sa.UUID(), autoincrement=False, nullable=False)) - op.add_column('NMA_Chemistry_SampleInfo', sa.Column('SamplePointID', sa.VARCHAR(length=10), autoincrement=False, nullable=False)) - op.drop_constraint(None, 'NMA_Chemistry_SampleInfo', type_='unique') - op.drop_constraint(None, 'NMA_Chemistry_SampleInfo', type_='unique') - op.create_unique_constraint(op.f('NMA_Chemistry_SampleInfo_OBJECTID_key'), 'NMA_Chemistry_SampleInfo', ['OBJECTID'], postgresql_nulls_not_distinct=False) - op.drop_column('NMA_Chemistry_SampleInfo', 'nma_LocationId') - op.drop_column('NMA_Chemistry_SampleInfo', 'nma_OBJECTID') - 
op.drop_column('NMA_Chemistry_SampleInfo', 'nma_SamplePointID') - op.drop_column('NMA_Chemistry_SampleInfo', 'nma_WCLab_ID') - op.drop_column('NMA_Chemistry_SampleInfo', 'nma_SamplePtID') - op.drop_column('NMA_Chemistry_SampleInfo', 'id') - op.add_column('NMA_AssociatedData', sa.Column('PointID', sa.VARCHAR(length=10), autoincrement=False, nullable=True)) - op.add_column('NMA_AssociatedData', sa.Column('AssocID', sa.UUID(), autoincrement=False, nullable=False)) - op.add_column('NMA_AssociatedData', sa.Column('LocationId', sa.UUID(), autoincrement=False, nullable=True)) - op.add_column('NMA_AssociatedData', sa.Column('OBJECTID', sa.INTEGER(), autoincrement=False, nullable=True)) - op.drop_constraint(None, 'NMA_AssociatedData', type_='unique') - op.drop_constraint(None, 'NMA_AssociatedData', type_='unique') - op.drop_constraint(None, 'NMA_AssociatedData', type_='unique') - op.create_unique_constraint(op.f('NMA_AssociatedData_OBJECTID_key'), 'NMA_AssociatedData', ['OBJECTID'], postgresql_nulls_not_distinct=False) - op.create_index(op.f('AssociatedData$PointID'), 'NMA_AssociatedData', ['PointID'], unique=False) - op.create_unique_constraint(op.f('AssociatedData$LocationId'), 'NMA_AssociatedData', ['LocationId'], postgresql_nulls_not_distinct=False) - op.drop_column('NMA_AssociatedData', 'nma_OBJECTID') - op.drop_column('NMA_AssociatedData', 'nma_PointID') - op.drop_column('NMA_AssociatedData', 'nma_LocationId') - op.drop_column('NMA_AssociatedData', 'nma_AssocID') - op.drop_column('NMA_AssociatedData', 'id') + op.alter_column( + "transducer_observation", + "nma_waterlevelscontinuous_pressure_updated", + existing_type=sa.DateTime(timezone=True), + type_=postgresql.TIMESTAMP(), + existing_nullable=True, + ) + op.alter_column( + "transducer_observation", + "nma_waterlevelscontinuous_pressure_created", + existing_type=sa.DateTime(timezone=True), + type_=postgresql.TIMESTAMP(), + existing_nullable=True, + ) + op.alter_column( + "thing_version", + "nma_formation_zone", + existing_type=sa.VARCHAR(length=25), + comment=None, + existing_comment="Raw FormationZone value from legacy WellData (NM_Aquifer).", + existing_nullable=True, + autoincrement=False, + ) + op.alter_column( + "thing_version", + "nma_pk_location", + existing_type=sa.VARCHAR(), + comment=None, + existing_comment="To audit the original NM_Aquifer LocationID if it was transferred over", + existing_nullable=True, + autoincrement=False, + ) + op.alter_column( + "thing", + "nma_formation_zone", + existing_type=sa.VARCHAR(length=25), + comment=None, + existing_comment="Raw FormationZone value from legacy WellData (NM_Aquifer).", + existing_nullable=True, + ) + op.create_unique_constraint( + op.f("uq_nma_view_ngwmn_wellconstruction_point_casing_screen"), + "NMA_view_NGWMN_WellConstruction", + ["PointID", "CasingTop", "ScreenTop"], + postgresql_nulls_not_distinct=False, + ) + op.alter_column( + "NMA_view_NGWMN_WellConstruction", + "PointID", + existing_type=sa.VARCHAR(length=50), + nullable=True, + ) + op.create_unique_constraint( + op.f("uq_nma_view_ngwmn_waterlevels_point_date"), + "NMA_view_NGWMN_WaterLevels", + ["PointID", "DateMeasured"], + postgresql_nulls_not_distinct=False, + ) + op.create_unique_constraint( + op.f("uq_nma_view_ngwmn_lithology_objectid"), + "NMA_view_NGWMN_Lithology", + ["OBJECTID"], + postgresql_nulls_not_distinct=False, + ) + op.alter_column( + "NMA_view_NGWMN_Lithology", + "PointID", + existing_type=sa.VARCHAR(length=50), + nullable=True, + ) + op.create_index( + op.f("WeatherPhotos$WeatherID"), + 
"NMA_WeatherPhotos", + ["WeatherID"], + unique=False, + ) + op.create_index( + op.f("WeatherPhotos$PointID"), "NMA_WeatherPhotos", ["PointID"], unique=False + ) + op.create_unique_constraint( + op.f("uq_nma_pressure_daily_globalid"), + "NMA_WaterLevelsContinuous_Pressure_Daily", + ["GlobalID"], + postgresql_nulls_not_distinct=False, + ) + op.create_index( + op.f("SurfaceWaterPhotos$SurfaceID"), + "NMA_SurfaceWaterPhotos", + ["SurfaceID"], + unique=False, + ) + op.create_index( + op.f("SurfaceWaterPhotos$PointID"), + "NMA_SurfaceWaterPhotos", + ["PointID"], + unique=False, + ) + op.add_column( + "NMA_Stratigraphy", + sa.Column("GlobalID", sa.UUID(), autoincrement=False, nullable=False), + ) + op.add_column( + "NMA_Stratigraphy", + sa.Column( + "PointID", sa.VARCHAR(length=10), autoincrement=False, nullable=False + ), + ) + op.add_column( + "NMA_Stratigraphy", + sa.Column("WellID", sa.UUID(), autoincrement=False, nullable=True), + ) + op.add_column( + "NMA_Stratigraphy", + sa.Column("OBJECTID", sa.INTEGER(), autoincrement=False, nullable=True), + ) + op.drop_constraint(None, "NMA_Stratigraphy", type_="unique") + op.drop_constraint(None, "NMA_Stratigraphy", type_="unique") + op.create_index( + op.f("ix_nma_stratigraphy_thing_id"), + "NMA_Stratigraphy", + ["thing_id"], + unique=False, + ) + op.create_index( + op.f("ix_nma_stratigraphy_point_id"), + "NMA_Stratigraphy", + ["PointID"], + unique=False, + ) + op.create_unique_constraint( + op.f("NMA_Stratigraphy_OBJECTID_key"), + "NMA_Stratigraphy", + ["OBJECTID"], + postgresql_nulls_not_distinct=False, + ) + op.drop_column("NMA_Stratigraphy", "nma_OBJECTID") + op.drop_column("NMA_Stratigraphy", "nma_PointID") + op.drop_column("NMA_Stratigraphy", "nma_WellID") + op.drop_column("NMA_Stratigraphy", "nma_GlobalID") + op.drop_column("NMA_Stratigraphy", "id") + op.add_column( + "NMA_Soil_Rock_Results", + sa.Column( + "Point_ID", sa.VARCHAR(length=255), autoincrement=False, nullable=True + ), + ) + op.create_index( + op.f("Soil_Rock_Results$Point_ID"), + "NMA_Soil_Rock_Results", + ["Point_ID"], + unique=False, + ) + op.drop_column("NMA_Soil_Rock_Results", "nma_Point_ID") + op.add_column( + "NMA_Radionuclides", + sa.Column("GlobalID", sa.UUID(), autoincrement=False, nullable=False), + ) + op.add_column( + "NMA_Radionuclides", + sa.Column("OBJECTID", sa.INTEGER(), autoincrement=False, nullable=True), + ) + op.add_column( + "NMA_Radionuclides", + sa.Column( + "WCLab_ID", sa.VARCHAR(length=25), autoincrement=False, nullable=True + ), + ) + op.add_column( + "NMA_Radionuclides", + sa.Column("SamplePtID", sa.UUID(), autoincrement=False, nullable=False), + ) + op.add_column( + "NMA_Radionuclides", + sa.Column( + "SamplePointID", sa.VARCHAR(length=10), autoincrement=False, nullable=True + ), + ) + op.drop_constraint(None, "NMA_Radionuclides", type_="foreignkey") + op.create_foreign_key( + op.f("NMA_Radionuclides_SamplePtID_fkey"), + "NMA_Radionuclides", + "NMA_Chemistry_SampleInfo", + ["SamplePtID"], + ["SamplePtID"], + ondelete="CASCADE", + ) + op.drop_constraint(None, "NMA_Radionuclides", type_="unique") + op.drop_constraint(None, "NMA_Radionuclides", type_="unique") + op.create_index( + op.f("Radionuclides$WCLab_ID"), "NMA_Radionuclides", ["WCLab_ID"], unique=False + ) + op.create_index( + op.f("Radionuclides$SamplePtID"), + "NMA_Radionuclides", + ["SamplePtID"], + unique=False, + ) + op.create_index( + op.f("Radionuclides$SamplePointID"), + "NMA_Radionuclides", + ["SamplePointID"], + unique=False, + ) + op.create_index( + op.f("Radionuclides$Chemistry 
SampleInfoRadionuclides"), + "NMA_Radionuclides", + ["SamplePtID"], + unique=False, + ) + op.create_index( + op.f("Radionuclides$Analyte"), "NMA_Radionuclides", ["Analyte"], unique=False + ) + op.create_index( + op.f("Radionuclides$AnalysesAgency"), + "NMA_Radionuclides", + ["AnalysesAgency"], + unique=False, + ) + op.create_unique_constraint( + op.f("NMA_Radionuclides_OBJECTID_key"), + "NMA_Radionuclides", + ["OBJECTID"], + postgresql_nulls_not_distinct=False, + ) + op.drop_column("NMA_Radionuclides", "nma_WCLab_ID") + op.drop_column("NMA_Radionuclides", "nma_OBJECTID") + op.drop_column("NMA_Radionuclides", "nma_SamplePointID") + op.drop_column("NMA_Radionuclides", "nma_SamplePtID") + op.drop_column("NMA_Radionuclides", "chemistry_sample_info_id") + op.drop_column("NMA_Radionuclides", "nma_GlobalID") + op.drop_column("NMA_Radionuclides", "id") + op.add_column( + "NMA_MinorTraceChemistry", + sa.Column("GlobalID", sa.UUID(), autoincrement=False, nullable=False), + ) + op.drop_constraint(None, "NMA_MinorTraceChemistry", type_="foreignkey") + op.create_foreign_key( + op.f("NMA_MinorTraceChemistry_chemistry_sample_info_id_fkey"), + "NMA_MinorTraceChemistry", + "NMA_Chemistry_SampleInfo", + ["chemistry_sample_info_id"], + ["SamplePtID"], + ondelete="CASCADE", + ) + op.drop_constraint(None, "NMA_MinorTraceChemistry", type_="unique") + op.alter_column( + "NMA_MinorTraceChemistry", + "chemistry_sample_info_id", + existing_type=sa.Integer(), + type_=sa.UUID(), + existing_nullable=False, + ) + op.drop_column("NMA_MinorTraceChemistry", "nma_chemistry_sample_info_uuid") + op.drop_column("NMA_MinorTraceChemistry", "nma_GlobalID") + op.drop_column("NMA_MinorTraceChemistry", "id") + op.add_column( + "NMA_MajorChemistry", + sa.Column("GlobalID", sa.UUID(), autoincrement=False, nullable=False), + ) + op.add_column( + "NMA_MajorChemistry", + sa.Column("OBJECTID", sa.INTEGER(), autoincrement=False, nullable=True), + ) + op.add_column( + "NMA_MajorChemistry", + sa.Column( + "WCLab_ID", sa.VARCHAR(length=25), autoincrement=False, nullable=True + ), + ) + op.add_column( + "NMA_MajorChemistry", + sa.Column("SamplePtID", sa.UUID(), autoincrement=False, nullable=False), + ) + op.add_column( + "NMA_MajorChemistry", + sa.Column( + "SamplePointID", sa.VARCHAR(length=10), autoincrement=False, nullable=True + ), + ) + op.drop_constraint(None, "NMA_MajorChemistry", type_="foreignkey") + op.create_foreign_key( + op.f("NMA_MajorChemistry_SamplePtID_fkey"), + "NMA_MajorChemistry", + "NMA_Chemistry_SampleInfo", + ["SamplePtID"], + ["SamplePtID"], + ondelete="CASCADE", + ) + op.drop_constraint(None, "NMA_MajorChemistry", type_="unique") + op.drop_constraint(None, "NMA_MajorChemistry", type_="unique") + op.create_unique_constraint( + op.f("NMA_MajorChemistry_OBJECTID_key"), + "NMA_MajorChemistry", + ["OBJECTID"], + postgresql_nulls_not_distinct=False, + ) + op.create_index( + op.f("MajorChemistry$WCLab_ID"), + "NMA_MajorChemistry", + ["WCLab_ID"], + unique=False, + ) + op.create_index( + op.f("MajorChemistry$SamplePtID"), + "NMA_MajorChemistry", + ["SamplePtID"], + unique=False, + ) + op.create_index( + op.f("MajorChemistry$SamplePointIDAnalyte"), + "NMA_MajorChemistry", + ["SamplePointID", "Analyte"], + unique=False, + ) + op.create_index( + op.f("MajorChemistry$SamplePointID"), + "NMA_MajorChemistry", + ["SamplePointID"], + unique=False, + ) + op.create_index( + op.f("MajorChemistry$Chemistry SampleInfoMajorChemistry"), + "NMA_MajorChemistry", + ["SamplePtID"], + unique=False, + ) + op.create_index( + 
op.f("MajorChemistry$Analyte"), "NMA_MajorChemistry", ["Analyte"], unique=False + ) + op.create_index( + op.f("MajorChemistry$AnalysesAgency"), + "NMA_MajorChemistry", + ["AnalysesAgency"], + unique=False, + ) + op.drop_column("NMA_MajorChemistry", "nma_WCLab_ID") + op.drop_column("NMA_MajorChemistry", "nma_OBJECTID") + op.drop_column("NMA_MajorChemistry", "nma_SamplePointID") + op.drop_column("NMA_MajorChemistry", "nma_SamplePtID") + op.drop_column("NMA_MajorChemistry", "chemistry_sample_info_id") + op.drop_column("NMA_MajorChemistry", "nma_GlobalID") + op.drop_column("NMA_MajorChemistry", "id") + op.add_column( + "NMA_HydraulicsData", + sa.Column("GlobalID", sa.UUID(), autoincrement=False, nullable=False), + ) + op.add_column( + "NMA_HydraulicsData", + sa.Column("PointID", sa.VARCHAR(length=50), autoincrement=False, nullable=True), + ) + op.add_column( + "NMA_HydraulicsData", + sa.Column("OBJECTID", sa.INTEGER(), autoincrement=False, nullable=True), + ) + op.add_column( + "NMA_HydraulicsData", + sa.Column("WellID", sa.UUID(), autoincrement=False, nullable=True), + ) + op.drop_constraint(None, "NMA_HydraulicsData", type_="unique") + op.drop_constraint(None, "NMA_HydraulicsData", type_="unique") + op.create_index( + op.f("ix_nma_hydraulicsdata_wellid"), + "NMA_HydraulicsData", + ["WellID"], + unique=False, + ) + op.create_index( + op.f("ix_nma_hydraulicsdata_pointid"), + "NMA_HydraulicsData", + ["PointID"], + unique=False, + ) + op.create_index( + op.f("ix_nma_hydraulicsdata_objectid"), + "NMA_HydraulicsData", + ["OBJECTID"], + unique=True, + ) + op.drop_column("NMA_HydraulicsData", "nma_OBJECTID") + op.drop_column("NMA_HydraulicsData", "nma_PointID") + op.drop_column("NMA_HydraulicsData", "nma_WellID") + op.drop_column("NMA_HydraulicsData", "nma_GlobalID") + op.drop_column("NMA_HydraulicsData", "id") + op.add_column( + "NMA_FieldParameters", + sa.Column("GlobalID", sa.UUID(), autoincrement=False, nullable=False), + ) + op.add_column( + "NMA_FieldParameters", + sa.Column( + "OBJECTID", + sa.INTEGER(), + sa.Identity( + always=False, + start=1, + increment=1, + minvalue=1, + maxvalue=2147483647, + cycle=False, + cache=1, + ), + autoincrement=True, + nullable=False, + ), + ) + op.add_column( + "NMA_FieldParameters", + sa.Column( + "WCLab_ID", sa.VARCHAR(length=25), autoincrement=False, nullable=True + ), + ) + op.add_column( + "NMA_FieldParameters", + sa.Column("SamplePtID", sa.UUID(), autoincrement=False, nullable=False), + ) + op.add_column( + "NMA_FieldParameters", + sa.Column( + "SamplePointID", sa.VARCHAR(length=10), autoincrement=False, nullable=True + ), + ) + op.drop_constraint(None, "NMA_FieldParameters", type_="foreignkey") + op.create_foreign_key( + op.f("NMA_FieldParameters_SamplePtID_fkey"), + "NMA_FieldParameters", + "NMA_Chemistry_SampleInfo", + ["SamplePtID"], + ["SamplePtID"], + onupdate="CASCADE", + ondelete="CASCADE", + ) + op.drop_constraint(None, "NMA_FieldParameters", type_="unique") + op.drop_index("FieldParameters$nma_WCLab_ID", table_name="NMA_FieldParameters") + op.drop_index("FieldParameters$nma_SamplePointID", table_name="NMA_FieldParameters") + op.drop_index("FieldParameters$nma_OBJECTID", table_name="NMA_FieldParameters") + op.drop_index("FieldParameters$nma_GlobalID", table_name="NMA_FieldParameters") + op.drop_index( + "FieldParameters$ChemistrySampleInfoFieldParameters", + table_name="NMA_FieldParameters", + ) + op.create_index( + op.f("FieldParameters$ChemistrySampleInfoFieldParameters"), + "NMA_FieldParameters", + ["SamplePtID"], + unique=False, + ) + 
op.create_index( + op.f("FieldParameters$WCLab_ID"), + "NMA_FieldParameters", + ["WCLab_ID"], + unique=False, + ) + op.create_index( + op.f("FieldParameters$SamplePtID"), + "NMA_FieldParameters", + ["SamplePtID"], + unique=False, + ) + op.create_index( + op.f("FieldParameters$SamplePointID"), + "NMA_FieldParameters", + ["SamplePointID"], + unique=False, + ) + op.create_index( + op.f("FieldParameters$OBJECTID"), + "NMA_FieldParameters", + ["OBJECTID"], + unique=True, + ) + op.create_index( + op.f("FieldParameters$GlobalID"), + "NMA_FieldParameters", + ["GlobalID"], + unique=True, + ) + op.drop_column("NMA_FieldParameters", "nma_WCLab_ID") + op.drop_column("NMA_FieldParameters", "nma_OBJECTID") + op.drop_column("NMA_FieldParameters", "nma_SamplePointID") + op.drop_column("NMA_FieldParameters", "nma_SamplePtID") + op.drop_column("NMA_FieldParameters", "chemistry_sample_info_id") + op.drop_column("NMA_FieldParameters", "nma_GlobalID") + op.drop_column("NMA_FieldParameters", "id") + op.add_column( + "NMA_Chemistry_SampleInfo", + sa.Column("LocationId", sa.UUID(), autoincrement=False, nullable=True), + ) + op.add_column( + "NMA_Chemistry_SampleInfo", + sa.Column("OBJECTID", sa.INTEGER(), autoincrement=False, nullable=True), + ) + op.add_column( + "NMA_Chemistry_SampleInfo", + sa.Column( + "WCLab_ID", sa.VARCHAR(length=18), autoincrement=False, nullable=True + ), + ) + op.add_column( + "NMA_Chemistry_SampleInfo", + sa.Column("SamplePtID", sa.UUID(), autoincrement=False, nullable=False), + ) + op.add_column( + "NMA_Chemistry_SampleInfo", + sa.Column( + "SamplePointID", sa.VARCHAR(length=10), autoincrement=False, nullable=False + ), + ) + op.drop_constraint(None, "NMA_Chemistry_SampleInfo", type_="unique") + op.drop_constraint(None, "NMA_Chemistry_SampleInfo", type_="unique") + op.create_unique_constraint( + op.f("NMA_Chemistry_SampleInfo_OBJECTID_key"), + "NMA_Chemistry_SampleInfo", + ["OBJECTID"], + postgresql_nulls_not_distinct=False, + ) + op.drop_column("NMA_Chemistry_SampleInfo", "nma_LocationId") + op.drop_column("NMA_Chemistry_SampleInfo", "nma_OBJECTID") + op.drop_column("NMA_Chemistry_SampleInfo", "nma_SamplePointID") + op.drop_column("NMA_Chemistry_SampleInfo", "nma_WCLab_ID") + op.drop_column("NMA_Chemistry_SampleInfo", "nma_SamplePtID") + op.drop_column("NMA_Chemistry_SampleInfo", "id") + op.add_column( + "NMA_AssociatedData", + sa.Column("PointID", sa.VARCHAR(length=10), autoincrement=False, nullable=True), + ) + op.add_column( + "NMA_AssociatedData", + sa.Column("AssocID", sa.UUID(), autoincrement=False, nullable=False), + ) + op.add_column( + "NMA_AssociatedData", + sa.Column("LocationId", sa.UUID(), autoincrement=False, nullable=True), + ) + op.add_column( + "NMA_AssociatedData", + sa.Column("OBJECTID", sa.INTEGER(), autoincrement=False, nullable=True), + ) + op.drop_constraint(None, "NMA_AssociatedData", type_="unique") + op.drop_constraint(None, "NMA_AssociatedData", type_="unique") + op.drop_constraint(None, "NMA_AssociatedData", type_="unique") + op.create_unique_constraint( + op.f("NMA_AssociatedData_OBJECTID_key"), + "NMA_AssociatedData", + ["OBJECTID"], + postgresql_nulls_not_distinct=False, + ) + op.create_index( + op.f("AssociatedData$PointID"), "NMA_AssociatedData", ["PointID"], unique=False + ) + op.create_unique_constraint( + op.f("AssociatedData$LocationId"), + "NMA_AssociatedData", + ["LocationId"], + postgresql_nulls_not_distinct=False, + ) + op.drop_column("NMA_AssociatedData", "nma_OBJECTID") + op.drop_column("NMA_AssociatedData", "nma_PointID") + 
op.drop_column("NMA_AssociatedData", "nma_LocationId") + op.drop_column("NMA_AssociatedData", "nma_AssocID") + op.drop_column("NMA_AssociatedData", "id") diff --git a/db/nma_legacy.py b/db/nma_legacy.py index dbe66740..36034f32 100644 --- a/db/nma_legacy.py +++ b/db/nma_legacy.py @@ -872,7 +872,10 @@ class NMA_FieldParameters(Base): __table_args__ = ( # Explicit Indexes (updated for new column names) Index("FieldParameters$AnalysesAgency", "AnalysesAgency"), - Index("FieldParameters$ChemistrySampleInfoFieldParameters", "chemistry_sample_info_id"), + Index( + "FieldParameters$ChemistrySampleInfoFieldParameters", + "chemistry_sample_info_id", + ), Index("FieldParameters$FieldParameter", "FieldParameter"), Index("FieldParameters$nma_SamplePointID", "nma_SamplePointID"), Index("FieldParameters$nma_WCLab_ID", "nma_WCLab_ID"), diff --git a/tests/integration/test_well_data_relationships.py b/tests/integration/test_well_data_relationships.py index b1ae4878..99fceadd 100644 --- a/tests/integration/test_well_data_relationships.py +++ b/tests/integration/test_well_data_relationships.py @@ -297,7 +297,8 @@ def test_well_navigates_to_chemistry_samples(self, well_for_relationships): assert hasattr(well, "chemistry_sample_infos") assert len(well.chemistry_sample_infos) >= 1 assert any( - s.nma_sample_point_id == "NAVCHEM01" for s in well.chemistry_sample_infos + s.nma_sample_point_id == "NAVCHEM01" + for s in well.chemistry_sample_infos ) def test_well_navigates_to_hydraulics_data(self, well_for_relationships): diff --git a/tests/test_major_chemistry_legacy.py b/tests/test_major_chemistry_legacy.py index 536d3a23..a745ce24 100644 --- a/tests/test_major_chemistry_legacy.py +++ b/tests/test_major_chemistry_legacy.py @@ -177,7 +177,8 @@ def test_query_major_chemistry_by_nma_sample_point_id(water_well_thing): results = ( session.query(NMA_MajorChemistry) .filter( - NMA_MajorChemistry.nma_sample_point_id == sample_info.nma_sample_point_id + NMA_MajorChemistry.nma_sample_point_id + == sample_info.nma_sample_point_id ) .all() ) diff --git a/tests/test_nma_chemistry_lineage.py b/tests/test_nma_chemistry_lineage.py index b828fb47..d8c4207e 100644 --- a/tests/test_nma_chemistry_lineage.py +++ b/tests/test_nma_chemistry_lineage.py @@ -484,11 +484,7 @@ def test_reverse_lineage_navigation(shared_well): session.refresh(well) # Reverse navigation - filter to just this sample_info - matching = [ - si - for si in well.chemistry_sample_infos - if si.id == sample_info.id - ] + matching = [si for si in well.chemistry_sample_infos if si.id == sample_info.id] assert len(matching) == 1 assert len(matching[0].minor_trace_chemistries) == 1 assert matching[0].minor_trace_chemistries[0] == mtc diff --git a/transfers/field_parameters_transfer.py b/transfers/field_parameters_transfer.py index e1780df5..d7dc77d7 100644 --- a/transfers/field_parameters_transfer.py +++ b/transfers/field_parameters_transfer.py @@ -64,13 +64,16 @@ def __init__(self, *args, batch_size: int = 1000, **kwargs): def _build_sample_info_cache(self) -> None: """Build cache of nma_sample_pt_id -> id for FK lookups.""" with session_ctx() as session: - sample_infos = session.query( - NMA_Chemistry_SampleInfo.nma_sample_pt_id, - NMA_Chemistry_SampleInfo.id - ).filter(NMA_Chemistry_SampleInfo.nma_sample_pt_id.isnot(None)).all() + sample_infos = ( + session.query( + NMA_Chemistry_SampleInfo.nma_sample_pt_id, + NMA_Chemistry_SampleInfo.id, + ) + .filter(NMA_Chemistry_SampleInfo.nma_sample_pt_id.isnot(None)) + .all() + ) self._sample_info_cache = { - nma_sample_pt_id: 
csi_id - for nma_sample_pt_id, csi_id in sample_infos + nma_sample_pt_id: csi_id for nma_sample_pt_id, csi_id in sample_infos } logger.info( f"Built ChemistrySampleInfo cache with {len(self._sample_info_cache)} entries" diff --git a/transfers/major_chemistry.py b/transfers/major_chemistry.py index 175e7d4d..1aab8da7 100644 --- a/transfers/major_chemistry.py +++ b/transfers/major_chemistry.py @@ -62,13 +62,16 @@ def __init__(self, *args, batch_size: int = 1000, **kwargs): def _build_sample_info_cache(self) -> None: """Build cache of nma_sample_pt_id -> id for FK lookups.""" with session_ctx() as session: - sample_infos = session.query( - NMA_Chemistry_SampleInfo.nma_sample_pt_id, - NMA_Chemistry_SampleInfo.id - ).filter(NMA_Chemistry_SampleInfo.nma_sample_pt_id.isnot(None)).all() + sample_infos = ( + session.query( + NMA_Chemistry_SampleInfo.nma_sample_pt_id, + NMA_Chemistry_SampleInfo.id, + ) + .filter(NMA_Chemistry_SampleInfo.nma_sample_pt_id.isnot(None)) + .all() + ) self._sample_info_cache = { - nma_sample_pt_id: csi_id - for nma_sample_pt_id, csi_id in sample_infos + nma_sample_pt_id: csi_id for nma_sample_pt_id, csi_id in sample_infos } logger.info( f"Built ChemistrySampleInfo cache with {len(self._sample_info_cache)} entries" diff --git a/transfers/minor_trace_chemistry_transfer.py b/transfers/minor_trace_chemistry_transfer.py index 9cbd7218..daeef792 100644 --- a/transfers/minor_trace_chemistry_transfer.py +++ b/transfers/minor_trace_chemistry_transfer.py @@ -64,13 +64,16 @@ def __init__(self, *args, batch_size: int = 1000, **kwargs): def _build_sample_info_cache(self): """Build cache of ChemistrySampleInfo.nma_sample_pt_id -> ChemistrySampleInfo.id.""" with session_ctx() as session: - sample_infos = session.query( - NMA_Chemistry_SampleInfo.nma_sample_pt_id, - NMA_Chemistry_SampleInfo.id - ).filter(NMA_Chemistry_SampleInfo.nma_sample_pt_id.isnot(None)).all() + sample_infos = ( + session.query( + NMA_Chemistry_SampleInfo.nma_sample_pt_id, + NMA_Chemistry_SampleInfo.id, + ) + .filter(NMA_Chemistry_SampleInfo.nma_sample_pt_id.isnot(None)) + .all() + ) self._sample_info_cache = { - nma_sample_pt_id: csi_id - for nma_sample_pt_id, csi_id in sample_infos + nma_sample_pt_id: csi_id for nma_sample_pt_id, csi_id in sample_infos } logger.info( f"Built ChemistrySampleInfo cache with {len(self._sample_info_cache)} entries" diff --git a/transfers/radionuclides.py b/transfers/radionuclides.py index ba17f038..589dbec8 100644 --- a/transfers/radionuclides.py +++ b/transfers/radionuclides.py @@ -62,11 +62,15 @@ def __init__(self, *args, batch_size: int = 1000, **kwargs): def _build_sample_info_cache(self) -> None: """Build cache of nma_sample_pt_id -> (id, thing_id) for FK lookups.""" with session_ctx() as session: - sample_infos = session.query( - NMA_Chemistry_SampleInfo.nma_sample_pt_id, - NMA_Chemistry_SampleInfo.id, - NMA_Chemistry_SampleInfo.thing_id, - ).filter(NMA_Chemistry_SampleInfo.nma_sample_pt_id.isnot(None)).all() + sample_infos = ( + session.query( + NMA_Chemistry_SampleInfo.nma_sample_pt_id, + NMA_Chemistry_SampleInfo.id, + NMA_Chemistry_SampleInfo.thing_id, + ) + .filter(NMA_Chemistry_SampleInfo.nma_sample_pt_id.isnot(None)) + .all() + ) self._sample_info_cache = { nma_sample_pt_id: (csi_id, thing_id) for nma_sample_pt_id, csi_id, thing_id in sample_infos From bba4313fd9d730bffe5f93bc1995f59376add47f Mon Sep 17 00:00:00 2001 From: Kimball Bighorse Date: Wed, 28 Jan 2026 02:36:25 -0800 Subject: [PATCH 22/22] fix: update admin views and tests for Integer PK schema Post-merge fixes: - 
admin/views/associated_data.py: Update to use nma_ prefixed columns (Integer PK) - admin/views/major_chemistry.py: Update to use nma_ prefixed columns (Integer PK) - tests/test_stratigraphy_legacy.py: Add required strat_top/strat_bottom fields - tests/integration/test_well_data_relationships.py: Add required strat_top/strat_bottom fields Co-Authored-By: Claude Opus 4.5 --- admin/views/associated_data.py | 106 ++++++++++++------ admin/views/major_chemistry.py | 73 +++++++----- .../test_well_data_relationships.py | 4 + tests/test_stratigraphy_legacy.py | 8 +- 4 files changed, 124 insertions(+), 67 deletions(-) diff --git a/admin/views/associated_data.py b/admin/views/associated_data.py index a706d0ad..f58dcd62 100644 --- a/admin/views/associated_data.py +++ b/admin/views/associated_data.py @@ -1,3 +1,31 @@ +# =============================================================================== +# Copyright 2026 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# =============================================================================== +""" +AssociatedDataAdmin view for legacy NMA_AssociatedData. + +Updated for Integer PK schema: +- id: Integer PK (autoincrement) +- nma_assoc_id: Legacy UUID PK (AssocID), UNIQUE for audit +- nma_location_id: Legacy LocationId UUID, UNIQUE +- nma_point_id: Legacy PointID string +- nma_object_id: Legacy OBJECTID, UNIQUE +""" + +from starlette.requests import Request + from admin.views.base import OcotilloModelView @@ -12,68 +40,74 @@ class AssociatedDataAdmin(OcotilloModelView): label = "NMA Associated Data" icon = "fa fa-link" - # Pagination - page_size = 50 - page_size_options = [25, 50, 100, 200] + # Integer PK + pk_attr = "id" + pk_type = int + + def can_create(self, request: Request) -> bool: + return False + + def can_edit(self, request: Request) -> bool: + return False + + def can_delete(self, request: Request) -> bool: + return False # ========== List View ========== + list_fields = [ - "location_id", - "point_id", - "assoc_id", + "id", + "nma_assoc_id", + "nma_location_id", + "nma_point_id", + "nma_object_id", "notes", "formation", - "object_id", "thing_id", ] sortable_fields = [ - "assoc_id", - "object_id", - "point_id", + "id", + "nma_assoc_id", + "nma_object_id", + "nma_point_id", ] - fields_default_sort = [("point_id", False), ("object_id", False)] + fields_default_sort = [("nma_point_id", False), ("nma_object_id", False)] searchable_fields = [ - "point_id", - "assoc_id", + "nma_point_id", + "nma_assoc_id", "notes", "formation", ] - # ========== Detail View ========== + page_size = 50 + page_size_options = [25, 50, 100, 200] + + # ========== Form View ========== + fields = [ - "location_id", - "point_id", - "assoc_id", + "id", + "nma_assoc_id", + "nma_location_id", + "nma_point_id", + "nma_object_id", "notes", "formation", - "object_id", "thing_id", ] - # ========== Legacy Field Labels ========== field_labels = { - "location_id": "LocationId", - "point_id": "PointID", - "assoc_id": "AssocID", + "id": "ID", + "nma_assoc_id": "NMA AssocID (Legacy)", + 
"nma_location_id": "NMA LocationId (Legacy)", + "nma_point_id": "NMA PointID (Legacy)", + "nma_object_id": "NMA OBJECTID (Legacy)", "notes": "Notes", "formation": "Formation", - "object_id": "OBJECTID", - "thing_id": "ThingID", + "thing_id": "Thing ID", } - # ========== READ ONLY ========== - enable_publish_actions = ( - False # hides publish/unpublish actions inherited from base - ) - def can_create(self, request) -> bool: - return False - - def can_edit(self, request) -> bool: - return False - - def can_delete(self, request) -> bool: - return False +# ============= EOF ============================================= diff --git a/admin/views/major_chemistry.py b/admin/views/major_chemistry.py index f822ed90..9578f60d 100644 --- a/admin/views/major_chemistry.py +++ b/admin/views/major_chemistry.py @@ -15,9 +15,16 @@ # =============================================================================== """ MajorChemistryAdmin view for legacy NMA_MajorChemistry. -""" -import uuid +Updated for Integer PK schema: +- id: Integer PK (autoincrement) +- nma_global_id: Legacy UUID PK (GlobalID), UNIQUE for audit +- chemistry_sample_info_id: Integer FK to NMA_Chemistry_SampleInfo.id +- nma_sample_pt_id: Legacy UUID FK (SamplePtID) for audit +- nma_sample_point_id: Legacy SamplePointID string +- nma_object_id: Legacy OBJECTID +- nma_wclab_id: Legacy WCLab_ID +""" from starlette.requests import Request from starlette_admin.fields import HasOne @@ -36,8 +43,10 @@ class MajorChemistryAdmin(OcotilloModelView): name = "NMA Major Chemistry" label = "NMA Major Chemistry" icon = "fa fa-flask" - pk_attr = "global_id" - pk_type = uuid.UUID + + # Integer PK + pk_attr = "id" + pk_type = int def can_create(self, request: Request) -> bool: return False @@ -51,9 +60,11 @@ def can_delete(self, request: Request) -> bool: # ========== List View ========== list_fields = [ - "global_id", - "sample_pt_id", - "sample_point_id", + "id", + "nma_global_id", + "chemistry_sample_info_id", + "nma_sample_pt_id", + "nma_sample_point_id", HasOne("chemistry_sample_info", identity="n-m-a_-chemistry_-sample-info"), "analyte", "symbol", @@ -65,15 +76,17 @@ def can_delete(self, request: Request) -> bool: "notes", "volume", "volume_unit", - "object_id", + "nma_object_id", "analyses_agency", - "wclab_id", + "nma_wclab_id", ] sortable_fields = [ - "global_id", - "sample_pt_id", - "sample_point_id", + "id", + "nma_global_id", + "chemistry_sample_info_id", + "nma_sample_pt_id", + "nma_sample_point_id", "analyte", "symbol", "sample_value", @@ -84,23 +97,23 @@ def can_delete(self, request: Request) -> bool: "notes", "volume", "volume_unit", - "object_id", + "nma_object_id", "analyses_agency", - "wclab_id", + "nma_wclab_id", ] fields_default_sort = [("analysis_date", True)] searchable_fields = [ - "global_id", - "sample_pt_id", - "sample_point_id", + "nma_global_id", + "nma_sample_pt_id", + "nma_sample_point_id", "analyte", "symbol", "analysis_method", "notes", "analyses_agency", - "wclab_id", + "nma_wclab_id", ] page_size = 50 @@ -109,9 +122,11 @@ def can_delete(self, request: Request) -> bool: # ========== Form View ========== fields = [ - "global_id", - "sample_pt_id", - "sample_point_id", + "id", + "nma_global_id", + "chemistry_sample_info_id", + "nma_sample_pt_id", + "nma_sample_point_id", HasOne("chemistry_sample_info", identity="n-m-a_-chemistry_-sample-info"), "analyte", "symbol", @@ -123,15 +138,17 @@ def can_delete(self, request: Request) -> bool: "notes", "volume", "volume_unit", - "object_id", + "nma_object_id", "analyses_agency", - 
"wclab_id", + "nma_wclab_id", ] field_labels = { - "global_id": "GlobalID", - "sample_pt_id": "SamplePtID", - "sample_point_id": "SamplePointID", + "id": "ID", + "nma_global_id": "NMA GlobalID (Legacy)", + "chemistry_sample_info_id": "Chemistry Sample Info ID", + "nma_sample_pt_id": "NMA SamplePtID (Legacy)", + "nma_sample_point_id": "NMA SamplePointID (Legacy)", "chemistry_sample_info": "Chemistry Sample Info", "analyte": "Analyte", "symbol": "Symbol", @@ -143,9 +160,9 @@ def can_delete(self, request: Request) -> bool: "notes": "Notes", "volume": "Volume", "volume_unit": "Volume Unit", - "object_id": "OBJECTID", + "nma_object_id": "NMA OBJECTID (Legacy)", "analyses_agency": "Analyses Agency", - "wclab_id": "WCLab_ID", + "nma_wclab_id": "NMA WCLab_ID (Legacy)", } diff --git a/tests/integration/test_well_data_relationships.py b/tests/integration/test_well_data_relationships.py index 99fceadd..bc4423bc 100644 --- a/tests/integration/test_well_data_relationships.py +++ b/tests/integration/test_well_data_relationships.py @@ -333,6 +333,8 @@ def test_well_navigates_to_stratigraphy_logs(self, well_for_relationships): nma_global_id=uuid.uuid4(), nma_point_id="NAVSTRAT1", # Max 10 chars thing_id=well.id, + strat_top=0, + strat_bottom=10, ) session.add(strat) session.commit() @@ -515,6 +517,8 @@ def test_deleting_well_cascades_to_stratigraphy_logs(self): nma_global_id=uuid.uuid4(), nma_point_id="CASCSTRAT", # Max 10 chars thing_id=well.id, + strat_top=0, + strat_bottom=10, ) session.add(strat) session.commit() diff --git a/tests/test_stratigraphy_legacy.py b/tests/test_stratigraphy_legacy.py index 4a62cf20..4b0f4b1a 100644 --- a/tests/test_stratigraphy_legacy.py +++ b/tests/test_stratigraphy_legacy.py @@ -49,9 +49,9 @@ def test_create_stratigraphy_with_thing(water_well_thing): nma_global_id=_next_global_id(), nma_point_id="STRAT-01", thing_id=well.id, - strat_top=0.0, - strat_bottom=10.0, - lithology="Sandstone", + strat_top=0, + strat_bottom=10, + lithology="Sand", # Max 4 chars ) session.add(record) session.commit() @@ -100,6 +100,8 @@ def test_stratigraphy_back_populates_thing(water_well_thing): nma_global_id=_next_global_id(), nma_point_id="BPSTRAT01", # Max 10 chars thing_id=well.id, + strat_top=0, + strat_bottom=10, ) session.add(record) session.commit()