From 3f42725a1645ac8332319ab360f7162092ecd6b1 Mon Sep 17 00:00:00 2001 From: Benjamin Capodanno Date: Wed, 18 Feb 2026 18:20:02 -0800 Subject: [PATCH 1/5] feat: remove variants from calibration responses, add dedicated variant endpoints Score set API responses were slow (5+ seconds) because functional classifications eagerly serialized thousands of variants. This moves variant data to dedicated endpoints while keeping a lightweight variant_count summary in the default response. Changes: - Remove `variants` field from functional classification view models - Add `id` and `variant_count` fields to SavedFunctionalClassification - Add `variant_count` column_property on the ORM model using a correlated COUNT subquery against the association table - Add `FunctionalClassificationVariants` response model - Add GET /{urn}/functional-classifications/{id}/variants endpoint - Add GET /{urn}/variants endpoint (all classifications) - Update test constants and assertions for new response shape Breaking change: clients relying on `variants` in calibration responses must migrate to the new dedicated endpoints. --- ...e_calibration_functional_classification.py | 15 +- src/mavedb/routers/score_calibrations.py | 95 +++ src/mavedb/view_models/score_calibration.py | 14 +- tests/helpers/constants.py | 18 +- tests/routers/test_score_calibrations.py | 587 +++++++++++++++++- tests/routers/test_score_set.py | 9 + 6 files changed, 723 insertions(+), 15 deletions(-) diff --git a/src/mavedb/models/score_calibration_functional_classification.py b/src/mavedb/models/score_calibration_functional_classification.py index 1975310ab..b9cdabc49 100644 --- a/src/mavedb/models/score_calibration_functional_classification.py +++ b/src/mavedb/models/score_calibration_functional_classification.py @@ -4,9 +4,9 @@ from typing import TYPE_CHECKING -from sqlalchemy import Boolean, Column, Enum, Float, ForeignKey, Integer, String +from sqlalchemy import Boolean, Column, Enum, Float, ForeignKey, Integer, String, func, select from sqlalchemy.dialects.postgresql import JSONB -from sqlalchemy.orm import Mapped, relationship +from sqlalchemy.orm import Mapped, column_property, relationship from mavedb.db.base import Base from mavedb.lib.validation.utilities import inf_or_float @@ -58,6 +58,17 @@ class ScoreCalibrationFunctionalClassification(Base): secondary=score_calibration_functional_classification_variants_association_table, ) + # Efficient count via correlated subquery — avoids loading all variant objects. 
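+    # The generated SQL is approximately (illustrative sketch; the real table
+    # and column names come from the association table definition):
+    #   SELECT count(*) FROM <association_table>
+    #   WHERE <association_table>.functional_classification_id = <parent_table>.id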
+ variant_count: Mapped[int] = column_property( + select(func.count()) + .where( + score_calibration_functional_classification_variants_association_table.c.functional_classification_id + == id # refers to the `id` Column defined above + ) + .correlate_except(score_calibration_functional_classification_variants_association_table) + .scalar_subquery() + ) + def score_is_contained_in_range(self, score: float) -> bool: """Check if a given score falls within the defined range.""" if self.range is None or not isinstance(self.range, list) or len(self.range) != 2: diff --git a/src/mavedb/routers/score_calibrations.py b/src/mavedb/routers/score_calibrations.py index 8a413677b..7fc80dd3f 100644 --- a/src/mavedb/routers/score_calibrations.py +++ b/src/mavedb/routers/score_calibrations.py @@ -29,6 +29,7 @@ from mavedb.lib.validation.dataframe.calibration import validate_and_standardize_calibration_classes_dataframe from mavedb.lib.validation.exceptions import ValidationError from mavedb.models.score_calibration import ScoreCalibration +from mavedb.models.score_calibration_functional_classification import ScoreCalibrationFunctionalClassification from mavedb.models.score_set import ScoreSet from mavedb.view_models import score_calibration @@ -684,3 +685,97 @@ def publish_score_calibration_route( db.refresh(item) return item + + +@router.get( + "/{urn}/functional-classifications/{classification_id}/variants", + response_model=score_calibration.FunctionalClassificationVariants, + responses={404: {}}, +) +def get_functional_classification_variants( + *, + urn: str, + classification_id: int, + db: Session = Depends(deps.get_db), + user_data: Optional[UserData] = Depends(get_current_user), +) -> score_calibration.FunctionalClassificationVariants: + """ + Retrieve variants for a specific functional classification within a score calibration. + + Returns the list of variants whose scores fall within the functional classification's + defined range or class. Use this endpoint when you need the full variant data for a + specific classification — the main score set and calibration endpoints return only + a `variant_count` summary for performance. 
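+
+    Example request (hypothetical URN and classification id, for illustration):
+
+        GET /api/v1/score-calibrations/urn:mavedb:calibration-example/functional-classifications/7/variants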
+ """ + save_to_logging_context( + {"requested_resource": urn, "requested_classification": classification_id, "resource_property": "variants"} + ) + + calibration = ( + db.query(ScoreCalibration) + .options(selectinload(ScoreCalibration.score_set).selectinload(ScoreSet.contributors)) + .where(ScoreCalibration.urn == urn) + .one_or_none() + ) + if not calibration: + logger.debug("The requested score calibration does not exist", extra=logging_context()) + raise HTTPException(status_code=404, detail="The requested score calibration does not exist") + + assert_permission(user_data, calibration, Action.READ) + + functional_classification = ( + db.query(ScoreCalibrationFunctionalClassification) + .filter( + ScoreCalibrationFunctionalClassification.id == classification_id, + ScoreCalibrationFunctionalClassification.calibration_id == calibration.id, + ) + .one_or_none() + ) + if not functional_classification: + logger.debug("The requested functional classification does not exist", extra=logging_context()) + raise HTTPException(status_code=404, detail="The requested functional classification does not exist") + + return score_calibration.FunctionalClassificationVariants( + functional_classification_id=functional_classification.id, variants=functional_classification.variants + ) + + +@router.get( + "/{urn}/variants", + response_model=list[score_calibration.FunctionalClassificationVariants], + responses={404: {}}, +) +def get_calibration_all_variants( + *, + urn: str, + db: Session = Depends(deps.get_db), + user_data: Optional[UserData] = Depends(get_current_user), +) -> list[score_calibration.FunctionalClassificationVariants]: + """ + Retrieve all variants across all functional classifications for a score calibration. + + Returns a list of variant sets, one per functional classification. Use this endpoint + when you need the full variant data for an entire calibration — the main score set and + calibration endpoints return only a `variant_count` summary for performance. 
+ """ + save_to_logging_context({"requested_resource": urn, "resource_property": "variants"}) + + calibration = ( + db.query(ScoreCalibration) + .options(selectinload(ScoreCalibration.score_set).selectinload(ScoreSet.contributors)) + .where(ScoreCalibration.urn == urn) + .one_or_none() + ) + if not calibration: + logger.debug("The requested score calibration does not exist", extra=logging_context()) + raise HTTPException(status_code=404, detail="The requested score calibration does not exist") + + assert_permission(user_data, calibration, Action.READ) + + results = [] + for fc in calibration.functional_classifications: + results.append( + score_calibration.FunctionalClassificationVariants(functional_classification_id=fc.id, variants=fc.variants) + ) + + return results diff --git a/src/mavedb/view_models/score_calibration.py b/src/mavedb/view_models/score_calibration.py index 985d59499..b4fb81735 100644 --- a/src/mavedb/view_models/score_calibration.py +++ b/src/mavedb/view_models/score_calibration.py @@ -35,10 +35,7 @@ from mavedb.view_models.user import SavedUser, User if TYPE_CHECKING: - from mavedb.view_models.variant import ( - SavedVariantEffectMeasurement, - VariantEffectMeasurement, - ) + from mavedb.view_models.variant import VariantEffectMeasurement ### Functional range models @@ -242,9 +239,10 @@ class FunctionalClassificationCreate(FunctionalClassificationModify): class SavedFunctionalClassification(FunctionalClassificationBase): """Persisted functional range model (includes record type metadata).""" + id: int record_type: str = None # type: ignore acmg_classification: Optional[SavedACMGClassification] = None - variants: Sequence["SavedVariantEffectMeasurement"] = [] + variant_count: int = 0 _record_type_factory = record_type_validator()(set_record_type) @@ -259,6 +257,12 @@ class FunctionalClassification(SavedFunctionalClassification): """Complete functional range model returned by the API.""" acmg_classification: Optional[ACMGClassification] = None + + +class FunctionalClassificationVariants(BaseModel): + """Response model for functional classification variant endpoints.""" + + functional_classification_id: int variants: Sequence["VariantEffectMeasurement"] = [] diff --git a/tests/helpers/constants.py b/tests/helpers/constants.py index f0fbca874..3ab620b50 100644 --- a/tests/helpers/constants.py +++ b/tests/helpers/constants.py @@ -1444,10 +1444,11 @@ TEST_SAVED_FUNCTIONAL_RANGE_NORMAL = { + "id": 1, "recordType": "FunctionalClassification", **{camelize(k): v for k, v in TEST_FUNCTIONAL_RANGE_NORMAL.items() if k not in ("acmg_classification",)}, "acmgClassification": TEST_SAVED_ACMG_BS3_STRONG_CLASSIFICATION, - "variants": [], + "variantCount": 0, } @@ -1464,10 +1465,11 @@ TEST_SAVED_FUNCTIONAL_RANGE_ABNORMAL = { + "id": 2, "recordType": "FunctionalClassification", **{camelize(k): v for k, v in TEST_FUNCTIONAL_RANGE_ABNORMAL.items() if k not in ("acmg_classification",)}, "acmgClassification": TEST_SAVED_ACMG_PS3_STRONG_CLASSIFICATION, - "variants": [], + "variantCount": 0, } @@ -1481,9 +1483,10 @@ TEST_SAVED_FUNCTIONAL_RANGE_NOT_SPECIFIED = { + "id": 3, "recordType": "FunctionalClassification", **{camelize(k): v for k, v in TEST_FUNCTIONAL_RANGE_NOT_SPECIFIED.items()}, - "variants": [], + "variantCount": 0, } @@ -1498,10 +1501,11 @@ TEST_SAVED_FUNCTIONAL_CLASSIFICATION_NORMAL = { + "id": 1, "recordType": "FunctionalClassification", **{camelize(k): v for k, v in TEST_FUNCTIONAL_CLASSIFICATION_NORMAL.items() if k not in ("acmg_classification",)}, "acmgClassification": 
TEST_SAVED_ACMG_BS3_STRONG_CLASSIFICATION, - "variants": [], + "variantCount": 0, } @@ -1516,10 +1520,11 @@ TEST_SAVED_FUNCTIONAL_CLASSIFICATION_ABNORMAL = { + "id": 2, "recordType": "FunctionalClassification", **{camelize(k): v for k, v in TEST_FUNCTIONAL_CLASSIFICATION_ABNORMAL.items() if k not in ("acmg_classification",)}, "acmgClassification": TEST_SAVED_ACMG_PS3_STRONG_CLASSIFICATION, - "variants": [], + "variantCount": 0, } @@ -1531,9 +1536,10 @@ TEST_SAVED_FUNCTIONAL_CLASSIFICATION_NOT_SPECIFIED = { + "id": 3, "recordType": "FunctionalClassification", **{camelize(k): v for k, v in TEST_FUNCTIONAL_CLASSIFICATION_NOT_SPECIFIED.items()}, - "variants": [], + "variantCount": 0, } diff --git a/tests/routers/test_score_calibrations.py b/tests/routers/test_score_calibrations.py index 8cdbeefe8..5ecbb880c 100644 --- a/tests/routers/test_score_calibrations.py +++ b/tests/routers/test_score_calibrations.py @@ -1716,7 +1716,7 @@ def test_can_create_class_based_score_calibration_form( assert calibration_response["scoreSetUrn"] == score_set["urn"] assert calibration_response["private"] is True assert all( - len(classification["variants"]) == 1 for classification in calibration_response["functionalClassifications"] + classification["variantCount"] == 1 for classification in calibration_response["functionalClassifications"] ) @@ -2685,7 +2685,7 @@ def test_can_modify_score_calibration_to_class_based( calibration_response = response.json() assert calibration_response["urn"] == calibration["urn"] assert all( - len(classification["variants"]) == 1 for classification in calibration_response["functionalClassifications"] + classification["variantCount"] == 1 for classification in calibration_response["functionalClassifications"] ) @@ -3914,3 +3914,586 @@ def test_can_publish_already_published_calibration( assert publish_response_2.status_code == 200 published_calibration_2 = publish_response_2.json() assert published_calibration_2["private"] is False + + +########################################################### +# GET /score-calibrations/{urn}/functional-classifications/{id}/variants +########################################################### + + +@pytest.mark.parametrize( + "mock_publication_fetch", + [ + [ + {"dbName": "PubMed", "identifier": TEST_PUBMED_IDENTIFIER}, + {"dbName": "bioRxiv", "identifier": TEST_BIORXIV_IDENTIFIER}, + ] + ], + indirect=["mock_publication_fetch"], +) +def test_get_functional_classification_variants( + client, setup_router_db, mock_publication_fetch, session, data_provider, data_files +): + experiment = create_experiment(client) + score_set = create_seq_score_set_with_mapped_variants( + client, + session, + data_provider, + experiment["urn"], + data_files / "scores.csv", + ) + + calibration = create_test_score_calibration_in_score_set_via_client( + client, + score_set["urn"], + {**deepcamelize(TEST_BRNICH_SCORE_CALIBRATION_RANGE_BASED)}, + ) + + fc = calibration["functionalClassifications"][0] + response = client.get( + f"/api/v1/score-calibrations/{calibration['urn']}/functional-classifications/{fc['id']}/variants", + ) + + assert response.status_code == 200 + result = response.json() + assert result["functionalClassificationId"] == fc["id"] + assert isinstance(result["variants"], list) + assert len(result["variants"]) == fc["variantCount"] + + +@pytest.mark.parametrize( + "mock_publication_fetch", + [ + [ + {"dbName": "PubMed", "identifier": TEST_PUBMED_IDENTIFIER}, + {"dbName": "bioRxiv", "identifier": TEST_BIORXIV_IDENTIFIER}, + ] + ], + 
indirect=["mock_publication_fetch"], +) +def test_get_functional_classification_variants_not_found_calibration( + client, setup_router_db, mock_publication_fetch, session, data_provider, data_files +): + response = client.get( + "/api/v1/score-calibrations/urn:mavedb:calibration-nonexistent/functional-classifications/1/variants", + ) + assert response.status_code == 404 + + +@pytest.mark.parametrize( + "mock_publication_fetch", + [ + [ + {"dbName": "PubMed", "identifier": TEST_PUBMED_IDENTIFIER}, + {"dbName": "bioRxiv", "identifier": TEST_BIORXIV_IDENTIFIER}, + ] + ], + indirect=["mock_publication_fetch"], +) +def test_get_functional_classification_variants_not_found_classification( + client, setup_router_db, mock_publication_fetch, session, data_provider, data_files +): + experiment = create_experiment(client) + score_set = create_seq_score_set_with_mapped_variants( + client, + session, + data_provider, + experiment["urn"], + data_files / "scores.csv", + ) + + calibration = create_test_score_calibration_in_score_set_via_client( + client, + score_set["urn"], + {**deepcamelize(TEST_BRNICH_SCORE_CALIBRATION_RANGE_BASED)}, + ) + + response = client.get( + f"/api/v1/score-calibrations/{calibration['urn']}/functional-classifications/99999/variants", + ) + assert response.status_code == 404 + + +@pytest.mark.parametrize( + "mock_publication_fetch", + [ + [ + {"dbName": "PubMed", "identifier": TEST_PUBMED_IDENTIFIER}, + {"dbName": "bioRxiv", "identifier": TEST_BIORXIV_IDENTIFIER}, + ] + ], + indirect=["mock_publication_fetch"], +) +def test_anonymous_user_cannot_get_variants_for_private_calibration( + client, setup_router_db, mock_publication_fetch, session, data_provider, data_files, anonymous_app_overrides +): + experiment = create_experiment(client) + score_set = create_seq_score_set_with_mapped_variants( + client, + session, + data_provider, + experiment["urn"], + data_files / "scores.csv", + ) + + calibration = create_test_score_calibration_in_score_set_via_client( + client, + score_set["urn"], + {**deepcamelize(TEST_BRNICH_SCORE_CALIBRATION_RANGE_BASED)}, + ) + + fc = calibration["functionalClassifications"][0] + with DependencyOverrider(anonymous_app_overrides): + response = client.get( + f"/api/v1/score-calibrations/{calibration['urn']}/functional-classifications/{fc['id']}/variants", + ) + + assert response.status_code == 404 + + +@pytest.mark.parametrize( + "mock_publication_fetch", + [ + [ + {"dbName": "PubMed", "identifier": TEST_PUBMED_IDENTIFIER}, + {"dbName": "bioRxiv", "identifier": TEST_BIORXIV_IDENTIFIER}, + ] + ], + indirect=["mock_publication_fetch"], +) +def test_user_without_read_permissions_cannot_get_variants_for_private_calibration( + client, setup_router_db, mock_publication_fetch, session, data_provider, data_files, extra_user_app_overrides +): + experiment = create_experiment(client) + score_set = create_seq_score_set_with_mapped_variants( + client, + session, + data_provider, + experiment["urn"], + data_files / "scores.csv", + ) + + calibration = create_test_score_calibration_in_score_set_via_client( + client, + score_set["urn"], + {**deepcamelize(TEST_BRNICH_SCORE_CALIBRATION_RANGE_BASED)}, + ) + + fc = calibration["functionalClassifications"][0] + with DependencyOverrider(extra_user_app_overrides): + response = client.get( + f"/api/v1/score-calibrations/{calibration['urn']}/functional-classifications/{fc['id']}/variants", + ) + + assert response.status_code == 404 + + +@pytest.mark.parametrize( + "mock_publication_fetch", + [ + [ + {"dbName": "PubMed", "identifier": 
TEST_PUBMED_IDENTIFIER}, + {"dbName": "bioRxiv", "identifier": TEST_BIORXIV_IDENTIFIER}, + ] + ], + indirect=["mock_publication_fetch"], +) +def test_user_with_read_permissions_can_get_variants_for_private_calibration( + client, setup_router_db, mock_publication_fetch, session, data_provider, data_files, extra_user_app_overrides +): + experiment = create_experiment(client) + score_set = create_seq_score_set_with_mapped_variants( + client, + session, + data_provider, + experiment["urn"], + data_files / "scores.csv", + ) + + calibration = create_test_score_calibration_in_score_set_via_client( + client, + score_set["urn"], + {**deepcamelize(TEST_BRNICH_SCORE_CALIBRATION_RANGE_BASED)}, + ) + + fc = calibration["functionalClassifications"][0] + + add_contributor( + session, + score_set["urn"], + ScoreSetDbModel, + EXTRA_USER["username"], + EXTRA_USER["first_name"], + EXTRA_USER["last_name"], + ) + + with DependencyOverrider(extra_user_app_overrides): + response = client.get( + f"/api/v1/score-calibrations/{calibration['urn']}/functional-classifications/{fc['id']}/variants", + ) + + assert response.status_code == 200 + result = response.json() + assert result["functionalClassificationId"] == fc["id"] + assert isinstance(result["variants"], list) + assert len(result["variants"]) == fc["variantCount"] + + +@pytest.mark.parametrize( + "mock_publication_fetch", + [ + [ + {"dbName": "PubMed", "identifier": TEST_PUBMED_IDENTIFIER}, + {"dbName": "bioRxiv", "identifier": TEST_BIORXIV_IDENTIFIER}, + ] + ], + indirect=["mock_publication_fetch"], +) +def test_admin_user_can_get_variants_for_private_calibration( + client, setup_router_db, mock_publication_fetch, session, data_provider, data_files, admin_app_overrides +): + experiment = create_experiment(client) + score_set = create_seq_score_set_with_mapped_variants( + client, + session, + data_provider, + experiment["urn"], + data_files / "scores.csv", + ) + + calibration = create_test_score_calibration_in_score_set_via_client( + client, + score_set["urn"], + {**deepcamelize(TEST_BRNICH_SCORE_CALIBRATION_RANGE_BASED)}, + ) + + fc = calibration["functionalClassifications"][0] + + with DependencyOverrider(admin_app_overrides): + response = client.get( + f"/api/v1/score-calibrations/{calibration['urn']}/functional-classifications/{fc['id']}/variants", + ) + + assert response.status_code == 200 + result = response.json() + assert result["functionalClassificationId"] == fc["id"] + assert isinstance(result["variants"], list) + assert len(result["variants"]) == fc["variantCount"] + + +@pytest.mark.parametrize( + "mock_publication_fetch", + [ + [ + {"dbName": "PubMed", "identifier": TEST_PUBMED_IDENTIFIER}, + {"dbName": "bioRxiv", "identifier": TEST_BIORXIV_IDENTIFIER}, + ] + ], + indirect=["mock_publication_fetch"], +) +def test_anonymous_user_can_get_variants_for_public_calibration( + client, setup_router_db, mock_publication_fetch, session, data_provider, data_files, anonymous_app_overrides +): + experiment = create_experiment(client) + score_set = create_seq_score_set_with_mapped_variants( + client, + session, + data_provider, + experiment["urn"], + data_files / "scores.csv", + ) + + calibration = create_test_score_calibration_in_score_set_via_client( + client, + score_set["urn"], + {**deepcamelize(TEST_BRNICH_SCORE_CALIBRATION_RANGE_BASED)}, + ) + publish_test_score_calibration_via_client(client, calibration["urn"]) + + fc = calibration["functionalClassifications"][0] + with DependencyOverrider(anonymous_app_overrides): + response = client.get( + 
f"/api/v1/score-calibrations/{calibration['urn']}/functional-classifications/{fc['id']}/variants", + ) + + assert response.status_code == 200 + result = response.json() + assert result["functionalClassificationId"] == fc["id"] + assert isinstance(result["variants"], list) + assert len(result["variants"]) == fc["variantCount"] + + +########################################################### +# GET /score-calibrations/{urn}/variants +########################################################### + + +@pytest.mark.parametrize( + "mock_publication_fetch", + [ + [ + {"dbName": "PubMed", "identifier": TEST_PUBMED_IDENTIFIER}, + {"dbName": "bioRxiv", "identifier": TEST_BIORXIV_IDENTIFIER}, + ] + ], + indirect=["mock_publication_fetch"], +) +def test_get_calibration_all_variants( + client, setup_router_db, mock_publication_fetch, session, data_provider, data_files +): + experiment = create_experiment(client) + score_set = create_seq_score_set_with_mapped_variants( + client, + session, + data_provider, + experiment["urn"], + data_files / "scores.csv", + ) + + calibration = create_test_score_calibration_in_score_set_via_client( + client, + score_set["urn"], + {**deepcamelize(TEST_BRNICH_SCORE_CALIBRATION_RANGE_BASED)}, + ) + + response = client.get( + f"/api/v1/score-calibrations/{calibration['urn']}/variants", + ) + + assert response.status_code == 200 + results = response.json() + assert isinstance(results, list) + assert len(results) == len(calibration["functionalClassifications"]) + for i, fc_variants in enumerate(results): + assert fc_variants["functionalClassificationId"] == calibration["functionalClassifications"][i]["id"] + assert isinstance(fc_variants["variants"], list) + + +@pytest.mark.parametrize( + "mock_publication_fetch", + [ + [ + {"dbName": "PubMed", "identifier": TEST_PUBMED_IDENTIFIER}, + {"dbName": "bioRxiv", "identifier": TEST_BIORXIV_IDENTIFIER}, + ] + ], + indirect=["mock_publication_fetch"], +) +def test_get_calibration_all_variants_not_found( + client, setup_router_db, mock_publication_fetch, session, data_provider, data_files +): + response = client.get( + "/api/v1/score-calibrations/urn:mavedb:calibration-nonexistent/variants", + ) + assert response.status_code == 404 + + +@pytest.mark.parametrize( + "mock_publication_fetch", + [ + [ + {"dbName": "PubMed", "identifier": TEST_PUBMED_IDENTIFIER}, + {"dbName": "bioRxiv", "identifier": TEST_BIORXIV_IDENTIFIER}, + ] + ], + indirect=["mock_publication_fetch"], +) +def test_anonymous_user_cannot_get_all_variants_for_private_calibration( + client, setup_router_db, mock_publication_fetch, session, data_provider, data_files, anonymous_app_overrides +): + experiment = create_experiment(client) + score_set = create_seq_score_set_with_mapped_variants( + client, + session, + data_provider, + experiment["urn"], + data_files / "scores.csv", + ) + + calibration = create_test_score_calibration_in_score_set_via_client( + client, + score_set["urn"], + {**deepcamelize(TEST_BRNICH_SCORE_CALIBRATION_RANGE_BASED)}, + ) + + with DependencyOverrider(anonymous_app_overrides): + response = client.get( + f"/api/v1/score-calibrations/{calibration['urn']}/variants", + ) + + assert response.status_code == 404 + + +@pytest.mark.parametrize( + "mock_publication_fetch", + [ + [ + {"dbName": "PubMed", "identifier": TEST_PUBMED_IDENTIFIER}, + {"dbName": "bioRxiv", "identifier": TEST_BIORXIV_IDENTIFIER}, + ] + ], + indirect=["mock_publication_fetch"], +) +def test_user_without_read_permissions_cannot_get_all_variants_for_private_calibration( + client, 
setup_router_db, mock_publication_fetch, session, data_provider, data_files, extra_user_app_overrides +): + experiment = create_experiment(client) + score_set = create_seq_score_set_with_mapped_variants( + client, + session, + data_provider, + experiment["urn"], + data_files / "scores.csv", + ) + + calibration = create_test_score_calibration_in_score_set_via_client( + client, + score_set["urn"], + {**deepcamelize(TEST_BRNICH_SCORE_CALIBRATION_RANGE_BASED)}, + ) + + with DependencyOverrider(extra_user_app_overrides): + response = client.get( + f"/api/v1/score-calibrations/{calibration['urn']}/variants", + ) + + assert response.status_code == 404 + + +@pytest.mark.parametrize( + "mock_publication_fetch", + [ + [ + {"dbName": "PubMed", "identifier": TEST_PUBMED_IDENTIFIER}, + {"dbName": "bioRxiv", "identifier": TEST_BIORXIV_IDENTIFIER}, + ] + ], + indirect=["mock_publication_fetch"], +) +def test_user_with_read_permissions_can_get_all_variants_for_private_calibration( + client, setup_router_db, mock_publication_fetch, session, data_provider, data_files, extra_user_app_overrides +): + experiment = create_experiment(client) + score_set = create_seq_score_set_with_mapped_variants( + client, + session, + data_provider, + experiment["urn"], + data_files / "scores.csv", + ) + + calibration = create_test_score_calibration_in_score_set_via_client( + client, + score_set["urn"], + {**deepcamelize(TEST_BRNICH_SCORE_CALIBRATION_RANGE_BASED)}, + ) + + add_contributor( + session, + score_set["urn"], + ScoreSetDbModel, + EXTRA_USER["username"], + EXTRA_USER["first_name"], + EXTRA_USER["last_name"], + ) + + with DependencyOverrider(extra_user_app_overrides): + response = client.get( + f"/api/v1/score-calibrations/{calibration['urn']}/variants", + ) + + assert response.status_code == 200 + results = response.json() + assert isinstance(results, list) + assert len(results) == len(calibration["functionalClassifications"]) + for i, fc_variants in enumerate(results): + assert fc_variants["functionalClassificationId"] == calibration["functionalClassifications"][i]["id"] + assert isinstance(fc_variants["variants"], list) + assert len(fc_variants["variants"]) == calibration["functionalClassifications"][i]["variantCount"] + + +@pytest.mark.parametrize( + "mock_publication_fetch", + [ + [ + {"dbName": "PubMed", "identifier": TEST_PUBMED_IDENTIFIER}, + {"dbName": "bioRxiv", "identifier": TEST_BIORXIV_IDENTIFIER}, + ] + ], + indirect=["mock_publication_fetch"], +) +def test_admin_user_can_get_all_variants_for_private_calibration( + client, setup_router_db, mock_publication_fetch, session, data_provider, data_files, admin_app_overrides +): + experiment = create_experiment(client) + score_set = create_seq_score_set_with_mapped_variants( + client, + session, + data_provider, + experiment["urn"], + data_files / "scores.csv", + ) + + calibration = create_test_score_calibration_in_score_set_via_client( + client, + score_set["urn"], + {**deepcamelize(TEST_BRNICH_SCORE_CALIBRATION_RANGE_BASED)}, + ) + + with DependencyOverrider(admin_app_overrides): + response = client.get( + f"/api/v1/score-calibrations/{calibration['urn']}/variants", + ) + + assert response.status_code == 200 + results = response.json() + assert isinstance(results, list) + assert len(results) == len(calibration["functionalClassifications"]) + for i, fc_variants in enumerate(results): + assert fc_variants["functionalClassificationId"] == calibration["functionalClassifications"][i]["id"] + assert isinstance(fc_variants["variants"], list) + assert 
len(fc_variants["variants"]) == calibration["functionalClassifications"][i]["variantCount"] + + +@pytest.mark.parametrize( + "mock_publication_fetch", + [ + [ + {"dbName": "PubMed", "identifier": TEST_PUBMED_IDENTIFIER}, + {"dbName": "bioRxiv", "identifier": TEST_BIORXIV_IDENTIFIER}, + ] + ], + indirect=["mock_publication_fetch"], +) +def test_anonymous_user_can_get_all_variants_for_public_calibration( + client, setup_router_db, mock_publication_fetch, session, data_provider, data_files, anonymous_app_overrides +): + experiment = create_experiment(client) + score_set = create_seq_score_set_with_mapped_variants( + client, + session, + data_provider, + experiment["urn"], + data_files / "scores.csv", + ) + + calibration = create_test_score_calibration_in_score_set_via_client( + client, + score_set["urn"], + {**deepcamelize(TEST_BRNICH_SCORE_CALIBRATION_RANGE_BASED)}, + ) + publish_test_score_calibration_via_client(client, calibration["urn"]) + + with DependencyOverrider(anonymous_app_overrides): + response = client.get( + f"/api/v1/score-calibrations/{calibration['urn']}/variants", + ) + + assert response.status_code == 200 + results = response.json() + assert isinstance(results, list) + assert len(results) == len(calibration["functionalClassifications"]) + for i, fc_variants in enumerate(results): + assert fc_variants["functionalClassificationId"] == calibration["functionalClassifications"][i]["id"] + assert isinstance(fc_variants["variants"], list) + assert len(fc_variants["variants"]) == calibration["functionalClassifications"][i]["variantCount"] diff --git a/tests/routers/test_score_set.py b/tests/routers/test_score_set.py index 13bd7ce73..f22a87824 100644 --- a/tests/routers/test_score_set.py +++ b/tests/routers/test_score_set.py @@ -226,6 +226,15 @@ def test_create_score_set_with_score_calibration(client, mock_publication_fetch, expected_calibration["private"] = True expected_calibration["primary"] = False expected_calibration["investigatorProvided"] = True + # Match functional classifications by a stable identifier (label). + response_fcs_by_label = { + fc["label"]: fc for fc in response_data["scoreCalibrations"][0]["functionalClassifications"] + } + for expected_fc in expected_calibration["functionalClassifications"]: + label = expected_fc["label"] + if label in response_fcs_by_label: + expected_fc["id"] = response_fcs_by_label[label]["id"] + expected_response["scoreCalibrations"] = [expected_calibration] assert sorted(expected_response.keys()) == sorted(response_data.keys()) From f8b1faab286ee06c33105773ea73cf29dba06820 Mon Sep 17 00:00:00 2001 From: Benjamin Capodanno Date: Mon, 23 Feb 2026 13:11:42 -0800 Subject: [PATCH 2/5] fix: simplify permission check logic gates in collections.py --- src/mavedb/routers/collections.py | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/src/mavedb/routers/collections.py b/src/mavedb/routers/collections.py index 48f389641..2c84f79d1 100644 --- a/src/mavedb/routers/collections.py +++ b/src/mavedb/routers/collections.py @@ -299,19 +299,15 @@ async def update_collection( assert_permission(user_data, item, Action.UPDATE) - # Editors may update metadata, but not all editors can publish (which is just setting private to public). - if item.private and not item_update.private: + # Ensure users have permission to make the specific changes they are attempting to make. 
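+    # Comparing the requested value against the current one means a no-op update
+    # (e.g. re-submitting private=True for an already-private collection) does not
+    # trigger the elevated PUBLISH or ADD_BADGE checks; only actual state changes do.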
+ if item_update.private is not None and item.private != item_update.private: assert_permission(user_data, item, Action.PUBLISH) - # Unpublishing requires the same permissions as publishing. - if not item.private and item_update.private: - assert_permission(user_data, item, Action.PUBLISH) - - if item_update.badge_name: + if item_update.badge_name is not None and item.badge_name != item_update.badge_name: assert_permission(user_data, item, Action.ADD_BADGE) # Only access fields set by the user. Note the value of set fields will be updated even if the value is None - pairs = {k: v for k, v in vars(item_update).items() if k in item_update.__fields_set__} + pairs = {k: v for k, v in vars(item_update).items() if k in item_update.model_fields_set} for var, value in pairs.items(): # vars(item_update).items(): setattr(item, var, value) From 5efa5fa124fa169f5496919c4fe92a22400aa86a Mon Sep 17 00:00:00 2001 From: Benjamin Capodanno Date: Mon, 23 Feb 2026 13:16:15 -0800 Subject: [PATCH 3/5] refactor: add ADD_CALIBRATION permission action for score sets --- src/mavedb/lib/permissions/actions.py | 1 + src/mavedb/lib/permissions/score_set.py | 42 +++++++++++++++- tests/lib/permissions/test_collection.py | 1 + tests/lib/permissions/test_experiment.py | 1 + tests/lib/permissions/test_experiment_set.py | 1 + .../lib/permissions/test_score_calibration.py | 1 + tests/lib/permissions/test_score_set.py | 49 +++++++++++++++++++ tests/lib/permissions/test_user.py | 1 + 8 files changed, 96 insertions(+), 1 deletion(-) diff --git a/src/mavedb/lib/permissions/actions.py b/src/mavedb/lib/permissions/actions.py index cc3a95596..f2402479c 100644 --- a/src/mavedb/lib/permissions/actions.py +++ b/src/mavedb/lib/permissions/actions.py @@ -13,3 +13,4 @@ class Action(Enum): PUBLISH = "publish" ADD_BADGE = "add_badge" CHANGE_RANK = "change_rank" + ADD_CALIBRATION = "add_calibration" diff --git a/src/mavedb/lib/permissions/score_set.py b/src/mavedb/lib/permissions/score_set.py index 6e5806694..ba35eaf91 100644 --- a/src/mavedb/lib/permissions/score_set.py +++ b/src/mavedb/lib/permissions/score_set.py @@ -20,7 +20,7 @@ def has_permission(user_data: Optional[UserData], entity: ScoreSet, action: Acti Args: user_data: The user's authentication data and roles. None for anonymous users. entity: The ScoreSet entity to check permissions for. - action: The action to be performed (READ, UPDATE, DELETE, PUBLISH, SET_SCORES). + action: The action to be performed (READ, UPDATE, DELETE, PUBLISH, SET_SCORES, ADD_CALIBRATION). Returns: PermissionResponse: Contains permission result, HTTP status code, and message. @@ -54,6 +54,7 @@ def has_permission(user_data: Optional[UserData], entity: ScoreSet, action: Acti Action.DELETE: _handle_delete_action, Action.PUBLISH: _handle_publish_action, Action.SET_SCORES: _handle_set_scores_action, + Action.ADD_CALIBRATION: _handle_add_calibration_action, } if action not in handlers: @@ -253,3 +254,42 @@ def _handle_set_scores_action( return PermissionResponse(True) return deny_action_for_entity(entity, private, user_data, user_is_contributor or user_is_owner, "score set") + + +def _handle_add_calibration_action( + user_data: Optional[UserData], + entity: ScoreSet, + private: bool, + user_is_owner: bool, + user_is_contributor: bool, + active_roles: list[UserRole], +) -> PermissionResponse: + """ + Handle ADD_CALIBRATION action permission check for ScoreSet entities. + + Any authenticated user may add a calibration to a published ScoreSet. 
+ For private ScoreSets, only owners, contributors, and admins may add calibrations. + + Args: + user_data: The user's authentication data. + entity: The ScoreSet entity being accessed. + private: Whether the ScoreSet is private. + user_is_owner: Whether the user owns the ScoreSet. + user_is_contributor: Whether the user is a contributor to the ScoreSet. + active_roles: List of the user's active roles. + + Returns: + PermissionResponse: Permission result with appropriate HTTP status. + """ + ## Allow add calibration access under the following conditions: + # Any authenticated user may add a calibration to a published score set. + if not private and user_data is not None: + return PermissionResponse(True) + # The owner or contributors may add a calibration to a private score set. + if user_is_owner or user_is_contributor: + return PermissionResponse(True) + # Admins may add a calibration to any score set. + if roles_permitted(active_roles, [UserRole.admin]): + return PermissionResponse(True) + + return deny_action_for_entity(entity, private, user_data, user_is_contributor or user_is_owner, "score set") diff --git a/tests/lib/permissions/test_collection.py b/tests/lib/permissions/test_collection.py index ab0593bbc..b9852b611 100644 --- a/tests/lib/permissions/test_collection.py +++ b/tests/lib/permissions/test_collection.py @@ -40,6 +40,7 @@ Action.LOOKUP, Action.CHANGE_RANK, Action.SET_SCORES, + Action.ADD_CALIBRATION, ] COLLECTION_ROLE_MAP = { diff --git a/tests/lib/permissions/test_experiment.py b/tests/lib/permissions/test_experiment.py index b4e5dc240..1ea5a6399 100644 --- a/tests/lib/permissions/test_experiment.py +++ b/tests/lib/permissions/test_experiment.py @@ -35,6 +35,7 @@ Action.CHANGE_RANK, Action.SET_SCORES, Action.PUBLISH, + Action.ADD_CALIBRATION, ] diff --git a/tests/lib/permissions/test_experiment_set.py b/tests/lib/permissions/test_experiment_set.py index adf109fb7..9e7c42b20 100644 --- a/tests/lib/permissions/test_experiment_set.py +++ b/tests/lib/permissions/test_experiment_set.py @@ -35,6 +35,7 @@ Action.CHANGE_RANK, Action.SET_SCORES, Action.PUBLISH, + Action.ADD_CALIBRATION, ] diff --git a/tests/lib/permissions/test_score_calibration.py b/tests/lib/permissions/test_score_calibration.py index a33843680..051209063 100644 --- a/tests/lib/permissions/test_score_calibration.py +++ b/tests/lib/permissions/test_score_calibration.py @@ -36,6 +36,7 @@ Action.LOOKUP, Action.ADD_BADGE, Action.SET_SCORES, + Action.ADD_CALIBRATION, ] diff --git a/tests/lib/permissions/test_score_set.py b/tests/lib/permissions/test_score_set.py index 2349359f7..7198fba97 100644 --- a/tests/lib/permissions/test_score_set.py +++ b/tests/lib/permissions/test_score_set.py @@ -11,6 +11,7 @@ from mavedb.lib.permissions.actions import Action from mavedb.lib.permissions.score_set import ( + _handle_add_calibration_action, _handle_delete_action, _handle_publish_action, _handle_read_action, @@ -27,6 +28,7 @@ Action.DELETE: _handle_delete_action, Action.SET_SCORES: _handle_set_scores_action, Action.PUBLISH: _handle_publish_action, + Action.ADD_CALIBRATION: _handle_add_calibration_action, } SCORE_SET_UNSUPPORTED_ACTIONS: List[Action] = [ @@ -324,3 +326,50 @@ def test_handle_publish_action(self, test_case: PermissionTest, entity_helper: E assert result.permitted == test_case.should_be_permitted if not test_case.should_be_permitted and test_case.expected_code: assert result.http_code == test_case.expected_code + + +class TestScoreSetAddCalibrationActionHandler: + """Test the _handle_add_calibration_action helper 
function directly.""" + + @pytest.mark.parametrize( + "test_case", + [ + # Admins can add calibrations to any ScoreSet + PermissionTest("ScoreSet", "private", "admin", Action.ADD_CALIBRATION, True), + PermissionTest("ScoreSet", "published", "admin", Action.ADD_CALIBRATION, True), + # Owners can add calibrations to any ScoreSet they own + PermissionTest("ScoreSet", "private", "owner", Action.ADD_CALIBRATION, True), + PermissionTest("ScoreSet", "published", "owner", Action.ADD_CALIBRATION, True), + # Contributors can add calibrations to any ScoreSet they contribute to + PermissionTest("ScoreSet", "private", "contributor", Action.ADD_CALIBRATION, True), + PermissionTest("ScoreSet", "published", "contributor", Action.ADD_CALIBRATION, True), + # Mappers cannot add calibrations to private ScoreSets, but can to published + PermissionTest("ScoreSet", "private", "mapper", Action.ADD_CALIBRATION, False, 404), + PermissionTest("ScoreSet", "published", "mapper", Action.ADD_CALIBRATION, True), + # Other users cannot add calibrations to private ScoreSets, but can to published + PermissionTest("ScoreSet", "private", "other_user", Action.ADD_CALIBRATION, False, 404), + PermissionTest("ScoreSet", "published", "other_user", Action.ADD_CALIBRATION, True), + # Anonymous users cannot add calibrations to any ScoreSet + PermissionTest("ScoreSet", "private", "anonymous", Action.ADD_CALIBRATION, False, 404), + PermissionTest("ScoreSet", "published", "anonymous", Action.ADD_CALIBRATION, False, 401), + ], + ids=lambda tc: f"{tc.user_type}_{tc.entity_state}_{tc.action.value}_{'permitted' if tc.should_be_permitted else 'denied'}", + ) + def test_handle_add_calibration_action(self, test_case: PermissionTest, entity_helper: EntityTestHelper) -> None: + """Test _handle_add_calibration_action helper function directly.""" + assert test_case.entity_state is not None, "ScoreSet tests must have entity_state" + score_set = entity_helper.create_score_set(test_case.entity_state) + user_data = entity_helper.create_user_data(test_case.user_type) + + private = test_case.entity_state == "private" + user_is_owner = test_case.user_type == "owner" + user_is_contributor = test_case.user_type == "contributor" + active_roles = user_data.active_roles if user_data else [] + + result = _handle_add_calibration_action( + user_data, score_set, private, user_is_owner, user_is_contributor, active_roles + ) + + assert result.permitted == test_case.should_be_permitted + if not test_case.should_be_permitted and test_case.expected_code: + assert result.http_code == test_case.expected_code diff --git a/tests/lib/permissions/test_user.py b/tests/lib/permissions/test_user.py index b4efa876b..d68f96964 100644 --- a/tests/lib/permissions/test_user.py +++ b/tests/lib/permissions/test_user.py @@ -35,6 +35,7 @@ Action.CHANGE_RANK, Action.SET_SCORES, Action.PUBLISH, + Action.ADD_CALIBRATION, ] From a96cab7eef17d41e4caf51bc3466c8d0a388bba6 Mon Sep 17 00:00:00 2001 From: Benjamin Capodanno Date: Mon, 23 Feb 2026 13:17:28 -0800 Subject: [PATCH 4/5] feat: allow independent users to create score calibrations on published score sets --- .../lib/permissions/score_calibration.py | 10 +- src/mavedb/routers/score_calibrations.py | 31 +- src/mavedb/view_models/score_calibration.py | 37 ++ tests/helpers/constants.py | 14 +- tests/lib/conftest.py | 31 +- .../lib/permissions/test_score_calibration.py | 39 +- tests/routers/test_score_calibrations.py | 417 +++++++++++++++++- tests/view_models/test_score_calibration.py | 32 ++ 8 files changed, 550 insertions(+), 61 
deletions(-) diff --git a/src/mavedb/lib/permissions/score_calibration.py b/src/mavedb/lib/permissions/score_calibration.py index 4c068c7cf..1aa711582 100644 --- a/src/mavedb/lib/permissions/score_calibration.py +++ b/src/mavedb/lib/permissions/score_calibration.py @@ -266,11 +266,13 @@ def _handle_change_rank_action( # System admins may change the rank of any ScoreCalibration. if roles_permitted(active_roles, [UserRole.admin]): return PermissionResponse(True) - # Owners may change the rank of their own ScoreCalibration. - if user_is_owner: + + # Score set contributors may always change the rank of calibrations on their score set. + if user_is_contributor_to_score_set: return PermissionResponse(True) - # If the calibration is investigator provided, contributors to the ScoreSet may change its rank. - if entity.investigator_provided and user_is_contributor_to_score_set: + # Owners may change the rank of their own investigator-provided calibrations. + # Community calibration owners may not — the score set team controls ranking of community contributions. + if entity.investigator_provided and user_is_owner: return PermissionResponse(True) user_may_view_private = user_is_owner or (entity.investigator_provided and user_is_contributor_to_score_set) diff --git a/src/mavedb/routers/score_calibrations.py b/src/mavedb/routers/score_calibrations.py index 7fc80dd3f..036eabaa2 100644 --- a/src/mavedb/routers/score_calibrations.py +++ b/src/mavedb/routers/score_calibrations.py @@ -6,7 +6,7 @@ from mavedb import deps from mavedb.lib.authentication import get_current_user -from mavedb.lib.authorization import require_current_user +from mavedb.lib.authorization import require_current_user_with_email from mavedb.lib.flexible_model_loader import json_or_form_loader from mavedb.lib.logging import LoggedRoute from mavedb.lib.logging.context import ( @@ -218,7 +218,7 @@ async def create_score_calibration_route( description=f"CSV file containing variant classifications. This file must contain two columns: '{calibration_variant_column_name}' and '{calibration_class_column_name}'.", ), db: Session = Depends(deps.get_db), - user_data: UserData = Depends(require_current_user), + user_data: UserData = Depends(require_current_user_with_email), ) -> ScoreCalibration: """ Create a new score calibration. @@ -261,9 +261,11 @@ async def create_score_calibration_route( ## Requirements - The score set URN must be provided to associate the calibration with an existing score set - - User must have write permission on the associated score set + - User must have an email address associated with their account - If uploading a classes_file, it must be a valid CSV with variant classification data - + - User must have ADD_CALIBRATION permission on the score set (any authenticated user for + published sets; contributors/owners/admins for private sets) + ## File Upload Details The `classes_file` parameter accepts CSV files containing variant classification data. The file should have appropriate headers and contain columns for variant urns and class names. @@ -281,9 +283,7 @@ async def create_score_calibration_route( logger.debug("ScoreSet not found", extra=logging_context()) raise HTTPException(status_code=404, detail=f"score set with URN '{calibration.score_set_urn}' not found") - # TODO#539: Allow any authenticated user to upload a score calibration for a score set, not just those with - # permission to update the score set itself. 
- assert_permission(user_data, score_set, Action.UPDATE) + assert_permission(user_data, score_set, Action.ADD_CALIBRATION) if calibration.class_based and not classes_file: raise HTTPException( @@ -367,7 +367,7 @@ async def modify_score_calibration_route( description=f"CSV file containing variant classifications. This file must contain two columns: '{calibration_variant_column_name}' and '{calibration_class_column_name}'.", ), db: Session = Depends(deps.get_db), - user_data: UserData = Depends(require_current_user), + user_data: UserData = Depends(require_current_user_with_email), ) -> ScoreCalibration: """ Modify an existing score calibration by its URN. @@ -409,8 +409,9 @@ async def modify_score_calibration_route( ``` ## Requirements + - User must have an email address associated with their account - User must have update permission on the calibration - - If changing the score_set_urn, user must have permission on the new score set + - If changing the score_set_urn, user must have ADD_CALIBRATION permission on the target score set - All fields in the update are optional - only provided fields will be modified ## File Upload Details @@ -435,9 +436,7 @@ async def modify_score_calibration_route( status_code=404, detail=f"score set with URN '{calibration_update.score_set_urn}' not found" ) - # TODO#539: Allow any authenticated user to upload a score calibration for a score set, not just those with - # permission to update the score set itself. - assert_permission(user_data, score_set_update, Action.UPDATE) + assert_permission(user_data, score_set_update, Action.ADD_CALIBRATION) else: score_set_update = None @@ -505,7 +504,7 @@ async def delete_score_calibration_route( *, urn: str, db: Session = Depends(deps.get_db), - user_data: UserData = Depends(require_current_user), + user_data: UserData = Depends(require_current_user_with_email), ) -> None: """ Delete an existing score calibration by its URN. @@ -542,7 +541,7 @@ async def promote_score_calibration_to_primary_route( False, description="Whether to demote any existing primary calibration", alias="demoteExistingPrimary" ), db: Session = Depends(deps.get_db), - user_data: UserData = Depends(require_current_user), + user_data: UserData = Depends(require_current_user_with_email), ) -> ScoreCalibration: """ Promote a score calibration to be the primary calibration for its associated score set. @@ -608,7 +607,7 @@ def demote_score_calibration_from_primary_route( *, urn: str, db: Session = Depends(deps.get_db), - user_data: UserData = Depends(require_current_user), + user_data: UserData = Depends(require_current_user_with_email), ) -> ScoreCalibration: """ Demote a score calibration from being the primary calibration for its associated score set. @@ -647,7 +646,7 @@ def publish_score_calibration_route( *, urn: str, db: Session = Depends(deps.get_db), - user_data: UserData = Depends(require_current_user), + user_data: UserData = Depends(require_current_user_with_email), ) -> ScoreCalibration: """ Publish a score calibration, making it publicly visible. 
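Note: with ADD_CALIBRATION in place, a minimal client sketch for contributing a
community calibration to a published score set could look like the following.
The host, create endpoint path, token, and URN are assumptions for illustration
only; the payload shape mirrors the test helpers below.

    import requests

    calibration_payload = {...}  # hypothetical dict matching ScoreCalibrationCreate

    resp = requests.post(
        "https://api.mavedb.org/api/v1/score-calibrations/",  # assumed create path
        headers={"Authorization": "Bearer <api-token>"},
        json={**calibration_payload, "scoreSetUrn": "urn:mavedb:00000001-a-1"},
    )
    created = resp.json()
    # Non-contributor uploads are community-provided and start private:
    assert created["investigatorProvided"] is False
    assert created["private"] is True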
diff --git a/src/mavedb/view_models/score_calibration.py b/src/mavedb/view_models/score_calibration.py index b4fb81735..abcea58f2 100644 --- a/src/mavedb/view_models/score_calibration.py +++ b/src/mavedb/view_models/score_calibration.py @@ -436,6 +436,43 @@ class ScoreCalibrationModify(ScoreCalibrationBase): classification_sources: Sequence[PublicationIdentifierCreate] method_sources: Sequence[PublicationIdentifierCreate] + # TODO#668: Move this validator to ScoreCalibrationBase once legacy calibrations have been + # backfilled with publication associations. Currently on the write model only so that existing + # calibrations without publications can still be serialized for API read responses. + @model_validator(mode="after") + def functional_classifications_require_publication_sources( + self: "ScoreCalibrationModify", + ) -> "ScoreCalibrationModify": + """Enforce publication source requirements when functional classifications are present. + + Calibrations with functional classifications must cite method and threshold publications. + If any classification includes ACMG evidence, classification publications are also required. + Baseline-score-only calibrations (no functional classifications) are exempt. + """ + if not self.functional_classifications: + return self + + if not self.method_sources: + raise ValidationError( + "Calibrations with functional classifications must provide at least one method source publication.", + custom_loc=["body", "methodSources"], + ) + + if not self.threshold_sources: + raise ValidationError( + "Calibrations with functional classifications must provide at least one threshold source publication.", + custom_loc=["body", "thresholdSources"], + ) + + has_acmg_classification = any(fc.acmg_classification is not None for fc in self.functional_classifications) + if has_acmg_classification and not self.classification_sources: + raise ValidationError( + "Calibrations with ACMG classifications must provide at least one classification source publication.", + custom_loc=["body", "classificationSources"], + ) + + return self + class ScoreCalibrationCreate(ScoreCalibrationModify): """Model used to create a new score calibration.""" diff --git a/tests/helpers/constants.py b/tests/helpers/constants.py index 3ab620b50..890396954 100644 --- a/tests/helpers/constants.py +++ b/tests/helpers/constants.py @@ -1578,9 +1578,9 @@ TEST_FUNCTIONAL_RANGE_ABNORMAL, TEST_FUNCTIONAL_RANGE_NOT_SPECIFIED, ], - "threshold_sources": [], - "classification_sources": [], - "method_sources": [], + "threshold_sources": [{"identifier": TEST_PUBMED_IDENTIFIER, "db_name": "PubMed"}], + "classification_sources": [{"identifier": TEST_PUBMED_IDENTIFIER, "db_name": "PubMed"}], + "method_sources": [{"identifier": TEST_PUBMED_IDENTIFIER, "db_name": "PubMed"}], "calibration_metadata": {}, } @@ -1670,8 +1670,8 @@ TEST_FUNCTIONAL_RANGE_ABNORMAL, ], "threshold_sources": [{"identifier": TEST_PUBMED_IDENTIFIER, "db_name": "PubMed"}], - "classification_sources": [], - "method_sources": [], + "classification_sources": [{"identifier": TEST_PUBMED_IDENTIFIER, "db_name": "PubMed"}], + "method_sources": [{"identifier": TEST_PUBMED_IDENTIFIER, "db_name": "PubMed"}], "calibration_metadata": {}, } @@ -1687,8 +1687,8 @@ TEST_SAVED_FUNCTIONAL_RANGE_ABNORMAL, ], "thresholdSources": [SAVED_PUBMED_PUBLICATION], - "classificationSources": [], - "methodSources": [], + "classificationSources": [SAVED_PUBMED_PUBLICATION], + "methodSources": [SAVED_PUBMED_PUBLICATION], "id": 2, "investigatorProvided": True, "primary": False, diff 
--git a/tests/lib/conftest.py b/tests/lib/conftest.py index c281f5eb0..c55c79aa9 100644 --- a/tests/lib/conftest.py +++ b/tests/lib/conftest.py @@ -2,12 +2,14 @@ from datetime import datetime from pathlib import Path from shutil import copytree +from types import SimpleNamespace from unittest import mock import pytest from humps import decamelize from mavedb.models.acmg_classification import ACMGClassification +from mavedb.models.enums.score_calibration_relation import ScoreCalibrationRelation from mavedb.models.enums.user_role import UserRole from mavedb.models.experiment import Experiment from mavedb.models.experiment_set import ExperimentSet @@ -183,6 +185,27 @@ def mock_experiment(): return experiment +def _build_mock_publication_associations(saved_calibration_dict: dict) -> list: + """Build mock publication identifier associations from a saved calibration dict. + + The SavedScoreCalibration model_validator(mode="before") transforms + publication_identifier_associations into threshold/classification/method source + fields. Mock calibrations need realistic associations so the transformer produces + non-empty source lists (required by the publication source validator). + """ + relation_map = { + "thresholdSources": ScoreCalibrationRelation.threshold, + "classificationSources": ScoreCalibrationRelation.classification, + "methodSources": ScoreCalibrationRelation.method, + } + associations = [] + for field, relation in relation_map.items(): + for pub_dict in saved_calibration_dict.get(field, []): + pub = SimpleNamespace(**{decamelize(k): v for k, v in pub_dict.items()}) + associations.append(SimpleNamespace(relation=relation, publication=pub)) + return associations + + @pytest.fixture def mock_functional_calibration(mock_user): calibration = mock.Mock(spec=ScoreCalibration) @@ -192,7 +215,9 @@ def mock_functional_calibration(mock_user): calibration.primary = True # Ensure functional calibration is primary for tests calibration.notes = None - calibration.publication_identifier_associations = [] + calibration.publication_identifier_associations = _build_mock_publication_associations( + TEST_SAVED_BRNICH_SCORE_CALIBRATION_RANGE_BASED + ) calibration.created_by = mock_user calibration.modified_by = mock_user return calibration @@ -207,7 +232,9 @@ def mock_pathogenicity_calibration(mock_user): calibration.primary = True # Ensure pathogenicity calibration is primary for tests calibration.notes = None - calibration.publication_identifier_associations = [] + calibration.publication_identifier_associations = _build_mock_publication_associations( + TEST_SAVED_PATHOGENICITY_SCORE_CALIBRATION + ) calibration.created_by = mock_user calibration.modified_by = mock_user return calibration diff --git a/tests/lib/permissions/test_score_calibration.py b/tests/lib/permissions/test_score_calibration.py index 051209063..0c94b8e21 100644 --- a/tests/lib/permissions/test_score_calibration.py +++ b/tests/lib/permissions/test_score_calibration.py @@ -447,43 +447,33 @@ class TestScoreCalibrationChangeRankActionHandler: PermissionTest( "ScoreCalibration", "published", "admin", Action.CHANGE_RANK, True, investigator_provided=False ), - # Owners: Can change rank of their own ScoreCalibrations regardless of state or investigator_provided flag + # Owners: Can change rank of their own investigator-provided ScoreCalibrations, but NOT community-provided ones + # (community calibrations should be promoted by the score set team, not the calibration creator) PermissionTest( "ScoreCalibration", "private", "owner", 
Action.CHANGE_RANK, True, investigator_provided=True ), PermissionTest( - "ScoreCalibration", "private", "owner", Action.CHANGE_RANK, True, investigator_provided=False + "ScoreCalibration", "private", "owner", Action.CHANGE_RANK, False, 403, investigator_provided=False ), PermissionTest( "ScoreCalibration", "published", "owner", Action.CHANGE_RANK, True, investigator_provided=True ), PermissionTest( - "ScoreCalibration", "published", "owner", Action.CHANGE_RANK, True, investigator_provided=False + "ScoreCalibration", "published", "owner", Action.CHANGE_RANK, False, 403, investigator_provided=False ), - # Contributors to associated ScoreSet: Can change rank of investigator-provided ScoreCalibrations (private or published), but cannot change rank of community-provided ones + # Contributors to associated ScoreSet: Can change rank of both investigator-provided and community-provided + # ScoreCalibrations (the score set team controls which calibration becomes primary) PermissionTest( "ScoreCalibration", "private", "contributor", Action.CHANGE_RANK, True, investigator_provided=True ), PermissionTest( - "ScoreCalibration", - "private", - "contributor", - Action.CHANGE_RANK, - False, - 404, - investigator_provided=False, + "ScoreCalibration", "private", "contributor", Action.CHANGE_RANK, True, investigator_provided=False ), PermissionTest( "ScoreCalibration", "published", "contributor", Action.CHANGE_RANK, True, investigator_provided=True ), PermissionTest( - "ScoreCalibration", - "published", - "contributor", - Action.CHANGE_RANK, - False, - 403, - investigator_provided=False, + "ScoreCalibration", "published", "contributor", Action.CHANGE_RANK, True, investigator_provided=False ), # Other users: Cannot change rank of any ScoreCalibrations PermissionTest( @@ -510,6 +500,19 @@ class TestScoreCalibrationChangeRankActionHandler: 403, investigator_provided=False, ), + # Mappers: Cannot change rank of any ScoreCalibrations + PermissionTest( + "ScoreCalibration", "private", "mapper", Action.CHANGE_RANK, False, 404, investigator_provided=True + ), + PermissionTest( + "ScoreCalibration", "private", "mapper", Action.CHANGE_RANK, False, 404, investigator_provided=False + ), + PermissionTest( + "ScoreCalibration", "published", "mapper", Action.CHANGE_RANK, False, 403, investigator_provided=True + ), + PermissionTest( + "ScoreCalibration", "published", "mapper", Action.CHANGE_RANK, False, 403, investigator_provided=False + ), # Anonymous users: Cannot change rank of any ScoreCalibrations PermissionTest( "ScoreCalibration", "private", "anonymous", Action.CHANGE_RANK, False, 404, investigator_provided=True diff --git a/tests/routers/test_score_calibrations.py b/tests/routers/test_score_calibrations.py index 5ecbb880c..4d7b109f0 100644 --- a/tests/routers/test_score_calibrations.py +++ b/tests/routers/test_score_calibrations.py @@ -1372,7 +1372,7 @@ def test_cannot_create_score_calibration_when_score_set_not_owned_by_user( ], indirect=["mock_publication_fetch"], ) -def test_cannot_create_score_calibration_in_public_score_set_when_score_set_not_owned_by_user( +def test_can_create_score_calibration_in_public_score_set_as_non_contributor( client, setup_router_db, mock_publication_fetch, session, data_provider, data_files, extra_user_app_overrides ): experiment = create_experiment(client) @@ -1396,9 +1396,11 @@ def test_cannot_create_score_calibration_in_public_score_set_when_score_set_not_ }, ) - assert response.status_code == 403 - error = response.json() - assert f"insufficient permissions on score set with URN 
'{score_set['urn']}'" in error["detail"] + assert response.status_code == 200 + calibration_response = response.json() + assert calibration_response["scoreSetUrn"] == score_set["urn"] + assert calibration_response["investigatorProvided"] is False + assert calibration_response["private"] is True @pytest.mark.parametrize( @@ -2102,9 +2104,10 @@ def test_cannot_update_score_calibration_in_published_score_set_when_score_set_n }, ) - assert response.status_code == 403 - error = response.json() - assert f"insufficient permissions on score set with URN '{score_set['urn']}'" in error["detail"] + # The calibration is private and investigator-provided, so the extra user cannot see it. + # With published score sets open to any authenticated user, the denial comes from the + # calibration-level permission check (hidden as 404) rather than the score set check. + assert response.status_code == 404 @pytest.mark.parametrize( @@ -3425,9 +3428,11 @@ def test_can_promote_to_primary_if_primary_exists_when_demote_existing_is_true( ], indirect=["mock_publication_fetch"], ) -def test_cannot_promote_to_primary_with_demote_existing_flag_if_user_does_not_have_change_rank_permissions_on_existing_primary( +def test_score_set_owner_can_promote_to_primary_with_demote_existing_flag_on_community_calibration( client, setup_router_db, mock_publication_fetch, session, data_provider, data_files, admin_app_overrides ): + """Score set owners can change rank of community calibrations, so they can promote + their own calibration while demoting an admin-created community primary.""" experiment = create_experiment(client) score_set = create_seq_score_set_with_mapped_variants( client, @@ -3449,16 +3454,15 @@ def test_cannot_promote_to_primary_with_demote_existing_flag_if_user_does_not_ha f"/api/v1/score-calibrations/{secondary_calibration['urn']}/promote-to-primary?demoteExistingPrimary=true", ) - assert response.status_code == 403 - promotion_response = response.json() - assert "insufficient permissions on score calibration with URN" in promotion_response["detail"] - - # verify the previous primary is still primary + assert response.status_code == 200 + promoted = response.json() + assert promoted["primary"] is True + # verify the previous primary was demoted get_response = client.get(f"/api/v1/score-calibrations/{primary_calibration['urn']}") assert get_response.status_code == 200 previous_primary = get_response.json() - assert previous_primary["primary"] is True + assert previous_primary["primary"] is False ########################################################### @@ -4497,3 +4501,388 @@ def test_anonymous_user_can_get_all_variants_for_public_calibration( assert fc_variants["functionalClassificationId"] == calibration["functionalClassifications"][i]["id"] assert isinstance(fc_variants["variants"], list) assert len(fc_variants["variants"]) == calibration["functionalClassifications"][i]["variantCount"] + + +########################################################### +# Independent calibration creation and publication source validation +########################################################### + + +@pytest.mark.parametrize( + "mock_publication_fetch", + [ + [ + {"dbName": "PubMed", "identifier": TEST_PUBMED_IDENTIFIER}, + {"dbName": "bioRxiv", "identifier": TEST_BIORXIV_IDENTIFIER}, + ] + ], + indirect=["mock_publication_fetch"], +) +def test_cannot_create_score_calibration_in_private_score_set_as_non_contributor( + client, setup_router_db, mock_publication_fetch, session, data_provider, data_files, extra_user_app_overrides +): 
+ experiment = create_experiment(client) + score_set = create_seq_score_set_with_mapped_variants( + client, + session, + data_provider, + experiment["urn"], + data_files / "scores.csv", + ) + + with DependencyOverrider(extra_user_app_overrides): + response = client.post( + "/api/v1/score-calibrations", + json={ + "scoreSetUrn": score_set["urn"], + **deepcamelize(TEST_BRNICH_SCORE_CALIBRATION_RANGE_BASED), + }, + ) + + assert response.status_code == 404 + error = response.json() + assert f"score set with URN '{score_set['urn']}' not found" in error["detail"] + + +def test_cannot_create_score_calibration_with_classifications_and_no_method_sources(client, setup_router_db): + response = client.post( + "/api/v1/score-calibrations", + json={ + "scoreSetUrn": "urn:mavedb:00000001-a-1", + "title": "Test", + "functionalClassifications": [ + { + "label": "normal", + "functionalClassification": "normal", + "range": [0.0, 5.0], + } + ], + "thresholdSources": [{"identifier": TEST_PUBMED_IDENTIFIER, "dbName": "PubMed"}], + "classificationSources": [], + "methodSources": [], + }, + ) + + assert response.status_code == 422 + + +def test_cannot_create_score_calibration_with_classifications_and_no_threshold_sources(client, setup_router_db): + response = client.post( + "/api/v1/score-calibrations", + json={ + "scoreSetUrn": "urn:mavedb:00000001-a-1", + "title": "Test", + "functionalClassifications": [ + { + "label": "normal", + "functionalClassification": "normal", + "range": [0.0, 5.0], + } + ], + "thresholdSources": [], + "classificationSources": [], + "methodSources": [{"identifier": TEST_PUBMED_IDENTIFIER, "dbName": "PubMed"}], + }, + ) + + assert response.status_code == 422 + + +def test_cannot_create_score_calibration_with_acmg_classification_and_no_classification_sources( + client, setup_router_db +): + response = client.post( + "/api/v1/score-calibrations", + json={ + "scoreSetUrn": "urn:mavedb:00000001-a-1", + "title": "Test", + "functionalClassifications": [ + { + "label": "normal", + "functionalClassification": "normal", + "range": [0.0, 5.0], + "acmgClassification": {"criterion": "BS3", "evidenceStrength": "STRONG"}, + "oddspathsRatio": 0.001, + } + ], + "thresholdSources": [{"identifier": TEST_PUBMED_IDENTIFIER, "dbName": "PubMed"}], + "classificationSources": [], + "methodSources": [{"identifier": TEST_PUBMED_IDENTIFIER, "dbName": "PubMed"}], + }, + ) + + assert response.status_code == 422 + + +@pytest.mark.parametrize( + "mock_publication_fetch", + [ + [ + {"dbName": "PubMed", "identifier": TEST_PUBMED_IDENTIFIER}, + ] + ], + indirect=["mock_publication_fetch"], +) +def test_can_create_baseline_only_calibration_without_publication_sources( + client, setup_router_db, mock_publication_fetch, session, data_provider, data_files +): + experiment = create_experiment(client) + score_set = create_seq_score_set_with_mapped_variants( + client, + session, + data_provider, + experiment["urn"], + data_files / "scores.csv", + ) + + response = client.post( + "/api/v1/score-calibrations", + json={ + "scoreSetUrn": score_set["urn"], + "title": "Baseline Only", + "baselineScore": 1.0, + "baselineScoreDescription": "Wild-type score", + "thresholdSources": [], + "classificationSources": [], + "methodSources": [], + }, + ) + + assert response.status_code == 200 + calibration_response = response.json() + assert calibration_response["scoreSetUrn"] == score_set["urn"] + + +########################################################### +# Community calibration promotion restrictions 
+########################################################### + + +@pytest.mark.parametrize( + "mock_publication_fetch", + [ + [ + {"dbName": "PubMed", "identifier": TEST_PUBMED_IDENTIFIER}, + {"dbName": "bioRxiv", "identifier": TEST_BIORXIV_IDENTIFIER}, + ] + ], + indirect=["mock_publication_fetch"], +) +def test_community_calibration_owner_cannot_promote_to_primary( + client, setup_router_db, mock_publication_fetch, session, data_provider, data_files, extra_user_app_overrides +): + experiment = create_experiment(client) + score_set = create_seq_score_set_with_mapped_variants( + client, + session, + data_provider, + experiment["urn"], + data_files / "scores.csv", + ) + + with patch.object(ArqRedis, "enqueue_job", return_value=None): + score_set = publish_score_set(client, score_set["urn"]) + + # Create community calibration as extra user (non-contributor) + with DependencyOverrider(extra_user_app_overrides): + calibration = create_test_score_calibration_in_score_set_via_client( + client, score_set["urn"], deepcamelize(TEST_BRNICH_SCORE_CALIBRATION_RANGE_BASED) + ) + assert calibration["investigatorProvided"] is False + + # Publish it + publish_test_score_calibration_via_client(client, calibration["urn"]) + + # Try to promote as the community calibration owner — should be denied + response = client.post( + f"/api/v1/score-calibrations/{calibration['urn']}/promote-to-primary", + ) + + assert response.status_code == 403 + + +@pytest.mark.parametrize( + "mock_publication_fetch", + [ + [ + {"dbName": "PubMed", "identifier": TEST_PUBMED_IDENTIFIER}, + {"dbName": "bioRxiv", "identifier": TEST_BIORXIV_IDENTIFIER}, + ] + ], + indirect=["mock_publication_fetch"], +) +def test_score_set_owner_can_promote_community_calibration_to_primary( + client, setup_router_db, mock_publication_fetch, session, data_provider, data_files, extra_user_app_overrides +): + experiment = create_experiment(client) + score_set = create_seq_score_set_with_mapped_variants( + client, + session, + data_provider, + experiment["urn"], + data_files / "scores.csv", + ) + + with patch.object(ArqRedis, "enqueue_job", return_value=None): + score_set = publish_score_set(client, score_set["urn"]) + + # Create community calibration as extra user (non-contributor) + with DependencyOverrider(extra_user_app_overrides): + calibration = create_test_score_calibration_in_score_set_via_client( + client, score_set["urn"], deepcamelize(TEST_BRNICH_SCORE_CALIBRATION_RANGE_BASED) + ) + assert calibration["investigatorProvided"] is False + publish_test_score_calibration_via_client(client, calibration["urn"]) + + # Score set owner promotes the community calibration — should succeed + response = client.post( + f"/api/v1/score-calibrations/{calibration['urn']}/promote-to-primary", + ) + + assert response.status_code == 200 + promotion_response = response.json() + assert promotion_response["primary"] is True + + +########################################################### +# Additional coverage: community calibration lifecycle +########################################################### + + +@pytest.mark.parametrize( + "mock_publication_fetch", + [ + [ + {"dbName": "PubMed", "identifier": TEST_PUBMED_IDENTIFIER}, + {"dbName": "bioRxiv", "identifier": TEST_BIORXIV_IDENTIFIER}, + ] + ], + indirect=["mock_publication_fetch"], +) +def test_non_contributor_can_update_own_community_calibration( + client, setup_router_db, mock_publication_fetch, session, data_provider, data_files, extra_user_app_overrides +): + """A non-contributor who creates a community 
calibration on a published score set can update it.""" + experiment = create_experiment(client) + score_set = create_seq_score_set_with_mapped_variants( + client, + session, + data_provider, + experiment["urn"], + data_files / "scores.csv", + ) + + with patch.object(ArqRedis, "enqueue_job", return_value=None): + score_set = publish_score_set(client, score_set["urn"]) + + with DependencyOverrider(extra_user_app_overrides): + calibration = create_test_score_calibration_in_score_set_via_client( + client, score_set["urn"], deepcamelize(TEST_BRNICH_SCORE_CALIBRATION_RANGE_BASED) + ) + assert calibration["investigatorProvided"] is False + + # Update the calibration as the same non-contributor user + response = client.put( + f"/api/v1/score-calibrations/{calibration['urn']}", + json={ + "scoreSetUrn": score_set["urn"], + **deepcamelize(TEST_PATHOGENICITY_SCORE_CALIBRATION), + }, + ) + + assert response.status_code == 200 + updated = response.json() + assert updated["urn"] == calibration["urn"] + assert updated["investigatorProvided"] is False + + +@pytest.mark.parametrize( + "mock_publication_fetch", + [ + [ + {"dbName": "PubMed", "identifier": TEST_PUBMED_IDENTIFIER}, + {"dbName": "bioRxiv", "identifier": TEST_BIORXIV_IDENTIFIER}, + ] + ], + indirect=["mock_publication_fetch"], +) +def test_non_contributor_cannot_move_calibration_to_private_score_set( + client, setup_router_db, mock_publication_fetch, session, data_provider, data_files, extra_user_app_overrides +): + """A non-contributor cannot move their community calibration to a private score set they don't own.""" + experiment = create_experiment(client) + published_score_set = create_seq_score_set_with_mapped_variants( + client, + session, + data_provider, + experiment["urn"], + data_files / "scores.csv", + ) + + with patch.object(ArqRedis, "enqueue_job", return_value=None): + published_score_set = publish_score_set(client, published_score_set["urn"]) + + # Create a second (private) score set on a new experiment + experiment_2 = create_experiment(client) + private_score_set = create_seq_score_set_with_mapped_variants( + client, + session, + data_provider, + experiment_2["urn"], + data_files / "scores.csv", + ) + + with DependencyOverrider(extra_user_app_overrides): + calibration = create_test_score_calibration_in_score_set_via_client( + client, published_score_set["urn"], deepcamelize(TEST_BRNICH_SCORE_CALIBRATION_RANGE_BASED) + ) + + # Try to move the calibration to the private score set — should be denied + response = client.put( + f"/api/v1/score-calibrations/{calibration['urn']}", + json={ + "scoreSetUrn": private_score_set["urn"], + **deepcamelize(TEST_PATHOGENICITY_SCORE_CALIBRATION), + }, + ) + + # Non-contributor can't see the private score set, so gets 404 + assert response.status_code == 404 + + +@pytest.mark.parametrize( + "mock_publication_fetch", + [ + [ + {"dbName": "PubMed", "identifier": TEST_PUBMED_IDENTIFIER}, + {"dbName": "bioRxiv", "identifier": TEST_BIORXIV_IDENTIFIER}, + ] + ], + indirect=["mock_publication_fetch"], +) +def test_cannot_create_score_calibration_without_email( + client, setup_router_db, mock_publication_fetch, session, data_provider, data_files +): + """Users without an email address cannot create score calibrations.""" + experiment = create_experiment(client) + score_set = create_seq_score_set_with_mapped_variants( + client, + session, + data_provider, + experiment["urn"], + data_files / "scores.csv", + ) + + # Clear the user's email + client.put("api/v1/users/me", json={"email": None}) + + response = 
client.post(
+        "/api/v1/score-calibrations/",
+        json={
+            "scoreSetUrn": score_set["urn"],
+            **deepcamelize(TEST_BRNICH_SCORE_CALIBRATION_RANGE_BASED),
+        },
+    )
+
+    assert response.status_code == 403
+    assert "email" in response.json()["detail"].lower()
diff --git a/tests/view_models/test_score_calibration.py b/tests/view_models/test_score_calibration.py
index e96ab010e..8689d2e43 100644
--- a/tests/view_models/test_score_calibration.py
+++ b/tests/view_models/test_score_calibration.py
@@ -8,6 +8,7 @@
 from mavedb.models.enums.score_calibration_relation import ScoreCalibrationRelation
 from mavedb.view_models.score_calibration import (
     FunctionalClassificationCreate,
+    SavedScoreCalibration,
     ScoreCalibration,
     ScoreCalibrationCreate,
     ScoreCalibrationWithScoreSetUrn,
@@ -695,3 +696,34 @@ def test_score_calibration_with_score_set_urn_can_be_created_from_non_orm_contex
 
     assert sc.title == data["title"]
     assert sc.score_set_urn == data["score_set_urn"]
+
+
+def test_saved_score_calibration_with_empty_publication_sources_and_functional_classifications():
+    """Legacy calibrations with functional classifications but no publication sources should serialize without error.
+
+    The publication source validator lives on ScoreCalibrationModify (write path), not ScoreCalibrationBase,
+    so SavedScoreCalibration can load existing DB data that predates the publication requirement.
+    """
+    data = deepcopy(TEST_SAVED_BRNICH_SCORE_CALIBRATION_RANGE_BASED)
+    data["thresholdSources"] = []
+    data["classificationSources"] = []
+    data["methodSources"] = []
+
+    sc = SavedScoreCalibration.model_validate(data)
+
+    assert sc.title == data["title"]
+    assert len(sc.functional_classifications) > 0
+    assert sc.threshold_sources == []
+    assert sc.classification_sources == []
+    assert sc.method_sources == []
+
+
+def test_create_calibration_with_functional_classifications_requires_publication_sources():
+    """Creating a calibration with functional classifications but no publications should fail."""
+    data = deepcopy(TEST_BRNICH_SCORE_CALIBRATION_RANGE_BASED)
+    data["threshold_sources"] = []
+    data["classification_sources"] = []
+    data["method_sources"] = []
+
+    with pytest.raises(ValidationError, match="must provide at least one method source publication"):
+        ScoreCalibrationCreate.model_validate(data)

From 3ff28aa973832ba6157eba533bf8d6586b8c8dbf Mon Sep 17 00:00:00 2001
From: Benjamin Capodanno
Date: Mon, 23 Feb 2026 19:23:09 -0800
Subject: [PATCH 5/5] refactor: rename classification sources to evidence in
 score calibration models and related logic

The "classification" relation held the publications that provide the
evidence behind a calibration's ACMG classifications, not the
classifications themselves, so the relation is renamed to "evidence"
throughout.

Changes:
- Rename ScoreCalibrationRelation.classification to ScoreCalibrationRelation.evidence
- Rename classification_sources fields to evidence_sources in the view
  models, lib helpers, transforms, loader scripts, and tests
- Add a data migration rewriting existing rows in
  score_calibration_publication_identifiers from 'classification' to
  'evidence'

Breaking change: API clients must migrate from `classificationSources`
to `evidenceSources` in calibration requests and responses.
---
 ...ename_calibration_publication_relation_.py | 35 +++++++++++
 src/mavedb/lib/score_calibrations.py | 20 +++----
 src/mavedb/lib/validation/transform.py | 8 +--
 .../enums/score_calibration_relation.py | 2 +-
 src/mavedb/scripts/load_calibration_csv.py | 2 +-
 .../scripts/load_excalibr_calibrations.py | 2 +-
 src/mavedb/view_models/score_calibration.py | 20 +++----
 tests/helpers/constants.py | 14 ++---
 tests/lib/conftest.py | 2 +-
 tests/lib/test_score_calibrations.py | 38 ++++++------
 tests/routers/test_score_calibrations.py | 12 ++--
 tests/view_models/test_score_calibration.py | 60 +++++++++----------
 12 files changed, 124 insertions(+), 91 deletions(-)
 create mode 100644 alembic/versions/659999dec5d9_rename_calibration_publication_relation_.py

diff --git a/alembic/versions/659999dec5d9_rename_calibration_publication_relation_.py b/alembic/versions/659999dec5d9_rename_calibration_publication_relation_.py
new file mode 100644
index 000000000..306f1d15e
--- /dev/null
+++ 
b/alembic/versions/659999dec5d9_rename_calibration_publication_relation_.py @@ -0,0 +1,35 @@ +"""rename calibration publication relation classification to evidence + +Revision ID: 659999dec5d9 +Revises: dcf8572d3a17 +Create Date: 2026-02-23 00:00:00.000000 + +""" + +from alembic import op + +# revision identifiers, used by Alembic. +revision = "659999dec5d9" +down_revision = "dcf8572d3a17" +branch_labels = None +depends_on = None + + +def upgrade(): + op.execute( + """ + UPDATE score_calibration_publication_identifiers + SET relation = 'evidence' + WHERE relation = 'classification' + """ + ) + + +def downgrade(): + op.execute( + """ + UPDATE score_calibration_publication_identifiers + SET relation = 'classification' + WHERE relation = 'evidence' + """ + ) diff --git a/src/mavedb/lib/score_calibrations.py b/src/mavedb/lib/score_calibrations.py index 98c7708c3..7a5ee21fd 100644 --- a/src/mavedb/lib/score_calibrations.py +++ b/src/mavedb/lib/score_calibrations.py @@ -106,15 +106,15 @@ async def _create_score_calibration( publication identifier associations. For each publication source listed in the incoming ScoreCalibrationCreate model - (threshold_sources, classification_sources, method_sources), this function + (threshold_sources, evidence_sources, method_sources), this function ensures a corresponding PublicationIdentifier row exists (via find_or_create_publication_identifier) and creates a ScoreCalibrationPublicationIdentifierAssociation that links the identifier to the new calibration under the appropriate relation type - (ScoreCalibrationRelation.threshold / .classification / .method). + (ScoreCalibrationRelation.threshold / .evidence / .method). Fields in calibration_create that represent source lists or audit metadata - (threshold_sources, classification_sources, method_sources, created_at, + (threshold_sources, evidence_sources, method_sources, created_at, created_by, modified_at, modified_by) are excluded when instantiating the ScoreCalibration; audit fields created_by and modified_by are explicitly set from the provided user_data. The resulting ScoreCalibration object includes @@ -157,7 +157,7 @@ async def _create_score_calibration( """ relation_sources = ( (ScoreCalibrationRelation.threshold, calibration_create.threshold_sources or []), - (ScoreCalibrationRelation.classification, calibration_create.classification_sources or []), + (ScoreCalibrationRelation.evidence, calibration_create.evidence_sources or []), (ScoreCalibrationRelation.method, calibration_create.method_sources or []), ) @@ -182,7 +182,7 @@ async def _create_score_calibration( exclude={ "functional_classifications", "threshold_sources", - "classification_sources", + "evidence_sources", "method_sources", "score_set_urn", }, @@ -344,7 +344,7 @@ async def modify_score_calibration( 2. Loads (via SELECT ... WHERE urn = :score_set_urn) the ScoreSet that will contain the calibration. 3. Reconciles publication identifier associations for three relation categories: - threshold_sources -> ScoreCalibrationRelation.threshold - - classification_sources -> ScoreCalibrationRelation.classification + - evidence_sources -> ScoreCalibrationRelation.evidence - method_sources -> ScoreCalibrationRelation.method For each provided source identifier: * Calls find_or_create_publication_identifier to obtain (or persist) the identifier row. @@ -352,7 +352,7 @@ async def modify_score_calibration( * Creates a new association if missing. Any previously existing associations not referenced in the update are deleted from the session. 4. 
Updates mutable scalar fields on the calibration instance from calibration_update, excluding: - threshold_sources, classification_sources, method_sources, created_at, created_by, + threshold_sources, evidence_sources, method_sources, created_at, created_by, modified_at, modified_by. 5. Reassigns the calibration to the resolved ScoreSet, replaces its association collection, and stamps modified_by with the requesting user. @@ -366,7 +366,7 @@ async def modify_score_calibration( The existing calibration ORM instance to be modified (must be persistent or pending). calibration_update : score_calibration.ScoreCalibrationModify - score_set_urn (required) - - threshold_sources, classification_sources, method_sources (iterables of identifier objects) + - threshold_sources, evidence_sources, method_sources (iterables of identifier objects) - Additional mutable calibration attributes. user : User Context for the authenticated user; the user to be recorded for audit. @@ -415,7 +415,7 @@ async def modify_score_calibration( relation_sources = ( (ScoreCalibrationRelation.threshold, calibration_update.threshold_sources or []), - (ScoreCalibrationRelation.classification, calibration_update.classification_sources or []), + (ScoreCalibrationRelation.evidence, calibration_update.evidence_sources or []), (ScoreCalibrationRelation.method, calibration_update.method_sources or []), ) @@ -460,7 +460,7 @@ async def modify_score_calibration( if attr not in { "functional_classifications", "threshold_sources", - "classification_sources", + "evidence_sources", "method_sources", "created_at", "created_by", diff --git a/src/mavedb/lib/validation/transform.py b/src/mavedb/lib/validation/transform.py index 2152eff9d..5cb73c14d 100644 --- a/src/mavedb/lib/validation/transform.py +++ b/src/mavedb/lib/validation/transform.py @@ -67,7 +67,7 @@ class TransformedScoreSetPublicationIdentifiers(TypedDict): class TransformedCalibrationPublicationIdentifiers(TypedDict): threshold_sources: list[PublicationIdentifier] - classification_sources: list[PublicationIdentifier] + evidence_sources: list[PublicationIdentifier] method_sources: list[PublicationIdentifier] @@ -99,7 +99,7 @@ def transform_score_calibration_publication_identifiers( publication_identifiers: Optional[Sequence[ScoreCalibrationPublicationIdentifierAssociation]], ) -> TransformedCalibrationPublicationIdentifiers: transformed_publication_identifiers = TransformedCalibrationPublicationIdentifiers( - threshold_sources=[], classification_sources=[], method_sources=[] + threshold_sources=[], evidence_sources=[], method_sources=[] ) if not publication_identifiers: @@ -110,10 +110,10 @@ def transform_score_calibration_publication_identifiers( for assc in publication_identifiers if assc.relation is ScoreCalibrationRelation.threshold ] - transformed_publication_identifiers["classification_sources"] = [ + transformed_publication_identifiers["evidence_sources"] = [ TypeAdapter(PublicationIdentifier).validate_python(assc.publication) for assc in publication_identifiers - if assc.relation is ScoreCalibrationRelation.classification + if assc.relation is ScoreCalibrationRelation.evidence ] transformed_publication_identifiers["method_sources"] = [ TypeAdapter(PublicationIdentifier).validate_python(assc.publication) diff --git a/src/mavedb/models/enums/score_calibration_relation.py b/src/mavedb/models/enums/score_calibration_relation.py index 1c682479d..aa53da783 100644 --- a/src/mavedb/models/enums/score_calibration_relation.py +++ 
b/src/mavedb/models/enums/score_calibration_relation.py @@ -3,5 +3,5 @@ class ScoreCalibrationRelation(enum.Enum): threshold = "threshold" - classification = "classification" + evidence = "evidence" method = "method" diff --git a/src/mavedb/scripts/load_calibration_csv.py b/src/mavedb/scripts/load_calibration_csv.py index 6afc26cca..066509e39 100644 --- a/src/mavedb/scripts/load_calibration_csv.py +++ b/src/mavedb/scripts/load_calibration_csv.py @@ -363,7 +363,7 @@ def main(db: Session, csv_path: str, delimiter: str, overwrite: bool, purge_publ baseline_score_description=baseline_score_description, threshold_sources=threshold_publications, method_sources=method_publications, - classification_sources=calculation_publications, + evidence_sources=calculation_publications, research_use_only=False, functional_classifications=ranges, notes=calibration_notes, diff --git a/src/mavedb/scripts/load_excalibr_calibrations.py b/src/mavedb/scripts/load_excalibr_calibrations.py index e6db667b3..72f7ed2cd 100644 --- a/src/mavedb/scripts/load_excalibr_calibrations.py +++ b/src/mavedb/scripts/load_excalibr_calibrations.py @@ -234,7 +234,7 @@ def main(db: Session, csv_path: str, dataset_map: str, overwrite: bool, remove: score_set_urn=score_set.urn, calibration_metadata={"prior_probability_pathogenicity": prior}, threshold_sources=[EXCALIBR_CALIBRATION_CITATION], - classification_sources=[EXCALIBR_CALIBRATION_CITATION], + evidence_sources=[EXCALIBR_CALIBRATION_CITATION], method_sources=[EXCALIBR_CALIBRATION_CITATION], ) diff --git a/src/mavedb/view_models/score_calibration.py b/src/mavedb/view_models/score_calibration.py index abcea58f2..857d4e49f 100644 --- a/src/mavedb/view_models/score_calibration.py +++ b/src/mavedb/view_models/score_calibration.py @@ -284,7 +284,7 @@ class ScoreCalibrationBase(BaseModel): functional_classifications: Optional[Sequence[FunctionalClassificationBase]] = None threshold_sources: Sequence[PublicationIdentifierBase] - classification_sources: Sequence[PublicationIdentifierBase] + evidence_sources: Sequence[PublicationIdentifierBase] method_sources: Sequence[PublicationIdentifierBase] calibration_metadata: Optional[dict] = None @@ -433,7 +433,7 @@ class ScoreCalibrationModify(ScoreCalibrationBase): functional_classifications: Optional[Sequence[FunctionalClassificationModify]] = None threshold_sources: Sequence[PublicationIdentifierCreate] - classification_sources: Sequence[PublicationIdentifierCreate] + evidence_sources: Sequence[PublicationIdentifierCreate] method_sources: Sequence[PublicationIdentifierCreate] # TODO#668: Move this validator to ScoreCalibrationBase once legacy calibrations have been @@ -465,10 +465,10 @@ def functional_classifications_require_publication_sources( ) has_acmg_classification = any(fc.acmg_classification is not None for fc in self.functional_classifications) - if has_acmg_classification and not self.classification_sources: + if has_acmg_classification and not self.evidence_sources: raise ValidationError( - "Calibrations with ACMG classifications must provide at least one classification source publication.", - custom_loc=["body", "classificationSources"], + "Calibrations with ACMG classifications must provide at least one evidence source publication.", + custom_loc=["body", "evidenceSources"], ) return self @@ -479,7 +479,7 @@ class ScoreCalibrationCreate(ScoreCalibrationModify): functional_classifications: Optional[Sequence[FunctionalClassificationCreate]] = None threshold_sources: Sequence[PublicationIdentifierCreate] - classification_sources: 
Sequence[PublicationIdentifierCreate] + evidence_sources: Sequence[PublicationIdentifierCreate] method_sources: Sequence[PublicationIdentifierCreate] @@ -499,7 +499,7 @@ class SavedScoreCalibration(ScoreCalibrationBase): functional_classifications: Optional[Sequence[SavedFunctionalClassification]] = None threshold_sources: Sequence[SavedPublicationIdentifier] - classification_sources: Sequence[SavedPublicationIdentifier] + evidence_sources: Sequence[SavedPublicationIdentifier] method_sources: Sequence[SavedPublicationIdentifier] created_by: Optional[SavedUser] = None @@ -515,7 +515,7 @@ class Config: from_attributes = True arbitrary_types_allowed = True - @field_validator("threshold_sources", "classification_sources", "method_sources", mode="before") + @field_validator("threshold_sources", "evidence_sources", "method_sources", mode="before") def publication_identifiers_validator(cls, value: Any) -> Optional[list[PublicationIdentifier]]: """Coerce association proxy collections to plain lists.""" assert isinstance(value, Collection), "Publication identifier lists must be a collection" @@ -554,7 +554,7 @@ def generate_threshold_classification_and_method_sources(cls, data: Any): # typ data.publication_identifier_associations ) data.__setattr__("threshold_sources", publication_identifiers["threshold_sources"]) - data.__setattr__("classification_sources", publication_identifiers["classification_sources"]) + data.__setattr__("evidence_sources", publication_identifiers["evidence_sources"]) data.__setattr__("method_sources", publication_identifiers["method_sources"]) except (AttributeError, KeyError) as exc: raise ValidationError( @@ -568,7 +568,7 @@ class ScoreCalibration(SavedScoreCalibration): functional_classifications: Optional[Sequence[FunctionalClassification]] = None threshold_sources: Sequence[PublicationIdentifier] - classification_sources: Sequence[PublicationIdentifier] + evidence_sources: Sequence[PublicationIdentifier] method_sources: Sequence[PublicationIdentifier] created_by: Optional[User] = None modified_by: Optional[User] = None diff --git a/tests/helpers/constants.py b/tests/helpers/constants.py index 890396954..e06d07a12 100644 --- a/tests/helpers/constants.py +++ b/tests/helpers/constants.py @@ -1579,7 +1579,7 @@ TEST_FUNCTIONAL_RANGE_NOT_SPECIFIED, ], "threshold_sources": [{"identifier": TEST_PUBMED_IDENTIFIER, "db_name": "PubMed"}], - "classification_sources": [{"identifier": TEST_PUBMED_IDENTIFIER, "db_name": "PubMed"}], + "evidence_sources": [{"identifier": TEST_PUBMED_IDENTIFIER, "db_name": "PubMed"}], "method_sources": [{"identifier": TEST_PUBMED_IDENTIFIER, "db_name": "PubMed"}], "calibration_metadata": {}, } @@ -1596,7 +1596,7 @@ TEST_FUNCTIONAL_RANGE_NOT_SPECIFIED, ], "threshold_sources": [{"identifier": TEST_PUBMED_IDENTIFIER, "db_name": "PubMed"}], - "classification_sources": [ + "evidence_sources": [ {"identifier": TEST_PUBMED_IDENTIFIER, "db_name": "PubMed"}, {"identifier": TEST_BIORXIV_IDENTIFIER, "db_name": "bioRxiv"}, ], @@ -1609,7 +1609,7 @@ **{ camelize(k): v for k, v in TEST_BRNICH_SCORE_CALIBRATION_RANGE_BASED.items() - if k not in ("functional_classifications", "classification_sources", "threshold_sources", "method_sources") + if k not in ("functional_classifications", "evidence_sources", "threshold_sources", "method_sources") }, "functionalClassifications": [ TEST_SAVED_FUNCTIONAL_RANGE_NORMAL, @@ -1617,7 +1617,7 @@ TEST_SAVED_FUNCTIONAL_RANGE_NOT_SPECIFIED, ], "thresholdSources": [SAVED_PUBMED_PUBLICATION], - "classificationSources": 
[SAVED_PUBMED_PUBLICATION, SAVED_BIORXIV_PUBLICATION], + "evidenceSources": [SAVED_PUBMED_PUBLICATION, SAVED_BIORXIV_PUBLICATION], "methodSources": [SAVED_PUBMED_PUBLICATION], "id": 1, "urn": VALID_CALIBRATION_URN, @@ -1670,7 +1670,7 @@ TEST_FUNCTIONAL_RANGE_ABNORMAL, ], "threshold_sources": [{"identifier": TEST_PUBMED_IDENTIFIER, "db_name": "PubMed"}], - "classification_sources": [{"identifier": TEST_PUBMED_IDENTIFIER, "db_name": "PubMed"}], + "evidence_sources": [{"identifier": TEST_PUBMED_IDENTIFIER, "db_name": "PubMed"}], "method_sources": [{"identifier": TEST_PUBMED_IDENTIFIER, "db_name": "PubMed"}], "calibration_metadata": {}, } @@ -1680,14 +1680,14 @@ **{ camelize(k): v for k, v in TEST_PATHOGENICITY_SCORE_CALIBRATION.items() - if k not in ("functional_classifications", "classification_sources", "threshold_sources", "method_sources") + if k not in ("functional_classifications", "evidence_sources", "threshold_sources", "method_sources") }, "functionalClassifications": [ TEST_SAVED_FUNCTIONAL_RANGE_NORMAL, TEST_SAVED_FUNCTIONAL_RANGE_ABNORMAL, ], "thresholdSources": [SAVED_PUBMED_PUBLICATION], - "classificationSources": [SAVED_PUBMED_PUBLICATION], + "evidenceSources": [SAVED_PUBMED_PUBLICATION], "methodSources": [SAVED_PUBMED_PUBLICATION], "id": 2, "investigatorProvided": True, diff --git a/tests/lib/conftest.py b/tests/lib/conftest.py index c55c79aa9..2befdb597 100644 --- a/tests/lib/conftest.py +++ b/tests/lib/conftest.py @@ -195,7 +195,7 @@ def _build_mock_publication_associations(saved_calibration_dict: dict) -> list: """ relation_map = { "thresholdSources": ScoreCalibrationRelation.threshold, - "classificationSources": ScoreCalibrationRelation.classification, + "evidenceSources": ScoreCalibrationRelation.evidence, "methodSources": ScoreCalibrationRelation.method, } associations = [] diff --git a/tests/lib/test_score_calibrations.py b/tests/lib/test_score_calibrations.py index ad6bb0ead..9a5ba43c5 100644 --- a/tests/lib/test_score_calibrations.py +++ b/tests/lib/test_score_calibrations.py @@ -346,7 +346,7 @@ async def test_create_score_calibration_in_score_set_creates_score_calibration_w "MockCalibrationCreate", score_set_urn=(str | None, setup_lib_db_with_score_set.urn), threshold_sources=(list, []), - classification_sources=(list, []), + evidence_sources=(list, []), method_sources=(list, []), functional_classifications=(list, []), ) @@ -366,7 +366,7 @@ async def test_create_score_calibration_in_score_set_investigator_provided_set_w "MockCalibrationCreate", score_set_urn=(str | None, setup_lib_db_with_score_set.urn), threshold_sources=(list, []), - classification_sources=(list, []), + evidence_sources=(list, []), method_sources=(list, []), functional_classifications=(list, []), ) @@ -398,7 +398,7 @@ async def test_create_score_calibration_in_score_set_investigator_provided_set_w "MockCalibrationCreate", score_set_urn=(str | None, setup_lib_db_with_score_set.urn), threshold_sources=(list, []), - classification_sources=(list, []), + evidence_sources=(list, []), method_sources=(list, []), functional_classifications=(list, []), ) @@ -419,7 +419,7 @@ async def test_create_score_calibration_in_score_set_investigator_provided_not_s "MockCalibrationCreate", score_set_urn=(str | None, setup_lib_db_with_score_set.urn), threshold_sources=(list, []), - classification_sources=(list, []), + evidence_sources=(list, []), method_sources=(list, []), functional_classifications=(list, []), ) @@ -458,7 +458,7 @@ async def test_create_score_calibration_creates_score_calibration_when_score_set 
"MockCalibrationCreate", score_set_urn=(str | None, None), threshold_sources=(list, []), - classification_sources=(list, []), + evidence_sources=(list, []), method_sources=(list, []), functional_classifications=(list, []), ) @@ -493,7 +493,7 @@ async def test_create_score_calibration_propagates_errors_from_publication_find_ )() ], ), - classification_sources=(list, []), + evidence_sources=(list, []), method_sources=(list, []), functional_classifications=(list, []), ) @@ -522,7 +522,7 @@ async def test_create_score_calibration_propagates_errors_from_publication_find_ "relation,expected_relation", [ ("threshold_sources", ScoreCalibrationRelation.threshold), - ("classification_sources", ScoreCalibrationRelation.classification), + ("evidence_sources", ScoreCalibrationRelation.evidence), ("method_sources", ScoreCalibrationRelation.method), ], ) @@ -546,7 +546,7 @@ async def test_create_score_calibration_publication_identifier_associations_crea "MockCalibrationCreate", score_set_urn=(str | None, score_set_urn), threshold_sources=(list, []), - classification_sources=(list, []), + evidence_sources=(list, []), method_sources=(list, []), functional_classifications=(list, []), ) @@ -582,7 +582,7 @@ async def test_create_score_calibration_user_is_set_as_creator_and_modifier( "MockCalibrationCreate", score_set_urn=(str | None, score_set_urn), threshold_sources=(list, []), - classification_sources=(list, []), + evidence_sources=(list, []), method_sources=(list, []), functional_classifications=(list, []), ) @@ -731,7 +731,7 @@ async def test_modify_score_calibration_modifies_score_calibration_when_score_se score_set_urn=(str | None, setup_lib_db_with_score_set.urn), description=(str | None, "Modified description"), threshold_sources=(list, []), - classification_sources=(list, []), + evidence_sources=(list, []), method_sources=(list, []), functional_classifications=(list, []), ) @@ -768,7 +768,7 @@ async def test_modify_score_calibration_clears_existing_publication_identifier_a "MockCalibrationModify", score_set_urn=(str | None, setup_lib_db_with_score_set.urn), threshold_sources=(list, []), - classification_sources=(list, []), + evidence_sources=(list, []), method_sources=(list, []), functional_classifications=(list, []), ) @@ -784,7 +784,7 @@ async def test_modify_score_calibration_clears_existing_publication_identifier_a "relation,expected_relation", [ ("threshold_sources", ScoreCalibrationRelation.threshold), - ("classification_sources", ScoreCalibrationRelation.classification), + ("evidence_sources", ScoreCalibrationRelation.evidence), ("method_sources", ScoreCalibrationRelation.method), ], ) @@ -815,7 +815,7 @@ async def test_modify_score_calibration_publication_identifier_associations_crea "MockCalibrationModify", score_set_urn=(str | None, setup_lib_db_with_score_set.urn), threshold_sources=(list, []), - classification_sources=(list, []), + evidence_sources=(list, []), method_sources=(list, []), functional_classifications=(list, []), ) @@ -869,7 +869,7 @@ async def test_modify_score_calibration_retains_existing_publication_relationshi for pub_dict in TEST_BRNICH_SCORE_CALIBRATION_RANGE_BASED["threshold_sources"] ], ), - classification_sources=( + evidence_sources=( list, [ create_model( @@ -877,7 +877,7 @@ async def test_modify_score_calibration_retains_existing_publication_relationshi db_name=(str, pub_dict["db_name"]), identifier=(str, pub_dict["identifier"]), )() - for pub_dict in TEST_BRNICH_SCORE_CALIBRATION_RANGE_BASED["classification_sources"] + for pub_dict in 
TEST_BRNICH_SCORE_CALIBRATION_RANGE_BASED["evidence_sources"] ], ), method_sources=( @@ -935,7 +935,7 @@ async def test_modify_score_calibration_adds_new_publication_association( )() ], ), - classification_sources=(list, []), + evidence_sources=(list, []), method_sources=(list, []), functional_classifications=(list, []), ) @@ -976,7 +976,7 @@ async def test_modify_score_calibration_user_is_set_as_modifier( "MockCalibrationModify", score_set_urn=(str | None, setup_lib_db_with_score_set.urn), threshold_sources=(list, []), - classification_sources=(list, []), + evidence_sources=(list, []), method_sources=(list, []), functional_classifications=(list, []), ) @@ -1026,7 +1026,7 @@ async def test_modify_score_calibration_new_score_set(setup_lib_db_with_score_se "MockCalibrationModify", score_set_urn=(str | None, new_containing_score_set.urn), threshold_sources=(list, []), - classification_sources=(list, []), + evidence_sources=(list, []), method_sources=(list, []), functional_classifications=(list, []), ) @@ -1062,7 +1062,7 @@ async def test_modify_score_calibration_clears_functional_classifications( "MockCalibrationModify", score_set_urn=(str | None, setup_lib_db_with_score_set.urn), threshold_sources=(list, []), - classification_sources=(list, []), + evidence_sources=(list, []), method_sources=(list, []), functional_classifications=(list, []), ) diff --git a/tests/routers/test_score_calibrations.py b/tests/routers/test_score_calibrations.py index 4d7b109f0..63c5c43c1 100644 --- a/tests/routers/test_score_calibrations.py +++ b/tests/routers/test_score_calibrations.py @@ -4558,7 +4558,7 @@ def test_cannot_create_score_calibration_with_classifications_and_no_method_sour } ], "thresholdSources": [{"identifier": TEST_PUBMED_IDENTIFIER, "dbName": "PubMed"}], - "classificationSources": [], + "evidenceSources": [], "methodSources": [], }, ) @@ -4580,7 +4580,7 @@ def test_cannot_create_score_calibration_with_classifications_and_no_threshold_s } ], "thresholdSources": [], - "classificationSources": [], + "evidenceSources": [], "methodSources": [{"identifier": TEST_PUBMED_IDENTIFIER, "dbName": "PubMed"}], }, ) @@ -4588,9 +4588,7 @@ def test_cannot_create_score_calibration_with_classifications_and_no_threshold_s assert response.status_code == 422 -def test_cannot_create_score_calibration_with_acmg_classification_and_no_classification_sources( - client, setup_router_db -): +def test_cannot_create_score_calibration_with_acmg_classification_and_no_evidence_sources(client, setup_router_db): response = client.post( "/api/v1/score-calibrations", json={ @@ -4606,7 +4604,7 @@ def test_cannot_create_score_calibration_with_acmg_classification_and_no_classif } ], "thresholdSources": [{"identifier": TEST_PUBMED_IDENTIFIER, "dbName": "PubMed"}], - "classificationSources": [], + "evidenceSources": [], "methodSources": [{"identifier": TEST_PUBMED_IDENTIFIER, "dbName": "PubMed"}], }, ) @@ -4643,7 +4641,7 @@ def test_can_create_baseline_only_calibration_without_publication_sources( "baselineScore": 1.0, "baselineScoreDescription": "Wild-type score", "thresholdSources": [], - "classificationSources": [], + "evidenceSources": [], "methodSources": [], }, ) diff --git a/tests/view_models/test_score_calibration.py b/tests/view_models/test_score_calibration.py index 8689d2e43..788869b3b 100644 --- a/tests/view_models/test_score_calibration.py +++ b/tests/view_models/test_score_calibration.py @@ -291,12 +291,12 @@ def test_can_create_valid_score_calibration(valid_calibration): else: assert sc.threshold_sources is None - if 
valid_calibration.get("classification_sources") is not None: - assert len(sc.classification_sources) == len(valid_calibration["classification_sources"]) - for pub in valid_calibration["classification_sources"]: - assert pub["identifier"] in [rs.identifier for rs in sc.classification_sources] + if valid_calibration.get("evidence_sources") is not None: + assert len(sc.evidence_sources) == len(valid_calibration["evidence_sources"]) + for pub in valid_calibration["evidence_sources"]: + assert pub["identifier"] in [rs.identifier for rs in sc.evidence_sources] else: - assert sc.classification_sources is None + assert sc.evidence_sources is None if valid_calibration.get("method_sources") is not None: assert len(sc.method_sources) == len(valid_calibration["method_sources"]) @@ -341,12 +341,12 @@ def test_can_create_valid_score_calibration_without_functional_classifications(v else: assert sc.threshold_sources is None - if valid_calibration.get("classification_sources") is not None: - assert len(sc.classification_sources) == len(valid_calibration["classification_sources"]) - for pub in valid_calibration["classification_sources"]: - assert pub["identifier"] in [rs.identifier for rs in sc.classification_sources] + if valid_calibration.get("evidence_sources") is not None: + assert len(sc.evidence_sources) == len(valid_calibration["evidence_sources"]) + for pub in valid_calibration["evidence_sources"]: + assert pub["identifier"] in [rs.identifier for rs in sc.evidence_sources] else: - assert sc.classification_sources is None + assert sc.evidence_sources is None if valid_calibration.get("method_sources") is not None: assert len(sc.method_sources) == len(valid_calibration["method_sources"]) @@ -492,12 +492,12 @@ def test_can_create_valid_score_calibration_from_attributed_object(valid_calibra else: assert sc.threshold_sources is None - if valid_calibration.get("classificationSources") is not None: - assert len(sc.classification_sources) == len(valid_calibration["classificationSources"]) - for pub in valid_calibration["classificationSources"]: - assert pub["identifier"] in [rs.identifier for rs in sc.classification_sources] + if valid_calibration.get("evidenceSources") is not None: + assert len(sc.evidence_sources) == len(valid_calibration["evidenceSources"]) + for pub in valid_calibration["evidenceSources"]: + assert pub["identifier"] in [rs.identifier for rs in sc.evidence_sources] else: - assert sc.classification_sources is None + assert sc.evidence_sources is None if valid_calibration.get("methodSources") is not None: assert len(sc.method_sources) == len(valid_calibration["methodSources"]) @@ -517,7 +517,7 @@ def test_cannot_create_score_calibration_when_publication_information_is_missing # Add publication identifiers with missing information invalid_data.pop("thresholdSources", None) - invalid_data.pop("classificationSources", None) + invalid_data.pop("evidenceSources", None) invalid_data.pop("methodSources", None) with pytest.raises(ValidationError) as exc_info: @@ -525,7 +525,7 @@ def test_cannot_create_score_calibration_when_publication_information_is_missing assert "Field required" in str(exc_info.value) assert "thresholdSources" in str(exc_info.value) - assert "classificationSources" in str(exc_info.value) + assert "evidenceSources" in str(exc_info.value) assert "methodSources" in str(exc_info.value) @@ -537,9 +537,9 @@ def test_can_create_score_calibration_from_association_style_publication_identif dummy_attributed_object_from_dict({"publication": pub, "relation": 
ScoreCalibrationRelation.threshold}) for pub in data.pop("thresholdSources", []) ] - classification_sources = [ - dummy_attributed_object_from_dict({"publication": pub, "relation": ScoreCalibrationRelation.classification}) - for pub in data.pop("classificationSources", []) + evidence_sources = [ + dummy_attributed_object_from_dict({"publication": pub, "relation": ScoreCalibrationRelation.evidence}) + for pub in data.pop("evidenceSources", []) ] method_sources = [ dummy_attributed_object_from_dict({"publication": pub, "relation": ScoreCalibrationRelation.method}) @@ -547,7 +547,7 @@ def test_can_create_score_calibration_from_association_style_publication_identif ] # Simulate ORM model by adding required fields that would originate from the DB - data["publication_identifier_associations"] = threshold_sources + classification_sources + method_sources + data["publication_identifier_associations"] = threshold_sources + evidence_sources + method_sources data["id"] = 1 data["score_set_id"] = 1 @@ -573,12 +573,12 @@ def test_can_create_score_calibration_from_association_style_publication_identif else: assert sc.threshold_sources is None - if orig_data.get("classificationSources") is not None: - assert len(sc.classification_sources) == len(orig_data["classificationSources"]) - for pub in orig_data["classificationSources"]: - assert pub["identifier"] in [rs.identifier for rs in sc.classification_sources] + if orig_data.get("evidenceSources") is not None: + assert len(sc.evidence_sources) == len(orig_data["evidenceSources"]) + for pub in orig_data["evidenceSources"]: + assert pub["identifier"] in [rs.identifier for rs in sc.evidence_sources] else: - assert sc.classification_sources is None + assert sc.evidence_sources is None if orig_data.get("methodSources") is not None: assert len(sc.method_sources) == len(orig_data["methodSources"]) @@ -622,7 +622,7 @@ def test_can_create_score_calibration_from_non_orm_context(): assert sc.baseline_score_description == data.get("baselineScoreDescription") assert len(sc.functional_classifications) == len(data["functionalClassifications"]) assert len(sc.threshold_sources) == len(data["thresholdSources"]) - assert len(sc.classification_sources) == len(data["classificationSources"]) + assert len(sc.evidence_sources) == len(data["evidenceSources"]) assert len(sc.method_sources) == len(data["methodSources"]) assert sc.calibration_metadata == data.get("calibrationMetadata") @@ -706,7 +706,7 @@ def test_saved_score_calibration_with_empty_publication_sources_and_functional_c """ data = deepcopy(TEST_SAVED_BRNICH_SCORE_CALIBRATION_RANGE_BASED) data["thresholdSources"] = [] - data["classificationSources"] = [] + data["evidenceSources"] = [] data["methodSources"] = [] sc = SavedScoreCalibration.model_validate(data) @@ -714,7 +714,7 @@ def test_saved_score_calibration_with_empty_publication_sources_and_functional_c assert sc.title == data["title"] assert len(sc.functional_classifications) > 0 assert sc.threshold_sources == [] - assert sc.classification_sources == [] + assert sc.evidence_sources == [] assert sc.method_sources == [] @@ -722,7 +722,7 @@ def test_create_calibration_with_functional_classifications_requires_publication """Creating a calibration with functional classifications but no publications should fail.""" data = deepcopy(TEST_BRNICH_SCORE_CALIBRATION_RANGE_BASED) data["threshold_sources"] = [] - data["classification_sources"] = [] + data["evidence_sources"] = [] data["method_sources"] = [] with pytest.raises(ValidationError, match="must provide at least one 
method source publication"):
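
Reviewer note (appended to the series; not part of any patch above): the
client-facing effect of PATCH 5/5 is a renamed payload key. The sketch below
illustrates the migration for API consumers. It is illustrative only: the base
URL, auth header, URN, and PubMed identifier are placeholders, and the payload
is trimmed to the fields exercised by the router tests in this series, so a
real request may need additional fields.

    # Illustrative sketch only; all concrete values below are placeholders.
    import requests

    BASE_URL = "https://api.mavedb.org/api/v1"  # assumed base URL
    HEADERS = {"X-API-Key": "<api-key>"}  # auth mechanism assumed, not shown in this series

    payload = {
        "scoreSetUrn": "urn:mavedb:00000001-a-1",  # placeholder URN
        "title": "Example calibration",
        "functionalClassifications": [
            {"label": "normal", "functionalClassification": "normal", "range": [0.0, 5.0]},
        ],
        "thresholdSources": [{"identifier": "12345678", "dbName": "PubMed"}],  # placeholder PMID
        # Renamed by PATCH 5/5: this key was previously "classificationSources".
        "evidenceSources": [{"identifier": "12345678", "dbName": "PubMed"}],
        "methodSources": [{"identifier": "12345678", "dbName": "PubMed"}],
    }

    response = requests.post(f"{BASE_URL}/score-calibrations", json=payload, headers=HEADERS)
    response.raise_for_status()
    print(response.json()["urn"])

No client-side backfill is needed for existing data: the migration in
PATCH 5/5 rewrites stored 'classification' relations to 'evidence' on upgrade.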