Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
40 changes: 0 additions & 40 deletions .basedpyright/baseline.json
Original file line number Diff line number Diff line change
Expand Up @@ -4953,14 +4953,6 @@
"lineCount": 1
}
},
{
"code": "reportOptionalMemberAccess",
"range": {
"startColumn": 29,
"endColumn": 37,
"lineCount": 1
}
},
{
"code": "reportArgumentType",
"range": {
Expand Down Expand Up @@ -5679,30 +5671,6 @@
}
],
"./monitoring/uss_qualifier/reports/tested_requirements/breakdown.py": [
{
"code": "reportOptionalMemberAccess",
"range": {
"startColumn": 55,
"endColumn": 63,
"lineCount": 1
}
},
{
"code": "reportOptionalMemberAccess",
"range": {
"startColumn": 64,
"endColumn": 70,
"lineCount": 1
}
},
{
"code": "reportArgumentType",
"range": {
"startColumn": 23,
"endColumn": 43,
"lineCount": 1
}
},
{
"code": "reportOptionalMemberAccess",
"range": {
Expand Down Expand Up @@ -20767,14 +20735,6 @@
"lineCount": 1
}
},
{
"code": "reportReturnType",
"range": {
"startColumn": 11,
"endColumn": 13,
"lineCount": 1
}
},
{
"code": "reportAttributeAccessIssue",
"range": {
Expand Down
45 changes: 45 additions & 0 deletions monitoring/uss_qualifier/configurations/configuration.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,7 @@
from __future__ import annotations

from collections.abc import Iterable

from implicitdict import ImplicitDict, Optional

from monitoring.monitorlib.dicts import JSONAddress
Expand Down Expand Up @@ -129,6 +131,9 @@ class ExecutionConfiguration(ImplicitDict):
stop_fast: Optional[bool] = False
"""If true, escalate the Severity of any failed check to Critical in order to end the test run early."""

do_not_stop_fast_for_acceptable_findings: Optional[bool]
"""If true, make an exception for stop_fast above when the failed check is identified as an acceptable_finding in one of the tested_requirements artifact descriptions."""

stop_when_resource_not_created: Optional[bool] = False
"""If true, stop test execution if one of the resources cannot be created. Otherwise, resources that cannot be created due to missing prerequisites are simply treated as omitted."""

Expand All @@ -154,6 +159,31 @@ class TestConfiguration(ImplicitDict):
"""Identifier for a requirements collection, local to a TestedRequirementsConfiguration artifact configuration."""


class FullyQualifiedCheck(ImplicitDict):
    """Identifies a specific check by the full path at which it occurs in a test run.

    A check is uniquely identified by the combination of the scenario type,
    test case name, test step name, and check name.
    """

    scenario_type: TestScenarioTypeName
    """Scenario in which the check occurs."""

    test_case_name: str
    """Test case in which the check occurs."""

    test_step_name: str
    """Test step in which the check occurs."""

    check_name: str
    """Name of the check."""

    def contained_in(self, collection: Iterable[FullyQualifiedCheck]) -> bool:
        """Return True iff `collection` contains a check whose four identifying fields all equal this check's.

        Comparison is field-by-field (rather than object identity/equality) so
        any Iterable of FullyQualifiedCheck instances may be searched.
        """
        return any(
            self.scenario_type == other.scenario_type
            and self.test_case_name == other.test_case_name
            and self.test_step_name == other.test_step_name
            and self.check_name == other.check_name
            for other in collection
        )


class TestedRequirementsConfiguration(ImplicitDict):
report_name: str
"""Name of subfolder in output path to contain the rendered templated report"""
Expand All @@ -179,6 +209,9 @@ class TestedRequirementsConfiguration(ImplicitDict):
If a participant is not listed, no report will be generated for them.
"""

acceptable_findings: Optional[list[FullyQualifiedCheck]]
"""If any check identified in this field fails, ignore the failure when determining Tested Requirements outcomes."""


class SequenceViewConfiguration(ImplicitDict):
redact_access_tokens: bool = True
Expand Down Expand Up @@ -248,6 +281,18 @@ class ArtifactsConfiguration(ImplicitDict):
timing_report: Optional[TimingReportConfiguration] = None
"""If specified, configuration describing a desired report describing where and how time was spent during the test."""

@property
def acceptable_findings(self) -> Iterable[FullyQualifiedCheck]:
    """Iterates through checks where findings are acceptable in at least one tested_requirements artifact."""
    # ImplicitDict fields may be absent entirely, so check membership before attribute access.
    configured = (
        self.tested_requirements if "tested_requirements" in self else None
    )
    if not configured:
        return
    for tr_config in configured:
        if "acceptable_findings" not in tr_config:
            continue
        findings = tr_config.acceptable_findings
        if findings:
            yield from findings


class USSQualifierConfigurationV1(ImplicitDict):
test_run: Optional[TestConfiguration] = None
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,6 @@ v1:
second_utm_auth: second_utm_auth
planning_area: planning_area
problematically_big_area: problematically_big_area
test_exclusions: test_exclusions

# When a test run is executed, a "baseline signature" is computed uniquely identifying the "baseline" of the test,
# usually excluding exactly what systems are participating in the test (the "environment"). This is a list of
Expand Down Expand Up @@ -167,15 +166,6 @@ v1:
# ========== Environment ==========
# =================================

# Controls tests behavior
test_exclusions:
$content_schema: monitoring/uss_qualifier/resources/definitions/ResourceDeclaration.json
resource_type: resources.dev.TestExclusionsResource
specification:
# Tests should allow private addresses that are not publicly addressable since this configuration runs locally
allow_private_addresses: true
allow_cleartext_queries: true

# Means by which uss_qualifier can discover which subscription ('sub' claim of its tokes) it is described by
utm_client_identity:
resource_type: resources.communications.ClientIdentityResource
Expand Down Expand Up @@ -287,10 +277,13 @@ v1:

# How to execute a test run using this configuration
execution:
# Since we expect no failed checks and want to stop execution immediately if there are any failed checks, we set
# this parameter to true.
# Since we want to stop execution immediately if there are any unexpected failed checks, we set this parameter to
# true.
stop_fast: true

# But, don't stop_fast for findings that are acceptable according to any of the tested_requirements.
do_not_stop_fast_for_acceptable_findings: true

# This block defines artifacts related to the test run. Note that all paths are
# relative to where uss_qualifier is executed from, and are located inside the
# Docker container executing uss_qualifier.
Expand Down Expand Up @@ -422,6 +415,14 @@ v1:
uss1: scd_and_dss
uss2: scd_no_dss

# It is acceptable for the checks listed below to fail (in terms of determining the status of tested requirements in this artifact)
acceptable_findings:
- scenario_type: scenarios.astm.utm.dss.DSSInteroperability
test_case_name: "Prerequisites"
test_step_name: "Test environment requirements"
check_name: "DSS instance is publicly addressable"
# Rationale: tests should allow private addresses that are not publicly addressable since this configuration runs locally

# Write out a human-readable report showing the sequence of events of the test
sequence_view: {}

Expand Down Expand Up @@ -452,6 +453,7 @@ v1:
pass_condition:
# When considering all of the applicable elements...
elements:
# ...the number of applicable elements should be zero.
# ...the number of applicable elements should be 4...
count:
equal_to: 0
# ...specifically, the 4x "DSS instance is publicly accessible" checks we deem acceptable
equal_to: 4
9 changes: 8 additions & 1 deletion monitoring/uss_qualifier/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -118,6 +118,7 @@ def execute_test_run(
whole_config: USSQualifierConfiguration,
description: TestDefinitionDescription,
):
assert whole_config.v1
config = whole_config.v1.test_run

if not config:
Expand All @@ -137,7 +138,13 @@ def execute_test_run(
)

logger.info("Instantiating top-level test suite action")
context = ExecutionContext(config.execution if "execution" in config else None)
if "artifacts" in whole_config.v1 and whole_config.v1.artifacts:
acceptable_findings = list(whole_config.v1.artifacts.acceptable_findings)
else:
acceptable_findings = []
context = ExecutionContext(
config.execution if "execution" in config else None, acceptable_findings
)
action = TestSuiteAction(config.action, resources)
logger.info("Running top-level test suite action")
report = action.run(context)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -58,6 +58,9 @@
.findings_result {
background-color: rgb(255, 255, 192);
}
.accepted_findings_result {
background-color: rgb(178, 125, 209);
}
.fail_result {
background-color: rgb(255, 192, 192);
}
Expand Down
Loading
Loading