diff --git a/.basedpyright/baseline.json b/.basedpyright/baseline.json
index e496585129..ed06ba8db0 100644
--- a/.basedpyright/baseline.json
+++ b/.basedpyright/baseline.json
@@ -4953,14 +4953,6 @@
"lineCount": 1
}
},
- {
- "code": "reportOptionalMemberAccess",
- "range": {
- "startColumn": 29,
- "endColumn": 37,
- "lineCount": 1
- }
- },
{
"code": "reportArgumentType",
"range": {
@@ -5679,30 +5671,6 @@
}
],
"./monitoring/uss_qualifier/reports/tested_requirements/breakdown.py": [
- {
- "code": "reportOptionalMemberAccess",
- "range": {
- "startColumn": 55,
- "endColumn": 63,
- "lineCount": 1
- }
- },
- {
- "code": "reportOptionalMemberAccess",
- "range": {
- "startColumn": 64,
- "endColumn": 70,
- "lineCount": 1
- }
- },
- {
- "code": "reportArgumentType",
- "range": {
- "startColumn": 23,
- "endColumn": 43,
- "lineCount": 1
- }
- },
{
"code": "reportOptionalMemberAccess",
"range": {
@@ -20767,14 +20735,6 @@
"lineCount": 1
}
},
- {
- "code": "reportReturnType",
- "range": {
- "startColumn": 11,
- "endColumn": 13,
- "lineCount": 1
- }
- },
{
"code": "reportAttributeAccessIssue",
"range": {
diff --git a/monitoring/uss_qualifier/configurations/configuration.py b/monitoring/uss_qualifier/configurations/configuration.py
index 6cd41be3d0..5a6efe9d35 100644
--- a/monitoring/uss_qualifier/configurations/configuration.py
+++ b/monitoring/uss_qualifier/configurations/configuration.py
@@ -1,5 +1,7 @@
from __future__ import annotations
+from collections.abc import Iterable
+
from implicitdict import ImplicitDict, Optional
from monitoring.monitorlib.dicts import JSONAddress
@@ -129,6 +131,9 @@ class ExecutionConfiguration(ImplicitDict):
stop_fast: Optional[bool] = False
"""If true, escalate the Severity of any failed check to Critical in order to end the test run early."""
+ do_not_stop_fast_for_acceptable_findings: Optional[bool]
+ """If true, make an exception for stop_fast above when the failed check is identified as an acceptable_finding in one of the tested_requirements artifact descriptions."""
+
stop_when_resource_not_created: Optional[bool] = False
"""If true, stop test execution if one of the resources cannot be created. Otherwise, resources that cannot be created due to missing prerequisites are simply treated as omitted."""
@@ -154,6 +159,31 @@ class TestConfiguration(ImplicitDict):
"""Identifier for a requirements collection, local to a TestedRequirementsConfiguration artifact configuration."""
+class FullyQualifiedCheck(ImplicitDict):
+ scenario_type: TestScenarioTypeName
+ """Scenario in which the check occurs."""
+
+ test_case_name: str
+ """Test case in which the check occurs."""
+
+ test_step_name: str
+ """Test step in which the check occurs."""
+
+ check_name: str
+ """Name of the check."""
+
+ def contained_in(self, collection: Iterable[FullyQualifiedCheck]) -> bool:
+ for other in collection:
+ if (
+ self.scenario_type == other.scenario_type
+ and self.test_case_name == other.test_case_name
+ and self.test_step_name == other.test_step_name
+ and self.check_name == other.check_name
+ ):
+ return True
+ return False
+
+
class TestedRequirementsConfiguration(ImplicitDict):
report_name: str
"""Name of subfolder in output path to contain the rendered templated report"""
@@ -179,6 +209,9 @@ class TestedRequirementsConfiguration(ImplicitDict):
If a participant is not listed, no report will be generated for them.
"""
+ acceptable_findings: Optional[list[FullyQualifiedCheck]]
+ """If any check identified in this field fails, ignore the failure when determining Tested Requirements outcomes."""
+
class SequenceViewConfiguration(ImplicitDict):
redact_access_tokens: bool = True
@@ -248,6 +281,18 @@ class ArtifactsConfiguration(ImplicitDict):
timing_report: Optional[TimingReportConfiguration] = None
"""If specified, configuration describing a desired report describing where and how time was spent during the test."""
+ @property
+ def acceptable_findings(self) -> Iterable[FullyQualifiedCheck]:
+ """Iterates through checks where findings are acceptable in at least one tested_requirements artifact."""
+ if "tested_requirements" not in self or not self.tested_requirements:
+ return
+ for tested_requirements in self.tested_requirements:
+ if (
+ "acceptable_findings" in tested_requirements
+ and tested_requirements.acceptable_findings
+ ):
+ yield from tested_requirements.acceptable_findings
+
class USSQualifierConfigurationV1(ImplicitDict):
test_run: Optional[TestConfiguration] = None
diff --git a/monitoring/uss_qualifier/configurations/dev/f3548_self_contained.yaml b/monitoring/uss_qualifier/configurations/dev/f3548_self_contained.yaml
index 23babd472d..c7ee19db26 100644
--- a/monitoring/uss_qualifier/configurations/dev/f3548_self_contained.yaml
+++ b/monitoring/uss_qualifier/configurations/dev/f3548_self_contained.yaml
@@ -30,7 +30,6 @@ v1:
second_utm_auth: second_utm_auth
planning_area: planning_area
problematically_big_area: problematically_big_area
- test_exclusions: test_exclusions
# When a test run is executed, a "baseline signature" is computed uniquely identifying the "baseline" of the test,
# usually excluding exactly what systems are participating in the test (the "environment"). This is a list of
@@ -167,15 +166,6 @@ v1:
# ========== Environment ==========
# =================================
- # Controls tests behavior
- test_exclusions:
- $content_schema: monitoring/uss_qualifier/resources/definitions/ResourceDeclaration.json
- resource_type: resources.dev.TestExclusionsResource
- specification:
- # Tests should allow private addresses that are not publicly addressable since this configuration runs locally
- allow_private_addresses: true
- allow_cleartext_queries: true
-
# Means by which uss_qualifier can discover which subscription ('sub' claim of its tokens) it is described by
utm_client_identity:
resource_type: resources.communications.ClientIdentityResource
@@ -287,10 +277,13 @@ v1:
# How to execute a test run using this configuration
execution:
- # Since we expect no failed checks and want to stop execution immediately if there are any failed checks, we set
- # this parameter to true.
+ # Since we want to stop execution immediately if there are any unexpected failed checks, we set this parameter to
+ # true.
stop_fast: true
+  # But don't stop_fast for findings that are acceptable according to any of the tested_requirements artifacts.
+ do_not_stop_fast_for_acceptable_findings: true
+
# This block defines artifacts related to the test run. Note that all paths are
# relative to where uss_qualifier is executed from, and are located inside the
# Docker container executing uss_qualifier.
@@ -422,6 +415,14 @@ v1:
uss1: scd_and_dss
uss2: scd_no_dss
+ # It is acceptable for the checks listed below to fail (in terms of determining the status of tested requirements in this artifact)
+ acceptable_findings:
+ - scenario_type: scenarios.astm.utm.dss.DSSInteroperability
+ test_case_name: "Prerequisites"
+ test_step_name: "Test environment requirements"
+ check_name: "DSS instance is publicly addressable"
+ # Rationale: tests should allow private addresses that are not publicly addressable since this configuration runs locally
+
# Write out a human-readable report showing the sequence of events of the test
sequence_view: {}
@@ -452,6 +453,7 @@ v1:
pass_condition:
# When considering all of the applicable elements...
elements:
- # ...the number of applicable elements should be zero.
+ # ...the number of applicable elements should be 4...
count:
- equal_to: 0
+  # ...specifically, the 4x "DSS instance is publicly addressable" checks we deem acceptable
+ equal_to: 4
diff --git a/monitoring/uss_qualifier/main.py b/monitoring/uss_qualifier/main.py
index 34b861c365..b3783d15cc 100644
--- a/monitoring/uss_qualifier/main.py
+++ b/monitoring/uss_qualifier/main.py
@@ -118,6 +118,7 @@ def execute_test_run(
whole_config: USSQualifierConfiguration,
description: TestDefinitionDescription,
):
+ assert whole_config.v1
config = whole_config.v1.test_run
if not config:
@@ -137,7 +138,13 @@ def execute_test_run(
)
logger.info("Instantiating top-level test suite action")
- context = ExecutionContext(config.execution if "execution" in config else None)
+ if "artifacts" in whole_config.v1 and whole_config.v1.artifacts:
+ acceptable_findings = list(whole_config.v1.artifacts.acceptable_findings)
+ else:
+ acceptable_findings = []
+ context = ExecutionContext(
+ config.execution if "execution" in config else None, acceptable_findings
+ )
action = TestSuiteAction(config.action, resources)
logger.info("Running top-level test suite action")
report = action.run(context)
diff --git a/monitoring/uss_qualifier/reports/templates/tested_requirements/style.html b/monitoring/uss_qualifier/reports/templates/tested_requirements/style.html
index 8e894ef139..0b074eeefa 100644
--- a/monitoring/uss_qualifier/reports/templates/tested_requirements/style.html
+++ b/monitoring/uss_qualifier/reports/templates/tested_requirements/style.html
@@ -58,6 +58,9 @@
.findings_result {
background-color: rgb(255, 255, 192);
}
+ .accepted_findings_result {
+ background-color: rgb(178, 125, 209);
+ }
.fail_result {
background-color: rgb(255, 192, 192);
}
diff --git a/monitoring/uss_qualifier/reports/tested_requirements/README.md b/monitoring/uss_qualifier/reports/tested_requirements/README.md
index 8b28ef925c..b4cf6fc29a 100644
--- a/monitoring/uss_qualifier/reports/tested_requirements/README.md
+++ b/monitoring/uss_qualifier/reports/tested_requirements/README.md
@@ -16,34 +16,140 @@ list of relevant requirements included on this page can either be set
explicitly in the artifact configuration, or else it defaults to every
requirement that the set of scenarios run may have been capable of measuring.
-### Summaries
-
-Each requirement is summarized (see [`TestedRequirement.classname`](./data_types.py)) for a participant in the following way:
-
-* If any test check relevant to that requirement failed, then the compliance
- summary for that requirement is indicated as "Fail".
-* Otherwise, if at least one test check measured compliance with the
- requirement did not detect non-compliance, then the compliance summary for
- that requirement is indicated as "Pass".
- * If the summary would be "Pass" but there are one or more low-severity findings (not indicating non-compliance to a requirement), the summary for that requirement is instead indicated as "Pass with findings".
-* If no test checks measuring compliance with the requirement were performed
- for the participant, then the compliance summary for that requirement is
- indicated as "Not tested"
+### Requirement summaries
+
+Each requirement is summarized (see [`TestedRequirement.status`](./data_types.py)) for a participant in the following way:
+
+<table>
+  <tr>
+    <th rowspan="3">Requirement status</th>
+    <th rowspan="3">Meaning</th>
+    <th colspan="5">Criteria for checks associated with the requirement</th>
+  </tr>
+  <tr>
+    <th rowspan="2">Pass</th>
+    <th rowspan="2">Not tested</th>
+    <th colspan="3">Fail</th>
+  </tr>
+  <tr>
+    <th>Medium+ severity, not in acceptable_findings</th>
+    <th>Low severity, not in acceptable_findings</th>
+    <th>In acceptable_findings</th>
+  </tr>
+  <tr>
+    <td>Fail</td>
+    <td>The participant is likely to be non-compliant with one or more requirements</td>
+    <td>Any number</td>
+    <td>Any number</td>
+    <td>At least one</td>
+    <td>Any number</td>
+    <td>Any number</td>
+  </tr>
+  <tr>
+    <td>Findings</td>
+    <td>The participant was not detected as non-compliant to any requirement, but there are important findings and no fully-successful checks</td>
+    <td>None</td>
+    <td>Any number</td>
+    <td>None</td>
+    <td>At least one</td>
+    <td>Any number</td>
+  </tr>
+  <tr>
+    <td>Pass (with findings)</td>
+    <td>The participant was not detected as non-compliant to any requirement, and some checks validate compliance to the requirement, but there are important findings</td>
+    <td>At least one</td>
+    <td>Any number</td>
+    <td>None</td>
+    <td>At least one</td>
+    <td>Any number</td>
+  </tr>
+  <tr>
+    <td>Pass</td>
+    <td>At least one test check measuring compliance with the requirement did not detect non-compliance, and no non-compliance was detected</td>
+    <td>At least one</td>
+    <td>Any number</td>
+    <td>None</td>
+    <td>None</td>
+    <td>Any number</td>
+  </tr>
+  <tr>
+    <td>Not tested</td>
+    <td>No checks associated with the requirement produced positive or negative results</td>
+    <td>None</td>
+    <td>Any number</td>
+    <td>None</td>
+    <td>None</td>
+    <td>Any number</td>
+  </tr>
+</table>
+
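+For reference, the table above corresponds roughly to the following logic (a condensed, illustrative sketch of the [`TestedRequirement.status`](./data_types.py) property; the implementation in that module is authoritative):
+
+```python
+# Condensed sketch of TestedRequirement.status; see data_types.py for the real implementation.
+def requirement_status(checks) -> str:
+    # `checks`: all TestedCheck objects associated with the requirement.
+    checks = list(checks)
+    if any(c.failures > 0 and not c.is_finding_acceptable for c in checks):
+        return "Fail"
+    if all(c.successes == 0 for c in checks) and any(c.findings > 0 for c in checks):
+        return "Findings"
+    if any(c.successes > 0 for c in checks) and any(
+        c.findings > 0 and not c.is_finding_acceptable for c in checks
+    ):
+        return "Pass (with findings)"
+    if any(c.successes > 0 for c in checks):
+        return "Pass"
+    return "Not tested"
+```
+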
+### Top-level verification status
The overall "Requirement verification status" for the participant is
summarized in the following way (see [`compute_overall_status`](./summaries.py)):
-* If any relevant requirement for the participant indicates "Fail", then the
- overall status is indicated as "Fail".
-* Otherwise, if any relevant requirement for the participant indicates "Not
- tested", then the overall status is indicated as "Not fully verified".
-* If at least one relevant requirement for the participant indicates "Pass with findings" and all relevant requirements for the participant indicate either "Pass" or "Pass with findings", then the overall status is indicated as "Pass (with findings)".
-* If all relevant requirements for the participant indicate "Pass", then the
- overall status is indicated as "Pass".
+
+<table>
+  <tr>
+    <th rowspan="2">Verification status (top-level)</th>
+    <th rowspan="2">Meaning</th>
+    <th colspan="5">Criteria for requirements included in the artifact</th>
+  </tr>
+  <tr>
+    <th>Fail</th>
+    <th>Findings</th>
+    <th>Pass (with findings)</th>
+    <th>Pass</th>
+    <th>Not tested</th>
+  </tr>
+  <tr>
+    <td>Fail</td>
+    <td>The participant is likely to be non-compliant with one or more relevant requirements</td>
+    <td>At least one</td>
+    <td>Any number</td>
+    <td>Any number</td>
+    <td>Any number</td>
+    <td>Any number</td>
+  </tr>
+  <tr>
+    <td>Pass</td>
+    <td>Every requirement considered relevant for the artifact has at least one test check measuring the participant's compliance with the requirement, and no relevant non-compliance was detected, and there are no relevant Low-severity findings</td>
+    <td>None</td>
+    <td>None</td>
+    <td>None</td>
+    <td>At least one</td>
+    <td>None</td>
+  </tr>
+  <tr>
+    <td>Pass (with findings)</td>
+    <td>Every requirement considered relevant for the artifact has at least one test check measuring the participant's compliance with the requirement, and no relevant non-compliance was detected, but there are important findings regarding one or more relevant requirements</td>
+    <td>None</td>
+    <td>None</td>
+    <td>At least one</td>
+    <td>Any number</td>
+    <td>None</td>
+  </tr>
+  <tr>
+    <td>Not fully verified</td>
+    <td>The participant was not detected to be non-compliant with any requirement, but tests could not be successfully conducted to verify compliance to all relevant requirements</td>
+    <td>None</td>
+    <td>Any number</td>
+    <td>Any number</td>
+    <td>Any number</td>
+    <td>At least one</td>
+  </tr>
+</table>
+
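+Similarly, the top-level verification status corresponds roughly to the following logic (an illustrative sketch of [`compute_overall_status`](./summaries.py); that function is authoritative):
+
+```python
+# Condensed sketch of compute_overall_status; see summaries.py for the real implementation.
+def overall_status(requirement_statuses) -> str:
+    # `requirement_statuses`: the status of each requirement included in the artifact.
+    overall = "Pass"
+    for status in requirement_statuses:
+        if status == "Fail":
+            return "Fail"
+        elif status == "Not tested":
+            overall = "Not fully verified"
+        elif status in ("Findings", "Pass (with findings)"):
+            if overall == "Pass":
+                overall = "Pass (with findings)"
+    return overall
+```
+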
## Troubleshooting "Fail" for a set of tested requirements
-If the "Requirement verification status" for a participant is "Fail" (see above for how this is determined), begin troubleshooting by locating the requirement row in the "Tested requirements" table. Identify the exact checks failed and see if the names alone reveal the issue. If not, click on the check name to navigate to the documentation for that check and see if the reason for failure can be determined from that documentation. If not, note the name of the test scenario in which the first failed check appears, then find that test scenario run in the [sequence view artifact](../sequence_view) and follow [the instructions to troubleshoot a failed check from the sequence view artifact](../sequence_view/README.md#troubleshooting-a-failed-check).
+If the "Requirement verification status" for a participant is "Fail" (see above for how this is determined), begin troubleshooting by locating the requirement row in the "Tested requirements" table. Identify the exact checks failed and see if the names alone reveal the issue. If not, click on the check name to navigate to the documentation for that check and see if the reason for failure can be determined from that documentation. If not, note the name of the test scenario in which the failed check appears, then find that test scenario run in the [sequence view artifact](../sequence_view) and follow [the instructions to troubleshoot a failed check from the sequence view artifact](../sequence_view/README.md#troubleshooting-a-failed-check).
## Troubleshooting "Not fully verified" for a set of tested requirements
@@ -54,3 +160,7 @@ If that row does not contain any Scenario entries, the test configuration is not
If the requirement row contains one or more Scenario entries, these are the test scenarios that are potentially capable of verifying compliance to the requirement. But, for some reason, the participant's compliance to the requirement was not verified by any of these test scenarios. For each test scenario capable of verifying compliance to the requirement, review the documentation (linked via the test scenario name) to determine if verification is expected for the participant (for instance, verification may not be expected if verification requires implementing a particular feature and the participant has chosen not to implement that feature). If verification is expected, consult the index page of the [sequence view artifact](../sequence_view) and find all runs of the test scenario in question. For each run of the test scenario in question, review the test scenario run page of the sequence view artifact (linked from the test scenario name on the index page). If the participant does not appear in any of the roles of any of the test scenario runs, the problem may be that the test designer has not included the participant in the relevant environmental resource (e.g., participant has not been added to the list of USSs implementing feature X). If the participant is involved in one or more runs of the test scenario, trace the events performed to determine why the check that should have verified requirement compliance was not performed for the participant (based on test scenario documentation) -- this is usually because the participant was found to not support an optional capability needed to verify compliance to the requirement.
After performing the above procedure, if the reason for non-verification still cannot be determined, file [a test scenario bug Issue](https://github.com/interuss/monitoring/issues/new?template=bug_test_scenario.md) with InterUSS. Be sure to attach the entire sequence view artifact and tested requirements artifact when possible, ideally the full zip of artifacts produced from the test run.
+
+## Purple cells
+
+If a test designer explicitly lists one or more checks in `acceptable_findings`, failure of those checks does not affect the status indicated for the associated requirements or for the test overall. When the outcome of a check is ignored in this way, it is annotated visually: instead of the red background normally used for a failed check, the check is shown with a purple background to indicate that the finding has been deemed acceptable according to explicit test designer instructions.
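+
+As an illustration, the sketch below shows how such a check might be declared in a tested_requirements artifact configuration (adapted from `configurations/dev/f3548_self_contained.yaml`; the `report_name` value is a placeholder, and the scenario, case, step, and check names must match the test definitions exactly):
+
+```yaml
+tested_requirements:
+  - report_name: requirements  # placeholder; other artifact fields omitted for brevity
+    acceptable_findings:
+      - scenario_type: scenarios.astm.utm.dss.DSSInteroperability
+        test_case_name: "Prerequisites"
+        test_step_name: "Test environment requirements"
+        check_name: "DSS instance is publicly addressable"
+```
+
+If desired, the `do_not_stop_fast_for_acceptable_findings` execution option can additionally be set to true so that `stop_fast` does not end the test run early when one of these checks fails.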
diff --git a/monitoring/uss_qualifier/reports/tested_requirements/breakdown.py b/monitoring/uss_qualifier/reports/tested_requirements/breakdown.py
index 4a7a359405..3ee22f9128 100644
--- a/monitoring/uss_qualifier/reports/tested_requirements/breakdown.py
+++ b/monitoring/uss_qualifier/reports/tested_requirements/breakdown.py
@@ -10,7 +10,10 @@
list_potential_actions_for_action_generator_definition,
)
from monitoring.uss_qualifier.common_data_definitions import Severity
-from monitoring.uss_qualifier.configurations.configuration import ParticipantID
+from monitoring.uss_qualifier.configurations.configuration import (
+ FullyQualifiedCheck,
+ ParticipantID,
+)
from monitoring.uss_qualifier.fileio import load_dict_with_references
from monitoring.uss_qualifier.reports.report import (
FailedCheck,
@@ -44,6 +47,7 @@
def make_breakdown(
report: TestRunReport,
+ acceptable_findings: list[FullyQualifiedCheck],
participant_reqs: set[RequirementID] | None,
participant_ids: Iterable[ParticipantID],
) -> TestedBreakdown:
@@ -51,6 +55,7 @@ def make_breakdown(
Args:
report: Report to break down.
+ acceptable_findings: Checks where failure is acceptable, according to the configuration.
participant_reqs: Set of requirements to report for these participants. If None, defaults to everything.
participant_ids: IDs of participants for which the breakdown is being computed.
@@ -58,10 +63,19 @@ def make_breakdown(
"""
participant_breakdown = TestedBreakdown(packages=[])
_populate_breakdown_with_action_report(
- participant_breakdown, report.report, participant_ids, participant_reqs
+ participant_breakdown,
+ report.report,
+ acceptable_findings,
+ participant_ids,
+ participant_reqs,
)
+ assert report.configuration.v1
+ assert report.configuration.v1.test_run
_populate_breakdown_with_action_declaration(
- participant_breakdown, report.configuration.v1.test_run.action, participant_reqs
+ participant_breakdown,
+ report.configuration.v1.test_run.action,
+ acceptable_findings,
+ participant_reqs,
)
if participant_reqs is not None:
_populate_breakdown_with_req_set(participant_breakdown, participant_reqs)
@@ -96,23 +110,29 @@ def _populate_breakdown_with_req_set(
def _populate_breakdown_with_action_report(
breakdown: TestedBreakdown,
action: TestSuiteActionReport,
+ acceptable_findings: list[FullyQualifiedCheck],
participant_ids: Iterable[ParticipantID],
req_set: set[RequirementID] | None,
) -> None:
test_suite, test_scenario, action_generator = action.get_applicable_report()
if test_scenario:
+ assert action.test_scenario
return _populate_breakdown_with_scenario_report(
- breakdown, action.test_scenario, participant_ids, req_set
+ breakdown,
+ action.test_scenario,
+ acceptable_findings,
+ participant_ids,
+ req_set,
)
elif test_suite:
for subaction in action.test_suite.actions:
_populate_breakdown_with_action_report(
- breakdown, subaction, participant_ids, req_set
+ breakdown, subaction, acceptable_findings, participant_ids, req_set
)
elif action_generator:
for subaction in action.action_generator.actions:
_populate_breakdown_with_action_report(
- breakdown, subaction, participant_ids, req_set
+ breakdown, subaction, acceptable_findings, participant_ids, req_set
)
else:
pass # Skipped action
@@ -121,6 +141,7 @@ def _populate_breakdown_with_action_report(
def _populate_breakdown_with_scenario_report(
breakdown: TestedBreakdown,
scenario_report: TestScenarioReport,
+ acceptable_findings: list[FullyQualifiedCheck],
participant_ids: Iterable[ParticipantID],
req_set: set[RequirementID] | None,
) -> None:
@@ -206,8 +227,19 @@ def _populate_breakdown_with_scenario_report(
if matches:
tested_check = matches[0]
else:
+ current_check = FullyQualifiedCheck(
+ scenario_type=scenario_type_name,
+ test_case_name=case_name,
+ test_step_name=step.name,
+ check_name=check.name,
+ )
tested_check = TestedCheck(
- name=check.name, url="", has_todo=False
+ name=check.name,
+ url="",
+ has_todo=False,
+ is_finding_acceptable=current_check.contained_in(
+ acceptable_findings
+ ),
) # TODO: Consider populating has_todo with documentation instead
if isinstance(check, FailedCheck):
tested_check.url = check.documentation_url
@@ -226,12 +258,13 @@ def _populate_breakdown_with_scenario_report(
def _populate_breakdown_with_action_declaration(
breakdown: TestedBreakdown,
action: TestSuiteActionDeclaration | PotentialGeneratedAction,
+ acceptable_findings: list[FullyQualifiedCheck],
req_set: set[RequirementID] | None,
) -> None:
action_type = action.get_action_type()
if action_type == ActionType.TestScenario:
_populate_breakdown_with_scenario(
- breakdown, action.test_scenario.scenario_type, req_set
+ breakdown, action.test_scenario.scenario_type, acceptable_findings, req_set
)
elif action_type == ActionType.TestSuite:
if "suite_type" in action.test_suite and action.test_suite.suite_type:
@@ -240,13 +273,17 @@ def _populate_breakdown_with_action_declaration(
TestSuiteDefinition,
)
for a in suite_def.actions:
- _populate_breakdown_with_action_declaration(breakdown, a, req_set)
+ _populate_breakdown_with_action_declaration(
+ breakdown, a, acceptable_findings, req_set
+ )
elif (
"suite_definition" in action.test_suite
and action.test_suite.suite_definition
):
for a in action.test_suite.suite_definition.actions:
- _populate_breakdown_with_action_declaration(breakdown, a, req_set)
+ _populate_breakdown_with_action_declaration(
+ breakdown, a, acceptable_findings, req_set
+ )
else:
raise ValueError("Test suite action missing suite type or definition")
elif action_type == ActionType.ActionGenerator:
@@ -254,7 +291,9 @@ def _populate_breakdown_with_action_declaration(
action.action_generator
)
for a in potential_actions:
- _populate_breakdown_with_action_declaration(breakdown, a, req_set)
+ _populate_breakdown_with_action_declaration(
+ breakdown, a, acceptable_findings, req_set
+ )
else:
raise NotImplementedError(f"Unsupported test suite action type: {action_type}")
@@ -262,6 +301,7 @@ def _populate_breakdown_with_action_declaration(
def _populate_breakdown_with_scenario(
breakdown: TestedBreakdown,
scenario_type_name: TestScenarioTypeName,
+ acceptable_findings: list[FullyQualifiedCheck],
req_set: set[RequirementID] | None,
) -> None:
scenario_type = get_scenario_type_by_name(scenario_type_name)
@@ -333,8 +373,19 @@ def _populate_breakdown_with_scenario(
if matches:
tested_check = matches[0]
else:
+ current_check = FullyQualifiedCheck(
+ scenario_type=scenario_type_name,
+ test_case_name=case.name,
+ test_step_name=step.name,
+ check_name=check.name,
+ )
tested_check = TestedCheck(
- name=check.name, url=check.url, has_todo=check.has_todo
+ name=check.name,
+ url=check.url,
+ has_todo=check.has_todo,
+ is_finding_acceptable=current_check.contained_in(
+ acceptable_findings
+ ),
)
tested_step.checks.append(tested_check)
if not tested_check.url:
diff --git a/monitoring/uss_qualifier/reports/tested_requirements/data_types.py b/monitoring/uss_qualifier/reports/tested_requirements/data_types.py
index 3fa106a9c5..ef13c29d0b 100644
--- a/monitoring/uss_qualifier/reports/tested_requirements/data_types.py
+++ b/monitoring/uss_qualifier/reports/tested_requirements/data_types.py
@@ -1,3 +1,4 @@
+from collections.abc import Iterable
from enum import Enum
from implicitdict import ImplicitDict, Optional
@@ -10,6 +11,7 @@
FINDINGS_CLASS = "findings_result"
NOT_TESTED_CLASS = "not_tested"
FAIL_CLASS = "fail_result"
+ACCEPTED_FINDINGS_CLASS = "accepted_findings_result"
HAS_TODO_CLASS = "has_todo"
@@ -17,6 +19,7 @@ class TestedCheck(ImplicitDict):
name: str
url: str
has_todo: bool
+ is_finding_acceptable: bool
successes: int = 0
findings: int = 0
failures: int = 0
@@ -27,16 +30,16 @@ def result(self) -> str:
return "Fail"
if self.findings > 0 and self.successes == 0:
return "Findings"
- if self.not_tested:
- return "Not tested"
- if self.findings > 0:
+ if self.findings == 0 and self.successes > 0:
+ return "Pass"
+ if self.findings > 0 and self.successes > 0:
return "Pass (with findings)"
- return "Pass"
+ return "Not tested"
@property
def check_classname(self) -> str:
if self.failures > 0:
- return FAIL_CLASS
+ return ACCEPTED_FINDINGS_CLASS if self.is_finding_acceptable else FAIL_CLASS
if self.successes + self.failures == 0:
if self.has_todo:
return HAS_TODO_CLASS
@@ -47,17 +50,21 @@ def check_classname(self) -> str:
@property
def result_classname(self) -> str:
- if self.failures > 0:
- return FAIL_CLASS
- if self.successes + self.failures + self.findings == 0:
- return NOT_TESTED_CLASS
- if self.findings > 0:
- return FINDINGS_CLASS
- return PASS_CLASS
-
- @property
- def not_tested(self) -> bool:
- return self.successes + self.failures == 0
+ if self.is_finding_acceptable:
+ if self.successes > 0:
+ return PASS_CLASS
+ elif self.failures > 0 or self.findings > 0:
+ return ACCEPTED_FINDINGS_CLASS
+ else:
+ return NOT_TESTED_CLASS
+ else:
+ if self.failures > 0:
+ return FAIL_CLASS
+ if self.successes + self.failures + self.findings == 0:
+ return NOT_TESTED_CLASS
+ if self.findings > 0:
+ return FINDINGS_CLASS
+ return PASS_CLASS
class TestedStep(ImplicitDict):
@@ -69,18 +76,6 @@ class TestedStep(ImplicitDict):
def rows(self) -> int:
return len(self.checks)
- @property
- def no_failures(self) -> bool:
- return all(c.failures == 0 for c in self.checks)
-
- @property
- def not_tested(self) -> bool:
- return all(c.not_tested for c in self.checks)
-
- @property
- def findings(self) -> bool:
- return any(c.findings > 0 for c in self.checks)
-
class TestedCase(ImplicitDict):
name: str
@@ -91,18 +86,6 @@ class TestedCase(ImplicitDict):
def rows(self) -> int:
return sum(s.rows for s in self.steps)
- @property
- def no_failures(self) -> bool:
- return all(s.no_failures for s in self.steps)
-
- @property
- def not_tested(self) -> bool:
- return all(s.not_tested for s in self.steps)
-
- @property
- def findings(self) -> bool:
- return any(s.findings for s in self.steps)
-
class TestedScenario(ImplicitDict):
type: TestScenarioTypeName
@@ -114,17 +97,13 @@ class TestedScenario(ImplicitDict):
def rows(self) -> int:
return sum(c.rows for c in self.cases)
- @property
- def no_failures(self) -> bool:
- return all(c.no_failures for c in self.cases)
-
- @property
- def not_tested(self) -> bool:
- return all(c.not_tested for c in self.cases)
- @property
- def findings(self) -> bool:
- return any(c.findings for c in self.cases)
+class TestedRequirementStatus(str, Enum):
+ Pass = "Pass"
+ PassWithFindings = "Pass (with findings)"
+ Findings = "Findings"
+ Fail = "Fail"
+ NotTested = "Not tested"
class TestedRequirement(ImplicitDict):
@@ -138,16 +117,38 @@ def rows(self) -> int:
n = 1
return n
+ @property
+ def checks(self) -> Iterable[TestedCheck]:
+ for scenario in self.scenarios:
+ for case in scenario.cases:
+ for step in case.steps:
+ yield from step.checks
+
+ @property
+ def status(self) -> TestedRequirementStatus:
+ if any((c.failures > 0 and not c.is_finding_acceptable) for c in self.checks):
+ return TestedRequirementStatus.Fail
+ if all(c.successes == 0 for c in self.checks) and any(
+ c.findings > 0 for c in self.checks
+ ):
+ return TestedRequirementStatus.Findings
+ if any(c.successes > 0 for c in self.checks) and any(
+ (c.findings > 0 and not c.is_finding_acceptable) for c in self.checks
+ ):
+ return TestedRequirementStatus.PassWithFindings
+ if any(c.successes > 0 for c in self.checks):
+ return TestedRequirementStatus.Pass
+ return TestedRequirementStatus.NotTested
+
@property
def classname(self) -> str:
- if not all(s.no_failures for s in self.scenarios):
- return FAIL_CLASS
- elif all(s.not_tested for s in self.scenarios):
- return NOT_TESTED_CLASS
- elif any(s.findings for s in self.scenarios):
- return FINDINGS_CLASS
- else:
- return PASS_CLASS
+ return {
+ TestedRequirementStatus.Fail: FAIL_CLASS,
+ TestedRequirementStatus.Findings: FINDINGS_CLASS,
+ TestedRequirementStatus.PassWithFindings: FINDINGS_CLASS,
+ TestedRequirementStatus.Pass: PASS_CLASS,
+ TestedRequirementStatus.NotTested: NOT_TESTED_CLASS,
+ }[self.status]
class TestedPackage(ImplicitDict):
@@ -186,7 +187,7 @@ class ParticipantVerificationStatus(str, Enum):
Fail = "Fail"
"""Participant has failed to comply with one or more requirements."""
- Incomplete = "Incomplete"
+ NotFullyVerified = "NotFullyVerified"
"""Participant has not failed to comply with any requirements, but some identified requirements were not verified."""
def get_class(self) -> str:
@@ -196,7 +197,7 @@ def get_class(self) -> str:
return PASS_CLASS
elif self == ParticipantVerificationStatus.Fail:
return FAIL_CLASS
- elif self == ParticipantVerificationStatus.Incomplete:
+ elif self == ParticipantVerificationStatus.NotFullyVerified:
return NOT_TESTED_CLASS
else:
return ""
@@ -208,7 +209,7 @@ def get_text(self) -> str:
return "Pass (with findings)"
elif self == ParticipantVerificationStatus.Fail:
return "Fail"
- elif self == ParticipantVerificationStatus.Incomplete:
+ elif self == ParticipantVerificationStatus.NotFullyVerified:
return "Not fully verified"
else:
return "???"
diff --git a/monitoring/uss_qualifier/reports/tested_requirements/generate.py b/monitoring/uss_qualifier/reports/tested_requirements/generate.py
index 308bd82425..d46f9f3ef9 100644
--- a/monitoring/uss_qualifier/reports/tested_requirements/generate.py
+++ b/monitoring/uss_qualifier/reports/tested_requirements/generate.py
@@ -111,7 +111,14 @@ def generate_tested_requirements(
matching_participants = config.aggregate_participants[participant_id]
else:
matching_participants = [participant_id]
- participant_breakdown = make_breakdown(report, req_set, matching_participants)
+ participant_breakdown = make_breakdown(
+ report,
+ list(config.acceptable_findings)
+ if "acceptable_findings" in config and config.acceptable_findings
+ else [],
+ req_set,
+ matching_participants,
+ )
overall_status = compute_overall_status(participant_breakdown)
system_version = get_system_version(
find_participant_system_versions(report.report, matching_participants)
diff --git a/monitoring/uss_qualifier/reports/tested_requirements/summaries.py b/monitoring/uss_qualifier/reports/tested_requirements/summaries.py
index 69638873b1..64c8a653ca 100644
--- a/monitoring/uss_qualifier/reports/tested_requirements/summaries.py
+++ b/monitoring/uss_qualifier/reports/tested_requirements/summaries.py
@@ -40,7 +40,7 @@ def compute_overall_status(
if req.classname == FAIL_CLASS:
return ParticipantVerificationStatus.Fail
elif req.classname == NOT_TESTED_CLASS:
- overall_status = ParticipantVerificationStatus.Incomplete
+ overall_status = ParticipantVerificationStatus.NotFullyVerified
elif req.classname == FINDINGS_CLASS:
if overall_status == ParticipantVerificationStatus.Pass:
overall_status = ParticipantVerificationStatus.PassWithFindings
diff --git a/monitoring/uss_qualifier/scenarios/astm/utm/dss/dss_interoperability.md b/monitoring/uss_qualifier/scenarios/astm/utm/dss/dss_interoperability.md
index 368ef7c85d..af9c6e39be 100644
--- a/monitoring/uss_qualifier/scenarios/astm/utm/dss/dss_interoperability.md
+++ b/monitoring/uss_qualifier/scenarios/astm/utm/dss/dss_interoperability.md
@@ -29,12 +29,12 @@ This resource is optional.
### Test environment requirements test step
-#### 🛑 DSS instance is publicly addressable check
+#### ⚠️ DSS instance is publicly addressable check
As per **[astm.f3548.v21.DSS0300](../../../../requirements/astm/f3548/v21.md)** the DSS instance should be publicly addressable.
As such, this check will fail if the resolved IP of the DSS host is a private IP address.
This check is skipped if the test exclusion `allow_private_addresses` is set to `True`.
-#### 🛑 DSS instance is reachable check
+#### ⚠️ DSS instance is reachable check
As per **[astm.f3548.v21.DSS0300](../../../../requirements/astm/f3548/v21.md)** the DSS instance should be publicly addressable.
As such, this check will fail if the DSS is not reachable with a dummy query.
diff --git a/monitoring/uss_qualifier/scenarios/interuss/unit_test.py b/monitoring/uss_qualifier/scenarios/interuss/unit_test.py
index 30aac26e96..9d8ffc55f8 100644
--- a/monitoring/uss_qualifier/scenarios/interuss/unit_test.py
+++ b/monitoring/uss_qualifier/scenarios/interuss/unit_test.py
@@ -25,7 +25,7 @@ def run(self, context: ExecutionContext):
self.end_test_scenario()
def execute_unit_test(self):
- context = ExecutionContext(None)
+ context = ExecutionContext(None, [])
self.run(context)
self.cleanup()
return self
diff --git a/monitoring/uss_qualifier/scenarios/scenario.py b/monitoring/uss_qualifier/scenarios/scenario.py
index 6bca2e7bb1..f1c2d17629 100644
--- a/monitoring/uss_qualifier/scenarios/scenario.py
+++ b/monitoring/uss_qualifier/scenarios/scenario.py
@@ -520,7 +520,11 @@ def check(
documentation=check_documentation,
participants=[] if participants is None else participants,
step_report=self._step_report,
- stop_fast=self.context.stop_fast,
+ stop_fast=self.context.stop_fast(
+ self._current_case.name if self._current_case else None,
+ self._current_step.name if self._current_step else None,
+ name,
+ ),
on_failed_check=self.on_failed_check,
)
diff --git a/monitoring/uss_qualifier/scenarios/scenario_test/utils.py b/monitoring/uss_qualifier/scenarios/scenario_test/utils.py
index 59acd1d937..e2babe4d9a 100644
--- a/monitoring/uss_qualifier/scenarios/scenario_test/utils.py
+++ b/monitoring/uss_qualifier/scenarios/scenario_test/utils.py
@@ -6,7 +6,6 @@
from implicitdict import StringBasedDateTime
from loguru import logger
-from monitoring.deployment_manager.infrastructure import Context
from monitoring.monitorlib.fetch import (
Query,
QueryType,
@@ -158,14 +157,17 @@ def __exit__(self, *args):
logger.enable("monitoring.uss_qualifier.scenarios.scenario")
-def build_context(stop_fast: bool = False) -> Context:
+def build_context(stop_fast: bool = False):
"""Return a context that can be used with TestScenarios"""
class DummyContext:
- stop_fast = False
+ stop_fast_result = False
+
+ def stop_fast(self, case_name: str, step_name: str, check_name: str) -> bool:
+ return self.stop_fast_result
dc = DummyContext()
- dc.stop_fast = stop_fast
+ dc.stop_fast_result = stop_fast
return dc
diff --git a/monitoring/uss_qualifier/suites/suite.py b/monitoring/uss_qualifier/suites/suite.py
index 8fde355d46..e6c47e0f2f 100644
--- a/monitoring/uss_qualifier/suites/suite.py
+++ b/monitoring/uss_qualifier/suites/suite.py
@@ -23,6 +23,7 @@
)
from monitoring.uss_qualifier.configurations.configuration import (
ExecutionConfiguration,
+ FullyQualifiedCheck,
TestSuiteActionSelectionCondition,
)
from monitoring.uss_qualifier.fileio import resolve_filename
@@ -411,11 +412,17 @@ def address(self) -> JSONAddress:
class ExecutionContext:
start_time: datetime
config: ExecutionConfiguration | None
+ acceptable_findings: list[FullyQualifiedCheck]
top_frame: ActionStackFrame | None
current_frame: ActionStackFrame | None
- def __init__(self, config: ExecutionConfiguration | None):
+ def __init__(
+ self,
+ config: ExecutionConfiguration | None,
+ acceptable_findings: list[FullyQualifiedCheck],
+ ):
self.config = config
+ self.acceptable_findings = acceptable_findings
self.top_frame = None
self.current_frame = None
self.start_time = arrow.utcnow().datetime
@@ -455,14 +462,29 @@ def test_scenario_reports(
for child in frame.children:
yield from self.test_scenario_reports(child)
- @property
- def stop_fast(self) -> bool:
+ def stop_fast(
+ self, test_case_name: str, test_step_name: str, check_name: str
+ ) -> bool:
if (
self.config is not None
and "stop_fast" in self.config
- and self.config.stop_fast is not None
+ and self.config.stop_fast
):
- return self.config.stop_fast
+ if (
+ "do_not_stop_fast_for_acceptable_findings" in self.config
+ and self.config.do_not_stop_fast_for_acceptable_findings
+ ):
+ # See if there is an exception for the particular check being considered
+ if self.current_frame and self.current_frame.action.test_scenario:
+ current_check = FullyQualifiedCheck(
+ scenario_type=self.current_frame.action.test_scenario.declaration.scenario_type,
+ test_case_name=test_case_name,
+ test_step_name=test_step_name,
+ check_name=check_name,
+ )
+ if current_check.contained_in(self.acceptable_findings):
+ return False
+ return True
return False
def _compute_n_of(
diff --git a/schemas/monitoring/uss_qualifier/configurations/configuration/ExecutionConfiguration.json b/schemas/monitoring/uss_qualifier/configurations/configuration/ExecutionConfiguration.json
index dceb2ee116..5796d3040d 100644
--- a/schemas/monitoring/uss_qualifier/configurations/configuration/ExecutionConfiguration.json
+++ b/schemas/monitoring/uss_qualifier/configurations/configuration/ExecutionConfiguration.json
@@ -7,6 +7,13 @@
"description": "Path to content that replaces the $ref",
"type": "string"
},
+ "do_not_stop_fast_for_acceptable_findings": {
+ "description": "If true, make an exception for stop_fast above when the failed check is identified as an acceptable_finding in one of the tested_requirements artifact descriptions.",
+ "type": [
+ "boolean",
+ "null"
+ ]
+ },
"include_action_when": {
"description": "If specified, only execute test actions if they are selected by ANY of these conditions (and not selected by any of the `skip_when` conditions).",
"items": {
diff --git a/schemas/monitoring/uss_qualifier/configurations/configuration/FullyQualifiedCheck.json b/schemas/monitoring/uss_qualifier/configurations/configuration/FullyQualifiedCheck.json
new file mode 100644
index 0000000000..67650728de
--- /dev/null
+++ b/schemas/monitoring/uss_qualifier/configurations/configuration/FullyQualifiedCheck.json
@@ -0,0 +1,34 @@
+{
+ "$id": "https://github.com/interuss/monitoring/blob/main/schemas/monitoring/uss_qualifier/configurations/configuration/FullyQualifiedCheck.json",
+ "$schema": "https://json-schema.org/draft/2020-12/schema",
+ "description": "monitoring.uss_qualifier.configurations.configuration.FullyQualifiedCheck, as defined in monitoring/uss_qualifier/configurations/configuration.py",
+ "properties": {
+ "$ref": {
+ "description": "Path to content that replaces the $ref",
+ "type": "string"
+ },
+ "check_name": {
+ "description": "Name of the check.",
+ "type": "string"
+ },
+ "scenario_type": {
+ "description": "Scenario in which the check occurs.",
+ "type": "string"
+ },
+ "test_case_name": {
+ "description": "Test case in which the check occurs.",
+ "type": "string"
+ },
+ "test_step_name": {
+ "description": "Test step in which the check occurs.",
+ "type": "string"
+ }
+ },
+ "required": [
+ "check_name",
+ "scenario_type",
+ "test_case_name",
+ "test_step_name"
+ ],
+ "type": "object"
+}
\ No newline at end of file
diff --git a/schemas/monitoring/uss_qualifier/configurations/configuration/TestedRequirementsConfiguration.json b/schemas/monitoring/uss_qualifier/configurations/configuration/TestedRequirementsConfiguration.json
index 23dee344c9..0d9770cb3e 100644
--- a/schemas/monitoring/uss_qualifier/configurations/configuration/TestedRequirementsConfiguration.json
+++ b/schemas/monitoring/uss_qualifier/configurations/configuration/TestedRequirementsConfiguration.json
@@ -7,6 +7,16 @@
"description": "Path to content that replaces the $ref",
"type": "string"
},
+ "acceptable_findings": {
+ "description": "If any check identified in this field fails, ignore the failure when determining Tested Requirements outcomes.",
+ "items": {
+ "$ref": "FullyQualifiedCheck.json"
+ },
+ "type": [
+ "array",
+ "null"
+ ]
+ },
"aggregate_participants": {
"additionalProperties": {
"items": {