Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
23 changes: 21 additions & 2 deletions .github/workflows/consumer_test.yml
Original file line number Diff line number Diff line change
Expand Up @@ -44,8 +44,27 @@ jobs:
- name: Run Consumer tests

run: |
set -o pipefail
.venv_docs/bin/python -m pytest -s -v src/tests/ --repo="$CONSUMER" --junitxml="reports/${{ matrix.consumer }}.xml" | tee "reports/${{ matrix.consumer }}.log"
pytest_rc=0
.venv_docs/bin/python -m pytest -vv src/tests/ \
--repo="$CONSUMER" \
--junitxml="reports/${{ matrix.consumer }}.xml" \
|| pytest_rc=$?


if [ -f "consumer_test.log" ]; then
src_log="consumer_test.log"
else
echo "consumer_test.log not found; expected at ./consumer_test.log"
exit ${pytest_rc:-1}
fi

dest_log="reports/${{ matrix.consumer }}.log"
mv "$src_log" "$dest_log"

tail -n 15 "$dest_log" >> "$GITHUB_STEP_SUMMARY"

cat "$dest_log"
exit $pytest_rc
env:
FORCE_COLOR: "1"
TERM: xterm-256color
Expand Down
96 changes: 56 additions & 40 deletions src/tests/test_consumer.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@
import pytest
from _pytest.config import Config
from pytest import TempPathFactory
from rich import print
from rich import box, print
from rich.console import Console
from rich.table import Table

Expand All @@ -47,10 +47,14 @@

# Max width of the printout.
# Trial and error has shown this is a good width for GH CI output.
len_max = 120
CACHE_DIR = Path.home() / ".cache" / "docs_as_code_consumer_tests"

log_file_name = "consumer_test.log"
# Need to ignore the ruff error here. Due to how the script is written,
# we can not use a context manager to open the log file, even though it
# would be preferable. In a future re-write this should be considered.
log_fp = open(log_file_name, "a", encoding="utf-8")  # noqa: SIM115
# All console output goes to the log file as plain text (no ANSI escapes,
# no color). Width is kept in sync with len_max so banner lines fill the
# full printout width.
console = Console(file=log_fp, force_terminal=False, width=len_max, color_system=None)


@dataclass
Expand Down Expand Up @@ -125,22 +129,24 @@ def sphinx_base_dir(tmp_path_factory: TempPathFactory, pytestconfig: Config) ->
if disable_cache:
# Use persistent cache directory for local development
temp_dir = tmp_path_factory.mktemp("testing_dir")
print(f"[blue]Using temporary directory: {temp_dir}[/blue]")
console.print(f"[blue]Using temporary directory: {temp_dir}[/blue]")
return temp_dir

CACHE_DIR.mkdir(parents=True, exist_ok=True)
print(f"[green]Using persistent cache directory: {CACHE_DIR}[/green]")
console.print(f"[green]Using persistent cache directory: {CACHE_DIR}[/green]")
return CACHE_DIR


def cleanup(cmd: str):
    """
    Cleanup before tests are run.

    Removes generated ``ubproject.toml`` overrides and the ``_build``
    output directory, then runs ``bazel clean --async`` so every command
    starts from a clean workspace.

    Args:
        cmd: The consumer command about to be executed. When it is
            ``bazel run //:ide_support`` the cached ``.venv_docs``
            virtualenv is removed as well, since that command recreates it.
    """
    for override in Path(".").glob("*/ubproject.toml"):
        override.unlink()
    shutil.rmtree("_build", ignore_errors=True)
    if cmd == "bazel run //:ide_support":
        shutil.rmtree(".venv_docs", ignore_errors=True)
    # Best-effort clean; the return code is deliberately not checked.
    clean_cmd = "bazel clean --async"
    subprocess.run(clean_cmd.split(), text=True)


Expand Down Expand Up @@ -174,13 +180,15 @@ def filter_repos(repo_filter: str | None) -> list[ConsumerRepo]:
# Warn about any repos that weren't found
if requested_repos:
available_names = [repo.name for repo in REPOS_TO_TEST]
print(f"[yellow]Warning: Unknown repositories: {requested_repos}[/yellow]")
print(f"[yellow]Available repositories: {available_names}[/yellow]")
console.print(
f"[yellow]Warning: Unknown repositories: {requested_repos}[/yellow]"
)
console.print(f"[yellow]Available repositories: {available_names}[/yellow]")

# If no valid repos were found but filter was provided, return all repos
# This prevents accidentally running zero tests due to typos
if not filtered_repos and repo_filter:
print(
console.print(
"[red]No valid repositories found in filter, "
"running all repositories instead[/red]"
)
Expand Down Expand Up @@ -254,9 +262,9 @@ def parse_bazel_output(BR: BuildOutput, pytestconfig: Config) -> BuildOutput:
warning_dict: dict[str, list[str]] = defaultdict(list)

if pytestconfig.get_verbosity() >= 2 and os.getenv("CI"):
print("[DEBUG] Raw warnings in CI:")
console.print("[DEBUG] Raw warnings in CI:")
for i, warning in enumerate(split_warnings):
print(f"[DEBUG] Warning {i}: {repr(warning)}")
console.print(f"[DEBUG] Warning {i}: {repr(warning)}")

for raw_warning in split_warnings:
# In the CLI we seem to have some ansi codes in the warnings.
Expand All @@ -279,23 +287,23 @@ def parse_bazel_output(BR: BuildOutput, pytestconfig: Config) -> BuildOutput:
def print_overview_logs(BR: BuildOutput):
warning_loggers = list(BR.warnings.keys())
len_left_test_result = len_max - len("TEST RESULTS")
print(
console.print(
f"[blue]{'=' * int(len_left_test_result / 2)}"
f"TEST RESULTS"
f"{'=' * int(len_left_test_result / 2)}[/blue]"
)
print(f"[navy_blue]{'=' * len_max}[/navy_blue]")
console.print(f"[navy_blue]{'=' * len_max}[/navy_blue]")
warning_total_loggers_msg = f"Warning Loggers Total: {len(warning_loggers)}"
len_left_loggers = len_max - len(warning_total_loggers_msg)
print(
console.print(
f"[blue]{'=' * int(len_left_loggers / 2)}"
f"{warning_total_loggers_msg}"
f"{'=' * int(len_left_loggers / 2)}[/blue]"
)
warning_loggers = list(BR.warnings.keys())
warning_total_msg = "Logger Warnings Accumulated"
len_left_loggers_total = len_max - len(warning_total_msg)
print(
console.print(
f"[blue]{'=' * int(len_left_loggers_total / 2)}"
f"{warning_total_msg}"
f"{'=' * int(len_left_loggers_total / 2)}[/blue]"
Expand All @@ -306,20 +314,20 @@ def print_overview_logs(BR: BuildOutput):
color = "orange1" if logger == "[NO SPECIFIC LOGGER]" else "red"
warning_logger_msg = f"{logger} has {len(BR.warnings[logger])} warnings"
len_left_logger = len_max - len(warning_logger_msg)
print(
console.print(
f"[{color}]{'=' * int(len_left_logger / 2)}"
f"{warning_logger_msg}"
f"{'=' * int(len_left_logger / 2)}[/{color}]"
)
print(f"[blue]{'=' * len_max}[/blue]")
console.print(f"[blue]{'=' * len_max}[/blue]")


def verbose_printout(BR: BuildOutput):
"""Prints warnings for each logger when '-v' or higher is specified."""
warning_loggers = list(BR.warnings.keys())
for logger in warning_loggers:
len_left_logger = len_max - len(logger)
print(
console.print(
f"[cornflower_blue]{'=' * int(len_left_logger / 2)}"
f"{logger}"
f"{'=' * int(len_left_logger / 2)}[/cornflower_blue]"
Expand All @@ -329,36 +337,36 @@ def verbose_printout(BR: BuildOutput):
color = "red"
if logger == "[NO SPECIFIC LOGGER]":
color = "orange1"
print(
console.print(
f"[{color}]{'=' * int(len_left_warnings / 2)}"
f"{f'Warnings Found: {len(warnings)}'}"
f"{'=' * int(len_left_warnings / 2)}[/{color}]"
)
print("\n".join(f"[{color}]{x}[/{color}]" for x in warnings))
console.print("\n".join(f"[{color}]{x}[/{color}]" for x in warnings))


def print_running_cmd(repo: str, cmd: str, local_or_git: str):
    """Prints a 'Title Card' for the current command.

    Args:
        repo: Name of the repository under test.
        cmd: The command that is about to be run.
        local_or_git: Label describing whether the local or the git
            version of docs-as-code is being tested.
    """

    def banner(text: str, color: str) -> str:
        # Pad both sides with '=' so the line is len_max wide; integer
        # halving can leave the line one char short for odd lengths.
        pad = "=" * int((len_max - len(text)) / 2)
        return f"[{color}]{pad}{text}{pad}[/{color}]"

    console.print(f"\n[cyan]{'=' * len_max}[/cyan]")
    console.print(banner(repo, "cornflower_blue"))
    console.print(banner(local_or_git, "cornflower_blue"))
    console.print(banner(cmd, "cornflower_blue"))
    console.print(f"[cyan]{'=' * len_max}[/cyan]")


def analyze_build_success(BR: BuildOutput) -> tuple[bool, str]:
Expand Down Expand Up @@ -401,8 +409,8 @@ def print_final_result(BR: BuildOutput, repo_name: str, cmd: str, pytestconfig:
verbose_printout(BR)
if pytestconfig.get_verbosity() >= 2:
# Verbosity Level 2 (-vv)
print("==== STDOUT ====:\n\n", BR.stdout)
print("==== STDERR ====:\n\n", BR.stderr)
console.print("==== STDOUT ====:\n\n", BR.stdout)
console.print("==== STDERR ====:\n\n", BR.stderr)

is_success, reason = analyze_build_success(BR)

Expand All @@ -412,20 +420,20 @@ def print_final_result(BR: BuildOutput, repo_name: str, cmd: str, pytestconfig:
# Printing a small 'report' for each cmd.
result_msg = f"{repo_name} - {cmd}: {status}"
len_left = len_max - len(result_msg)
print(
console.print(
f"[{color}]{'=' * int(len_left / 2)}"
f"{result_msg}"
f"{'=' * int(len_left / 2)}[/{color}]"
)
print(f"[{color}]Reason: {reason}[/{color}]")
print(f"[{color}]{'=' * len_max}[/{color}]")
console.print(f"[{color}]Reason: {reason}[/{color}]")
console.print(f"[{color}]{'=' * len_max}[/{color}]")

return is_success, reason


def print_result_table(results: list[Result]):
"""Printing an 'overview' table to show all results."""
table = Table(title="Docs-As-Code Consumer Test Result")
table = Table(title="Docs-As-Code Consumer Test Result", box=box.MARKDOWN)
table.add_column("Repository")
table.add_column("CMD")
table.add_column("LOCAL OR GIT")
Expand All @@ -441,12 +449,12 @@ def print_result_table(results: list[Result]):
result.reason,
style=style,
)
print(table)
console.print(table)


def stream_subprocess_output(cmd: str, repo_name: str):
"""Stream subprocess output in real-time for maximum verbosity"""
print(f"[green]Streaming output for: {cmd}[/green]")
console.print(f"[green]Streaming output for: {cmd}[/green]")

process = subprocess.Popen(
cmd.split(),
Expand All @@ -461,7 +469,7 @@ def stream_subprocess_output(cmd: str, repo_name: str):
if process.stdout is not None:
for line in iter(process.stdout.readline, ""):
if line:
print(line.rstrip()) # Print immediately
console.print(line.rstrip()) # Print immediately
output_lines.append(line)

process.stdout.close()
Expand All @@ -483,7 +491,7 @@ def run_cmd(
) -> tuple[list[Result], bool]:
verbosity: int = pytestconfig.get_verbosity()

cleanup()
cleanup(cmd)

if verbosity >= 3:
# Level 3 (-vvv): Stream output in real-time
Expand Down Expand Up @@ -584,7 +592,7 @@ def prepare_repo_overrides(
repo_path = Path(repo_name)

if not use_cache and repo_path.exists():
print(f"[green]Using cached repository: {repo_name}[/green]")
console.print(f"[green]Using cached repository: {repo_name}[/green]")
# Update the existing repo
os.chdir(repo_name)
subprocess.run(["git", "fetch", "origin"], check=True, capture_output=True)
Expand Down Expand Up @@ -616,6 +624,7 @@ def prepare_repo_overrides(

# Updated version of your test loop
def test_and_clone_repos_updated(sphinx_base_dir: Path, pytestconfig: Config):
global log_file_name
# Get command line options from pytest config

repo_tests: str | None = cast(str | None, pytestconfig.getoption("--repo"))
Expand All @@ -625,10 +634,10 @@ def test_and_clone_repos_updated(sphinx_base_dir: Path, pytestconfig: Config):

# Exit early if we don't find repos to test.
if not repos_to_test:
print("[red]No repositories to test after filtering![/red]")
console.print("[red]No repositories to test after filtering![/red]")
return

print(
console.print(
f"[green]Testing {len(repos_to_test)} repositories: "
f"{[r.name for r in repos_to_test]}[/green]"
)
Expand All @@ -642,6 +651,12 @@ def test_and_clone_repos_updated(sphinx_base_dir: Path, pytestconfig: Config):
results: list[Result] = []

for repo in repos_to_test:
len_left_repo = len_max - len(repo.name)
console.print(f"{'=' * len_max}")
console.print(f"{'=' * len_max}")
console.print(
f"{'=' * int(len_left_repo / 2)}{repo.name}{'=' * int(len_left_repo / 2)}"
)
# β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”
# β”‚ Preparing the Repository for testing β”‚
# β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜
Expand Down Expand Up @@ -692,3 +707,4 @@ def test_and_clone_repos_updated(sphinx_base_dir: Path, pytestconfig: Config):
pytest.fail(
reason="Consumer Tests failed, see table for which commands specifically. "
)
log_fp.close()
Loading