diff --git a/.typos.toml b/.typos.toml index 432385eef0..6123410cb1 100644 --- a/.typos.toml +++ b/.typos.toml @@ -28,6 +28,7 @@ choises = "choises" # appears in comment explaining validation purpose ordr = "ordr" # typo for "order" in "weno_ordr" - tests param suggestions unknwn = "unknwn" # typo for "unknown" - tests unknown param detection tru = "tru" # typo for "true" in "when_tru" - tests dependency keys +PNGs = "PNGs" [files] extend-exclude = ["docs/documentation/references*", "docs/references.bib", "tests/", "toolchain/cce_simulation_workgroup_256.sh", "build-docs/"] diff --git a/docs/documentation/case.md b/docs/documentation/case.md index 356f6d90fc..2f884ab94b 100644 --- a/docs/documentation/case.md +++ b/docs/documentation/case.md @@ -651,7 +651,7 @@ To restart the simulation from $k$-th time step, see @ref running "Restarting Ca The table lists formatted database output parameters. The parameters define variables that are outputted from simulation and file types and formats of data as well as options for post-processing. -- `format` specifies the choice of the file format of data file outputted by MFC by an integer of 1 and 2. `format = 1` and `2` correspond to Silo-HDF5 format and binary format, respectively. +- `format` specifies the choice of the file format of data file outputted by MFC by an integer of 1 and 2. `format = 1` and `2` correspond to Silo-HDF5 format and binary format, respectively. Both formats are supported by `./mfc.sh viz` (see @ref visualization "Flow Visualization"). Silo-HDF5 requires the h5py Python package; binary has no extra dependencies. - `precision` specifies the choice of the floating-point format of the data file outputted by MFC by an integer of 1 and 2. `precision = 1` and `2` correspond to single-precision and double-precision formats, respectively. 
diff --git a/docs/documentation/getting-started.md b/docs/documentation/getting-started.md index abb4c7d55b..101a569116 100644 --- a/docs/documentation/getting-started.md +++ b/docs/documentation/getting-started.md @@ -204,6 +204,24 @@ MFC is **unit-agnostic**: the solver performs no internal unit conversions. What The only requirement is **consistency** — all inputs must use the same unit system. Note that some parameters use **transformed stored forms** rather than standard physical values (e.g., `gamma` expects \f$1/(\gamma-1)\f$, not \f$\gamma\f$ itself). See @ref sec-stored-forms for details. +## Visualizing Results + +After running post_process, visualize the output directly from the command line: + +```shell +# List available variables +./mfc.sh viz examples/2D_shockbubble/ --list-vars --step 0 + +# Render a pressure snapshot +./mfc.sh viz examples/2D_shockbubble/ --var pres --step 1000 + +# Generate a video +./mfc.sh viz examples/2D_shockbubble/ --var pres --step all --mp4 +``` + +Output images and videos are saved to the `viz/` subdirectory of the case. +For more options, see @ref visualization "Flow Visualization" or run `./mfc.sh viz -h`. + ## Helpful Tools ### Parameter Lookup diff --git a/docs/documentation/running.md b/docs/documentation/running.md index 9039ab465f..cd663634fd 100644 --- a/docs/documentation/running.md +++ b/docs/documentation/running.md @@ -73,6 +73,14 @@ using 4 cores: ./mfc.sh run examples/2D_shockbubble/case.py -t simulation post_process -n 4 ``` +- Visualizing post-processed output: + +```shell +./mfc.sh viz examples/2D_shockbubble/ --var pres --step 1000 +``` + +See @ref visualization "Flow Visualization" for the full set of visualization options. 
+ --- ## Running on GPUs diff --git a/docs/documentation/troubleshooting.md b/docs/documentation/troubleshooting.md index e272897986..9e7dd2c468 100644 --- a/docs/documentation/troubleshooting.md +++ b/docs/documentation/troubleshooting.md @@ -37,6 +37,7 @@ This guide covers debugging tools, common issues, and troubleshooting workflows ./mfc.sh run case.py -v # Run with verbose output ./mfc.sh test --only # Run a specific test ./mfc.sh clean # Clean and start fresh +./mfc.sh viz case_dir/ --list-vars --step 0 # Inspect post-processed data ``` --- @@ -457,6 +458,47 @@ Common issues: --- +## Visualization Issues + +### "No 'binary/' or 'silo_hdf5/' directory found" + +**Cause:** Post-processing has not been run, or the case directory path is wrong. + +**Fix:** +1. Run post_process first: + ```bash + ./mfc.sh run case.py -t post_process + ``` +2. Verify the path points to the case directory (containing `binary/` or `silo_hdf5/`) + +### "Variable 'X' not found" + +**Cause:** The requested variable was not written during post-processing. + +**Fix:** +1. List available variables: + ```bash + ./mfc.sh viz case_dir/ --list-vars --step 0 + ``` +2. Ensure your case file enables the desired output (e.g., ``prim_vars_wrt = 'T'``, ``cons_vars_wrt = 'T'``) + +### "h5py is required to read Silo-HDF5 files" + +**Cause:** The case was post-processed with `format=1` (Silo-HDF5) but `h5py` is not installed. + +**Fix:** +- Install h5py: `pip install h5py` +- Or re-run post_process with `format=2` in your case file to produce binary output + +### Visualization looks wrong or has artifacts + +**Possible causes and fixes:** +1. **Color range:** Try setting explicit `--vmin` and `--vmax` values +2. **Wrong variable:** Use `--list-vars` to check available variables +3. 
**3D slice position:** Adjust `--slice-axis` and `--slice-value` to view the correct plane + +--- + ## Getting Help If you can't resolve an issue: diff --git a/docs/documentation/visualization.md b/docs/documentation/visualization.md index 2f5ed308d0..da8587fc03 100644 --- a/docs/documentation/visualization.md +++ b/docs/documentation/visualization.md @@ -2,13 +2,133 @@ # Flow visualization -A post-processed database in Silo-HDF5 format can be visualized and analyzed using Paraview and VisIt. -After the post-processing of simulation data (see section @ref running "Running"), a directory named `silo_hdf5` contains a silo-HDF5 database. -Here, `silo_hdf5/` includes a directory named `root/` that contains index files for flow field data at each saved time step. +After running `post_process` on a simulation (see @ref running "Running"), MFC produces output in either Silo-HDF5 format (`format=1`) or binary format (`format=2`). +These can be visualized using MFC's built-in CLI tool or external tools like ParaView and VisIt. -### Visualizing with Paraview +--- -Paraview is an open-source interactive parallel visualization and graphical analysis tool for viewing scientific data. +## Quick visualization with `./mfc.sh viz` + +MFC includes a built-in visualization command that renders images and videos directly from post-processed output — no external GUI tools needed. + +### Basic usage + +```bash +# Plot pressure at timestep 1000 +./mfc.sh viz case_dir/ --var pres --step 1000 + +# Plot density at all available timesteps +./mfc.sh viz case_dir/ --var rho --step all +``` + +The command auto-detects the output format (binary or Silo-HDF5) and dimensionality (1D, 2D, or 3D). +Output images are saved to `case_dir/viz/` by default. 
+ +### Exploring available data + +Before plotting, you can inspect what data is available: + +```bash +# List all available timesteps +./mfc.sh viz case_dir/ --list-steps + +# List all available variables at a given timestep +./mfc.sh viz case_dir/ --list-vars --step 0 +``` + +### Timestep selection + +The `--step` argument accepts several formats: + +| Format | Example | Description | +|--------|---------|-------------| +| Single | `--step 1000` | One timestep | +| Range | `--step 0:10000:500` | Start:end:stride (inclusive) | +| All | `--step all` | Every available timestep | + +### Rendering options + +Customize the appearance of plots: + +```bash +# Custom colormap and color range +./mfc.sh viz case_dir/ --var rho --step 1000 --cmap RdBu --vmin 0.5 --vmax 2.0 + +# Higher resolution +./mfc.sh viz case_dir/ --var pres --step 500 --dpi 300 + +# Logarithmic color scale +./mfc.sh viz case_dir/ --var schlieren --step 1000 --log-scale +``` + +| Option | Description | Default | +|--------|-------------|---------| +| `--cmap` | Matplotlib colormap name | `viridis` | +| `--vmin` | Minimum color scale value | auto | +| `--vmax` | Maximum color scale value | auto | +| `--dpi` | Image resolution (dots per inch) | 150 | +| `--log-scale` | Use logarithmic color scale | off | +| `--output` | Output directory for images | `case_dir/viz/` | + +### 3D slicing + +For 3D simulations, `viz` extracts a 2D slice for plotting. 
+By default, it slices at the midplane along the z-axis: + +```bash +# Default z-midplane slice +./mfc.sh viz case_dir/ --var pres --step 500 + +# Slice along the x-axis at x=0.25 +./mfc.sh viz case_dir/ --var pres --step 500 --slice-axis x --slice-value 0.25 + +# Slice by array index +./mfc.sh viz case_dir/ --var pres --step 500 --slice-axis y --slice-index 50 +``` + +### Video generation + +Generate MP4 videos from a range of timesteps: + +```bash +# Basic video (10 fps) +./mfc.sh viz case_dir/ --var pres --step 0:10000:100 --mp4 + +# Custom frame rate +./mfc.sh viz case_dir/ --var schlieren --step all --mp4 --fps 24 + +# Video with fixed color range +./mfc.sh viz case_dir/ --var rho --step 0:5000:50 --mp4 --vmin 0.1 --vmax 1.0 +``` + +Videos are saved as `case_dir/viz/<var>.mp4`. +The color range is automatically computed from the first, middle, and last frames unless `--vmin`/`--vmax` are specified. + +### Format selection + +The output format is auto-detected from the case directory. +To override: + +```bash +./mfc.sh viz case_dir/ --var pres --step 0 --format binary +./mfc.sh viz case_dir/ --var pres --step 0 --format silo +``` + +> [!NOTE] +> Reading Silo-HDF5 files requires the `h5py` Python package. +> If it is not installed, you will see a clear error message with installation instructions. +> Alternatively, use `format=2` (binary) in your case file to produce binary output, which has no extra dependencies. + +### Complete option reference + +Run `./mfc.sh viz -h` for a full list of options. + +--- + +## Visualizing with ParaView + +ParaView is an open-source interactive parallel visualization and graphical analysis tool for viewing scientific data. +Post-processed data in Silo-HDF5 format (`format=1`) can be opened directly in ParaView. Paraview 5.11.0 has been confirmed to work with the MFC databases for some parallel environments. Nevertheless, the installation and configuration of Paraview can be environment-dependent and are left to the user. 
diff --git a/examples/1D_inert_shocktube/viz.py b/examples/1D_inert_shocktube/viz.py index 6d96f4a8bb..eb477f2c2a 100644 --- a/examples/1D_inert_shocktube/viz.py +++ b/examples/1D_inert_shocktube/viz.py @@ -1,4 +1,4 @@ -import mfc.viz +import mfc.viz_legacy as mfc_viz import os import subprocess @@ -8,11 +8,11 @@ from case import sol_L as sol -case = mfc.viz.Case(".") +case = mfc_viz.Case(".") os.makedirs("viz", exist_ok=True) -# sns.set_theme(style=mfc.viz.generate_cpg_style()) +# sns.set_theme(style=mfc_viz.generate_cpg_style()) Y_VARS = ["H2", "O2", "H2O", "N2"] diff --git a/examples/1D_reactive_shocktube/viz.py b/examples/1D_reactive_shocktube/viz.py index 63c769fca9..2a38e21b2c 100644 --- a/examples/1D_reactive_shocktube/viz.py +++ b/examples/1D_reactive_shocktube/viz.py @@ -1,4 +1,4 @@ -import mfc.viz +import mfc.viz_legacy as mfc_viz import os import subprocess @@ -8,11 +8,11 @@ from case import sol_L as sol -case = mfc.viz.Case(".") +case = mfc_viz.Case(".") os.makedirs("viz", exist_ok=True) -sns.set_theme(style=mfc.viz.generate_cpg_style()) +sns.set_theme(style=mfc_viz.generate_cpg_style()) Y_VARS = ["H2", "O2", "H2O", "N2"] diff --git a/examples/nD_perfect_reactor/analyze.py b/examples/nD_perfect_reactor/analyze.py index b2bd4e1fa8..51ec9f3456 100644 --- a/examples/nD_perfect_reactor/analyze.py +++ b/examples/nD_perfect_reactor/analyze.py @@ -3,12 +3,12 @@ from tqdm import tqdm import matplotlib.pyplot as plt -import mfc.viz +import mfc.viz_legacy as mfc_viz from case import dt, Tend, SAVE_COUNT, sol -case = mfc.viz.Case(".", dt) +case = mfc_viz.Case(".", dt) -sns.set_theme(style=mfc.viz.generate_cpg_style()) +sns.set_theme(style=mfc_viz.generate_cpg_style()) Y_MAJORS = set(["H", "O", "OH", "HO2"]) Y_MINORS = set(["H2O", "H2O2"]) diff --git a/examples/nD_perfect_reactor/export.py b/examples/nD_perfect_reactor/export.py index 112622a32f..1d042791a8 100644 --- a/examples/nD_perfect_reactor/export.py +++ b/examples/nD_perfect_reactor/export.py @@ -2,10 +2,10 
@@ import cantera as ct from tqdm import tqdm -import mfc.viz +import mfc.viz_legacy as mfc_viz from case import dt, NS, Tend, SAVE_COUNT, sol -case = mfc.viz.Case(".", dt) +case = mfc_viz.Case(".", dt) for name in tqdm(sol.species_names, desc="Loading Variables"): case.load_variable(f"Y_{name}", f"prim.{5 + sol.species_index(name)}") diff --git a/toolchain/main.py b/toolchain/main.py index 568db1db67..61df696faf 100644 --- a/toolchain/main.py +++ b/toolchain/main.py @@ -125,6 +125,8 @@ def __print_greeting(): def __checks(): + if ARG("command") in ("viz", "params", "completion", "help"): + return if not does_command_exist("cmake"): raise MFCException("CMake is required to build MFC but couldn't be located on your system. Please ensure it installed and discoverable (e.g in your system's $PATH).") @@ -175,6 +177,9 @@ def __run(): # pylint: disable=too-many-branches elif cmd == "generate": from mfc import generate # pylint: disable=import-outside-toplevel generate.generate() + elif cmd == "viz": + from mfc.viz import viz # pylint: disable=import-outside-toplevel + viz.viz() elif cmd == "params": from mfc import params_cmd # pylint: disable=import-outside-toplevel params_cmd.params() diff --git a/toolchain/mfc/args.py b/toolchain/mfc/args.py index a05fdd5d72..4601943d3f 100644 --- a/toolchain/mfc/args.py +++ b/toolchain/mfc/args.py @@ -109,7 +109,7 @@ def custom_error(message): # Add default arguments of other subparsers # This ensures all argument keys exist even for commands that don't define them # Only process subparsers that have common arguments we need - relevant_subparsers = ["run", "test", "build", "clean", "count", "count_diff", "validate"] + relevant_subparsers = ["run", "test", "build", "clean", "count", "count_diff", "validate", "viz"] for name in relevant_subparsers: if args["command"] == name: continue @@ -120,7 +120,7 @@ def custom_error(message): # Parse with dummy input to get defaults (suppress errors for required positionals) try: # Commands with 
required positional input need a dummy value - if name in ["run", "validate"]: + if name in ["run", "validate", "viz"]: vals, _ = subparser.parse_known_args(["dummy_input.py"]) elif name == "build": vals, _ = subparser.parse_known_args([]) diff --git a/toolchain/mfc/cli/commands.py b/toolchain/mfc/cli/commands.py index 8ad8c4bd07..6eb4c50236 100644 --- a/toolchain/mfc/cli/commands.py +++ b/toolchain/mfc/cli/commands.py @@ -861,6 +861,156 @@ include_common=["targets", "mfc_config", "jobs", "verbose", "debug_log"], ) +VIZ_COMMAND = Command( + name="viz", + help="Visualize post-processed MFC output.", + description="Render 2D colormaps, 3D slices, 1D line plots, and MP4 videos from MFC post-processed output (binary or Silo-HDF5).", + positionals=[ + Positional( + name="input", + help="Path to the case directory containing binary/ or silo_hdf5/ output.", + completion=Completion(type=CompletionType.DIRECTORIES), + ), + ], + arguments=[ + Argument( + name="var", + help="Variable name to visualize (e.g. 
pres, rho, schlieren).", + type=str, + default=None, + metavar="VAR", + ), + Argument( + name="step", + help="Timestep(s): single int, start:end:stride, or 'all'.", + type=str, + default=None, + metavar="STEP", + ), + Argument( + name="format", + short="f", + help="Output format: binary or silo (auto-detected if omitted).", + type=str, + default=None, + choices=["binary", "silo"], + completion=Completion(type=CompletionType.CHOICES, choices=["binary", "silo"]), + ), + Argument( + name="output", + short="o", + help="Output directory for rendered images/videos.", + type=str, + default=None, + metavar="DIR", + completion=Completion(type=CompletionType.DIRECTORIES), + ), + Argument( + name="cmap", + help="Matplotlib colormap name (default: viridis).", + type=str, + default=None, + metavar="CMAP", + ), + Argument( + name="vmin", + help="Minimum value for color scale.", + type=float, + default=None, + metavar="VMIN", + ), + Argument( + name="vmax", + help="Maximum value for color scale.", + type=float, + default=None, + metavar="VMAX", + ), + Argument( + name="dpi", + help="Image resolution in DPI (default: 150).", + type=int, + default=None, + metavar="DPI", + ), + Argument( + name="slice-axis", + help="Axis for 3D slice: x, y, or z (default: z).", + type=str, + default=None, + choices=["x", "y", "z"], + dest="slice_axis", + completion=Completion(type=CompletionType.CHOICES, choices=["x", "y", "z"]), + ), + Argument( + name="slice-value", + help="Coordinate value at which to take the 3D slice.", + type=float, + default=None, + dest="slice_value", + metavar="VAL", + ), + Argument( + name="slice-index", + help="Array index at which to take the 3D slice.", + type=int, + default=None, + dest="slice_index", + metavar="IDX", + ), + Argument( + name="mp4", + help="Generate an MP4 video instead of individual images.", + action=ArgAction.STORE_TRUE, + default=False, + ), + Argument( + name="fps", + help="Frames per second for MP4 output (default: 10).", + type=int, + 
default=None, + metavar="FPS", + ), + Argument( + name="list-vars", + help="List available variable names and exit.", + action=ArgAction.STORE_TRUE, + default=False, + dest="list_vars", + ), + Argument( + name="list-steps", + help="List available timesteps and exit.", + action=ArgAction.STORE_TRUE, + default=False, + dest="list_steps", + ), + Argument( + name="log-scale", + help="Use logarithmic color scale.", + action=ArgAction.STORE_TRUE, + default=False, + dest="log_scale", + ), + ], + examples=[ + Example("./mfc.sh viz case_dir/ --var pres --step 1000", "Plot pressure at step 1000"), + Example("./mfc.sh viz case_dir/ --list-vars --step 0", "List available variables"), + Example("./mfc.sh viz case_dir/ --list-steps", "List available timesteps"), + Example("./mfc.sh viz case_dir/ --var schlieren --step 0:10000:500 --mp4", "Generate video"), + Example("./mfc.sh viz case_dir/ --var pres --step 500 --slice-axis z", "3D slice at z midplane"), + ], + key_options=[ + ("--var NAME", "Variable to visualize"), + ("--step STEP", "Timestep(s): int, start:end:stride, or 'all'"), + ("--list-vars", "List available variables"), + ("--list-steps", "List available timesteps"), + ("--mp4", "Generate MP4 video"), + ("--cmap NAME", "Matplotlib colormap"), + ("--slice-axis x|y|z", "Axis for 3D slice"), + ], +) + PARAMS_COMMAND = Command( name="params", help="Search and explore MFC case parameters.", @@ -1002,6 +1152,7 @@ CLEAN_COMMAND, VALIDATE_COMMAND, NEW_COMMAND, + VIZ_COMMAND, PARAMS_COMMAND, PACKER_COMMAND, COMPLETION_COMMAND, diff --git a/toolchain/mfc/cli/docs_gen.py b/toolchain/mfc/cli/docs_gen.py index a91213a7be..989ea6a612 100644 --- a/toolchain/mfc/cli/docs_gen.py +++ b/toolchain/mfc/cli/docs_gen.py @@ -243,7 +243,7 @@ def generate_cli_reference(schema: CLISchema) -> str: # Command categories core_commands = ["build", "run", "test", "clean", "validate"] - utility_commands = ["new", "params", "packer", "completion", "generate", "help"] + utility_commands = ["new", "viz", 
"params", "packer", "completion", "generate", "help"] dev_commands = ["lint", "format", "spelling", "precheck", "count", "count_diff"] ci_commands = ["bench", "bench_diff"] other_commands = ["load", "interactive"] diff --git a/toolchain/mfc/viz/__init__.py b/toolchain/mfc/viz/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/toolchain/mfc/viz/reader.py b/toolchain/mfc/viz/reader.py new file mode 100644 index 0000000000..7dc7251958 --- /dev/null +++ b/toolchain/mfc/viz/reader.py @@ -0,0 +1,423 @@ +""" +Binary format reader for MFC post-processed output. + +Reads Fortran unformatted binary files produced by MFC's post_process +with format=2. Each Fortran `write` produces one record: + [4-byte record-marker][payload][4-byte record-marker] + +File layout per processor: + Record 1 (header): m(int32), n(int32), p(int32), dbvars(int32) + Record 2 (grid): x_cb [, y_cb [, z_cb]] (float32 or float64) + Records 3..N (vars): varname(50-char) + data((m+1)*(n+1)*(p+1) floats) +""" + +import os +import struct +from dataclasses import dataclass, field +from typing import Dict, List, Optional, Tuple + +import numpy as np + + +NAME_LEN = 50 # Fortran character length for variable names + + +@dataclass +class ProcessorData: + """Data from a single processor file. + + m, n, p store dimension metadata but their exact semantics differ + between readers: + - Binary: m = Fortran header value, x_cb has m+2 elements, + cell count is m+1. + - Silo: m = cell count = len(x_cb) - 1. + Assembly code intentionally avoids using m/n/p for array sizing — + it derives everything from x_cb/y_cb/z_cb lengths. If future code + needs m directly, this discrepancy must be resolved. 
+ """ + m: int + n: int + p: int + x_cb: np.ndarray + y_cb: np.ndarray + z_cb: np.ndarray + variables: Dict[str, np.ndarray] = field(default_factory=dict) + + +@dataclass +class AssembledData: + """Assembled multi-processor data on a global grid.""" + ndim: int + x_cc: np.ndarray + y_cc: np.ndarray + z_cc: np.ndarray + variables: Dict[str, np.ndarray] = field(default_factory=dict) + + + +def _detect_endianness(path: str) -> str: + """Detect endianness from the first record marker. + + The header record contains 4 int32s (m, n, p, dbvars) = 16 bytes, + so the leading Fortran record marker must be 16. + """ + with open(path, 'rb') as f: + raw = f.read(4) + if len(raw) < 4: + raise EOFError(f"File too short to detect endianness: {path}") + le = struct.unpack('i', raw)[0] + if be == 16: + return '>' + raise ValueError( + f"Cannot detect endianness: first record marker is {le} (LE) / {be} (BE), expected 16" + ) + + +def _read_record_endian(f, endian: str) -> bytes: + """Read one Fortran unformatted record with known endianness.""" + raw = f.read(4) + if len(raw) < 4: + raise EOFError("Unexpected end of file reading record marker") + rec_len = struct.unpack(f'{endian}i', raw)[0] + if rec_len < 0: + raise ValueError(f"Invalid Fortran record length: {rec_len}") + payload = f.read(rec_len) + if len(payload) < rec_len: + raise EOFError("Unexpected end of file reading record payload") + trail = f.read(4) + if len(trail) < 4: + raise EOFError("Unexpected end of file reading trailing record marker") + trail_len = struct.unpack(f'{endian}i', trail)[0] + if trail_len != rec_len: + raise ValueError( + f"Fortran record marker mismatch: leading={rec_len}, trailing={trail_len}. " + "File may be corrupted." + ) + return payload + + +def read_binary_file(path: str, var_filter: Optional[str] = None) -> ProcessorData: # pylint: disable=too-many-locals,too-many-statements + """ + Read a single MFC binary post-process file. + + Args: + path: Path to the .dat file. 
+ var_filter: If given, only load this variable (skip others). + + Returns: + ProcessorData with grid and variable data. + """ + endian = _detect_endianness(path) + + with open(path, 'rb') as f: + # Record 1: header [m, n, p, dbvars] — 4 int32 + hdr = _read_record_endian(f, endian) + m, n, p, dbvars = struct.unpack(f'{endian}4i', hdr) + if m < 0 or n < 0 or p < 0 or dbvars < 0: + raise ValueError( + f"Invalid header in {path}: m={m}, n={n}, p={p}, dbvars={dbvars}" + ) + + # Record 2: grid coordinates — all in one record + grid_raw = _read_record_endian(f, endian) + grid_bytes = len(grid_raw) + + # Determine number of grid values + if p > 0: + n_vals = (m + 2) + (n + 2) + (p + 2) + elif n > 0: + n_vals = (m + 2) + (n + 2) + else: + n_vals = m + 2 + + # Auto-detect grid precision from record size + if grid_bytes == n_vals * 8: + grid_dtype = np.dtype(f'{endian}f8') + elif grid_bytes == n_vals * 4: + grid_dtype = np.dtype(f'{endian}f4') + else: + bytes_per_val = grid_bytes / n_vals if n_vals else 0 + raise ValueError( + f"Cannot determine grid precision: {grid_bytes} bytes for {n_vals} values " + f"({bytes_per_val:.1f} bytes/value)" + ) + + grid_arr = np.frombuffer(grid_raw, dtype=grid_dtype) + + # Split into x_cb, y_cb, z_cb + offset = 0 + x_cb = grid_arr[offset:offset + m + 2].astype(np.float64) + offset += m + 2 + if n > 0: + y_cb = grid_arr[offset:offset + n + 2].astype(np.float64) + offset += n + 2 + else: + y_cb = np.array([0.0]) + if p > 0: + z_cb = grid_arr[offset:offset + p + 2].astype(np.float64) + else: + z_cb = np.array([0.0]) + + # Records 3..N: variables + variables: Dict[str, np.ndarray] = {} + data_size = (m + 1) * max(n + 1, 1) * max(p + 1, 1) + + for _ in range(dbvars): + var_raw = _read_record_endian(f, endian) + varname = var_raw[:NAME_LEN].decode('ascii', errors='replace').strip() + + if var_filter is not None and varname != var_filter: + continue + + # Auto-detect variable data precision from record size + data_bytes = len(var_raw) - NAME_LEN + 
if data_bytes == data_size * 8: + var_dtype = np.dtype(f'{endian}f8') + elif data_bytes == data_size * 4: + var_dtype = np.dtype(f'{endian}f4') + else: + var_bpv = data_bytes / data_size if data_size else 0 + raise ValueError( + f"Cannot determine variable precision for '{varname}': " + f"{data_bytes} bytes for {data_size} values ({var_bpv:.1f} bytes/value)" + ) + + data = np.frombuffer(var_raw[NAME_LEN:], dtype=var_dtype).astype(np.float64) + + # Reshape for multi-dimensional data (Fortran column-major order) + if p > 0: + data = data.reshape((m + 1, n + 1, p + 1), order='F') + elif n > 0: + data = data.reshape((m + 1, n + 1), order='F') + + variables[varname] = data + + return ProcessorData(m=m, n=n, p=p, x_cb=x_cb, y_cb=y_cb, z_cb=z_cb, variables=variables) + + +def discover_format(case_dir: str) -> str: + """Detect whether case has binary or silo_hdf5 output.""" + if os.path.isdir(os.path.join(case_dir, 'binary')): + return 'binary' + if os.path.isdir(os.path.join(case_dir, 'silo_hdf5')): + return 'silo' + raise FileNotFoundError( + f"No 'binary/' or 'silo_hdf5/' directory found in {case_dir}. " + "Run post_process with format=1 (Silo) or format=2 (binary) first." + ) + + +def discover_timesteps(case_dir: str, fmt: str) -> List[int]: + """Return sorted list of available timesteps.""" + if fmt not in ('binary', 'silo'): + raise ValueError(f"Unknown format '{fmt}'. 
Supported: 'binary', 'silo'.") + + if fmt == 'binary': + # Check root/ first (1D), then p0/ + root_dir = os.path.join(case_dir, 'binary', 'root') + if os.path.isdir(root_dir): + steps = set() + for fname in os.listdir(root_dir): + if fname.endswith('.dat'): + try: + steps.add(int(fname[:-4])) + except ValueError: + pass + if steps: + return sorted(steps) + + # Multi-dimensional: look in p0/ + p0_dir = os.path.join(case_dir, 'binary', 'p0') + if os.path.isdir(p0_dir): + steps = set() + for fname in os.listdir(p0_dir): + if fname.endswith('.dat'): + try: + steps.add(int(fname[:-4])) + except ValueError: + pass + return sorted(steps) + + import warnings # pylint: disable=import-outside-toplevel + warnings.warn( + f"No timestep directories found: checked '{root_dir}' and '{p0_dir}'", + stacklevel=2, + ) + + elif fmt == 'silo': + p0_dir = os.path.join(case_dir, 'silo_hdf5', 'p0') + if os.path.isdir(p0_dir): + steps = set() + for fname in os.listdir(p0_dir): + if fname.endswith('.silo') and not fname.startswith('collection'): + try: + steps.add(int(fname[:-5])) + except ValueError: + pass + return sorted(steps) + + import warnings # pylint: disable=import-outside-toplevel + warnings.warn( + f"No timestep directory found: checked '{p0_dir}'", + stacklevel=2, + ) + + return [] + + +def _discover_processors(case_dir: str, fmt: str) -> List[int]: + """Return sorted list of processor ranks.""" + if fmt == 'binary': + base = os.path.join(case_dir, 'binary') + else: + base = os.path.join(case_dir, 'silo_hdf5') + + ranks = [] + if not os.path.isdir(base): + return ranks + for entry in os.listdir(base): + if entry.startswith('p') and entry[1:].isdigit(): + ranks.append(int(entry[1:])) + return sorted(ranks) + + +def _is_1d(case_dir: str) -> bool: + """Check if the output is 1D (has binary/root/ directory).""" + return os.path.isdir(os.path.join(case_dir, 'binary', 'root')) + + +def assemble_from_proc_data( # pylint: disable=too-many-locals + proc_data: List[Tuple[int, 
ProcessorData]], +) -> AssembledData: + """ + Assemble multi-processor data into a single global grid. + + Shared helper used by both binary and silo assembly paths. + Handles ghost/buffer cell overlap between processors by using + per-cell coordinate lookup (np.unique + np.searchsorted + np.ix_). + """ + if not proc_data: + raise ValueError("No processor data to assemble") + + # Single processor — fast path + if len(proc_data) == 1: + _, pd = proc_data[0] + x_cc = (pd.x_cb[:-1] + pd.x_cb[1:]) / 2.0 + y_cc = (pd.y_cb[:-1] + pd.y_cb[1:]) / 2.0 if pd.n > 0 else np.array([0.0]) + z_cc = (pd.z_cb[:-1] + pd.z_cb[1:]) / 2.0 if pd.p > 0 else np.array([0.0]) + ndim = 1 + (pd.n > 0) + (pd.p > 0) + return AssembledData( + ndim=ndim, x_cc=x_cc, y_cc=y_cc, z_cc=z_cc, + variables=pd.variables, + ) + + # Multi-processor assembly + sample = proc_data[0][1] + ndim = 1 + (sample.n > 0) + (sample.p > 0) + + # Compute cell centers for each processor + proc_centers = [] + for rank, pd in proc_data: + x_cc = (pd.x_cb[:-1] + pd.x_cb[1:]) / 2.0 + y_cc = (pd.y_cb[:-1] + pd.y_cb[1:]) / 2.0 if pd.n > 0 else np.array([0.0]) + z_cc = (pd.z_cb[:-1] + pd.z_cb[1:]) / 2.0 if pd.p > 0 else np.array([0.0]) + proc_centers.append((rank, pd, x_cc, y_cc, z_cc)) + + # Build unique sorted global coordinate arrays (handles ghost overlap) + all_x = np.concatenate([xc for _, _, xc, _, _ in proc_centers]) + global_x = np.unique(np.round(all_x, 12)) + if ndim >= 2: + all_y = np.concatenate([yc for _, _, _, yc, _ in proc_centers]) + global_y = np.unique(np.round(all_y, 12)) + else: + global_y = np.array([0.0]) + if ndim >= 3: + all_z = np.concatenate([zc for _, _, _, _, zc in proc_centers]) + global_z = np.unique(np.round(all_z, 12)) + else: + global_z = np.array([0.0]) + + varnames = sorted({vn for _, pd in proc_data for vn in pd.variables}) + nx, ny, nz = len(global_x), len(global_y), len(global_z) + + global_vars: Dict[str, np.ndarray] = {} + for vn in varnames: + if ndim == 3: + global_vars[vn] = 
np.zeros((nx, ny, nz)) + elif ndim == 2: + global_vars[vn] = np.zeros((nx, ny)) + else: + global_vars[vn] = np.zeros(nx) + + # Place each processor's data using per-cell coordinate lookup + for _rank, pd, x_cc, y_cc, z_cc in proc_centers: + xi = np.clip(np.searchsorted(global_x, np.round(x_cc, 12)), 0, nx - 1) + yi = np.clip(np.searchsorted(global_y, np.round(y_cc, 12)), 0, ny - 1) if ndim >= 2 else np.array([0]) + zi = np.clip(np.searchsorted(global_z, np.round(z_cc, 12)), 0, nz - 1) if ndim >= 3 else np.array([0]) + + for vn, data in pd.variables.items(): + if vn not in global_vars: + continue + if ndim == 3: + global_vars[vn][np.ix_(xi, yi, zi)] = data + elif ndim == 2: + global_vars[vn][np.ix_(xi, yi)] = data + else: + global_vars[vn][xi] = data + + return AssembledData( + ndim=ndim, x_cc=global_x, y_cc=global_y, z_cc=global_z, + variables=global_vars, + ) + + +def assemble(case_dir: str, step: int, fmt: str = 'binary', # pylint: disable=too-many-locals + var: Optional[str] = None) -> AssembledData: + """ + Read and assemble multi-processor data for a given timestep. + + For 1D, reads the root file directly. + For 2D/3D, reads all processor files and assembles into global arrays. + """ + if fmt != 'binary': + raise ValueError(f"Format '{fmt}' not supported by binary reader. 
Use silo_reader.") + + # 1D case: read root file directly + if _is_1d(case_dir): + root_path = os.path.join(case_dir, 'binary', 'root', f'{step}.dat') + if not os.path.isfile(root_path): + raise FileNotFoundError(f"Root file not found: {root_path}") + pdata = read_binary_file(root_path, var_filter=var) + x_cc = (pdata.x_cb[:-1] + pdata.x_cb[1:]) / 2.0 + return AssembledData( + ndim=1, x_cc=x_cc, + y_cc=np.array([0.0]), z_cc=np.array([0.0]), + variables=pdata.variables, + ) + + # Multi-dimensional: read all processor files + ranks = _discover_processors(case_dir, fmt) + if not ranks: + raise FileNotFoundError(f"No processor directories found in {case_dir}/binary/") + + proc_data: List[Tuple[int, ProcessorData]] = [] + for rank in ranks: + fpath = os.path.join(case_dir, 'binary', f'p{rank}', f'{step}.dat') + if not os.path.isfile(fpath): + import warnings # pylint: disable=import-outside-toplevel + warnings.warn(f"Processor file not found, skipping: {fpath}", stacklevel=2) + continue + pdata = read_binary_file(fpath, var_filter=var) + if pdata.m == 0 and pdata.n == 0 and pdata.p == 0: + import warnings # pylint: disable=import-outside-toplevel + warnings.warn(f"Processor p{rank} has zero dimensions, skipping", stacklevel=2) + continue + proc_data.append((rank, pdata)) + + if not proc_data: + raise FileNotFoundError(f"No valid processor data found for step {step}") + + return assemble_from_proc_data(proc_data) diff --git a/toolchain/mfc/viz/renderer.py b/toolchain/mfc/viz/renderer.py new file mode 100644 index 0000000000..fa7c41c6e7 --- /dev/null +++ b/toolchain/mfc/viz/renderer.py @@ -0,0 +1,277 @@ +""" +Image and video rendering for MFC visualization. + +Produces PNG images (1D line plots, 2D colormaps) and MP4 videos +from assembled MFC data. Uses matplotlib with the Agg backend +for headless rendering. 
+""" + +import os + +import numpy as np + +import matplotlib +matplotlib.use('Agg') +import matplotlib.pyplot as plt # pylint: disable=wrong-import-position +from matplotlib.colors import LogNorm # pylint: disable=wrong-import-position + + +def render_1d(x_cc, data, varname, step, output, **opts): # pylint: disable=too-many-arguments,too-many-positional-arguments + """Render a 1D line plot and save as PNG.""" + fig, ax = plt.subplots(figsize=opts.get('figsize', (10, 6))) + ax.plot(x_cc, data, linewidth=1.5) + ax.set_xlabel('x') + ax.set_ylabel(varname) + ax.set_title(f'{varname} (step {step})') + + vmin = opts.get('vmin') + vmax = opts.get('vmax') + if vmin is not None or vmax is not None: + ax.set_ylim(vmin, vmax) + + fig.tight_layout() + fig.savefig(output, dpi=opts.get('dpi', 150)) + plt.close(fig) + + +def render_2d(x_cc, y_cc, data, varname, step, output, **opts): # pylint: disable=too-many-arguments,too-many-positional-arguments,too-many-locals + """Render a 2D colormap via pcolormesh and save as PNG.""" + fig, ax = plt.subplots(figsize=opts.get('figsize', (10, 8))) + + cmap = opts.get('cmap', 'viridis') + vmin = opts.get('vmin') + vmax = opts.get('vmax') + log_scale = opts.get('log_scale', False) + + norm = None + if log_scale: + lo = vmin if vmin is not None else np.nanmin(data[data > 0]) if np.any(data > 0) else 1e-10 + hi = vmax if vmax is not None else np.nanmax(data) + if not np.isfinite(hi) or hi <= 0: + hi = 1.0 + if not np.isfinite(lo) or lo <= 0 or lo >= hi: + lo = hi * 1e-10 + norm = LogNorm(vmin=lo, vmax=hi) + vmin = None + vmax = None + + # data shape is (nx, ny), pcolormesh expects (ny, nx) when using x_cc, y_cc + pcm = ax.pcolormesh(x_cc, y_cc, data.T, cmap=cmap, vmin=vmin, vmax=vmax, + norm=norm, shading='auto') + fig.colorbar(pcm, ax=ax, label=varname) + ax.set_xlabel('x') + ax.set_ylabel('y') + ax.set_title(f'{varname} (step {step})') + ax.set_aspect('equal', adjustable='box') + + fig.tight_layout() + fig.savefig(output, dpi=opts.get('dpi', 
150)) + plt.close(fig) + + +def render_3d_slice(assembled, varname, step, output, slice_axis='z', # pylint: disable=too-many-arguments,too-many-positional-arguments,too-many-locals,too-many-statements,too-many-branches + slice_index=None, slice_value=None, **opts): + """Extract a 2D slice from 3D data and render as a colormap.""" + data_3d = assembled.variables[varname] + + axis_map = {'x': 0, 'y': 1, 'z': 2} + if slice_axis not in axis_map: + raise ValueError( + f"Invalid slice_axis '{slice_axis}'. Must be one of: 'x', 'y', 'z'." + ) + axis_idx = axis_map[slice_axis] + + coords = [assembled.x_cc, assembled.y_cc, assembled.z_cc] + coord_along = coords[axis_idx] + + if slice_index is not None: + idx = slice_index + elif slice_value is not None: + idx = int(np.argmin(np.abs(coord_along - slice_value))) + else: + idx = len(coord_along) // 2 + + idx = max(0, min(idx, len(coord_along) - 1)) + + if axis_idx == 0: + sliced = data_3d[idx, :, :] + x_plot, y_plot = assembled.y_cc, assembled.z_cc + xlabel, ylabel = 'y', 'z' + elif axis_idx == 1: + sliced = data_3d[:, idx, :] + x_plot, y_plot = assembled.x_cc, assembled.z_cc + xlabel, ylabel = 'x', 'z' + else: + sliced = data_3d[:, :, idx] + x_plot, y_plot = assembled.x_cc, assembled.y_cc + xlabel, ylabel = 'x', 'y' + + fig, ax = plt.subplots(figsize=opts.get('figsize', (10, 8))) + + cmap = opts.get('cmap', 'viridis') + vmin = opts.get('vmin') + vmax = opts.get('vmax') + log_scale = opts.get('log_scale', False) + + norm = None + if log_scale: + lo = vmin if vmin is not None else np.nanmin(sliced[sliced > 0]) if np.any(sliced > 0) else 1e-10 + hi = vmax if vmax is not None else np.nanmax(sliced) + if not np.isfinite(hi) or hi <= 0: + hi = 1.0 + if not np.isfinite(lo) or lo <= 0 or lo >= hi: + lo = hi * 1e-10 + norm = LogNorm(vmin=lo, vmax=hi) + vmin = None + vmax = None + + # sliced shape depends on axis: need to transpose appropriately + pcm = ax.pcolormesh(x_plot, y_plot, sliced.T, cmap=cmap, vmin=vmin, + vmax=vmax, 
norm=norm, shading='auto') + fig.colorbar(pcm, ax=ax, label=varname) + ax.set_xlabel(xlabel) + ax.set_ylabel(ylabel) + slice_coord = coord_along[idx] + ax.set_title(f'{varname} (step {step}, {slice_axis}={slice_coord:.4g})') + ax.set_aspect('equal', adjustable='box') + + fig.tight_layout() + fig.savefig(output, dpi=opts.get('dpi', 150)) + plt.close(fig) + + +def render_mp4(varname, steps, output, fps=10, # pylint: disable=too-many-arguments,too-many-positional-arguments,too-many-locals,too-many-statements + read_func=None, **opts): + """ + Generate an MP4 video by iterating over timesteps. + + Args: + varname: Variable name to plot. + steps: List of timestep integers. + output: Output MP4 file path. + fps: Frames per second. + read_func: Callable(step) -> AssembledData for loading each frame. + **opts: Rendering options (cmap, vmin, vmax, dpi, log_scale, figsize, + slice_axis, slice_index, slice_value). + + Returns: + True if the MP4 was successfully written, False on failure + (e.g., missing imageio dependency or encoding error). 
+ """ + if read_func is None: + raise ValueError("read_func must be provided for MP4 rendering") + + if not steps: + raise ValueError("No timesteps provided for MP4 generation") + + opts = dict(opts) # avoid mutating the caller's dict + + # Pre-compute vmin/vmax from first, middle, and last frames if not provided + auto_vmin = opts.get('vmin') + auto_vmax = opts.get('vmax') + + if auto_vmin is None or auto_vmax is None: + sample_steps = [steps[0]] + if len(steps) > 1: + sample_steps.append(steps[-1]) + if len(steps) > 2: + sample_steps.append(steps[len(steps) // 2]) + + all_mins, all_maxs = [], [] + log_scale = opts.get('log_scale', False) + for s in sample_steps: + ad = read_func(s) + d = ad.variables.get(varname) + if d is not None: + if log_scale: + pos = d[d > 0] + if pos.size > 0: + all_mins.append(np.nanmin(pos)) + all_maxs.append(np.nanmax(pos)) + else: + all_mins.append(np.nanmin(d)) + all_maxs.append(np.nanmax(d)) + + if auto_vmin is None and all_mins: + opts['vmin'] = min(all_mins) + if auto_vmax is None and all_maxs: + opts['vmax'] = max(all_maxs) + + # Write frames as images to a temp directory next to the output file + output_dir = os.path.dirname(os.path.abspath(output)) + viz_dir = os.path.join(output_dir, '_frames') + # Clean stale frames from any interrupted previous run + if os.path.isdir(viz_dir): + for stale in os.listdir(viz_dir): + if stale.endswith('.png'): + try: + os.remove(os.path.join(viz_dir, stale)) + except OSError: + pass + os.makedirs(viz_dir, exist_ok=True) + + try: + from tqdm import tqdm # pylint: disable=import-outside-toplevel + step_iter = tqdm(steps, desc='Rendering frames') + except ImportError: + step_iter = steps + + for i, step in enumerate(step_iter): + assembled = read_func(step) + var_data = assembled.variables.get(varname) + if var_data is None: + continue + frame_path = os.path.join(viz_dir, f'{i:06d}.png') + + if assembled.ndim == 1: + render_1d(assembled.x_cc, var_data, + varname, step, frame_path, **opts) + elif 
assembled.ndim == 2: + render_2d(assembled.x_cc, assembled.y_cc, + var_data, + varname, step, frame_path, **opts) + elif assembled.ndim == 3: + render_3d_slice(assembled, varname, step, frame_path, **opts) + else: + raise ValueError( + f"Unsupported dimensionality ndim={assembled.ndim} for step {step}. " + "Expected 1, 2, or 3." + ) + + # Combine frames into MP4 using imageio + imageio-ffmpeg (bundled ffmpeg) + frame_files = sorted(f for f in os.listdir(viz_dir) if f.endswith('.png')) + + success = False + try: + import imageio # pylint: disable=import-outside-toplevel + except ImportError: + print("imageio is not installed. Install it with: pip install imageio imageio-ffmpeg") + print(f"Frames saved to {viz_dir}/") + return False + + writer = None + try: + writer = imageio.get_writer(output, fps=fps, codec='libx264', + pixelformat='yuv420p', macro_block_size=2) + for fname in frame_files: + writer.append_data(imageio.imread(os.path.join(viz_dir, fname))) + success = True + except (OSError, ValueError, RuntimeError) as exc: + print(f"imageio MP4 write failed: {exc}") + finally: + if writer is not None: + writer.close() + + # Clean up only the frames we created + if success: + for fname in frame_files: + fpath = os.path.join(viz_dir, fname) + try: + os.remove(fpath) + except OSError: + pass + try: + os.rmdir(viz_dir) + except OSError: + pass # directory not empty (pre-existing files) + return success diff --git a/toolchain/mfc/viz/silo_reader.py b/toolchain/mfc/viz/silo_reader.py new file mode 100644 index 0000000000..7292c8630d --- /dev/null +++ b/toolchain/mfc/viz/silo_reader.py @@ -0,0 +1,172 @@ +""" +Silo-HDF5 reader for MFC post-processed output. + +Silo files produced by MFC are valid HDF5 underneath. Each Silo object +is stored as an HDF5 Named Datatype whose ``silo`` compound attribute +carries the metadata (mesh name, data-array path, dimensions, etc.). +Actual data lives in numbered datasets under the ``.silo/`` group. 
+ +This reader uses h5py to navigate that structure. + +Requires: h5py (optional dependency). +""" + +import os +from typing import Dict, List, Optional, Tuple + +import numpy as np + +from .reader import AssembledData, ProcessorData, assemble_from_proc_data + +try: + import h5py + + HAS_H5PY = True +except ImportError: + HAS_H5PY = False + +# Silo type constants (from silo.h) +_DB_QUADMESH = 130 +_DB_QUADVAR = 501 + + +def _check_h5py(): + if not HAS_H5PY: + raise ImportError( + "h5py is required to read Silo-HDF5 files.\n" + "Install it with: pip install h5py\n" + "Or re-run post_process with format=2 to produce binary output." + ) + + + +def _resolve_path(h5file, path_bytes): + """Resolve a silo internal path (e.g. b'/.silo/#000003') and return its data as a numpy array.""" + path = path_bytes.decode() if isinstance(path_bytes, bytes) else str(path_bytes) + return np.array(h5file[path]) + + +def read_silo_file( # pylint: disable=too-many-locals + path: str, var_filter: Optional[str] = None +) -> ProcessorData: + """ + Read a single Silo-HDF5 file produced by MFC post_process. + + Args: + path: Path to the ``.silo`` file. + var_filter: If given, only load this variable (case-sensitive). + + Returns: + ProcessorData with grid coordinates and variable arrays. 
+ """ + _check_h5py() + + with h5py.File(path, "r") as f: + # --- locate the mesh ------------------------------------------------ + mesh_name = None + mesh_attr = None + for key, obj in f.items(): + if key in ("..", ".silo"): + continue + if not isinstance(obj, h5py.Datatype): + continue + silo_type = obj.attrs.get("silo_type") + if silo_type is not None and int(silo_type) == _DB_QUADMESH: + if "silo" not in obj.attrs: + continue + mesh_name = key + mesh_attr = obj.attrs["silo"] + break + + if mesh_attr is None: + raise ValueError(f"No rectilinear mesh found in {path}") + + ndims = int(mesh_attr["ndims"]) + coord_paths = [] + for i in range(ndims): + coord_paths.append(mesh_attr[f"coord{i}"]) + + coords = [_resolve_path(f, cp) for cp in coord_paths] + + x_cb = coords[0] + y_cb = coords[1] if ndims >= 2 else np.array([0.0]) + z_cb = coords[2] if ndims >= 3 else np.array([0.0]) + + # Grid dimensions: node counts minus 1 give cell counts + m = len(x_cb) - 1 + n = (len(y_cb) - 1) if ndims >= 2 else 0 + p = (len(z_cb) - 1) if ndims >= 3 else 0 + + # --- locate variables ------------------------------------------------ + variables: Dict[str, np.ndarray] = {} + for key, obj in f.items(): + if key in ("..", ".silo", mesh_name): + continue + if not isinstance(obj, h5py.Datatype): + continue + silo_type = obj.attrs.get("silo_type") + if silo_type is None or int(silo_type) != _DB_QUADVAR: + continue + + # Apply variable filter + if var_filter is not None and key != var_filter: + continue + + if "silo" not in obj.attrs: + continue + attr = obj.attrs["silo"] + data_path = attr["value0"] + data = _resolve_path(f, data_path).astype(np.float64) + + # MFC's DBPUTQV1 passes the Fortran column-major array as a + # flat buffer. HDF5 stores it row-major. Reinterpret the + # bytes in Fortran order so data[i,j,k] = value at (x_i,y_j,z_k), + # matching the binary reader convention. + # Assumption: Silo/HDF5 preserves the Fortran dimension ordering + # (nx, ny, nz) as the dataset shape. 
If a future Silo version + # reverses the shape, this reshape would silently transpose data. + if data.ndim >= 2: + data = np.ascontiguousarray(data).ravel().reshape(data.shape, order='F') + variables[key] = data + + return ProcessorData( + m=m, n=n, p=p, x_cb=x_cb, y_cb=y_cb, z_cb=z_cb, variables=variables + ) + + +def assemble_silo( + case_dir: str, + step: int, + var: Optional[str] = None, +) -> AssembledData: + """ + Read and assemble multi-processor Silo-HDF5 data for a given timestep. + """ + _check_h5py() + + base = os.path.join(case_dir, "silo_hdf5") + if not os.path.isdir(base): + raise FileNotFoundError(f"Silo-HDF5 directory not found: {base}") + ranks: List[int] = [] + for entry in os.listdir(base): + if entry.startswith("p") and entry[1:].isdigit(): + ranks.append(int(entry[1:])) + ranks.sort() + + if not ranks: + raise FileNotFoundError(f"No processor directories in {base}") + + proc_data: List[Tuple[int, ProcessorData]] = [] + for rank in ranks: + silo_file = os.path.join(base, f"p{rank}", f"{step}.silo") + if not os.path.isfile(silo_file): + import warnings # pylint: disable=import-outside-toplevel + warnings.warn(f"Processor file not found, skipping: {silo_file}", stacklevel=2) + continue + pdata = read_silo_file(silo_file, var_filter=var) + proc_data.append((rank, pdata)) + + if not proc_data: + raise FileNotFoundError(f"No Silo data found for step {step}") + + return assemble_from_proc_data(proc_data) diff --git a/toolchain/mfc/viz/viz.py b/toolchain/mfc/viz/viz.py new file mode 100644 index 0000000000..a2111ffb98 --- /dev/null +++ b/toolchain/mfc/viz/viz.py @@ -0,0 +1,266 @@ +""" +Main entry point for the ``./mfc.sh viz`` command. + +Dispatches to reader + renderer based on CLI arguments. +""" + +import os +import sys + +from mfc.state import ARG +from mfc.printer import cons + + +def _parse_steps(step_arg, available_steps): + """ + Parse the --step argument into a list of timestep integers. 
+ + Formats: + - Single int: "1000" + - Range: "0:10000:500" (start:end:stride) + - "all": all available timesteps + """ + if step_arg is None or step_arg == 'all': + return available_steps + + try: + if ':' in str(step_arg): + parts = str(step_arg).split(':') + start = int(parts[0]) + end = int(parts[1]) + stride = int(parts[2]) if len(parts) > 2 else 1 + requested = list(range(start, end + 1, stride)) + return [s for s in requested if s in set(available_steps)] + + single = int(step_arg) + except ValueError: + cons.print(f"[bold red]Error:[/bold red] Invalid --step value '{step_arg}'. " + "Expected an integer, a range (start:end:stride), or 'all'.") + sys.exit(1) + + if available_steps and single not in set(available_steps): + return [] + return [single] + + +def viz(): # pylint: disable=too-many-locals,too-many-statements,too-many-branches + """Main viz command dispatcher.""" + from .reader import discover_format, discover_timesteps, assemble # pylint: disable=import-outside-toplevel + from .renderer import render_1d, render_2d, render_3d_slice, render_mp4 # pylint: disable=import-outside-toplevel + + case_dir = ARG('input') + if case_dir is None: + cons.print("[bold red]Error:[/bold red] Please specify a case directory.") + sys.exit(1) + + # Resolve case directory + if not os.path.isdir(case_dir): + cons.print(f"[bold red]Error:[/bold red] Directory not found: {case_dir}") + sys.exit(1) + + # Auto-detect or use specified format + fmt_arg = ARG('format') + if fmt_arg: + if fmt_arg not in ('binary', 'silo'): + cons.print(f"[bold red]Error:[/bold red] Unknown format '{fmt_arg}'. 
" + "Supported formats: 'binary', 'silo'.") + sys.exit(1) + fmt = fmt_arg + else: + try: + fmt = discover_format(case_dir) + except FileNotFoundError as exc: + cons.print(f"[bold red]Error:[/bold red] {exc}") + sys.exit(1) + + cons.print(f"[bold]Format:[/bold] {fmt}") + + # Handle --list-steps + if ARG('list_steps'): + steps = discover_timesteps(case_dir, fmt) + if not steps: + cons.print("[yellow]No timesteps found.[/yellow]") + else: + cons.print(f"[bold]Available timesteps ({len(steps)}):[/bold]") + # Print in columns + line = "" + for i, s in enumerate(steps): + line += f"{s:>8}" + if (i + 1) % 10 == 0: + cons.print(line) + line = "" + if line: + cons.print(line) + return + + # Handle --list-vars (requires --step) + if ARG('list_vars'): + step_arg = ARG('step') + steps = discover_timesteps(case_dir, fmt) + if not steps: + cons.print("[bold red]Error:[/bold red] No timesteps found.") + sys.exit(1) + + if step_arg is None or step_arg == 'all': + step = steps[0] + cons.print(f"[dim]Using first available timestep: {step}[/dim]") + else: + try: + step = int(step_arg) + except ValueError: + cons.print(f"[bold red]Error:[/bold red] Invalid --step value '{step_arg}'. " + "Expected an integer or 'all'.") + sys.exit(1) + + if fmt == 'silo': + from .silo_reader import assemble_silo # pylint: disable=import-outside-toplevel + assembled = assemble_silo(case_dir, step) + else: + assembled = assemble(case_dir, step, fmt) + + varnames = sorted(assembled.variables.keys()) + cons.print(f"[bold]Available variables ({len(varnames)}):[/bold]") + for vn in varnames: + data = assembled.variables[vn] + cons.print(f" {vn:<20s} min={data.min():.6g} max={data.max():.6g}") + return + + # For rendering, --var and --step are required + varname = ARG('var') + step_arg = ARG('step') + + if varname is None: + cons.print("[bold red]Error:[/bold red] --var is required for rendering. 
" + "Use --list-vars to see available variables.") + sys.exit(1) + + if step_arg is None: + cons.print("[bold red]Error:[/bold red] --step is required for rendering. " + "Use --list-steps to see available timesteps, or pass --step all.") + sys.exit(1) + + steps = discover_timesteps(case_dir, fmt) + if not steps: + cons.print("[bold red]Error:[/bold red] No timesteps found.") + sys.exit(1) + + requested_steps = _parse_steps(step_arg, steps) + if not requested_steps: + cons.print(f"[bold red]Error:[/bold red] No matching timesteps for --step {step_arg}") + if steps: + cons.print(f"[bold]Available range:[/bold] {steps[0]} to {steps[-1]} " + f"({len(steps)} timesteps)") + sys.exit(1) + + # Collect rendering options + render_opts = {} + cmap = ARG('cmap') + if cmap: + render_opts['cmap'] = cmap + vmin = ARG('vmin') + if vmin is not None: + render_opts['vmin'] = float(vmin) + vmax = ARG('vmax') + if vmax is not None: + render_opts['vmax'] = float(vmax) + dpi = ARG('dpi') + if dpi is not None: + render_opts['dpi'] = int(dpi) + if ARG('log_scale'): + render_opts['log_scale'] = True + + slice_axis = ARG('slice_axis') + slice_index = ARG('slice_index') + slice_value = ARG('slice_value') + if slice_axis: + render_opts['slice_axis'] = slice_axis + if slice_index is not None: + render_opts['slice_index'] = int(slice_index) + if slice_value is not None: + render_opts['slice_value'] = float(slice_value) + + # Choose read function based on format + def read_step(step): + if fmt == 'silo': + from .silo_reader import assemble_silo # pylint: disable=import-outside-toplevel + return assemble_silo(case_dir, step, var=varname) + return assemble(case_dir, step, fmt, var=varname) + + # Validate variable name by reading the first timestep (without var filter), + # then reuse it for the first rendered frame to avoid double I/O. 
+ first_assembled = read_step(requested_steps[0]) + if varname not in first_assembled.variables: + # Re-read without filter to show all available variable names + if fmt == 'silo': + from .silo_reader import assemble_silo # pylint: disable=import-outside-toplevel + all_vars_assembled = assemble_silo(case_dir, requested_steps[0]) + else: + all_vars_assembled = assemble(case_dir, requested_steps[0], fmt) + avail = sorted(all_vars_assembled.variables.keys()) + cons.print(f"[bold red]Error:[/bold red] Variable '{varname}' not found.") + cons.print(f"[bold]Available variables:[/bold] {', '.join(avail)}") + sys.exit(1) + + # Create output directory + output_base = ARG('output') + if output_base is None: + output_base = os.path.join(case_dir, 'viz') + os.makedirs(output_base, exist_ok=True) + + # MP4 mode + if ARG('mp4'): + fps = ARG('fps') or 10 + mp4_path = os.path.join(output_base, f'{varname}.mp4') + cons.print(f"[bold]Generating MP4:[/bold] {mp4_path} ({len(requested_steps)} frames)") + success = render_mp4(varname, requested_steps, mp4_path, + fps=int(fps), read_func=read_step, **render_opts) + if success: + cons.print(f"[bold green]Done:[/bold green] {mp4_path}") + else: + cons.print(f"[bold red]Error:[/bold red] Failed to generate {mp4_path}. 
" + "Ensure imageio and imageio-ffmpeg are installed.") + sys.exit(1) + return + + # Single or multiple PNG frames + try: + from tqdm import tqdm # pylint: disable=import-outside-toplevel + step_iter = tqdm(requested_steps, desc='Rendering') + except ImportError: + step_iter = requested_steps + + failures = [] + for step in step_iter: + try: + assembled = first_assembled if step == requested_steps[0] else read_step(step) + except (FileNotFoundError, EOFError, ValueError) as exc: + cons.print(f"[yellow]Warning:[/yellow] Skipping step {step}: {exc}") + failures.append(step) + continue + + output_path = os.path.join(output_base, f'{varname}_{step}.png') + + if assembled.ndim == 1: + render_1d(assembled.x_cc, assembled.variables[varname], + varname, step, output_path, **render_opts) + elif assembled.ndim == 2: + render_2d(assembled.x_cc, assembled.y_cc, + assembled.variables[varname], + varname, step, output_path, **render_opts) + elif assembled.ndim == 3: + render_3d_slice(assembled, varname, step, output_path, **render_opts) + else: + cons.print(f"[yellow]Warning:[/yellow] Unsupported ndim={assembled.ndim} " + f"for step {step}, skipping.") + failures.append(step) + continue + + if len(requested_steps) == 1: + cons.print(f"[bold green]Saved:[/bold green] {output_path}") + + rendered = len(requested_steps) - len(failures) + if failures: + cons.print(f"[yellow]Warning:[/yellow] {len(failures)} step(s) failed: " + f"{failures[:10]}{'...' 
if len(failures) > 10 else ''}") + if rendered > 1: + cons.print(f"[bold green]Saved {rendered} frames to:[/bold green] {output_base}/") diff --git a/toolchain/mfc/viz.py b/toolchain/mfc/viz_legacy.py similarity index 100% rename from toolchain/mfc/viz.py rename to toolchain/mfc/viz_legacy.py diff --git a/toolchain/pyproject.toml b/toolchain/pyproject.toml index 53e2140290..1e78da1c24 100644 --- a/toolchain/pyproject.toml +++ b/toolchain/pyproject.toml @@ -37,6 +37,10 @@ dependencies = [ "seaborn", "matplotlib", + # Visualization (video rendering) + "imageio>=2.33", + "imageio-ffmpeg>=0.5.0", + # Chemistry "cantera>=3.1.0", #"pyrometheus == 1.0.5",