Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
885 changes: 885 additions & 0 deletions crates/solverang/src/assembly/constraints.rs

Large diffs are not rendered by default.

524 changes: 524 additions & 0 deletions crates/solverang/src/assembly/entities.rs

Large diffs are not rendered by default.

14 changes: 14 additions & 0 deletions crates/solverang/src/assembly/mod.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
//! Assembly entities and constraints for rigid body systems.
//!
//! Provides the building blocks for modeling assemblies of rigid bodies
//! joined by geometric constraints:
//!
//! - **Entities**: [`RigidBody`] (position + quaternion orientation)
//! - **Internal constraints**: [`UnitQuaternion`] (normalization)
//! - **Assembly constraints**: [`Mate`], [`CoaxialAssembly`], [`Insert`], [`Gear`]

pub mod constraints;
pub mod entities;

pub use constraints::{CoaxialAssembly, Gear, Insert, Mate};
pub use entities::{RigidBody, UnitQuaternion};
68 changes: 68 additions & 0 deletions crates/solverang/src/constraint/mod.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,68 @@
//! Constraint trait for the constraint system.
//!
//! A constraint produces residuals (equations that should be zero when satisfied)
//! and Jacobians (partial derivatives of residuals with respect to parameters).
//! The solver uses these to iteratively find parameter values that satisfy all
//! constraints simultaneously.
//!
//! # Key Design Decisions
//!
//! - **Jacobian returns `(row, ParamId, value)`, not `(row, col, value)`.** The
//! constraint doesn't need to know the column ordering. The solver's
//! [`SolverMapping`](crate::param::SolverMapping) handles it.
//!
//! - **Constraints read from [`ParamStore`](crate::param::ParamStore)**, not from
//! point arrays. This allows constraints over any combination of parameters.
//!
//! - **No geometry types** — the solver never sees `Point2D`, `Circle`, etc.

use crate::id::{ConstraintId, EntityId, ParamId};
use crate::param::ParamStore;

/// A constraint: a set of equations over parameters.
///
/// Constraints produce residuals (which should be zero when satisfied) and
/// Jacobians (partial derivatives of residuals w.r.t. parameters). The solver
/// uses these to iteratively find parameter values that satisfy all constraints.
///
/// Implementors must be `Send + Sync` so constraints can be shared across
/// threads by the solver.
///
/// # What's NOT on this trait
///
/// - No `<const D: usize>` — constraints work in any dimension.
/// - No `points: &[Point<D>]` parameter — constraints read from `ParamStore`.
/// - No geometry types — the solver never sees `Point2D`, `Circle`, etc.
/// - Jacobian returns `ParamId`, not column indices — the system does the mapping.
pub trait Constraint: Send + Sync {
    /// Unique identifier for this constraint.
    fn id(&self) -> ConstraintId;

    /// Human-readable name for diagnostics and debugging.
    fn name(&self) -> &str;

    /// Which entities this constraint binds.
    fn entity_ids(&self) -> &[EntityId];

    /// Which parameters this constraint depends on (for graph building).
    ///
    /// NOTE(review): presumably this list should cover every `ParamId` that can
    /// appear in [`jacobian`](Constraint::jacobian) — confirm against the
    /// decomposition code that builds the dependency graph.
    fn param_ids(&self) -> &[ParamId];

    /// Number of scalar equations this constraint produces.
    ///
    /// The vector returned by [`residuals`](Constraint::residuals) should have
    /// exactly this length, and every `equation_row` emitted by
    /// [`jacobian`](Constraint::jacobian) should be less than this count.
    fn equation_count(&self) -> usize;

    /// Evaluate residuals. Each element should be zero when satisfied.
    fn residuals(&self, store: &ParamStore) -> Vec<f64>;

    /// Sparse Jacobian: `(equation_row, param_id, partial_derivative)`.
    ///
    /// Only non-zero entries need to be returned. The system maps `ParamId` to
    /// column indices via [`SolverMapping`](crate::param::SolverMapping).
    fn jacobian(&self, store: &ParamStore) -> Vec<(usize, ParamId, f64)>;

    /// Weight for soft constraints (default 1.0).
    ///
    /// NOTE(review): only meaningful when `is_soft()` returns `true` —
    /// verify how the solver applies the weight to hard constraints.
    fn weight(&self) -> f64 {
        1.0
    }

    /// Is this a soft constraint that can be relaxed?
    fn is_soft(&self) -> bool {
        false
    }
}
252 changes: 252 additions & 0 deletions crates/solverang/src/dataflow/cache.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,252 @@
//! Per-cluster solution caching and warm-start support.
//!
//! After a successful solve, each cluster's solution is stored in a
//! [`SolutionCache`]. On the next solve cycle, the cached solution can serve
//! as a warm start, often reducing the number of iterations required.
//!
//! When clusters are invalidated (due to parameter changes) or removed
//! (due to re-decomposition), their cache entries are discarded.

use std::collections::HashMap;

use crate::id::ClusterId;

/// Cached solution state for a single cluster.
///
/// Stores the parameter values (in solver-column order), the residual norm
/// at those values, and the number of solver iterations that were used.
///
/// Entries are produced by [`SolutionCache::store`] after a solve and read
/// back via [`SolutionCache::get`] as warm starts on later solve cycles.
#[derive(Clone, Debug)]
pub struct ClusterCache {
    /// Cached parameter values in solver-column order.
    pub solution: Vec<f64>,
    /// Residual norm (L2) at the cached solution.
    pub residual_norm: f64,
    /// Number of solver iterations used to reach this solution.
    pub iterations: usize,
}

/// Solution cache for all clusters.
///
/// After a successful solve, each cluster's solution is cached. On the next
/// solve, the cached solution is used as a warm start if the cluster's
/// parameters haven't changed too much.
///
/// # Example
///
/// ```ignore
/// let mut cache = SolutionCache::new();
///
/// // After solving cluster 0:
/// cache.store(ClusterId(0), vec![1.0, 2.0, 3.0], 1e-12, 5);
///
/// // On next solve, retrieve the warm start:
/// if let Some(cached) = cache.get(&ClusterId(0)) {
///     // Use cached.solution as the initial guess.
/// }
/// ```
#[derive(Clone, Debug, Default)]
pub struct SolutionCache {
    // One entry per solved cluster, keyed by cluster id. Entries are removed
    // by `invalidate`, `invalidate_all`, and `retain_clusters`.
    clusters: HashMap<ClusterId, ClusterCache>,
}

impl SolutionCache {
/// Create an empty solution cache.
pub fn new() -> Self {
Self {
clusters: HashMap::new(),
}
}

/// Store a solution for a cluster.
///
/// Overwrites any previously cached solution for the same cluster.
pub fn store(
&mut self,
cluster_id: ClusterId,
solution: Vec<f64>,
residual_norm: f64,
iterations: usize,
) {
self.clusters.insert(
cluster_id,
ClusterCache {
solution,
residual_norm,
iterations,
},
);
}

/// Get the cached solution for a cluster, if one exists.
///
/// Returns `None` if the cluster has no cached solution (either it was
/// never solved or its cache was invalidated).
pub fn get(&self, cluster_id: &ClusterId) -> Option<&ClusterCache> {
self.clusters.get(cluster_id)
}

/// Invalidate (remove) the cached solution for a single cluster.
///
/// Call this when a cluster's parameters have changed enough that
/// the cached solution is no longer a useful warm start.
pub fn invalidate(&mut self, cluster_id: &ClusterId) {
self.clusters.remove(cluster_id);
}

/// Invalidate all cached solutions.
///
/// Typically called after a full re-decomposition, since cluster IDs
/// may have been reassigned.
pub fn invalidate_all(&mut self) {
self.clusters.clear();
}

/// Remove entries for clusters that no longer exist.
///
/// After re-decomposition, old cluster IDs may be stale. This method
/// retains only the entries whose IDs appear in `valid_ids`.
pub fn retain_clusters(&mut self, valid_ids: &[ClusterId]) {
let valid_set: std::collections::HashSet<&ClusterId> = valid_ids.iter().collect();
self.clusters.retain(|id, _| valid_set.contains(id));
}

/// Returns the number of cached cluster solutions.
pub fn len(&self) -> usize {
self.clusters.len()
}

/// Returns `true` if the cache contains no entries.
pub fn is_empty(&self) -> bool {
self.clusters.is_empty()
}
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::id::ClusterId;

    #[test]
    fn new_cache_is_empty() {
        let cache = SolutionCache::new();
        assert_eq!(cache.len(), 0);
        assert!(cache.is_empty());
    }

    #[test]
    fn store_and_get() {
        let mut cache = SolutionCache::new();
        let cluster = ClusterId(0);

        cache.store(cluster, vec![1.0, 2.0, 3.0], 1e-10, 7);

        let cached = cache.get(&cluster).expect("should have cached entry");
        assert_eq!(cached.iterations, 7);
        assert_eq!(cached.solution, vec![1.0, 2.0, 3.0]);
        assert!((cached.residual_norm - 1e-10).abs() < 1e-20);
    }

    #[test]
    fn store_overwrites_previous() {
        let mut cache = SolutionCache::new();
        let cluster = ClusterId(0);

        // Second store for the same id must replace the first entirely.
        cache.store(cluster, vec![1.0], 0.1, 10);
        cache.store(cluster, vec![2.0], 0.01, 5);

        assert_eq!(cache.len(), 1);
        let cached = cache.get(&cluster).unwrap();
        assert_eq!(cached.iterations, 5);
        assert_eq!(cached.solution, vec![2.0]);
        assert!((cached.residual_norm - 0.01).abs() < 1e-15);
    }

    #[test]
    fn get_nonexistent_returns_none() {
        let cache = SolutionCache::new();
        assert!(cache.get(&ClusterId(42)).is_none());
    }

    #[test]
    fn invalidate_single_cluster() {
        let mut cache = SolutionCache::new();
        cache.store(ClusterId(0), vec![1.0], 0.0, 1);
        cache.store(ClusterId(1), vec![2.0], 0.0, 2);

        cache.invalidate(&ClusterId(0));

        assert_eq!(cache.len(), 1);
        assert!(cache.get(&ClusterId(0)).is_none());
        assert!(cache.get(&ClusterId(1)).is_some());
    }

    #[test]
    fn invalidate_nonexistent_is_noop() {
        let mut cache = SolutionCache::new();
        cache.store(ClusterId(0), vec![1.0], 0.0, 1);

        // Removing an id that was never stored must not disturb the cache.
        cache.invalidate(&ClusterId(99));

        assert_eq!(cache.len(), 1);
    }

    #[test]
    fn invalidate_all_clears_cache() {
        let mut cache = SolutionCache::new();
        for i in 0..3u64 {
            cache.store(ClusterId(i as usize), vec![(i + 1) as f64], 0.0, (i + 1) as usize);
        }

        cache.invalidate_all();

        assert!(cache.is_empty());
        assert_eq!(cache.len(), 0);
    }

    #[test]
    fn retain_clusters_keeps_valid_ids() {
        let mut cache = SolutionCache::new();
        cache.store(ClusterId(0), vec![1.0], 0.0, 1);
        cache.store(ClusterId(1), vec![2.0], 0.0, 2);
        cache.store(ClusterId(2), vec![3.0], 0.0, 3);
        cache.store(ClusterId(3), vec![4.0], 0.0, 4);

        // After re-decomposition, only clusters 1 and 3 still exist.
        cache.retain_clusters(&[ClusterId(1), ClusterId(3)]);

        assert_eq!(cache.len(), 2);
        assert!(cache.get(&ClusterId(1)).is_some());
        assert!(cache.get(&ClusterId(3)).is_some());
        assert!(cache.get(&ClusterId(0)).is_none());
        assert!(cache.get(&ClusterId(2)).is_none());
    }

    #[test]
    fn retain_clusters_with_empty_valid_ids() {
        let mut cache = SolutionCache::new();
        cache.store(ClusterId(0), vec![1.0], 0.0, 1);

        // Retaining against an empty id list evicts everything.
        cache.retain_clusters(&[]);

        assert!(cache.is_empty());
    }

    #[test]
    fn default_is_empty() {
        assert!(SolutionCache::default().is_empty());
    }

    #[test]
    fn clone_is_independent() {
        let mut original = SolutionCache::new();
        original.store(ClusterId(0), vec![1.0, 2.0], 1e-8, 3);

        let mut copy = original.clone();
        copy.invalidate(&ClusterId(0));

        // Mutating the clone must not affect the original.
        assert!(original.get(&ClusterId(0)).is_some());
        assert!(copy.get(&ClusterId(0)).is_none());
    }
}
18 changes: 18 additions & 0 deletions crates/solverang/src/dataflow/mod.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
//! Incremental dataflow tracking for the constraint solver.
//!
//! This module provides change tracking and solution caching to enable
//! incremental re-solving. Instead of re-solving the entire constraint system
//! when a single parameter changes, the solver can:
//!
//! 1. Identify which clusters are affected by the change ([`ChangeTracker`]).
//! 2. Re-solve only the dirty clusters.
//! 3. Use cached solutions as warm starts ([`SolutionCache`]).
//!
//! When structural changes occur (entities or constraints added/removed),
//! the system triggers a full re-decomposition before solving.

// Submodules are private; the re-exports below form this module's public API.
mod cache;
mod tracker;

pub use cache::{ClusterCache, SolutionCache};
pub use tracker::ChangeTracker;
Loading