# Source code for femorph_solver.validation._convergence
"""Convergence study runner.
Sweeps a :class:`BenchmarkProblem` across a sequence of mesh
refinements, captures the :class:`ValidationResult` at each, and
fits a log-log convergence rate :math:`|e| \\sim n_\\text{dof}^{-p}`
per quantity.
The fitted rate is the **empirical** rate on this specific mesh
family — NOT a claim of theoretical rate. For a conforming
Galerkin FE on a smooth problem we expect :math:`p = 2/d` in 3D
(i.e. :math:`|e| \\sim h^2` where :math:`h \\sim n_\\text{dof}^{-1/3}`),
but shear-locking, mixed formulations, and integration-error
terms can shift the observed rate considerably. The study
reports what is measured; the caller judges whether the rate is
consistent with theory.
"""
from __future__ import annotations
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any
import numpy as np
if TYPE_CHECKING:
from femorph_solver.validation._benchmark import (
BenchmarkProblem,
ValidationResult,
)
# [docs]
@dataclass(frozen=True)
class ConvergenceRecord:
    """Measured convergence of one quantity across refinements.

    ``results`` holds the per-refinement :class:`ValidationResult`
    list, ordered coarse → fine. ``convergence_rate`` is the fitted
    :math:`p` in :math:`|e| = C \\cdot n_\\text{dof}^{-p}` over the
    *last two* refinements (the coarser pairs drop out so the rate
    reflects the fine-mesh regime). It is ``None`` when fewer than
    two refinements completed, a refinement did not report ``n_dof``,
    or either of the two finest relative errors is exactly zero (no
    power-law slope exists). A *growing* error is reported as a
    negative rate, not ``None``.
    """

    # Name of the published quantity this record tracks.
    quantity_name: str
    # One ValidationResult per refinement, in run order (coarse → fine).
    results: list[ValidationResult]
    # Fitted empirical rate p, or None (see class docstring).
    convergence_rate: float | None = None
# [docs]
@dataclass(frozen=True)
class ConvergenceStudy:
    """Run a benchmark at multiple refinements, capture + fit.

    Parameters
    ----------
    problem :
        A :class:`BenchmarkProblem` instance.
    refinements :
        Sequence of mesh-parameter dicts. Each dict is forwarded
        verbatim to ``problem.build_model``. The order is
        coarse → fine; the final entry is the reference mesh for
        acceptance.
    """

    problem: BenchmarkProblem
    refinements: list[dict[str, Any]] = field(default_factory=list)

    def run(self) -> list[ConvergenceRecord]:
        """Run every refinement; return one record per published quantity."""
        # Seed one bucket per published quantity up front, so a quantity
        # that never appears in any refinement still yields a record.
        buckets: dict[str, list[ValidationResult]] = {}
        for published in self.problem.published_values:
            buckets[published.name] = []
        for mesh_params in self.refinements:
            for row in self.problem.validate(**mesh_params):
                buckets[row.published.name].append(row)
        records: list[ConvergenceRecord] = []
        for qty_name, rows in buckets.items():
            records.append(
                ConvergenceRecord(
                    quantity_name=qty_name,
                    results=rows,
                    convergence_rate=_fit_rate(rows),
                )
            )
        return records
def _fit_rate(results: list[ValidationResult]) -> float | None:
"""Fit ``|err| = C * n_dof^-p`` over the two finest refinements.
Returns ``None`` if we have fewer than two usable samples or
the fit would involve zero / non-positive errors (which would
arise if the problem was solved exactly at both refinements —
an interesting signal worth flagging to the caller, but not
convertible to a power-law slope).
"""
if len(results) < 2:
return None
r_fine = results[-1]
r_coarse = results[-2]
if r_fine.n_dof is None or r_coarse.n_dof is None:
return None
err_fine = abs(r_fine.rel_error)
err_coarse = abs(r_coarse.rel_error)
if err_fine <= 0.0 or err_coarse <= 0.0:
return None
# log(err) = log(C) - p log(n_dof)
slope = (np.log(err_fine) - np.log(err_coarse)) / (
np.log(r_fine.n_dof) - np.log(r_coarse.n_dof)
)
return float(-slope)