"""femorph-solver model — a thin wrapper around a ``pyvista.UnstructuredGrid``.
The grid is the canonical mesh representation:
* ``point_data["ansys_node_num"]`` — 1-based node numbers
* ``cell_data["ansys_elem_num"]`` — 1-based element numbers
* ``cell_data["ansys_elem_type_num"]`` — element-type id per element
* ``cell_data["ansys_material_type"]`` — material id per element
* ``cell_data["ansys_real_constant"]`` — real-constant id per element
The ``ansys_*`` array names are kept for back-compatibility with
``mapdl_archive`` and existing ``.pv`` files on disk; they are
storage-format keys, not API names — every public method on
:class:`Model` operates on neutral kernel names.
Global tables that don't belong per-cell live on the Model directly:
* :attr:`etypes` — ET id → element class name (e.g. ``"TRUSS2"``)
* :attr:`materials` — MAT id → ``{"EX": ..., "PRXY": ..., "DENS": ...}``
* :attr:`real_constants` — REAL id → 1-D ``numpy.ndarray``
References
----------
* Global-assembly orchestration (loop over element groups, gather
per-element ``Kₑ`` / ``Mₑ`` into CSR via a pre-computed sparsity
pattern): Saad, Y., *Iterative Methods for Sparse Linear
Systems*, 2nd ed., SIAM, 2003, §3.3 (symbolic + numeric
assembly); Zienkiewicz, O.C. and Taylor, R.L., *The Finite
Element Method: Its Basis and Fundamentals*, 7th ed.,
Butterworth-Heinemann, 2013, §2.8.
* Boundary-condition elimination (partition into free / fixed
DOFs and solve the reduced system; ``_bc_reduce_and_release``):
Cook, Malkus, Plesha, Witt, *Concepts and Applications of
Finite Element Analysis*, 4th ed., Wiley, 2002, §2.10
(partitioning of ``K`` + ``u`` by prescribed / free DOFs);
Bathe, K.J., *Finite Element Procedures*, 2nd ed., Prentice
Hall, 2014, §3.4.
* Master–slave / cyclic constraint reduction (``P^H K P``):
Craveur, J.-C. and Jetteur, P., *Modélisation par éléments
finis*, Dunod, 2008, §17 (substructuring + cyclic); Petrolo,
M. and Zappino, E., cyclic symmetry reduction for rotor
dynamics, various references; see also
:mod:`femorph_solver.solvers.cyclic` for the detailed block.
"""
from __future__ import annotations
import contextlib
from typing import TYPE_CHECKING, Any, overload
import numpy as np
import pyvista as pv
import scipy.sparse as _sp
from vtkmodules.util.numpy_support import numpy_to_vtk
from vtkmodules.vtkCommonCore import VTK_UNSIGNED_CHAR, vtkTypeInt32Array
from vtkmodules.vtkCommonDataModel import vtkCellArray
from femorph_solver import _core
from femorph_solver.elements import get as _get_element
from femorph_solver.elements._base import CANONICAL_DOFS as _CANONICAL_DOFS
from femorph_solver.elements._base import ElementBase
from femorph_solver.labels import (
DOFLabel,
ElementType,
ForceLabel,
MaterialProperty,
UnitSystem,
_token,
)
if TYPE_CHECKING: # pragma: no cover
from collections.abc import Callable, Sequence
from pathlib import Path
from femorph_solver._state import BoundaryConditions, Loads
from femorph_solver.solvers.harmonic import HarmonicResult
from femorph_solver.solvers.modal import ModalResult
from femorph_solver.solvers.static import StaticResult
from femorph_solver.solvers.transient import TransientResult
def _canonicalize_grid(grid: pv.UnstructuredGrid) -> pv.UnstructuredGrid:
"""Return a copy of ``grid`` with points sorted by ``ansys_node_num``
and cells sorted by ``ansys_elem_num``.
Every :class:`Model` hot path assumes the grid is in
node-number-sorted order — the DOF layout walks points in the
order they appear and matrix rows inherit that order, which
means the same mesh loaded via two different paths must iterate
points identically or two freshly-assembled Ks would differ by
a permutation. Centralising the canonicalisation here (rather
than re-sorting inside every consumer) keeps the invariant
local and lets the hot path take a zero-copy view of
``point_data`` / ``cell_data``.
When the grid already happens to be sorted the function is still a
no-op on content — it shortcuts if both orderings are already
monotonic — so for meshes produced by ``_rebuild_grid`` (which
always emits canonically-sorted output) or by foreign-deck
readers (which typically do as well) there's no copy.
"""
node_nums = np.asarray(grid.point_data.get("ansys_node_num"))
elem_nums = np.asarray(grid.cell_data.get("ansys_elem_num"))
pt_order = np.argsort(node_nums, kind="stable")
cell_order = np.argsort(elem_nums, kind="stable")
pts_sorted = np.all(pt_order == np.arange(node_nums.size))
cells_sorted = np.all(cell_order == np.arange(elem_nums.size))
if pts_sorted and cells_sorted:
return grid
# Rebuild with reordered points + cells. VTK has no in-place
# ``ReorderPoints`` that also rewires connectivity, so the cleanest
# path is to extract → reorder → reassemble.
new_points = np.asarray(grid.points, dtype=np.float64)[pt_order]
# Inverse map: old VTK point index -> new VTK point index.
inverse_pt = np.empty(node_nums.size, dtype=np.int32)
inverse_pt[pt_order] = np.arange(node_nums.size, dtype=np.int32)
# Per-cell connectivity in the original VTK index space, reindexed
# to the new point order.
old_conn = np.asarray(grid.cell_connectivity, dtype=np.int64)
old_offset = np.asarray(grid.offset, dtype=np.int64)
new_conn_full = inverse_pt[old_conn].astype(np.int32, copy=False)
# Reorder cells. Each cell's block in ``new_conn_full`` starts at
# ``old_offset[i]`` and has length ``old_offset[i+1] - old_offset[i]``.
cell_sizes = np.diff(old_offset)
new_conn_chunks: list[np.ndarray] = []
new_offset = [0]
for i in cell_order:
start = int(old_offset[i])
sz = int(cell_sizes[i])
new_conn_chunks.append(new_conn_full[start : start + sz])
new_offset.append(new_offset[-1] + sz)
new_conn = (
np.concatenate(new_conn_chunks).astype(np.int32)
if new_conn_chunks
else np.empty(0, dtype=np.int32)
)
new_offset_arr = np.asarray(new_offset, dtype=np.int32)
new_celltypes = np.asarray(grid.celltypes, dtype=np.uint8)[cell_order]
# Assemble a fresh grid with int32 cell structure up-front.
vtk_dtype = vtkTypeInt32Array().GetDataType()
offsets_vtk = numpy_to_vtk(new_offset_arr, deep=True, array_type=vtk_dtype)
conn_vtk = numpy_to_vtk(new_conn, deep=True, array_type=vtk_dtype)
cell_array = vtkCellArray()
cell_array.SetData(offsets_vtk, conn_vtk)
celltypes_vtk = numpy_to_vtk(new_celltypes, deep=True, array_type=VTK_UNSIGNED_CHAR)
new_grid = pv.UnstructuredGrid()
new_grid.SetCells(celltypes_vtk, cell_array)
new_grid.points = new_points
# Carry over point/cell data in the new ordering.
for k, v in grid.point_data.items():
new_grid.point_data[k] = np.asarray(v)[pt_order]
for k, v in grid.cell_data.items():
new_grid.cell_data[k] = np.asarray(v)[cell_order]
return new_grid
def _force_int32_cells(grid: pv.UnstructuredGrid) -> None:
"""In-place: coerce ``grid.cell_connectivity`` + ``grid.offset`` to int32.
VTK builds these arrays as int64 by default on 64-bit systems even
though every real FEM mesh fits in a 32-bit index (2 G cells / points
would be ~64 GB of doubles alone). The int64 layout doubles the
memory footprint of cell structure and — more importantly — forces
a copy-cast every time our downstream code asks for the int32 view
it actually consumes (scipy CSR, the C++ scatter kernels, etc.).
Rewriting the ``vtkCellArray`` with int32 backing storage is
zero-copy after one pass — matches :func:`femorph.utilities.force_int32_itype`.
"""
if grid.n_cells == 0:
return
offset = grid.offset
if offset.dtype == np.int32:
return
conn = grid.cell_connectivity
vtk_dtype = vtkTypeInt32Array().GetDataType()
offset32 = np.ascontiguousarray(offset, dtype=np.int32)
conn32 = np.ascontiguousarray(conn, dtype=np.int32)
offsets_vtk = numpy_to_vtk(offset32, deep=True, array_type=vtk_dtype)
conn_vtk = numpy_to_vtk(conn32, deep=True, array_type=vtk_dtype)
cell_array = vtkCellArray()
cell_array.SetData(offsets_vtk, conn_vtk)
cell_types_vtk = numpy_to_vtk(grid.celltypes, deep=True, array_type=VTK_UNSIGNED_CHAR)
grid.SetCells(cell_types_vtk, cell_array)
[docs]
class Model:
"""Top-level femorph-solver model — the user-facing entry point.
A :class:`Model` wraps a :class:`pyvista.UnstructuredGrid` together
with the per-element / per-material / per-real-constant tables, the
Dirichlet boundary-condition state, and the applied-load state. The
grid is the canonical source of truth for geometry and connectivity;
everything else is metadata stored in dictionaries keyed by ANSYS-
style numeric ids (``etype``, ``mat``, ``real``).
Construct a model in one of three ways:
* :meth:`Model.from_grid` — wrap an existing
:class:`pyvista.UnstructuredGrid`.
* :meth:`Model.from_pv` — load a previously :meth:`save`-d ``.pv``
snapshot (full round-trip including materials and BCs).
* :meth:`Model.from_chain` / :meth:`Model.from_lines` /
:meth:`Model.from_points` — build the grid in-place from raw
arrays.
Once the model has elements, materials, and BCs assigned, drive an
analysis with one of the four solve verbs:
:meth:`solve_static` (linear static), :meth:`solve_modal`,
:meth:`solve_harmonic`, or :meth:`solve_transient`.
For cyclic-symmetry modal analysis, wrap the model in
:class:`femorph_solver.CyclicModel` and call
:meth:`CyclicModel.solve_modal`.
Parameters
----------
grid : pyvista.UnstructuredGrid, optional
Mesh to wrap. Defaults to an empty grid; in that case nodes and
elements are added incrementally with the lower-level ``n`` /
``e`` / ``et`` / ``mp`` / ``r`` builders before the first solve.
unit_system : UnitSystem or str, keyword-only, optional
Bookkeeping stamp identifying which physical unit convention the
material constants and geometry follow. Defaults to
``UnitSystem.UNSPECIFIED``; the solver itself is dimensionless,
but downstream readers (result files, stress recovery, the
MAPDL ``from_cdb`` reader) consume the stamp to interpret
numbers correctly.
See Also
--------
CyclicModel : sector-reduced cyclic-symmetry wrapper.
Examples
--------
Load the bundled bladed-rotor sector, clamp the hub face, and solve
the lowest four natural frequencies.
>>> import numpy as np
>>> import femorph_solver as fs
>>> model = fs.Model.from_pv(fs.examples.cyclic_bladed_rotor_sector_path())
>>> hub_z = float(np.asarray(model.grid.points)[:, 2].min())
>>> hub = np.asarray(model.grid.points)[:, 2] < hub_z + 1e-6
>>> model.fix(where=hub, dof=["UX", "UY", "UZ"])
>>> result = model.solve_modal(n_modes=4)
>>> result.frequency.shape
(4,)
>>> bool(result.frequency[0] > 0.0)
True
"""
def __init__(
self,
grid: pv.UnstructuredGrid | None = None,
*,
unit_system: UnitSystem | str = UnitSystem.UNSPECIFIED,
) -> None:
self._grid: pv.UnstructuredGrid = pv.UnstructuredGrid() if grid is None else grid
self._etypes: dict[int, str] = {}
self._materials: dict[int, dict[str, float]] = {}
self._real_constants: dict[int, np.ndarray] = {}
# Lazy-init: the registry only exists once geometry methods are
# actually used. Access via ``Model.geometry`` (property).
self._geometry = None # type: ignore[var-annotated]
# Bookkeeping-only stamp. The solver is dimensionless — ``E = 2.1e11``
# is whatever the caller intends — but every realistic user works in
# one consistent system. Stamping the Model lets downstream readers
# (result files, stress recovery, material libraries) interpret the
# numbers without guessing. See :class:`femorph_solver.UnitSystem`.
self._unit_system: UnitSystem = (
unit_system if isinstance(unit_system, UnitSystem) else UnitSystem(unit_system)
)
self._current_type: int = 1
self._current_mat: int = 1
self._current_real: int = 1
# Incremental-build staging area for ``N`` / ``E``. After any
# grid rebuild these are drained back into the grid and cleared;
# the grid is the permanent source of truth for geometry +
# connectivity. Previously the dicts were the primary storage
# (the grid was rebuilt from them on demand) — that cost ~34 MB
# at 200×200×2 once the mesh matured because every node (80 B)
# and element (~250 B) was duplicated between dict and grid.
self._staged_nodes: dict[int, tuple[float, float, float]] = {}
# elem_num -> (node_nums, etype_id, mat_id, real_id)
self._staged_elements: dict[int, tuple[list[int], int, int, int]] = {}
self._dirty: bool = False
# Cached _dof_layout() result — reused across K/M assembly once the grid
# is clean. Invalidated by any structural mutation (n, e, et).
self._layout_cache: (
tuple[np.ndarray, dict[int, tuple[int, tuple[str, ...]]], int] | None
) = None
self._node_dof_labels_cache: dict[int, tuple[str, ...]] | None = None
# Cached (N, row_ptr, col_idx) CSR pattern — reused across K and M
# since connectivity is geometry-only. Invalidated alongside the
# layout cache on any structural mutation (n / e / et).
self._csr_pattern_cache: tuple[int, np.ndarray, np.ndarray] | None = None
# Cached assembled K and M. Invalidated on any structural or
# material mutation — see ``_invalidate_matrix_cache``. Callers
# that run modal sweeps or static solves back-to-back on the same
# Model see the zero-overhead hit on the second call.
self._K_cache: _sp.csr_array | None = None
self._M_cache: _sp.csr_array | None = None
self._M_lumped_cache: _sp.csr_array | None = None
# Boundary conditions / loads staged via D / F. Keyed by (node, dof_idx).
self._disp_bc: dict[tuple[int, int], float] = {}
self._force_bc: dict[tuple[int, int], float] = {}
# Thermal boundary conditions (D,TEMP / F,HEAT). The thermal
# pillar runs as a parallel DOF-layout pipeline so the
# structural BC dicts above can stay strictly U / F-typed.
self._temp_bc: dict[int, float] = {}
self._heat_bc: dict[int, float] = {}
# Surface convection / heat-flux loads. Each entry stores the
# snapshot of the active node selection plus the load values;
# the thermal assembler walks every thermal element's edges and
# contributes to K_t / Q on edges whose endpoints are entirely
# in the stored selection.
self._surf_conv: list[tuple[frozenset[int], float, float]] = []
self._surf_hflux: list[tuple[frozenset[int], float]] = []
# Element-level body force: BFE,..,HGEN volumetric heat
# generation. Each entry is ``(elem_set, q_gen)`` where
# ``q_gen`` is the generation rate (W/volume); the thermal
# load assembler integrates ``q_gen * volume / n_nodes`` to
# each node of the affected elements.
self._bfe_hgen: list[tuple[frozenset[int], float]] = []
# Distributed loads on element faces (MAPDL ``SFBEAM``). Keyed
# by element number; each entry is a list of ``(face, label,
# value)`` triples so multiple distinct face-loads on one
# element accumulate. ``_load_vector`` consults this map and
# adds the consistent-load contribution to F.
self._sfbeam_loads: dict[int, list[tuple[int, str, float]]] = {}
# Thermal loading (MAPDL ``TREF`` + ``BFUNIF,TEMP`` + ``BF,*,TEMP,*``).
# ``_tref`` is the stress-free reference temperature; node
# temperatures default to ``_uniform_temp`` (or ``_tref`` when
# neither uniform nor per-node is set, so ΔT = 0 and the
# thermal-load contribution is identically zero).
self._tref: float = 0.0
self._uniform_temp: float | None = None
self._node_temps: dict[int, float] = {}
# Coupled DOFs (MAPDL ``CP``) — multi-point constraints that
# bind a set of DOFs to share the same scalar value at solve
# time. Keyed by set id; each entry is a list of ``(node,
# dof_label)`` pairs with the first entry treated as master.
# ``solve()`` resolves each set into ``(slave_dof, master_dof)``
# global-index pairs and hands them to the linear solver.
self._cp_sets: dict[int, list[tuple[int, str]]] = {}
# CDB / dat-side named components. ``from_cdb`` populates
# ``_node_components`` from ``mapdl_archive.Archive.node_components``
# and ``_element_components`` from the corresponding element
# table. ``Model.fix(component=...)`` and any future
# component-aware verbs read from these dicts. Empty by
# default - components only exist when a CDB / dat reader
# stamped them on a freshly-loaded Model.
self._node_components: dict[str, np.ndarray] = {}
self._element_components: dict[str, np.ndarray] = {}
@property
def grid(self) -> pv.UnstructuredGrid:
"""Underlying :class:`pyvista.UnstructuredGrid`.
Returns the canonical mesh container; pending incremental edits
from ``n`` / ``e`` builders are flushed first so the returned
grid is always in a coherent state.
Returns
-------
pyvista.UnstructuredGrid
The model's mesh, with ``point_data["ansys_node_num"]``
and the standard ``cell_data`` arrays
(``ansys_elem_num``, ``ansys_elem_type_num``,
``ansys_material_type``, ``ansys_real_constant``) populated.
"""
if self._dirty:
self._rebuild_grid()
return self._grid
@property
def etypes(self) -> dict[int, str]:
"""Mapping ``etype_id → element-type name`` (``"HEX8"``, ``"BEAM2"``, ...).
Returns
-------
dict of {int: str}
ANSYS-style element-type table. Read-only view; mutate
with :meth:`assign` or the lower-level ``et`` builder.
"""
return self._etypes
@property
def materials(self) -> dict[int, dict[str, float]]:
"""Mapping ``mat_id → {property: value}``.
Returns
-------
dict of {int: dict of {str: float}}
Per-material property table — ``"EX"`` / ``"PRXY"`` /
``"DENS"`` / ``"ALPX"`` and any other constants supplied
via :meth:`assign` or the ``mp`` builder.
"""
return self._materials
@property
def real_constants(self) -> dict[int, np.ndarray]:
"""Mapping ``real_id → real-constant array``.
Returns
-------
dict of {int: numpy.ndarray}
Per-real-constant table. Element-specific layout — see
the element's reference page for which positional slots
mean what (e.g. BEAM2 stores ``[A, Iz, Iy, K]``).
"""
return self._real_constants
@property
def bcs(self) -> BoundaryConditions:
"""Typed view of the Model's Dirichlet boundary-condition state.
Zero-copy view over the internal ``(node, dof) → value``
storage. Provides clean inspection (``bcs.fixed``,
``bcs.n_pinned``, ``len(bcs)``, ``bcs.is_fixed(node, dof)``),
mutation helpers (``bcs.pin(...)``, ``bcs.clear()``), and a
``bcs.to_table()`` for REPL printing.
Every :meth:`fix` / :meth:`d` call on the Model shows up here
immediately; the view doesn't duplicate state.
"""
from femorph_solver._state import BoundaryConditions # noqa: PLC0415
return BoundaryConditions(self)
@property
def loads(self) -> Loads:
"""Typed view of the Model's applied nodal forces / moments.
Mirror of :attr:`bcs` for :meth:`apply_force` / :meth:`f` state.
Supports ``loads.forces``, ``loads.n_applied``,
``loads.apply(node, fz=...)``, ``loads.clear()``, and
``loads.to_table()``.
"""
from femorph_solver._state import Loads # noqa: PLC0415
return Loads(self)
@property
def geometry(self):
"""Per-model :class:`GeometryRegistry` for native geometry creation.
Returns the model's geometry registry, lazy-initialised on first
access. See ``docs/design/geometry.md`` for the API design and
the implementation roadmap.
Examples
--------
>>> import femorph_solver as fs
>>> m = fs.Model()
>>> g = m.geometry
>>> p1 = g.point(0, 0, 0)
>>> p2 = g.point(1, 0, 0)
>>> line = g.line(p1, p2)
>>> g.n_points, g.n_lines
(2, 1)
"""
from femorph_solver.geometry import GeometryRegistry # noqa: PLC0415
if self._geometry is None:
self._geometry = GeometryRegistry()
return self._geometry
@property
def unit_system(self) -> UnitSystem:
"""Current :class:`UnitSystem` stamp.
Defaults to :attr:`UnitSystem.UNSPECIFIED` for legacy Models that
pre-date this attribute. Set via :meth:`set_unit_system`, pass
via the constructor, or let :meth:`Model.from_grid` /
:func:`femorph_solver.mapdl_api.from_cdb` detect it from the input.
"""
return self._unit_system
[docs]
def set_unit_system(self, unit_system: UnitSystem | str) -> None:
"""Declare the :class:`UnitSystem` this Model's numeric inputs follow.
Bookkeeping only — the solver's math is dimensionless. The stamp
travels with the Model through assembly, solve, and the result
serialiser so downstream readers can interpret the numbers.
Parameters
----------
unit_system : UnitSystem or str
Target unit system; strings are coerced through
:class:`UnitSystem`.
"""
self._unit_system = (
unit_system if isinstance(unit_system, UnitSystem) else UnitSystem(unit_system)
)
[docs]
def assert_unit_system(self, expected: UnitSystem | str) -> None:
"""Raise :class:`ValueError` if this Model's ``unit_system`` is not ``expected``.
Use this in scripts that require a specific unit convention — a
mismatch between the caller's assumption and the Model's stamp
usually means numbers will be silently wrong by a factor of
:math:`10^n`. :attr:`UnitSystem.UNSPECIFIED` always passes (no
assertion possible on an unstamped Model).
Parameters
----------
expected : UnitSystem or str
Required unit system; strings are coerced through
:class:`UnitSystem`.
"""
want = expected if isinstance(expected, UnitSystem) else UnitSystem(expected)
if self._unit_system == UnitSystem.UNSPECIFIED:
return
if self._unit_system != want:
raise ValueError(
f"Model.unit_system is {self._unit_system.value}, expected {want.value}"
)
@property
def n_nodes(self) -> int:
"""Number of nodes in the model.
Returns
-------
int
``self.grid.n_points``.
"""
return int(self.grid.n_points)
@property
def n_elements(self) -> int:
"""Number of elements in the model.
Returns
-------
int
``self.grid.n_cells``.
"""
return int(self.grid.n_cells)
@property
def node_numbers(self) -> np.ndarray:
"""Per-node ANSYS-style node numbers.
Returns
-------
numpy.ndarray of int32, shape ``(n_nodes,)``
One-based ANSYS node id for each grid point, in
``self.grid.points`` order. Empty array on a model whose
grid carries no ``ansys_node_num`` array.
"""
arr = self.grid.point_data.get("ansys_node_num")
return np.asarray(arr, dtype=np.int32) if arr is not None else np.empty(0, dtype=np.int32)
@property
def element_numbers(self) -> np.ndarray:
"""Per-element ANSYS-style element numbers.
Returns
-------
numpy.ndarray of int32, shape ``(n_elements,)``
One-based ANSYS element id for each grid cell, in
``self.grid.cells`` order. Empty array on a model whose
grid carries no ``ansys_elem_num`` array.
"""
arr = self.grid.cell_data.get("ansys_elem_num")
return np.asarray(arr, dtype=np.int32) if arr is not None else np.empty(0, dtype=np.int32)
def __repr__(self) -> str:
return (
f"<femorph_solver.Model nodes={self.n_nodes} elements={self.n_elements} "
f"etypes={len(self._etypes)} materials={len(self._materials)} "
f"reals={len(self._real_constants)}>"
)
[docs]
def validate(self, *, strict: bool = False) -> list[str]:
"""Pre-flight check: report problems that would break a solve.
Cheap O(n_nodes + n_elements) inspection of the model's
current state. Returns a list of human-readable findings,
each one a single-line string explaining a discovered issue
that ``solve_*`` would surface as an error or wrong answer.
Empty list = "ready to solve."
Parameters
----------
strict : bool, default False
``False`` (the default) returns the findings list and the
caller decides what to do. ``True`` raises
:class:`~femorph_solver.exceptions.ValidationError`
(carrying ``findings`` as an attribute) when any are
present — useful in pipelines that want fail-fast.
Returns
-------
list of str
Findings, one per discovered issue. Stable across calls
on a non-mutated model.
Raises
------
femorph_solver.exceptions.ValidationError
When ``strict=True`` and at least one finding was produced.
See Also
--------
femorph_solver.exceptions : the typed exception hierarchy
this method's findings would map onto if surfaced as
individual raises.
Examples
--------
Pre-check a freshly-constructed model. An empty model has
no elements assigned, so :meth:`validate` flags it.
>>> import femorph_solver as fs
>>> m = fs.Model()
>>> findings = m.validate()
>>> any("no elements" in f for f in findings)
True
With ``strict=True`` the same call raises:
>>> try:
... m.validate(strict=True)
... except fs.exceptions.ValidationError as exc:
... bool(exc.findings)
True
"""
findings: list[str] = []
# 1. Empty geometry.
if self.n_nodes == 0:
findings.append("model has no nodes (n_nodes=0)")
if self.n_elements == 0:
findings.append("model has no elements (n_elements=0)")
# 2. Element-type registry. Every distinct etype id used by
# any cell must be registered.
et_arr = self.grid.cell_data.get("ansys_elem_type_num")
if et_arr is not None and len(et_arr):
used_et = set(int(x) for x in np.unique(et_arr))
missing_et = used_et - set(self._etypes.keys())
for et_id in sorted(missing_et):
findings.append(
f"element-type id {et_id} used by mesh but not registered "
f"(call model.assign(ELEMENTS.<TYPE>, ...) or APDL et())"
)
# 3. Material registry. If the cell carries a material id,
# it must exist. An element with no material id (mat=0)
# is OK only when the user is about to call assign().
mat_arr = self.grid.cell_data.get("ansys_material_type")
if mat_arr is not None and len(mat_arr):
used_mat = set(int(x) for x in np.unique(mat_arr))
missing_mat = used_mat - set(self._materials.keys()) - {0}
for mat_id in sorted(missing_mat):
findings.append(
f"material id {mat_id} used by mesh but no properties registered "
f"(call model.assign(..., {{'EX': ..., 'PRXY': ..., 'DENS': ...}}))"
)
# 4. Material property completeness. Every registered
# material that the mesh references should have at least
# EX + PRXY (DENS only matters for modal / transient).
for mat_id, props in self._materials.items():
if "EX" not in props:
findings.append(f"material {mat_id} missing EX (Young's modulus)")
if "PRXY" not in props:
findings.append(f"material {mat_id} missing PRXY (Poisson's ratio)")
# 5. Real-constant registry. Only flag when an element type
# actually NEEDS a real constant (BEAM2, LINK180, SPRING,
# POINT_MASS — cross-section / spring-rate / mass data).
# HEX8 / TET10 / WEDGE15 / etc. always have a per-cell
# real-id field (1 by default) but never use it.
real_arr = self.grid.cell_data.get("ansys_real_constant")
if real_arr is not None and len(real_arr) and et_arr is not None:
real_required_etypes = {"BEAM2", "BEAM188", "LINK180", "TRUSS2", "SPRING", "POINT_MASS"}
for cell_idx in range(len(real_arr)):
et_id = int(et_arr[cell_idx])
et_name = self._etypes.get(et_id, "")
if et_name not in real_required_etypes:
continue
real_id = int(real_arr[cell_idx])
if real_id == 0 or real_id in self._real_constants:
continue
findings.append(
f"real-constant id {real_id} required by element type {et_name} "
f"(cell {cell_idx}) but not registered "
f"(call model.r({real_id}, ...) or APDL r())"
)
# First failure per element type is enough — don't spam.
break
if strict and findings:
from femorph_solver.exceptions import ValidationError # noqa: PLC0415
raise ValidationError(findings)
return findings
[docs]
def save(self, path: str | Path) -> Path:
"""Serialise this Model to a single ``.pv`` file (TA-15 / #265).
The mesh, cell- / point-data, materials, real constants,
boundary conditions, force loads, and unit-system stamp all
round-trip through :meth:`Model.load`. Solver-internal state
is stamped into ``field_data`` under keys prefixed with
``__solver_key_``; a vanilla pyvista reader can recognise
those as opaque and skip them.
Parameters
----------
path : str or pathlib.Path
Destination file (``.pv`` extension conventional).
Returns
-------
pathlib.Path
Canonicalised absolute path of the written file.
Examples
--------
Save the bundled rotor sector to a temp file and read it back.
>>> import tempfile
>>> from pathlib import Path
>>> import femorph_solver as fs
>>> model = fs.Model.from_pv(fs.examples.cyclic_bladed_rotor_sector_path())
>>> with tempfile.TemporaryDirectory() as tmp:
... out = model.save(Path(tmp) / "rotor.pv")
... restored = fs.Model.load(out)
... restored.n_nodes == model.n_nodes
True
"""
from femorph_solver._persistence import save_model # noqa: PLC0415
return save_model(self, path)
[docs]
@classmethod
def load(cls, path: str | Path) -> Model:
"""Inverse of :meth:`save` — read a ``.pv`` back into a Model.
Parameters
----------
path : str or pathlib.Path
Path to a file previously written by :meth:`save`.
Returns
-------
Model
Fresh Model with the saved geometry, etypes, materials,
real constants, BCs, loads, and unit system applied.
Raises
------
FileNotFoundError
If the file does not exist.
ValueError
If the file is missing the solver-internal state blob
(i.e. it was not written by :meth:`save`).
"""
from femorph_solver._persistence import load_model # noqa: PLC0415
return load_model(path)
[docs]
@classmethod
def from_pv(cls, path: str | Path) -> Model:
"""Load a single-file ``.pv`` Model — alias of :meth:`load`.
Mirrors the ``from_<format>`` family (:meth:`from_grid`,
:meth:`from_lines`, :meth:`from_chain`, :meth:`from_points`)
for callers who prefer the ``from_pv`` spelling for the native
single-file format :meth:`save` writes.
Parameters
----------
path : str or pathlib.Path
Path to a ``.pv`` snapshot previously written by
:meth:`save`.
Returns
-------
Model
Fresh Model with the saved geometry, etypes, materials,
real constants, BCs, loads, and unit system applied.
"""
return cls.load(path)
[docs]
@classmethod
def from_cdb(cls, path: str | Path, **kwargs: Any) -> Model:
"""Load a MAPDL CDB deck as a :class:`Model`.
Thin factory delegating to
:func:`femorph_solver.interop.mapdl.from_cdb`. Lifts the
loader to the same ``Model.from_<format>`` family as the
native and pyvista loaders so users don't have to remember
which import path each foreign-deck reader lives under, and
so ``Model.from_cdb`` mirrors :meth:`CyclicModel.from_cdb`.
Parameters
----------
path : str or pathlib.Path
Path to the ``.cdb`` archive.
**kwargs
Forwarded to
:func:`femorph_solver.interop.mapdl.from_cdb`.
Returns
-------
Model
Fresh Model with the deck's geometry, etypes, materials,
real constants, BCs, and loads applied.
See Also
--------
femorph_solver.interop.mapdl.from_cdb : the underlying
loader, including the full kwarg surface.
CyclicModel.from_cdb : same loader but wrapped as a
cyclic-symmetry Model.
"""
from femorph_solver.interop.mapdl import from_cdb as _from_cdb # noqa: PLC0415
return _from_cdb(str(path), **kwargs)
[docs]
@classmethod
def from_grid(cls, grid: pv.UnstructuredGrid) -> Model:
"""Wrap an existing ``pyvista.UnstructuredGrid`` as a Model.
The grid is expected to carry the structured cell/point data arrays
(``ansys_node_num``, ``ansys_elem_num``, ``ansys_elem_type_num``,
``ansys_material_type``, ``ansys_real_constant``). Missing arrays are
auto-filled with sequential 1-based ids so meshes built directly in
pyvista (e.g. a ``StructuredGrid`` cast to unstructured) are usable
without the caller stamping every field themselves.
The grid becomes the Model's sole source of truth for geometry
and connectivity — the staging dicts stay empty for the lifetime
of the Model unless the caller later mutates via :meth:`n` /
:meth:`e`. Two incidental passes happen here:
* ``cell_connectivity`` / ``offset`` are coerced to int32 if they
came in as int64 (VTK's default on 64-bit builds) — halves the
cell-structure footprint and removes a copy-cast the hot path
would otherwise do on every assembly call.
* Points and cells are reordered so ``ansys_node_num`` and
``ansys_elem_num`` are strictly increasing. Every downstream
hot path assumes node-number-sorted order to produce a deterministic
DOF layout; canonicalising here keeps the invariant local to
Model construction rather than leaking into every consumer.
Parameters
----------
grid : pyvista.UnstructuredGrid
Mesh to wrap. Mutated in place to add the auto-filled
metadata arrays and to canonicalise the node / element
ordering — pass a copy if the original grid must remain
untouched.
Returns
-------
Model
Native :class:`Model` carrying ``grid`` as its mesh.
"""
m = cls()
n_points = int(grid.n_points)
n_cells = int(grid.n_cells)
node_nums_arr = grid.point_data.get("ansys_node_num")
if node_nums_arr is None:
node_nums = np.arange(1, n_points + 1, dtype=np.int32)
grid.point_data["ansys_node_num"] = node_nums
else:
node_nums = np.asarray(node_nums_arr, dtype=np.int32)
if node_nums.dtype != np.int32:
grid.point_data["ansys_node_num"] = node_nums
if grid.cell_data.get("ansys_elem_num") is None:
grid.cell_data["ansys_elem_num"] = np.arange(1, n_cells + 1, dtype=np.int32)
if grid.cell_data.get("ansys_elem_type_num") is None:
grid.cell_data["ansys_elem_type_num"] = np.ones(n_cells, dtype=np.int32)
if grid.cell_data.get("ansys_material_type") is None:
grid.cell_data["ansys_material_type"] = np.ones(n_cells, dtype=np.int32)
if grid.cell_data.get("ansys_real_constant") is None:
grid.cell_data["ansys_real_constant"] = np.ones(n_cells, dtype=np.int32)
# Canonicalise + int32: ensures every hot path reads the grid
# in node-number-sorted order with zero-copy numpy views.
grid = _canonicalize_grid(grid)
_force_int32_cells(grid)
m._grid = grid
m._dirty = False
return m
[docs]
@classmethod
def from_lines(
cls,
nodes: np.ndarray,
elements: np.ndarray,
) -> Model:
"""Build a Model from explicit nodes + 2-node line connectivity.
Produces an :class:`pyvista.UnstructuredGrid` with VTK_LINE cells
— the canonical cell type for TRUSS2 / BEAM2 / SPRING. After
construction, attach an element kernel via ``model.assign(...)``.
Parameters
----------
nodes : numpy.ndarray
``(n_points, 3)`` float64 nodal coordinates.
elements : numpy.ndarray
``(n_elements, 2)`` int connectivity — each row is
``(node_i, node_j)`` indexing into ``nodes`` (0-based).
Returns
-------
Model
Native Model with ``ansys_node_num`` and ``ansys_elem_num``
stamped 1-based, ready for :meth:`assign` / :meth:`fix` /
:meth:`apply_force`.
Examples
--------
Two-link axial truss.
>>> import numpy as np
>>> import femorph_solver as fs
>>> from femorph_solver import ELEMENTS
>>> model = fs.Model.from_lines(
... nodes=np.array([[0, 0, 0], [1, 0, 0], [2, 0, 0]], dtype=float),
... elements=np.array([[0, 1], [1, 2]], dtype=int),
... )
>>> model.assign(ELEMENTS.TRUSS2, {"EX": 2.0e11}, real=(1e-4,))
>>> model.n_elements
2
"""
nodes_arr = np.ascontiguousarray(np.asarray(nodes, dtype=np.float64))
elements_arr = np.ascontiguousarray(np.asarray(elements, dtype=np.int64))
if nodes_arr.ndim != 2 or nodes_arr.shape[1] != 3:
raise ValueError(f"nodes must have shape (n_points, 3), got {nodes_arr.shape}")
if elements_arr.ndim != 2 or elements_arr.shape[1] != 2:
raise ValueError(f"elements must have shape (n_elements, 2), got {elements_arr.shape}")
if elements_arr.size and (
elements_arr.min() < 0 or elements_arr.max() >= nodes_arr.shape[0]
):
raise ValueError(
f"element connectivity references node index out of range [0, {nodes_arr.shape[0]})"
)
n_elems = elements_arr.shape[0]
# VTK cell-array layout: per cell ``[n_nodes, node0, node1, ...]``.
cells = np.empty((n_elems, 3), dtype=np.int64)
cells[:, 0] = 2
cells[:, 1:] = elements_arr
ctypes = np.full(n_elems, 3, dtype=np.uint8) # VTK_LINE
grid = pv.UnstructuredGrid(cells.ravel(), ctypes, nodes_arr)
return cls.from_grid(grid)
[docs]
@classmethod
def from_chain(cls, points: np.ndarray) -> Model:
"""Build a 1-D chain Model — sequential 2-node line cells.
Convenience wrapper around :meth:`from_lines` for the common
case of N points connected sequentially: ``(0,1), (1,2), …,
(N-2, N-1)``.
Parameters
----------
points : numpy.ndarray
``(n_points, 3)`` nodal coordinates along the chain.
Returns
-------
Model
Model with ``n_points - 1`` line cells.
"""
pts = np.ascontiguousarray(np.asarray(points, dtype=np.float64))
if pts.ndim != 2 or pts.shape[1] != 3:
raise ValueError(f"points must have shape (n_points, 3), got {pts.shape}")
n = pts.shape[0]
if n < 2:
raise ValueError(f"chain needs at least 2 points, got {n}")
elements = np.column_stack(
[np.arange(n - 1, dtype=np.int64), np.arange(1, n, dtype=np.int64)]
)
return cls.from_lines(pts, elements)
[docs]
@classmethod
def from_points(cls, nodes: np.ndarray) -> Model:
"""Build a Model from explicit nodes as 0-D VTK_VERTEX cells.
Each row of ``nodes`` becomes one single-node cell — the
canonical layout for POINT_MASS (POINT_MASS). Node ids and element
ids are stamped 1-based by :meth:`from_grid`.
Parameters
----------
nodes : numpy.ndarray
``(n_points, 3)`` nodal coordinates.
Returns
-------
Model
Model with ``n_points`` vertex cells.
"""
pts = np.ascontiguousarray(np.asarray(nodes, dtype=np.float64))
if pts.ndim != 2 or pts.shape[1] != 3:
raise ValueError(f"nodes must have shape (n_points, 3), got {pts.shape}")
n = pts.shape[0]
if n == 0:
raise ValueError("from_points needs at least 1 node")
cells = np.empty((n, 2), dtype=np.int64)
cells[:, 0] = 1
cells[:, 1] = np.arange(n, dtype=np.int64)
ctypes = np.full(n, 1, dtype=np.uint8) # VTK_VERTEX
grid = pv.UnstructuredGrid(cells.ravel(), ctypes, pts)
return cls.from_grid(grid)
# ---------------------------------------------------------------------
# Preprocessor verbs — APDL-dialect commands; names mirror the legacy APDL set for users porting a deck.
# ---------------------------------------------------------------------
def _invalidate_topology_caches(self) -> None:
"""Invalidate every derived state that depends on mesh connectivity."""
self._layout_cache = None
self._node_dof_labels_cache = None
self._csr_pattern_cache = None
self._K_cache = None
self._M_cache = None
self._M_lumped_cache = None
def _reset_preprocessor_state(self) -> None:
"""Reset model to a freshly-constructed state.
Used by the dat reader when a deck contains ``/CLEAR`` mid-file
(multi-analysis decks like ``vm81.dat`` re-prep a model after a
first-pass solve). Drops the grid, every staged dict, every
registered ET / material / real, and invalidates derived caches.
"""
self._grid = pv.UnstructuredGrid()
self._etypes.clear()
self._materials.clear()
self._real_constants.clear()
self._current_type = 1
self._current_mat = 1
self._current_real = 1
self._staged_nodes.clear()
self._staged_elements.clear()
self._dirty = False
self._invalidate_topology_caches()
def _invalidate_matrix_cache(self) -> None:
"""Invalidate only the assembled K / M caches — materials/real changed
but connectivity is unchanged so the DOF layout / CSR pattern stay valid.
"""
self._K_cache = None
self._M_cache = None
self._M_lumped_cache = None
[docs]
def release_matrix_cache(self) -> None:
"""Drop the cached assembled K / M matrices.
Each cached matrix is roughly ``nnz × 12 bytes`` — on a
1.2 M-DOF plate with ~100 M nnz that's ~1.2 GB each, so
releasing both saves ~2.4 GB. Call after a ``modal_solve`` /
``static_solve`` finishes and the caller is done with further
analyses on this Model. Subsequent ``stiffness_matrix()`` or
``mass_matrix()`` calls will re-assemble.
"""
self._K_cache = None
self._M_cache = None
self._M_lumped_cache = None
# ---------------------------------------------------------------------
# Private APDL-verb implementations.
# ---------------------------------------------------------------------
# The APDL-dialect preprocessor verbs (``N`` / ``E`` / ``ET`` / ``TYPE`` /
# ``MAT`` / ``REAL`` / ``MP`` / ``R`` / ``D`` / ``F``) used to live on the
# ``Model`` public surface. As of TA-10c-3 they exist only as private
# ``_<verb>_impl`` helpers; :class:`femorph_solver.interop.mapdl.APDL`
# delegates to them. User-facing code on :class:`Model` uses
# :meth:`assign`, :meth:`fix`, and :meth:`apply_force` directly.
def _n_impl(self, num: int, x: float = 0.0, y: float = 0.0, z: float = 0.0) -> None:
self._staged_nodes[int(num)] = (float(x), float(y), float(z))
self._dirty = True
self._invalidate_topology_caches()
def _e_impl(self, *node_nums: int, num: int | None = None) -> int:
if num is not None:
elem_num = int(num)
else:
existing_max = 0
if self._grid.n_cells > 0:
existing_max = int(self._grid.cell_data["ansys_elem_num"].max())
staged_max = max(self._staged_elements, default=0)
elem_num = max(existing_max, staged_max) + 1
self._staged_elements[elem_num] = (
[int(n) for n in node_nums],
self._current_type,
self._current_mat,
self._current_real,
)
self._dirty = True
self._invalidate_topology_caches()
return elem_num
def _et_impl(self, et_id: int, name: str | ElementType) -> None:
name_str = _token(name).upper()
with contextlib.suppress(KeyError):
name_str = ElementType[name_str].value
# When ET, N is *redefined* (vs. first definition) and the new
# kernel takes a different node count than the existing TYPE-N
# elements, drop those stale elements. Multi-prep decks
# without /CLEAR (vm38: PLANE182 4-node first pass, SOLID185
# 8-node second pass on TYPE 1) rely on the deck author's
# intent that the second pass is a fresh model — without this
# purge, the leftover 4-node element fails the new HEX8
# kernel's node-count check at grid materialisation.
prior_name = self._etypes.get(int(et_id))
if prior_name is not None and prior_name != name_str:
self._purge_elements_with_mismatched_etype(int(et_id), name_str)
# Try to validate the kernel exists; if not (e.g. ``PLANE77``
# thermal, ``CONTA178`` contact, ``COMBIN40`` — element-type
# families we don't yet ship a kernel for), still record the
# name in the etype table so subsequent ``E`` / ``EBLOCK``
# calls have a target. The kernel-mismatch error then
# surfaces at grid-materialisation time (with a useful
# "kernel not registered for ELEMENT_TYPE" message) rather
# than at ``ET`` parse time, which lets a deck reader walk
# past unsupported types instead of crashing the parse.
try:
_get_element(name_str)
except KeyError:
self._etypes[int(et_id)] = name_str
self._invalidate_topology_caches()
return
self._etypes[int(et_id)] = name_str
self._invalidate_topology_caches()
def _purge_elements_with_mismatched_etype(self, et_id: int, new_name: str) -> None:
"""Drop elements whose connectivity no longer fits a redefined ET.
Helper for ``_et_impl`` — see its docstring for the deck-author
intent this honours.
"""
from femorph_solver.elements._nonstructural_stubs import ( # noqa: PLC0415
NONSTRUCTURAL_TOPOLOGY,
)
try:
new_cls = _get_element(new_name)
new_n_nodes_set: set[int] = {new_cls.n_nodes}
except KeyError:
stub_table = NONSTRUCTURAL_TOPOLOGY.get(new_name, {})
new_n_nodes_set = set(stub_table.keys())
if not new_n_nodes_set:
# Unknown element type: don't purge, let the rest of the
# parser handle it.
return
stale_staged = [
en
for en, (nns, eid, _, _) in self._staged_elements.items()
if eid == et_id and len(nns) not in new_n_nodes_set
]
for en in stale_staged:
del self._staged_elements[en]
if self._grid.n_cells > 0:
et_ids_arr = np.asarray(self._grid.cell_data["ansys_elem_type_num"])
mask_type = et_ids_arr == et_id
if mask_type.any():
offsets = np.asarray(self._grid.offset, dtype=np.int32)
lens = offsets[1:] - offsets[:-1]
stale_mask = mask_type & ~np.isin(lens, sorted(new_n_nodes_set))
if stale_mask.any():
keep = ~stale_mask
self._grid = self._grid.extract_cells(np.where(keep)[0])
_force_int32_cells(self._grid)
def _type_impl(self, et_id: int) -> None:
self._current_type = int(et_id)
def _mat_impl(self, mat_id: int) -> None:
self._current_mat = int(mat_id)
def _real_impl(self, real_id: int) -> None:
self._current_real = int(real_id)
def _mp_impl(self, prop: str | MaterialProperty, mat_id: int, value: float) -> None:
prop_str = _token(prop).upper()
if not prop_str.startswith("_"):
try:
MaterialProperty(prop_str)
except ValueError as exc:
allowed = ", ".join(m.value for m in MaterialProperty)
raise ValueError(
f"unknown material property {prop!r}; expected one of: {allowed}"
) from exc
self._materials.setdefault(int(mat_id), {})[prop_str] = float(value)
self._invalidate_matrix_cache()
def _r_impl(self, real_id: int, *values: float) -> None:
self._real_constants[int(real_id)] = np.asarray(values, dtype=np.float64)
self._invalidate_matrix_cache()
# ---------------------------------------------------------------------
# Loads / boundary conditions
# ---------------------------------------------------------------------
_DOF_LABELS: dict[str, int] = {
"UX": 0,
"UY": 1,
"UZ": 2,
"ROTX": 3,
"ROTY": 4,
"ROTZ": 5,
}
_FORCE_LABELS: dict[str, int] = {
"FX": 0,
"FY": 1,
"FZ": 2,
"MX": 3,
"MY": 4,
"MZ": 5,
}
def _d_impl(self, node: int, label: str | DOFLabel, value: float = 0.0) -> None:
lbl = _token(label).upper()
if lbl == "TEMP":
# Thermal pillar — store on the parallel temperature-BC dict.
self._temp_bc[int(node)] = float(value)
return
if lbl == "ALL":
labels = self._node_dof_labels().get(int(node), _CANONICAL_DOFS[:3])
for active in labels:
if active == "TEMP":
self._temp_bc[int(node)] = float(value)
else:
self._disp_bc[(int(node), self._DOF_LABELS[active])] = float(value)
return
if lbl not in self._DOF_LABELS:
raise ValueError(f"unknown displacement DOF label {label!r}")
self._disp_bc[(int(node), self._DOF_LABELS[lbl])] = float(value)
def _ddele_impl(self, node: int, label: str | DOFLabel) -> None:
"""MAPDL ``DDELE`` — delete a previously-applied displacement BC.
``label="ALL"`` removes every displacement BC at the node;
any other label removes that single (node, DOF) entry.
Removing a BC that was never set is a no-op (matches MAPDL's
silent behaviour).
"""
lbl = _token(label).upper()
node_int = int(node)
if lbl == "ALL":
for key in list(self._disp_bc):
if key[0] == node_int:
del self._disp_bc[key]
return
if lbl not in self._DOF_LABELS:
raise ValueError(f"unknown displacement DOF label {label!r}")
self._disp_bc.pop((node_int, self._DOF_LABELS[lbl]), None)
def _f_impl(
self,
node: int,
label: str | ForceLabel,
value: float,
*,
accumulate: bool = False,
) -> None:
lbl = _token(label).upper()
if lbl == "HEAT":
# Thermal pillar — heat-flow nodal load.
existing = self._heat_bc.get(int(node), 0.0) if accumulate else 0.0
self._heat_bc[int(node)] = existing + float(value)
return
if lbl not in self._FORCE_LABELS:
raise ValueError(f"unknown force DOF label {label!r}")
key = (int(node), self._FORCE_LABELS[lbl])
if accumulate:
self._force_bc[key] = self._force_bc.get(key, 0.0) + float(value)
else:
self._force_bc[key] = float(value)
def _tref_impl(self, t_ref: float) -> None:
"""MAPDL ``TREF`` — set the stress-free reference temperature."""
self._tref = float(t_ref)
def _bfunif_impl(self, label: str, value: float) -> None:
"""MAPDL ``BFUNIF`` — uniform body force (currently only ``TEMP``)."""
upper = label.strip().upper()
if upper != "TEMP":
raise NotImplementedError(
f"BFUNIF: only label='TEMP' is supported in Phase 1, got {label!r}"
)
self._uniform_temp = float(value)
def _bf_impl(self, node: int, label: str, value: float) -> None:
"""MAPDL ``BF`` — per-node body force (currently only ``TEMP``)."""
upper = label.strip().upper()
if upper != "TEMP":
raise NotImplementedError(
f"BF: only label='TEMP' is supported in Phase 1, got {label!r}"
)
self._node_temps[int(node)] = float(value)
def _node_delta_t(self, node: int) -> float:
"""Return ``T(node) − TREF`` for the thermal-pre-strain assembly.
Per-node ``BF,n,TEMP,t`` overrides the ``BFUNIF,TEMP,t`` global,
which itself overrides the default ``TREF`` (giving ΔT = 0).
"""
node = int(node)
if node in self._node_temps:
return self._node_temps[node] - self._tref
if self._uniform_temp is not None:
return self._uniform_temp - self._tref
return 0.0
def _has_thermal_loading(self) -> bool:
if self._node_temps:
return any(v - self._tref != 0.0 for v in self._node_temps.values())
if self._uniform_temp is not None:
return self._uniform_temp != self._tref
return False
def _cp_impl(
self,
set_id: int,
label: str,
nodes: list[int],
) -> None:
"""MAPDL ``CP`` — couple a set of DOFs to share a single value.
``set_id`` keys the coupling set; ``label`` is the DOF
component (``"UX"``, ``"UY"``, …, ``"ROTZ"``); the **first**
node in ``nodes`` is the master, the rest are slaves. At
solve time the slaves are eliminated via the canonical master-
slave transformation ``K_eff = Tᵀ K T`` (see
:mod:`femorph_solver.solvers.static`).
Calling ``_cp_impl`` again on the same ``set_id`` replaces the
prior set wholesale — matches MAPDL semantics for a re-issued
``CP``.
"""
nodes_list = [int(n) for n in nodes]
if len(nodes_list) < 2:
raise ValueError(
f"CP set {set_id}: needs at least two nodes (master + ≥1 slave), got {nodes_list!r}"
)
upper = label.strip().upper()
if upper not in self._DOF_LABELS:
raise ValueError(f"CP: unknown DOF label {label!r}")
self._cp_sets[int(set_id)] = [(n, upper) for n in nodes_list]
def _coupling_pairs(self, layout: dict[int, tuple[int, list[str]]]) -> list[tuple[int, int]]:
"""Resolve every active coupling set to ``(slave, master)`` global-DOF pairs."""
out: list[tuple[int, int]] = []
for entries in self._cp_sets.values():
if not entries:
continue
master_node, master_lbl = entries[0]
master_dof = self._DOF_LABELS[master_lbl]
master_idx = self._global_index(master_node, master_dof, layout)
for node, lbl in entries[1:]:
slave_dof = self._DOF_LABELS[lbl]
slave_idx = self._global_index(node, slave_dof, layout)
if slave_idx == master_idx:
continue
out.append((slave_idx, master_idx))
return out
def _sfbeam_impl(
self,
elem: int,
face: int,
label: str,
value: float,
*,
accumulate: bool = False,
) -> None:
"""MAPDL ``SFBEAM`` — distributed surface load on a beam element.
Phase 1 supports ``label="PRES"`` only — uniform pressure / line
load distributed along the element axis, applied perpendicular
to the element's local face per the BEAM188 face conventions:
* face 1 → +Y direction
* face 2 → +Z direction
* face 3 → -Y direction
* face 4 → -Z direction
* faces 5 / 6 (axial end cross-sections) — not yet supported.
``accumulate=False`` (the default) replaces any prior load on
the same ``(elem, face, label)`` triple; ``accumulate=True`` adds.
Stored without resolving to nodal forces — the consistent-load
scatter happens at :meth:`_load_vector` time so it sees a
possibly-updated K / M shape.
"""
if label.upper() != "PRES":
raise NotImplementedError(
f"SFBEAM: only label='PRES' is supported in Phase 1, got {label!r}"
)
if face not in (1, 2, 3, 4):
raise NotImplementedError(
f"SFBEAM: only transverse faces 1..4 are supported in Phase 1, got {face!r}"
)
entry = (int(face), label.upper(), float(value))
slot = self._sfbeam_loads.setdefault(int(elem), [])
if accumulate:
slot.append(entry)
else:
# Replace any existing entry on the same (face, label).
slot[:] = [t for t in slot if (t[0], t[1]) != (entry[0], entry[1])]
slot.append(entry)
# ---------------------------------------------------------------------
# Native Python-first API
# ---------------------------------------------------------------------
# :meth:`assign`, :meth:`fix`, and :meth:`apply_force` are the
# primary Model user surface — keyword-driven, predicate-friendly,
# vectorised. Callers porting an APDL deck line-by-line reach for
# :class:`femorph_solver.interop.mapdl.APDL` instead; it wraps the
# private ``_<verb>_impl`` helpers above, and its state writes land
# on the same stores (``_etypes`` / ``_materials`` / ``_disp_bc`` /
# ``_force_bc``) as the native methods, so the two styles mix freely.
[docs]
def assign(
self,
element,
material: dict[str, float] | None = None,
*,
et_id: int = 1,
mat_id: int = 1,
real_id: int | None = None,
real: np.ndarray | tuple[float, ...] | None = None,
) -> None:
"""Declare an element kernel and a material in one call.
Parameters
----------
element :
An :class:`~femorph_solver.ELEMENTS` spec **class**
(default-configured shorthand, e.g. ``ELEMENTS.HEX8``)
or **instance** with named, typed formulation knobs
(``ELEMENTS.HEX8(integration="enhanced_strain")``).
Strings and :class:`~femorph_solver.ElementType`
members are not accepted; reach for
:class:`~femorph_solver.interop.mapdl.APDL` if you
need the string-form preprocessor verbs at the
APDL-deck interop boundary.
material :
Mapping of material properties (``{"EX": ..., "PRXY": ...,
"DENS": ...}``). Values are stamped under ``mat_id``
(default ``1``). Accepts the output of
:func:`femorph_solver.materials.fetch` directly.
et_id, mat_id, real_id :
Numeric ids under which to register the kernel / material /
real constants. Only relevant when a model mixes multiple
element types or materials; most single-physics demos use
the defaults.
real :
Real-constant values (array-like of floats) to stamp under
``real_id``. ``None`` skips the real-constant declaration.
"""
from femorph_solver.elements._specs import is_element_spec # noqa: PLC0415
if not is_element_spec(element):
raise TypeError(
f"Model.assign: `element` must be an ELEMENTS spec class or "
f"instance (e.g. `ELEMENTS.HEX8` or "
f'`ELEMENTS.HEX8(integration="enhanced_strain")`), got '
f"{element!r} of type {type(element).__name__}. Use "
f"`APDL(model).et(...)` from `femorph_solver.interop.mapdl` "
f"if you need string-form preprocessor verbs for APDL-deck porting."
)
spec = element() if isinstance(element, type) else element
self._et_impl(et_id, spec._kernel_name)
if material is not None:
for prop, value in material.items():
# ``_HEX8_INTEGRATION`` etc. are string-valued kernel
# flags — the Model already special-cases them in
# `materials`, so pass them through the dict rather
# than through ``_mp_impl``.
if not isinstance(value, (int, float)):
self.materials.setdefault(mat_id, {})[prop] = value
continue
self._mp_impl(prop, mat_id, float(value))
flags = spec._material_flags()
if flags:
self.materials.setdefault(mat_id, {}).update(flags)
if real is not None:
if real_id is None:
real_id = 1
self._r_impl(real_id, *real)
[docs]
def assign_all(
self,
material: dict[str, float],
*,
mat_id: int = 1,
real_id: int | None = None,
real: np.ndarray | tuple[float, ...] | None = None,
) -> None:
"""Apply ``material`` to every kernel already registered on the model.
Convenience wrapper around :meth:`assign` for foreign-deck flows
where :func:`from_cdb` / :func:`from_dat` has registered N
element types up front and the user wants the same material on
all of them. Walks ``self.etypes`` and updates the material
table without re-registering each ET kernel (which would be
a destructive overwrite under :meth:`assign`'s ``_et_impl``
contract). Cells using kernels not registered in the model
get nothing - no try / except wrapping needed.
Parameters
----------
material :
Mapping of property name to value, same form
:meth:`assign` accepts.
mat_id, real_id, real :
Same meaning as :meth:`assign`.
Notes
-----
Only updates the material (and optionally the real-constant
set). The kernel registrations stay as-is - if the model was
loaded from a CDB that declared HEX20 + TET10 + HEX8 + QUAD4
but only TET10 cells exist, all four ETs keep their kernel
names and any of them can subsequently host elements with the
same material.
"""
for prop, value in material.items():
if not isinstance(value, (int, float)):
self.materials.setdefault(mat_id, {})[prop] = value
continue
self._mp_impl(prop, mat_id, float(value))
if real is not None:
if real_id is None:
real_id = 1
self._r_impl(real_id, *real)
# @overload signatures — let mypy / IDE narrow the call form so the
# caller gets the right hint for either the ``nodes=`` or the
# ``where=`` mode without seeing the union-typed catch-all.
@overload
def fix(
self,
nodes: np.ndarray | list[int] | tuple[int, ...] | int,
*,
where: None = None,
component: None = None,
dof: str | DOFLabel | list[str | DOFLabel] | tuple[str | DOFLabel, ...] = ...,
value: float = ...,
) -> None: ...
@overload
def fix(
self,
nodes: None = None,
*,
where: np.ndarray,
component: None = None,
dof: str | DOFLabel | list[str | DOFLabel] | tuple[str | DOFLabel, ...] = ...,
value: float = ...,
) -> None: ...
@overload
def fix(
self,
nodes: None = None,
*,
where: None = None,
component: str | Sequence[str],
dof: str | DOFLabel | list[str | DOFLabel] | tuple[str | DOFLabel, ...] = ...,
value: float = ...,
) -> None: ...
[docs]
def fix(
self,
nodes: np.ndarray | list[int] | tuple[int, ...] | int | None = None,
*,
where: np.ndarray | None = None,
component: str | Sequence[str] | None = None,
dof: str | DOFLabel | list[str | DOFLabel] | tuple[str | DOFLabel, ...] = "ALL",
value: float = 0.0,
) -> None:
"""Pin nodal DOFs to a prescribed value (Dirichlet boundary condition).
Parameters
----------
nodes :
Single node number, or an array / iterable of node
numbers. Mutually exclusive with ``where`` / ``component``.
where :
Boolean mask of length ``Model.n_nodes`` selecting grid
points to pin. Indexed by VTK point order — the same
order used by ``grid.points``. Mutually exclusive with
``nodes`` / ``component``.
component :
Name (or sequence of names) of a CDB-side named node
component preserved on the Model by :func:`from_cdb`.
The union of every named component's node ids is the pin
set. Raises :class:`KeyError` if any name isn't in the
model's component table. Mutually exclusive with
``nodes`` / ``where``.
dof : str, :class:`~femorph_solver.DOFLabel`, or sequence thereof,
default ``"ALL"``
DOF (or DOFs) to pin. ``"ALL"`` clamps every DOF active at
the selected node(s); a sequence clamps each listed DOF in
turn.
value : float, default ``0.0``
Value to prescribe (the right-hand side in ``K u = F``).
Zero is the lossless case for every analysis type; non-zero
values only round-trip cleanly through :func:`solve_static`.
Examples
--------
Clamp everything at the ``x = 0`` face of a bar::
model.fix(where=grid.points[:, 0] < 1e-9)
Pin a single node's UZ::
model.fix(nodes=42, dof="UZ")
Pin multiple DOFs at once::
model.fix(where=base_mask, dof=["UX", "UY", "UZ"])
Pin every node in a CDB-loaded named component::
model.fix(component="_FIXEDSU", dof="ALL")
model.fix(component=("_FIXEDSU", "FIXED_COMP"), dof="ALL")
"""
n_specified = sum(1 for x in (nodes, where, component) if x is not None)
if n_specified != 1:
raise ValueError("fix: pass exactly one of `nodes=`, `where=`, or `component=`.")
if component is not None:
if isinstance(component, str):
names = (component,)
else:
names = tuple(component)
missing = [n for n in names if n not in self._node_components]
if missing:
available = sorted(self._node_components)
raise KeyError(
f"fix: node component(s) {missing!r} not registered on this Model. "
f"Available: {available}. ``from_cdb`` populates this table from "
"the CDB's node-component records; verify the CDB declares the "
"component you're asking for."
)
ids: list[int] = []
for n in names:
ids.extend(int(i) for i in self._node_components[n].tolist())
nodes_arr = np.asarray(sorted(set(ids)), dtype=np.int64)
elif where is not None:
where = np.asarray(where, dtype=bool)
if where.shape != (self.grid.n_points,):
raise ValueError(
f"fix: `where` must have shape ({self.grid.n_points},), got {where.shape}"
)
node_nums = np.asarray(self.grid.point_data["ansys_node_num"], dtype=np.int64)
nodes_arr = node_nums[where]
elif np.ndim(nodes) == 0:
nodes_arr = np.asarray([int(nodes)], dtype=np.int64)
else:
nodes_arr = np.asarray(nodes, dtype=np.int64)
if isinstance(dof, (list, tuple)):
dofs = list(dof)
else:
dofs = [dof]
for nn in nodes_arr:
for d in dofs:
self._d_impl(int(nn), d, value)
# @overload signatures — narrow the call form so callers get the
# right hint for either the ``node=`` or the ``where=`` mode.
@overload
def apply_force(
self,
node: int | np.ndarray | list[int] | tuple[int, ...],
*,
where: None = None,
fx: float = ...,
fy: float = ...,
fz: float = ...,
mx: float = ...,
my: float = ...,
mz: float = ...,
total: bool = ...,
accumulate: bool = ...,
) -> None: ...
@overload
def apply_force(
self,
node: None = None,
*,
where: np.ndarray,
fx: float = ...,
fy: float = ...,
fz: float = ...,
mx: float = ...,
my: float = ...,
mz: float = ...,
total: bool = ...,
accumulate: bool = ...,
) -> None: ...
[docs]
def apply_force(
self,
node: int | np.ndarray | list[int] | tuple[int, ...] | None = None,
*,
where: np.ndarray | None = None,
fx: float = 0.0,
fy: float = 0.0,
fz: float = 0.0,
mx: float = 0.0,
my: float = 0.0,
mz: float = 0.0,
total: bool = False,
accumulate: bool = False,
) -> None:
"""Apply a keyword-driven nodal force / moment vector.
Sparse by construction — zero-valued components are skipped.
Any of the six :class:`~femorph_solver.ForceLabel` entries can
be supplied as keyword arguments; unused ones stay zero (and
unregistered on the Model).
Parameters
----------
node :
Node number, or an array / iterable of node numbers.
The same per-component value is applied to every selected
node. Mutually exclusive with ``where``.
where :
Boolean mask of length ``Model.n_nodes`` selecting grid
points to load — same VTK point order as :meth:`fix`'s
``where``. Mutually exclusive with ``node``.
fx, fy, fz :
Translational force components (units inherited from the
Model's :attr:`unit_system`).
mx, my, mz :
Moment components. Relevant for elements with rotational
DOFs (``BEAM2`` / ``QUAD4_SHELL`` / etc.).
total :
``False`` (default): the supplied component values are the
per-node load — every selected node gets ``fx``, ``fy``,
``fz``, ... in full.
``True``: the supplied values are interpreted as the
*total* load to distribute equally across the selection,
i.e. each node receives ``fx / N``, ``fy / N``, ... where
``N`` is the selection count. Convenient for "10 kN tip
load spread over the tip face" patterns.
accumulate :
``False`` (default): the supplied components **overwrite**
any previously-applied values at this ``(node, label)``
pair (mirrors the APDL ``F`` semantics + the typical
"build-and-replace" workflow).
``True``: the components **add** to the existing values —
the natural semantics for distributing a traction or
pressure across multiple element segments that share
nodes (multiple NASTRAN ``FORCE`` cards on the same GRID,
multiple Abaqus ``*CLOAD`` rows, segmented integration of
a traction-edge integral).
Examples
--------
Distribute a 10 kN downward tip load equally across the tip
face of the bundled bladed-rotor sector.
>>> import numpy as np
>>> import femorph_solver as fs
>>> model = fs.Model.from_pv(fs.examples.cyclic_bladed_rotor_sector_path())
>>> pts = np.asarray(model.grid.points)
>>> tip_mask = pts[:, 2] > pts[:, 2].max() - 1e-6
>>> model.apply_force(where=tip_mask, fz=-10_000.0, total=True)
>>> int(model.loads.n_applied) > 0
True
Pin a single node's downward force without the boolean mask::
model.apply_force(node=42, fz=-1.0)
"""
if (node is None) == (where is None):
raise ValueError("apply_force: pass exactly one of `node=` or `where=`.")
if where is not None:
where = np.asarray(where, dtype=bool)
if where.shape != (self.grid.n_points,):
raise ValueError(
f"apply_force: `where` must have shape ({self.grid.n_points},), "
f"got {where.shape}"
)
node_nums = np.asarray(self.grid.point_data["ansys_node_num"], dtype=np.int64)
nodes_arr = node_nums[where]
elif np.ndim(node) == 0:
nodes_arr = np.asarray([int(node)], dtype=np.int64)
else:
nodes_arr = np.asarray(node, dtype=np.int64)
n_sel = int(nodes_arr.size)
if n_sel == 0:
return
if total:
scale = 1.0 / n_sel
fx, fy, fz, mx, my, mz = (c * scale for c in (fx, fy, fz, mx, my, mz))
components = {"FX": fx, "FY": fy, "FZ": fz, "MX": mx, "MY": my, "MZ": mz}
for nn in nodes_arr:
for label, value in components.items():
if value != 0.0:
self._f_impl(int(nn), label, float(value), accumulate=accumulate)
# ---------------------------------------------------------------------
# Grid materialisation
# ---------------------------------------------------------------------
def _rebuild_grid(self) -> None:
"""Merge the existing grid with any staged ``N`` / ``E`` additions
and emit a fresh node-number-sorted grid with int32 cell structure.
The grid is the Model's sole permanent storage for geometry and
connectivity; the ``_staged_*`` dicts only hold the deltas since
the last rebuild and are cleared here. Merging via dicts (keyed
on node number) honours overwrite semantics — a later ``n()``
with an existing number replaces the grid's coordinate.
"""
# Pull existing grid rows back into dicts, then overlay staged
# additions (overwrite wins). Cheap because the grid is already
# numpy; zip into a {int: tuple} dict for the bulk merge path.
nodes: dict[int, tuple[float, float, float]] = {}
if self._grid.n_points > 0:
pts = np.asarray(self._grid.points, dtype=np.float64)
nums = np.asarray(self._grid.point_data["ansys_node_num"], dtype=np.int32)
for nn, pt in zip(nums, pts):
nodes[int(nn)] = (float(pt[0]), float(pt[1]), float(pt[2]))
nodes.update(self._staged_nodes)
elements: dict[int, tuple[list[int], int, int, int]] = {}
if self._grid.n_cells > 0:
elem_nums_existing = np.asarray(self._grid.cell_data["ansys_elem_num"], dtype=np.int32)
et_existing = np.asarray(self._grid.cell_data["ansys_elem_type_num"], dtype=np.int32)
mat_existing = np.asarray(self._grid.cell_data["ansys_material_type"], dtype=np.int32)
real_existing = np.asarray(self._grid.cell_data["ansys_real_constant"], dtype=np.int32)
pt_nums_existing = np.asarray(self._grid.point_data["ansys_node_num"], dtype=np.int32)
conn_flat = np.asarray(self._grid.cell_connectivity, dtype=np.int32)
offsets = np.asarray(self._grid.offset, dtype=np.int32)
for i, en in enumerate(elem_nums_existing):
vtk_ids = conn_flat[offsets[i] : offsets[i + 1]]
elements[int(en)] = (
[int(pt_nums_existing[j]) for j in vtk_ids],
int(et_existing[i]),
int(mat_existing[i]),
int(real_existing[i]),
)
elements.update(self._staged_elements)
if not nodes:
self._grid = pv.UnstructuredGrid()
self._staged_nodes.clear()
self._staged_elements.clear()
self._dirty = False
return
# Node numbers are 1-based and may be sparse; map to contiguous
# VTK point indices by sorted node number.
node_nums = np.fromiter(sorted(nodes), dtype=np.int32)
points = np.array([nodes[n] for n in node_nums], dtype=np.float64)
num_to_idx = {n: i for i, n in enumerate(node_nums)}
if elements:
elem_nums = np.fromiter(sorted(elements), dtype=np.int32)
cells_flat: list[int] = []
cell_types: list[int] = []
etype_nums = np.empty(len(elem_nums), dtype=np.int32)
mat_ids = np.empty(len(elem_nums), dtype=np.int32)
real_ids = np.empty(len(elem_nums), dtype=np.int32)
for i, en in enumerate(elem_nums):
nns, et_id, mat_id, real_id = elements[int(en)]
if et_id not in self._etypes:
raise KeyError(f"element {en} uses TYPE={et_id} but no ET is defined for it")
cls = self._kernel_for(et_id, len(nns))
cell_types.append(cls.vtk_cell_type)
cells_flat.append(len(nns))
for nn in nns:
if nn not in num_to_idx:
raise KeyError(f"element {en} references undefined node {nn}")
cells_flat.append(num_to_idx[nn])
etype_nums[i] = et_id
mat_ids[i] = mat_id
real_ids[i] = real_id
grid = pv.UnstructuredGrid(
np.array(cells_flat, dtype=np.int64),
np.array(cell_types, dtype=np.uint8),
points,
)
grid.cell_data["ansys_elem_num"] = elem_nums
grid.cell_data["ansys_elem_type_num"] = etype_nums
grid.cell_data["ansys_material_type"] = mat_ids
grid.cell_data["ansys_real_constant"] = real_ids
else:
grid = pv.UnstructuredGrid()
grid.points = points
grid.point_data["ansys_node_num"] = node_nums
_force_int32_cells(grid)
self._grid = grid
self._staged_nodes.clear()
self._staged_elements.clear()
self._dirty = False
# ---------------------------------------------------------------------
# Global DOF layout + sparse K/M assembly
# ---------------------------------------------------------------------
# Degenerate-topology aliases for kernels whose underlying foreign-deck ET id
# spans multiple shapes — e.g. ``HEX20`` covers HEX20 + WEDGE15 +
# PYR13. mapdl-archive (and VTK) splits such elements by cell type /
# node count; this table picks the right neutral kernel without forcing
# callers to spell the degenerate name.
_DEGENERATE_ALIASES: dict[str, dict[int, str]] = {
"HEX20": {20: "HEX20", 15: "WEDGE15", 13: "PYR13"},
# SHELL281 (8-node Q8 shell) is sometimes used in decks with
# collapsed connectivity (4 corner nodes only — i.e. behaves
# as a 4-node Q4 shell). Map the 4-node case to QUAD4_SHELL
# so VM6 / vm17 / vm26 / vm34 / vm54 (degenerate Q8) load.
"QUAD8_SHELL": {8: "QUAD8_SHELL", 4: "QUAD4_SHELL"},
}
def _kernel_for(self, et_id: int, n_nodes: int) -> type[ElementBase]:
"""Resolve the element class for a given ET id + actual node count.
A single foreign-deck ET (e.g. MAPDL 186 → HEX20) can correspond to multiple shapes once
corner nodes are collapsed; the connectivity length in the grid
disambiguates them.
For non-structural element families (thermal / contact / coupled-field
— see ``elements/_nonstructural_stubs.py``) we don't yet ship real
physics kernels. The ``NONSTRUCTURAL_TOPOLOGY`` table maps each
such name + observed ``n_nodes`` to a topology-only stub so the grid
materialises cleanly; the assembler then skips them when summing
K / M because the stubs return zero matrices and carry
``is_structural = False``.
"""
from femorph_solver.elements._nonstructural_stubs import ( # noqa: PLC0415
NONSTRUCTURAL_TOPOLOGY,
)
name = self._etypes[int(et_id)]
aliases = self._DEGENERATE_ALIASES.get(name)
if aliases is not None:
alt = aliases.get(int(n_nodes))
if alt is not None:
return _get_element(alt)
try:
cls = _get_element(name)
except KeyError:
stub_table = NONSTRUCTURAL_TOPOLOGY.get(name)
if stub_table is not None:
stub_name = stub_table.get(int(n_nodes))
if stub_name is not None:
return _get_element(stub_name)
raise
if n_nodes != cls.n_nodes:
raise ValueError(
f"{name} expects {cls.n_nodes} nodes per element but an element "
f"with {n_nodes} nodes was recorded; no degenerate kernel known"
)
return cls
def _sorted_node_nums(self) -> np.ndarray:
# After ``from_grid`` / ``_rebuild_grid`` the grid is guaranteed
# node-number-sorted, so the ``point_data`` view is the answer verbatim
# (zero copy). Fall back through ``.grid`` if the model is dirty.
g = self.grid
if g.n_points == 0:
return np.empty(0, dtype=np.int32)
return np.asarray(g.point_data["ansys_node_num"], dtype=np.int32)
[docs]
def node_coord(self, node_num: int) -> tuple[float, float, float]:
"""Return ``(x, y, z)`` for the given node number.
The grid is the only permanent storage, so single-node lookups
aren't O(1) the way the old ``_nodes[nn]`` dict lookup was —
but callers that do this in a hot loop should lift the full
``grid.points`` array via :meth:`Model.grid` instead.
Parameters
----------
node_num : int
One-based ANSYS node number.
Returns
-------
tuple of (float, float, float)
``(x, y, z)`` coordinates of the node.
Raises
------
KeyError
If no node with that number exists on the model.
"""
g = self.grid
nums = np.asarray(g.point_data["ansys_node_num"])
# ``nums`` is node-number-sorted after canonicalisation, so searchsorted
# is O(log n) and matches the dict's effective lookup cost.
idx = np.searchsorted(nums, node_num)
if idx >= nums.size or int(nums[idx]) != int(node_num):
raise KeyError(f"node {node_num} not in model")
pt = g.points[idx]
return (float(pt[0]), float(pt[1]), float(pt[2]))
[docs]
def element_info(self, elem_num: int) -> tuple[list[int], int, int, int]:
"""Return ``(node_nums, et_id, mat_id, real_id)`` for an element.
Convenience accessor for the stress-recovery / strain-export
paths that previously reached into ``model._elements[en]``. O(log n)
lookup; lift ``grid.cell_data`` / ``grid.cell_connectivity`` yourself
for bulk iteration.
Parameters
----------
elem_num : int
One-based ANSYS element number.
Returns
-------
tuple
``(node_nums, et_id, mat_id, real_id)`` where ``node_nums``
is a list of one-based node numbers in connectivity order
and the other three are the element-type, material, and
real-constant ids.
Raises
------
KeyError
If no element with that number exists on the model.
"""
g = self.grid
elem_nums = np.asarray(g.cell_data["ansys_elem_num"])
idx = np.searchsorted(elem_nums, elem_num)
if idx >= elem_nums.size or int(elem_nums[idx]) != int(elem_num):
raise KeyError(f"element {elem_num} not in model")
offsets = np.asarray(g.offset, dtype=np.int32)
conn = np.asarray(g.cell_connectivity, dtype=np.int32)
pt_nums = np.asarray(g.point_data["ansys_node_num"], dtype=np.int32)
vtk_ids = conn[offsets[idx] : offsets[idx + 1]]
nns = [int(pt_nums[j]) for j in vtk_ids]
return (
nns,
int(g.cell_data["ansys_elem_type_num"][idx]),
int(g.cell_data["ansys_material_type"][idx]),
int(g.cell_data["ansys_real_constant"][idx]),
)
def _grid_element_arrays(
self,
) -> tuple[
np.ndarray, # elem_nums (n_elem,) int32
np.ndarray, # n_nodes per elem (n_elem,) int32
np.ndarray, # offsets into cell_connectivity (n_elem + 1,) int32
np.ndarray, # cell_connectivity flat (total_nns,) int32 — VTK point indices
np.ndarray, # et_ids (n_elem,) int32
np.ndarray, # mat_ids (n_elem,) int32
np.ndarray, # real_ids (n_elem,) int32
np.ndarray, # point_nums (n_points,) int32 node number per VTK point
np.ndarray, # points (n_points, 3) float64
]:
"""Bulk numpy view of the grid for the assembly / strain hot paths.
Every array is a zero-copy view onto the grid's internal buffers
(assuming the grid has been canonicalised + int32-coerced) —
no dict iteration, no per-element Python lookups. Centralised
so the hot paths don't each reach into the pyvista API.
"""
g = self.grid
offsets = np.asarray(g.offset, dtype=np.int32)
conn = np.asarray(g.cell_connectivity, dtype=np.int32)
n_nodes_per_elem = np.diff(offsets).astype(np.int32, copy=False)
return (
np.asarray(g.cell_data["ansys_elem_num"], dtype=np.int32),
n_nodes_per_elem,
offsets,
conn,
np.asarray(g.cell_data["ansys_elem_type_num"], dtype=np.int32),
np.asarray(g.cell_data["ansys_material_type"], dtype=np.int32),
np.asarray(g.cell_data["ansys_real_constant"], dtype=np.int32),
np.asarray(g.point_data["ansys_node_num"], dtype=np.int32),
np.asarray(g.points, dtype=np.float64),
)
def _node_dof_labels(self) -> dict[int, tuple[str, ...]]:
"""Return ``{node: (dof_label, ...)}`` in canonical DOF order.
The DOF set for a node is the union of ``dof_labels`` across every
element that touches it. Nodes not referenced by any element default
to the three translational DOFs so unconnected nodes still fit the
global layout.
Tolerant of partial state: if a caller stages an element
referencing an ET that hasn't been declared yet (via
``APDL(model).et(...)``), the element is simply skipped here —
``_rebuild_grid`` is the method that enforces the ET invariant
at solve time. Walks the grid + the staged buffers directly
without forcing a rebuild, so pre-rebuild introspection doesn't
crash on intentionally-partial models.
"""
if self._node_dof_labels_cache is not None:
return self._node_dof_labels_cache
default = _CANONICAL_DOFS[:3]
per_node: dict[int, set[str]] = {}
# Walk the grid (authoritative committed state).
if self._grid.n_points > 0:
pt_nums = np.asarray(self._grid.point_data["ansys_node_num"], dtype=np.int32)
for n in pt_nums:
per_node[int(n)] = set()
if self._grid.n_cells > 0:
offsets = np.asarray(self._grid.offset, dtype=np.int32)
conn = np.asarray(self._grid.cell_connectivity, dtype=np.int32)
et_ids = np.asarray(self._grid.cell_data["ansys_elem_type_num"], dtype=np.int32)
pt_nums = np.asarray(self._grid.point_data["ansys_node_num"], dtype=np.int32)
for i in range(self._grid.n_cells):
et_id = int(et_ids[i])
if et_id not in self._etypes:
continue
vtk_ids = conn[offsets[i] : offsets[i + 1]]
cls = self._kernel_for(et_id, vtk_ids.size)
for j in vtk_ids:
per_node[int(pt_nums[j])].update(cls.dof_labels)
# Overlay staged incremental additions (not yet merged into the grid).
for nn in self._staged_nodes:
per_node.setdefault(int(nn), set())
for nns, et_id, _, _ in self._staged_elements.values():
if et_id not in self._etypes:
continue
cls = self._kernel_for(et_id, len(nns))
for nn in nns:
per_node.setdefault(int(nn), set()).update(cls.dof_labels)
out: dict[int, tuple[str, ...]] = {}
for nn, s in per_node.items():
out[nn] = tuple(lbl for lbl in _CANONICAL_DOFS if lbl in s) or default
self._node_dof_labels_cache = out
return out
def _dof_layout(
self,
) -> tuple[np.ndarray, dict[int, tuple[int, tuple[str, ...]]], int]:
"""Return ``(sorted_node_nums, {node: (offset, labels)}, N)``.
``offset`` is the first global DOF index for a node; ``labels`` is its
DOF-label tuple in canonical order. Cached across calls; invalidated
by any structural mutation (n, e, et).
"""
if self._layout_cache is not None:
return self._layout_cache
node_nums = self._sorted_node_nums()
node_dofs = self._node_dof_labels()
layout: dict[int, tuple[int, tuple[str, ...]]] = {}
offset = 0
for nn in node_nums:
labels = node_dofs[int(nn)]
layout[int(nn)] = (offset, labels)
offset += len(labels)
self._layout_cache = (node_nums, layout, offset)
return self._layout_cache
[docs]
def dof_map(self) -> np.ndarray:
"""Return the per-row ``(node_num, local_dof_idx)`` map.
Row ``i`` of :meth:`stiffness_matrix` and :meth:`mass_matrix`
corresponds to ``dof_map()[i]``. ``local_dof_idx`` is the
0-based position in the canonical DOF order
``(UX, UY, UZ, ROTX, ROTY, ROTZ)``. Nodes only get rows for
the DOFs the elements at them actually need, so a truss model
stays 3-DOF per node while a beam model gets 6.
Returns
-------
numpy.ndarray of int32, shape ``(N, 2)``
Per-DOF ``(node_num, local_dof_idx)`` rows. ``N`` is the
total free-DOF count of the model.
See Also
--------
stiffness_matrix : the K matrix this map indexes.
mass_matrix : the M matrix this map indexes.
"""
node_nums, layout, total = self._dof_layout()
out = np.empty((total, 2), dtype=np.int32)
for nn in node_nums:
offset, labels = layout[int(nn)]
for k, lbl in enumerate(labels):
out[offset + k] = (int(nn), self._DOF_LABELS[lbl])
return out
def _assemble(
self,
which: str,
lumped: bool = False,
*,
full_to_free: np.ndarray | None = None,
triu_only: bool = False,
pattern: tuple[np.ndarray, np.ndarray] | None = None,
) -> _sp.csr_array:
"""Assemble the global K or M matrix.
``full_to_free`` (optional): length-N ``int32`` array mapping
each full-system DOF index to its reduced-system index
(-1 for fixed DOFs that should be dropped entirely). When
supplied, returned matrix has shape ``(n_free, n_free)`` and
is built directly in the reduced numbering — the full-system
K/M are never materialised, which roughly halves peak
assembly RSS on BC-constrained problems.
``triu_only`` drops entries below the diagonal during the scatter
so the result is upper-triangular (diagonal + strictly-upper).
Lets callers feed MKL Pardiso's SPD Cholesky factor directly
without a separate ``scipy.sparse.triu`` extraction step —
halves the CSR's nnz footprint and skips one ``~n_free × 12 B``
transient copy. The returned matrix is tagged
``.femorph_triu = True`` so downstream solvers can detect the
storage convention.
``pattern=(row_ptr, col_idx)`` skips the symbolic pattern build
and scatters into the supplied arrays directly. Lets the caller
share indptr + indices between K and M (they're both keyed on
the same element connectivity + reduced-DOF mask, so the
sparsity structures are identical when both are assembled with
the same ``triu_only`` / ``full_to_free`` flags). The returned
matrix aliases the supplied arrays — modifying them after the
fact mutates the returned CSR.
"""
_ = self.grid # triggers _rebuild_grid if dirty (also validates nodes/types)
_, layout, N = self._dof_layout()
if full_to_free is None:
N_out = N
else:
if full_to_free.shape[0] != N:
raise ValueError(f"full_to_free length {full_to_free.shape[0]} != N={N}")
# Maximum free index + 1 is the reduced dimension.
N_out = int(full_to_free.max()) + 1 if N > 0 else 0
if N == 0 or self._grid.n_cells == 0:
return _sp.csr_array((N_out, N_out), dtype=np.float64)
# Grid-backed numpy views of the element table. ``conn`` holds
# VTK point indices (0-based, contiguous after canonicalisation);
# ``pt_nums`` maps them back to node numbers when we need
# them for DOF-layout lookup. ``points`` is indexed by VTK
# point index directly, so per-group coord gather is a single
# fancy-index (no ``searchsorted``).
(
_elem_nums,
n_nodes_per_elem,
offsets,
conn,
et_ids,
mat_ids,
real_ids,
pt_nums,
points,
) = self._grid_element_arrays()
# Group by (et_id, n_nodes, material-VALUE, real-VALUE) — NOT
# raw ``mat_id`` / ``real_id``. Decks like ``ptr.cdb`` assign a
# unique ``mat_id`` per cell even when every entry holds
# identical material properties; keying on the ID value fans out
# 5000+ singleton groups and defeats ``ke_batch`` / ``me_batch``.
# Keying on the hashed property tuple collapses it to the real
# element-type groups (4 on PTR: 3069 hex + 497 pyramid +
# 19 wedge + 1581 tet) and restores batching.
# ``n_nodes`` joins the key so degenerate topologies under a
# shared foreign-deck ET id (e.g. MAPDL 186 → HEX20 / WEDGE15 / PYR13) land
# in distinct groups.
mat_cache: dict[int, tuple] = {}
real_cache: dict[int, tuple] = {}
def _mat_key(mid: int) -> tuple:
key = mat_cache.get(mid)
if key is None:
props = self._materials.get(mid, {})
# Sort items so dict-ordering doesn't split identical
# materials across groups.
key = tuple(sorted(props.items()))
mat_cache[mid] = key
return key
def _real_key(rid: int) -> tuple:
key = real_cache.get(rid)
if key is None:
vals = self._real_constants.get(rid, np.empty(0, dtype=np.float64))
# ``tobytes`` is O(n_vals) but n_vals is tiny (≤ 10 for
# any real-constant set).
key = (vals.shape, np.ascontiguousarray(vals).tobytes())
real_cache[rid] = key
return key
# Key -> (et_id, n_nodes, representative_mat_id,
# representative_real_id, cell_idx_list). Keep a
# representative ``mat_id`` / ``real_id`` for each group so we
# can look up the material dict / real-constant array below.
groups: dict[tuple, tuple[int, int, int, int, list[int]]] = {}
for i in range(et_ids.size):
mid = int(mat_ids[i])
rid = int(real_ids[i])
key = (int(et_ids[i]), int(n_nodes_per_elem[i]), _mat_key(mid), _real_key(rid))
entry = groups.get(key)
if entry is None:
groups[key] = (int(et_ids[i]), int(n_nodes_per_elem[i]), mid, rid, [i])
else:
entry[4].append(i)
# Per-group element matrices and DOF maps. ``dofs_per_elem`` is
# int32 — what the C++ scatter and scipy CSR both want.
group_dofs: list[np.ndarray] = []
group_mats: list[np.ndarray] = []
for et_id, nn_per_elem, mat_id, real_id, cell_idx_list in groups.values():
cls = self._kernel_for(et_id, nn_per_elem)
material = self._materials.get(mat_id, {})
real = self._real_constants.get(real_id, np.empty(0, dtype=np.float64))
cell_idx = np.asarray(cell_idx_list, dtype=np.int32)
# (n_grp, nn_per_elem) VTK point indices for each element in
# the group. ``offsets[cell_idx][:, None] + arange(nn)`` is a
# vectorised gather over the flat connectivity.
base = offsets[cell_idx][:, None]
per_elem_cols = np.arange(nn_per_elem, dtype=np.int32)[None, :]
elem_vtk_ids = conn[base + per_elem_cols] # (n_grp, nn_per_elem)
coords_arr = points[elem_vtk_ids] # (n_grp, nn_per_elem, 3) — zero dict hits
if which == "K":
batch_fn = getattr(cls, "ke_batch", None)
mats = batch_fn(coords_arr, material, real) if batch_fn else None
if mats is None:
mats = np.stack(
[cls.ke(coords_arr[i], material, real) for i in range(cell_idx.size)]
)
else:
batch_fn = getattr(cls, "me_batch", None)
mats = batch_fn(coords_arr, material, real, lumped=lumped) if batch_fn else None
if mats is None:
mats = np.stack(
[
cls.me(coords_arr[i], material, real, lumped=lumped)
for i in range(cell_idx.size)
]
)
# Per-element DOF indices. Layout is keyed by node
# number, so translate through ``pt_nums`` once per unique
# node touched by this group — cheaper than doing it per
# (elem, node) pair.
unique_vtk, inverse = np.unique(elem_vtk_ids.ravel(), return_inverse=True)
unique_mapdl = pt_nums[unique_vtk]
# Fast path: homogeneous DOF labels across every node in the
# group. Holds whenever a node is touched only by elements
# sharing the same ``dof_labels`` — in particular, the common
# case where the whole model is one element kind (HEX8,
# BEAM2, etc.). At 300×300×2 HEX8 (271k nodes × 3
# labels per K/M assembly) the vectorised path replaces
# ~800k Python iterations with two numpy ops, roughly
# 200 ms → 5 ms per assembly call. When labels vary per
# node (mixed-element models where a node sits between a
# HEX8 and a BEAM2 group) we fall back to the
# per-node lookup below.
first_labels = layout[int(unique_mapdl[0])][1]
homogeneous = True
for nn in unique_mapdl[1:]:
if layout[int(nn)][1] != first_labels:
homogeneous = False
break
if homogeneous:
bases = np.fromiter(
(layout[int(nn)][0] for nn in unique_mapdl),
dtype=np.int32,
count=unique_mapdl.size,
)
label_offsets = np.array(
[first_labels.index(lbl) for lbl in cls.dof_labels],
dtype=np.int32,
)
node_dofs = bases[:, None] + label_offsets[None, :]
else:
node_dofs = np.empty((unique_vtk.size, cls.n_dof_per_node), dtype=np.int32)
for u_idx, nn in enumerate(unique_mapdl):
layout_base, labels = layout[int(nn)]
for k, lbl in enumerate(cls.dof_labels):
node_dofs[u_idx, k] = layout_base + labels.index(lbl)
dofs_per_elem = node_dofs[inverse].reshape(
cell_idx.size, nn_per_elem * cls.n_dof_per_node
)
if full_to_free is not None:
# Remap to reduced numbering; fixed DOFs become -1 and the
# C++ scatter drops their rows + cols entirely.
dofs_per_elem = full_to_free[dofs_per_elem]
group_dofs.append(np.ascontiguousarray(dofs_per_elem, dtype=np.int32))
group_mats.append(np.ascontiguousarray(mats, dtype=np.float64))
# Build (or reuse) the symbolic CSR pattern from every group's
# connectivity. When the pattern is for the full system it's
# cached for reuse across K and M (geometry-only). For the
# reduced path we rebuild per call — the mask is a property of
# the BC set, not the mesh, and the cost is dominated by the
# factor anyway. ``triu_only`` always bypasses the cache — the
# triu pattern is K-specific.
if pattern is not None:
row_ptr, col_idx = pattern
elif full_to_free is None and not triu_only:
row_ptr, col_idx = self._get_or_build_csr_pattern(group_dofs, N)
else:
row_ptr, col_idx = self._build_csr_pattern(group_dofs, N_out, triu_only=triu_only)
data = np.zeros(col_idx.shape[0], dtype=np.float64)
for dofs_pe, mats in zip(group_dofs, group_mats):
_core.scatter_into_csr(
dofs_pe, mats, row_ptr, col_idx, data, N_out, triu_only=triu_only
)
# The standard ``_sp.csr_array((data, col_idx, row_ptr), ...)``
# constructor runs ``check_format`` which copies ``data`` and
# ``col_idx`` into fresh arrays — a transient 2× spike that
# shows up as ~90 MB of avoidable peak RSS on a 200×200×2
# triu HEX8 plate. We've just populated the arrays with
# the C++ scatter so we know they're valid (sorted column
# indices per row from ``build_csr_pattern``, matching dtype,
# contiguous) — construct the csr_array by assigning to the
# private attributes directly so scipy skips the copies.
out = _sp.csr_array.__new__(_sp.csr_array)
out._shape = (N_out, N_out)
out.indptr = row_ptr
out.indices = col_idx
out.data = data
# Required by scipy's internal caches / maxprint:
out._maxprint = 50
if triu_only:
# Tag so Pardiso / ARPACK can skip their own triu extraction and
# avoid calling K @ x (which would only compute the triu-product).
out.femorph_triu = True
return out
def _build_csr_pattern(
self, group_dofs: list[np.ndarray], N: int, *, triu_only: bool = False
) -> tuple[np.ndarray, np.ndarray]:
"""Symbolic CSR pattern from element-DOF groups, no caching.
Supports ``-1`` sentinels in ``group_dofs`` (the reduced-assembly
path uses them for filtered-out fixed DOFs); the C++ builder and
the scipy fallback both ignore them.
``triu_only=True`` drops entries below the diagonal so the result
is upper-triangular — see :meth:`_assemble` for why that's useful.
"""
if len(group_dofs) == 1:
return _core.build_csr_pattern(group_dofs[0], N, triu_only=triu_only)
# Mixed ``ndof_per_elem`` across groups: pad each group's per-row
# DOF array up to the max with ``-1`` sentinels, stack, then hand
# the merged block to the single-group C++ builder. The kernel
# already treats ``-1`` entries as fixed-and-dropped (matches
# the reduced-assembly contract), so padded slots are ignored
# with zero extra bookkeeping.
#
# The cost of the extra "wasted" cells is small: on ``ptr.cdb``
# padding 1581 TET10 rows (10 nodes × 3 DOF = 30) up to 60
# plus 497 PYR13 rows (13 × 3 = 39) adds ~50 k cells to a
# 5166-row block. In exchange we skip the scipy ``coo → csr``
# fallback (which materialised ~1.47 M COO entries, sorted them
# and deduplicated with a Python-level unique) AND pick up the
# OpenMP row-parallel speedup in the C++ builder. Net on PTR:
# ~500 ms → ~120 ms pattern build at 4 threads.
max_ndof = max(int(g.shape[1]) for g in group_dofs)
n_elem_total = sum(int(g.shape[0]) for g in group_dofs)
padded = np.full((n_elem_total, max_ndof), -1, dtype=np.int32)
off = 0
for g in group_dofs:
n, nd = g.shape
padded[off : off + n, :nd] = g
off += n
return _core.build_csr_pattern(padded, N, triu_only=triu_only)
def _get_or_build_csr_pattern(
self, group_dofs: list[np.ndarray], N: int
) -> tuple[np.ndarray, np.ndarray]:
cached = self._csr_pattern_cache
if cached is not None and cached[0] == N:
return cached[1], cached[2]
row_ptr, col_idx = self._build_csr_pattern(group_dofs, N)
self._csr_pattern_cache = (N, row_ptr, col_idx)
return row_ptr, col_idx
[docs]
def stiffness_matrix(self) -> _sp.csr_array:
"""Assemble the global stiffness matrix.
Loops every element in :attr:`grid`, dispatches to the per-
element kernel for the element-type / KEYOPT pair, and scatters
each :math:`K_e` block into a single sparse global :math:`K`
with the DOF layout returned by :meth:`dof_map`.
Cached on the Model: repeat calls on an unchanged Model skip
the assembly phase entirely. The cache is invalidated by any
structural mutation (``n`` / ``e`` / ``et``) or material
mutation (``mp`` / ``r``).
Returns
-------
scipy.sparse.csr_array
Symmetric positive-(semi)definite stiffness matrix of
shape ``(N, N)`` where ``N`` is the total free-DOF count.
Row / column ``i`` corresponds to ``dof_map()[i]``.
See Also
--------
mass_matrix : assembles the mass matrix with the same DOF layout.
dof_map : per-row ``(node, dof_label_index)`` mapping.
Examples
--------
Load the bundled rotor sector and inspect the assembled K.
>>> import femorph_solver as fs
>>> model = fs.Model.from_pv(fs.examples.cyclic_bladed_rotor_sector_path())
>>> K = model.stiffness_matrix()
>>> K.shape
(690, 690)
>>> K.nnz > 0
True
"""
if self._K_cache is not None and not self._dirty:
return self._K_cache
K = self._assemble("K")
self._K_cache = K
return K
[docs]
def mass_matrix(self, lumped: bool = False) -> _sp.csr_array:
"""Assemble the global mass matrix.
Companion of :meth:`stiffness_matrix` — same DOF layout, same
caching contract. Consistent and lumped variants are cached
independently so a parametric sweep can mix the two without
re-paying for assembly.
Parameters
----------
lumped : bool, default False
When ``True``, returns a row-summed lumped mass (diagonal
in the canonical DOF layout). When ``False``, returns the
consistent mass. Lumped masses are convenient for explicit
time integration; consistent masses are required for modal
analysis to recover the physical mass-orthogonality property
:math:`\\varphi_i^\\top M \\varphi_j = \\delta_{ij}`.
Returns
-------
scipy.sparse.csr_array
Symmetric positive-(semi)definite mass matrix of shape
``(N, N)`` matching the DOF layout of
:meth:`stiffness_matrix`.
See Also
--------
stiffness_matrix : the companion stiffness matrix assembly.
rayleigh_damping : assembles ``α M + β K`` with the same layout.
Examples
--------
Compare consistent vs lumped masses on the bundled fixture.
>>> import femorph_solver as fs
>>> model = fs.Model.from_pv(fs.examples.cyclic_bladed_rotor_sector_path())
>>> M = model.mass_matrix()
>>> Ml = model.mass_matrix(lumped=True)
>>> M.shape == Ml.shape
True
>>> Ml.nnz <= M.nnz
True
"""
if not self._dirty:
cached = self._M_lumped_cache if lumped else self._M_cache
if cached is not None:
return cached
M = self._assemble("M", lumped=lumped)
if lumped:
self._M_lumped_cache = M
else:
self._M_cache = M
return M
def _release_K_M_cache(self) -> None:
"""Drop the cached full ``K`` / ``M`` / lumped-``M``.
Backs the ``release_inputs=True`` contract on the analysis-driver
entry points: after the solve consumed ``K`` / ``M``, the cache
is no longer load-bearing and we'd rather hand the RAM back to
the OS than keep it warm for a hypothetical re-solve. Pass
``release_inputs=False`` on the driver when the warm cache
actually matters (parametric sweeps over the same model).
"""
self._K_cache = None
self._M_cache = None
self._M_lumped_cache = None
# ---------------------------------------------------------------------
# Analysis drivers
# ---------------------------------------------------------------------
def _global_index(
self,
node: int,
dof_canonical: int,
layout: dict[int, tuple[int, tuple[str, ...]]],
) -> int:
if node not in layout:
raise KeyError(f"DOF referenced undefined node {node}")
offset, labels = layout[node]
label = _CANONICAL_DOFS[dof_canonical]
if label not in labels:
raise KeyError(f"node {node} has no {label} DOF (active labels: {labels})")
return offset + labels.index(label)
def _load_vector(self) -> np.ndarray:
_, layout, N = self._dof_layout()
F = np.zeros(N, dtype=np.float64)
for (node, dof), value in self._force_bc.items():
try:
F[self._global_index(int(node), dof, layout)] = value
except KeyError:
# Force on a DOF the kernel doesn't expose at this node
# (e.g. ROTZ on a UX/UY-only axisymmetric shell mesh) —
# MAPDL silently drops it; match.
continue
if self._sfbeam_loads:
self._scatter_sfbeam_loads(F, layout)
if self._has_thermal_loading():
self._scatter_thermal_loads(F, layout)
return F
def _scatter_thermal_loads(self, F: np.ndarray, layout: dict) -> None:
"""Add thermal pre-strain contributions (MAPDL ``BFUNIF``/``BF``).
For each element whose kernel implements a ``thermal_load``
hook, compute the equivalent nodal force vector ``f_th = ∫ Bᵀ
E ε_th dV`` (with ``ε_th = α · ΔT``) and scatter into ``F``.
Elements without ``ALPX`` set, or with ΔT = 0 at every node,
contribute nothing.
Per-element ``ΔT`` is the mean of the node temperatures
(``node_T − tref``) in the element — same convention MAPDL
uses when each element ID has been assigned a single material
with one ALPX.
"""
if self._grid.n_cells == 0:
return
elem_nums = np.asarray(self._grid.cell_data["ansys_elem_num"], dtype=np.int64)
for elem_num in elem_nums:
nns, et_id, mat_id, real_id = self.element_info(int(elem_num))
cls = self._kernel_for(et_id, len(nns))
method = getattr(cls, "thermal_load", None)
material = self._materials.get(int(mat_id), {})
alpha = float(material.get("ALPX", 0.0))
if alpha == 0.0:
continue
delta_t = float(np.mean([self._node_delta_t(int(n)) for n in nns]))
if delta_t == 0.0:
continue
if method is None:
raise NotImplementedError(
f"thermal load on element {elem_num} ({cls.name}): kernel does "
"not implement a thermal_load contribution (TRUSS2 supported in Phase 1)"
)
coords = np.array([self.node_coord(int(n)) for n in nns], dtype=np.float64)
real = self._real_constants.get(int(real_id), np.empty(0, dtype=np.float64))
f_global = method(coords, real, material, delta_t=delta_t)
expected = cls.n_nodes * cls.n_dof_per_node
if f_global.size != expected:
raise ValueError(
f"{cls.name}.thermal_load returned shape {f_global.shape}; "
f"expected ({expected},)"
)
k = 0
for n in nns:
base, labels = layout[int(n)]
for lbl in cls.dof_labels:
if lbl in labels:
F[base + labels.index(lbl)] += f_global[k]
k += 1
def _scatter_sfbeam_loads(self, F: np.ndarray, layout: dict) -> None:
"""Add distributed-load contributions (MAPDL ``SFBEAM``) to ``F``.
For each element with a stored distributed load, looks up the
element's kernel and asks it for the consistent local-frame
load vector, transforms to global, and scatters into the
element's DOF rows. Element kernels that don't ship a
``distributed_load`` method (everything but BEAM2 today) raise
a ``NotImplementedError`` listing which kernel needs the hook.
"""
for elem_num, loads in self._sfbeam_loads.items():
nns, et_id, mat_id, real_id = self.element_info(int(elem_num))
cls = self._kernel_for(et_id, len(nns))
method = getattr(cls, "distributed_load", None)
if method is None:
raise NotImplementedError(
f"SFBEAM on element {elem_num} ({cls.name}): kernel does "
"not implement a distributed-load contribution "
"(only BEAM2 supports SFBEAM in Phase 1)"
)
coords = np.array([self.node_coord(int(n)) for n in nns], dtype=np.float64)
real = self._real_constants.get(int(real_id), np.empty(0, dtype=np.float64))
material = self._materials.get(int(mat_id), {})
for face, _label, value in loads:
f_global = method(coords, real, material, face=int(face), value=float(value))
# Scatter the 12-element local force vector into the
# global F via the element's per-node DOF labels.
if f_global.size != cls.n_nodes * cls.n_dof_per_node:
raise ValueError(
f"BEAM2 distributed_load returned shape {f_global.shape}; "
f"expected ({cls.n_nodes * cls.n_dof_per_node},)"
)
k = 0
for n in nns:
base, labels = layout[int(n)]
for lbl in cls.dof_labels:
if lbl in labels:
F[base + labels.index(lbl)] += f_global[k]
k += 1
def _prescribed_dofs(self) -> dict[int, float]:
_, layout, _ = self._dof_layout()
out: dict[int, float] = {}
for (node, dof), value in self._disp_bc.items():
# Decks routinely fix DOFs the kernel doesn't expose
# (e.g. ``D,ALL,ROTZ`` on a SHELL208 axisymmetric mesh that
# only carries UX/UY); MAPDL silently ignores those — match.
try:
out[self._global_index(int(node), dof, layout)] = value
except KeyError:
continue
return out
[docs]
def strain(self, displacement: np.ndarray) -> dict[int, np.ndarray]:
"""Compute per-element elastic strain from a global displacement vector.
Shares the kernel path with the ``write_*_result`` writers but
stays in memory — nothing hits disk. Use this to recover
strain from any displacement field (static result, one modal
shape, a morphed guess) without round-tripping through
``.pv``.
Parameters
----------
displacement : numpy.ndarray, shape ``(N,)``
Global displacement vector indexed by :meth:`dof_map`
(the same layout the solvers produce).
Returns
-------
dict of {int: numpy.ndarray}
``{elem_num: (n_nodes, 6)}`` engineering-shear Voigt
strain sampled at each element node. Elements whose
kernel returns ``None`` from ``strain_batch`` are skipped.
See Also
--------
stiffness_matrix : the K used by the solvers that produce
``displacement``.
"""
_ = self.grid
_, layout, _N = self._dof_layout()
displacement = np.ascontiguousarray(displacement, dtype=np.float64)
out: dict[int, np.ndarray] = {}
if self._grid.n_cells == 0:
return out
(
elem_nums_arr,
n_nodes_per_elem,
offsets,
conn,
et_ids,
mat_ids,
real_ids,
pt_nums,
points,
) = self._grid_element_arrays()
# Group by (et_id, n_nodes, mat_id, real_id) so each batch calls
# a single ``strain_batch`` kernel on a contiguous coord buffer.
groups: dict[tuple[int, int, int, int], list[int]] = {}
for i in range(et_ids.size):
key = (int(et_ids[i]), int(n_nodes_per_elem[i]), int(mat_ids[i]), int(real_ids[i]))
groups.setdefault(key, []).append(i)
for (et_id, nn_per_elem, _mat, _real), cell_idx_list in groups.items():
cls = self._kernel_for(et_id, nn_per_elem)
batch_fn = getattr(cls, "strain_batch", None)
if batch_fn is None:
continue
cell_idx = np.asarray(cell_idx_list, dtype=np.int32)
base = offsets[cell_idx][:, None]
per_elem_cols = np.arange(nn_per_elem, dtype=np.int32)[None, :]
elem_vtk_ids = conn[base + per_elem_cols] # (n_grp, nn_per_elem)
coords = points[elem_vtk_ids]
# Gather each element's global DOFs from the layout, same as
# the assembly path. Translate VTK → element-number once per unique
# node before the per-unique Python dict hit.
unique_vtk, inverse = np.unique(elem_vtk_ids.ravel(), return_inverse=True)
unique_mapdl = pt_nums[unique_vtk]
node_dofs = np.empty((unique_vtk.size, cls.n_dof_per_node), dtype=np.int64)
for u_idx, nn in enumerate(unique_mapdl):
layout_base, labels = layout[int(nn)]
for k, lbl in enumerate(cls.dof_labels):
node_dofs[u_idx, k] = layout_base + labels.index(lbl)
dofs_per_elem = node_dofs[inverse].reshape(
cell_idx.size, nn_per_elem * cls.n_dof_per_node
)
u_e = np.ascontiguousarray(displacement[dofs_per_elem])
# Plane kernels need ν (and the plane-stress vs plane-strain
# selector) to fill εzz; 3D solids accept the kwarg and
# ignore it.
material = self._materials.get(int(_mat))
eps = batch_fn(np.ascontiguousarray(coords), u_e, material=material)
if eps is None:
continue
group_elem_nums = elem_nums_arr[cell_idx]
for i, en in enumerate(group_elem_nums):
out[int(en)] = np.asarray(eps[i])
return out
# ---------------------------------------------------------------------
# Thermal pillar — parallel DOF layout / assembly / solve.
# ---------------------------------------------------------------------
def _thermal_cells(self) -> list[tuple[int, np.ndarray, int]]:
"""Return ``[(cell_index, vtk_node_ids, et_id), ...]`` for every cell
whose kernel is non-structural and ships a ``kt`` method.
These are the cells the thermal-pillar assembler walks; they're a
subset of ``self.grid``'s cells.
"""
g = self.grid # triggers rebuild if there are staged elements
if g.n_cells == 0:
return []
offsets = np.asarray(g.offset, dtype=np.int32)
conn = np.asarray(g.cell_connectivity, dtype=np.int32)
et_ids = np.asarray(g.cell_data["ansys_elem_type_num"], dtype=np.int32)
out: list[tuple[int, np.ndarray, int]] = []
for i in range(self._grid.n_cells):
et_id = int(et_ids[i])
if et_id not in self._etypes:
continue
vtk_ids = conn[offsets[i] : offsets[i + 1]]
cls = self._kernel_for(et_id, vtk_ids.size)
if getattr(cls, "is_structural", True):
continue
if not callable(getattr(cls, "kt", None)):
continue
out.append((i, vtk_ids, et_id))
return out
[docs]
def thermal_node_numbers(self) -> np.ndarray:
"""Return the sorted ANSYS node numbers that touch a thermal element.
Row ``i`` of :meth:`thermal_conductivity_matrix` and
:meth:`thermal_load_vector` corresponds to ``thermal_node_numbers()[i]``.
"""
cells = self._thermal_cells()
if not cells:
return np.zeros(0, dtype=np.int32)
pt_nums = np.asarray(self.grid.point_data["ansys_node_num"], dtype=np.int32)
touched: set[int] = set()
for _i, vtk_ids, _et in cells:
touched.update(int(pt_nums[j]) for j in vtk_ids)
return np.array(sorted(touched), dtype=np.int32)
[docs]
def thermal_conductivity_matrix(self) -> _sp.csr_array:
"""Assemble the global thermal conductivity matrix ``K_t``.
One DOF (``TEMP``) per node that touches a thermal element.
Uses the same scatter pattern as :meth:`stiffness_matrix` but
on the thermal-only DOF layout.
"""
cells = self._thermal_cells()
node_nums = self.thermal_node_numbers()
n = node_nums.size
if n == 0:
return _sp.csr_array((0, 0), dtype=np.float64)
node_to_idx = {int(nn): i for i, nn in enumerate(node_nums)}
g = self.grid
pts = np.asarray(g.points, dtype=np.float64)
pt_nums = np.asarray(g.point_data["ansys_node_num"], dtype=np.int32)
mat_ids = np.asarray(g.cell_data["ansys_material_type"], dtype=np.int32)
real_ids = np.asarray(g.cell_data["ansys_real_constant"], dtype=np.int32)
rows: list[int] = []
cols: list[int] = []
data: list[float] = []
for cell_i, vtk_ids, et_id in cells:
cls = self._kernel_for(et_id, vtk_ids.size)
mat = self._materials.get(int(mat_ids[cell_i]), {})
real = self._real_constants.get(int(real_ids[cell_i]), np.empty(0, dtype=np.float64))
coords = pts[vtk_ids]
ke_local = cls.kt(coords, mat, real)
global_idx = np.array([node_to_idx[int(pt_nums[j])] for j in vtk_ids], dtype=np.int64)
for a, ga in enumerate(global_idx):
for b, gb in enumerate(global_idx):
rows.append(int(ga))
cols.append(int(gb))
data.append(float(ke_local[a, b]))
# Convection contribution on element edges in any stored
# selection (SF,...,CONV). For each pair of consecutive
# vertices forming a 1-D linear edge of length L,
# ``h L t / 2`` adds as a *lumped* diagonal to the
# conductivity at each endpoint (``t`` is the element
# thickness from real[0], default 1). MAPDL's PLANE55
# CONV uses the lumped form rather than the consistent
# ``h L t / 6 [[2,1],[1,2]]`` matrix - vm99 / vm100 only
# match within tolerance under the lumped convention.
if self._surf_conv:
node_ids = [int(pt_nums[j]) for j in vtk_ids]
thk = self._surface_load_thickness(real)
n_v = len(vtk_ids)
for a in range(n_v):
b = (a + 1) % n_v
pair_a, pair_b = node_ids[a], node_ids[b]
for sel, h, _t_inf in self._surf_conv:
if pair_a in sel and pair_b in sel:
edge_len = float(np.linalg.norm(coords[a] - coords[b]))
if edge_len <= 0.0:
continue
half = h * edge_len * thk / 2.0
ga = int(global_idx[a])
gb = int(global_idx[b])
rows.extend([ga, gb])
cols.extend([ga, gb])
data.extend([half, half])
K = _sp.coo_array(
(np.asarray(data, dtype=np.float64), (np.asarray(rows), np.asarray(cols))),
shape=(n, n),
).tocsr()
return K
@staticmethod
def _surface_load_thickness(real: np.ndarray) -> float:
"""Element thickness used by 2-D thermal surface loads.
Mirrors ``elements.quad4_thermal._thickness``: real[0] = THK,
defaulting to 1.0 when the real-constant set is empty.
"""
real = np.asarray(real, dtype=np.float64)
if real.size == 0:
return 1.0
val = float(real[0])
return val if val > 0.0 else 1.0
[docs]
def thermal_load_vector(self) -> np.ndarray:
"""Assemble the thermal-load vector from F,HEAT, SF,CONV, SF,HFLUX.
Per-edge contributions on every thermal element edge whose
endpoints are entirely in a stored selection:
* convection: ``h T_inf L t / 2 · [1, 1]``
* heat-flux: ``q L t / 2 · [1, 1]``
"""
cells = self._thermal_cells()
node_nums = self.thermal_node_numbers()
n = node_nums.size
Q = np.zeros(n, dtype=np.float64)
if n == 0:
return Q
node_to_idx = {int(nn): i for i, nn in enumerate(node_nums)}
for nn, val in self._heat_bc.items():
idx = node_to_idx.get(int(nn))
if idx is None:
continue
Q[idx] += float(val)
if not (self._surf_conv or self._surf_hflux or self._bfe_hgen):
return Q
g = self.grid
pts = np.asarray(g.points, dtype=np.float64)
pt_nums = np.asarray(g.point_data["ansys_node_num"], dtype=np.int32)
real_ids = np.asarray(g.cell_data["ansys_real_constant"], dtype=np.int32)
elem_nums = np.asarray(g.cell_data["ansys_elem_num"], dtype=np.int32)
for cell_i, vtk_ids, _et_id in cells:
elem_id = int(elem_nums[cell_i])
real_for_cell = self._real_constants.get(
int(real_ids[cell_i]), np.empty(0, dtype=np.float64)
)
kernel_name = self._etypes.get(int(_et_id), "")
# BFE,HGEN: integrate volumetric heat generation over the
# element's volume and split the total equally across the
# element's nodes. For LINK33 (1D conduction), volume =
# area * length. For 2D PLANE55, volume = area * thickness.
# LINK34 (convection link) has no thermal volume; skip it.
for sel, q_gen in self._bfe_hgen:
if elem_id not in sel:
continue
if kernel_name in ("LINK33", "BAR2_CONDUCTION"):
p_a = pts[vtk_ids[0]]
p_b = pts[vtk_ids[1]]
length = float(np.linalg.norm(p_b - p_a))
area_link = float(real_for_cell[0]) if real_for_cell.size > 0 else 1.0
vol = area_link * length
elif kernel_name in ("PLANE55", "QUAD4_THERMAL", "TRI3_THERMAL"):
coords_2d = pts[vtk_ids][:, :2]
if coords_2d.shape[0] == 4:
x = coords_2d[:, 0]
y = coords_2d[:, 1]
area_xy = 0.5 * abs(
(x[0] - x[2]) * (y[1] - y[3]) - (x[1] - x[3]) * (y[0] - y[2])
)
elif coords_2d.shape[0] == 3:
x = coords_2d[:, 0]
y = coords_2d[:, 1]
area_xy = 0.5 * abs(
x[0] * (y[1] - y[2]) + x[1] * (y[2] - y[0]) + x[2] * (y[0] - y[1])
)
else:
area_xy = 0.0
vol = area_xy * self._surface_load_thickness(real_for_cell)
else:
vol = 0.0
if vol <= 0.0:
continue
per_node = q_gen * vol / float(len(vtk_ids))
for j in vtk_ids:
n_id = int(pt_nums[j])
idx = node_to_idx.get(n_id)
if idx is not None:
Q[idx] += per_node
real = self._real_constants.get(int(real_ids[cell_i]), np.empty(0, dtype=np.float64))
thk = self._surface_load_thickness(real)
coords = pts[vtk_ids]
node_ids = [int(pt_nums[j]) for j in vtk_ids]
n_v = len(vtk_ids)
for a in range(n_v):
b = (a + 1) % n_v
pair_a, pair_b = node_ids[a], node_ids[b]
edge_len = None
for sel, h, t_inf in self._surf_conv:
if pair_a in sel and pair_b in sel:
if edge_len is None:
edge_len = float(np.linalg.norm(coords[a] - coords[b]))
if edge_len <= 0.0:
continue
load = h * t_inf * edge_len * thk / 2.0
Q[node_to_idx[pair_a]] += load
Q[node_to_idx[pair_b]] += load
for sel, q in self._surf_hflux:
if pair_a in sel and pair_b in sel:
if edge_len is None:
edge_len = float(np.linalg.norm(coords[a] - coords[b]))
if edge_len <= 0.0:
continue
load = q * edge_len * thk / 2.0
Q[node_to_idx[pair_a]] += load
Q[node_to_idx[pair_b]] += load
return Q
[docs]
def thermal_capacitance_matrix(self) -> _sp.csr_array:
"""Assemble the global thermal capacitance ``C_t`` (transient)."""
cells = self._thermal_cells()
node_nums = self.thermal_node_numbers()
n = node_nums.size
if n == 0:
return _sp.csr_array((0, 0), dtype=np.float64)
node_to_idx = {int(nn): i for i, nn in enumerate(node_nums)}
g = self.grid
pts = np.asarray(g.points, dtype=np.float64)
pt_nums = np.asarray(g.point_data["ansys_node_num"], dtype=np.int32)
mat_ids = np.asarray(g.cell_data["ansys_material_type"], dtype=np.int32)
real_ids = np.asarray(g.cell_data["ansys_real_constant"], dtype=np.int32)
rows: list[int] = []
cols: list[int] = []
data: list[float] = []
for cell_i, vtk_ids, et_id in cells:
cls = self._kernel_for(et_id, vtk_ids.size)
ct_fn = getattr(cls, "ct", None)
if not callable(ct_fn):
continue
mat = self._materials.get(int(mat_ids[cell_i]), {})
real = self._real_constants.get(int(real_ids[cell_i]), np.empty(0, dtype=np.float64))
coords = pts[vtk_ids]
ct_local = ct_fn(coords, mat, real)
global_idx = np.array([node_to_idx[int(pt_nums[j])] for j in vtk_ids], dtype=np.int64)
for a, ga in enumerate(global_idx):
for b, gb in enumerate(global_idx):
rows.append(int(ga))
cols.append(int(gb))
data.append(float(ct_local[a, b]))
return _sp.coo_array(
(np.asarray(data, dtype=np.float64), (np.asarray(rows), np.asarray(cols))),
shape=(n, n),
).tocsr()
[docs]
def solve_steady_thermal(
self,
*,
linear_solver: str = "auto",
) -> Any:
"""Run a steady-state thermal analysis.
Solves :math:`K_t \\, T = Q` on every node that touches a thermal
element kernel, with Dirichlet-temperature BCs (D,TEMP) directly
eliminated. See
:func:`femorph_solver.solvers.steady_thermal.solve_steady_thermal`
for the partition algebra.
Returns
-------
femorph_solver.solvers.steady_thermal.ThermalResult
Dataclass carrying the nodal temperature field, the reaction
heat-flow at constrained nodes, and the free-DOF mask.
"""
from femorph_solver.solvers.steady_thermal import ( # noqa: PLC0415
solve_steady_thermal as _solve_steady_thermal,
)
return _solve_steady_thermal(self, linear_solver=linear_solver)
[docs]
def solve_transient_thermal(
self,
*,
dt: float,
n_steps: int,
theta: float = 0.5,
linear_solver: str = "auto",
) -> Any:
"""Run a transient thermal analysis with the θ-method.
Integrates :math:`C_t \\dot T + K_t T = Q(t)` from
:math:`t = 0` for ``n_steps`` of size ``dt``. Crank-Nicolson
(default ``theta = 0.5``) is unconditionally stable and
second-order accurate; backward Euler (``theta = 1.0``) is
first-order and dissipative.
"""
from femorph_solver.solvers.transient_thermal import ( # noqa: PLC0415
solve_transient_thermal as _solve_transient_thermal,
)
return _solve_transient_thermal(
self, dt=dt, n_steps=n_steps, theta=theta, linear_solver=linear_solver
)
[docs]
def solve_static(
self,
*,
linear_solver: str = "auto",
thread_limit: int | None = None,
mixed_precision: bool | None = False,
release_inputs: bool = True,
progress: bool | Callable[[dict[str, Any]], None] = True,
) -> StaticResult:
"""Run a linear static analysis.
Solves :math:`K\\,\\mathbf{u} = \\mathbf{f}_{\\text{ext}}` using
the assembled stiffness from :meth:`stiffness_matrix`, the
Dirichlet BCs registered through :meth:`fix`, and the nodal
loads registered through :meth:`apply_force` plus any
thermal / pressure / coupling contributions on the model.
Parameters
----------
linear_solver : str, default ``"auto"``
Sparse linear backend identifier. ``"auto"`` picks the
fastest SPD direct backend available
(Pardiso → CHOLMOD → UMFPACK → SuperLU); pass an explicit
name to override. See
:func:`femorph_solver.solvers.linear.list_linear_solvers`
for the registered options.
release_inputs : bool, default True
Drop the model's cached full ``K`` once the static factor
has consumed it. Pass ``False`` for parametric load
sweeps that re-call ``solve`` on the same model so the
warm K cache survives. Same contract as
:meth:`solve_modal`'s ``release_inputs``.
progress : bool or callable, default False
``True`` shows a per-stage progress display
(``rich`` → ``tqdm`` → logger fallback). A **callable**
receives one structured event per stage start / end —
``{"phase": str, "step": int | None, "n_steps": int | None,
"elapsed_s": float}`` — useful when piping to a UI /
WebSocket gateway instead of a terminal renderer.
Returns
-------
femorph_solver.solvers.static.StaticResult
In-memory result dataclass with ``displacement``,
``reaction``, and ``free_mask`` indexed by :meth:`dof_map`.
Call ``result.save(path, model)`` to persist as a disk-backed
:class:`~femorph_solver.result.static.StaticResultDisk` handle
that the next person on the team can lazy-load via
:func:`femorph_solver.read`.
See Also
--------
modal_solve : eigenproblem driver.
transient_solve : Newmark-β transient driver.
harmonic_solve : modal-superposition frequency response.
Examples
--------
Cantilever the bladed-rotor sector by clamping the hub face,
push axially on the tip nodes, and read back the maximum
displacement magnitude.
>>> import numpy as np
>>> import femorph_solver as fs
>>> model = fs.Model.from_pv(fs.examples.cyclic_bladed_rotor_sector_path())
>>> pts = np.asarray(model.grid.points)
>>> hub = pts[:, 2] < pts[:, 2].min() + 1e-6
>>> tip = pts[:, 2] > pts[:, 2].max() - 1e-6
>>> model.fix(where=hub, dof=["UX", "UY", "UZ"])
>>> for n in model.node_numbers[tip]:
... model.apply_force(int(n), fz=1.0 / int(tip.sum()))
>>> result = model.solve_static()
>>> result.displacement.shape[0] > 0
True
"""
from femorph_solver._progress import ProgressReporter # noqa: PLC0415
from femorph_solver._threads import limit as _thread_limit # noqa: PLC0415
from femorph_solver.solvers.static import solve_static as _solve_static # noqa: PLC0415
with _thread_limit(thread_limit), ProgressReporter(enabled=progress) as report:
with report.stage("assemble K"):
K = self.stiffness_matrix()
with report.stage("build load vector"):
F = self._load_vector()
prescribed = self._prescribed_dofs()
_, layout, _N = self._dof_layout()
coupling = self._coupling_pairs(layout) if self._cp_sets else None
with report.stage("factor + solve"):
result = _solve_static(
K,
F,
prescribed,
coupling=coupling,
linear_solver=linear_solver,
mixed_precision=mixed_precision,
)
if release_inputs:
del K
self._release_K_M_cache()
return result
[docs]
def estimate_solve(
self,
n_modes: int = 10,
*,
linear_solver: str = "auto",
eigen_solver: str = "auto",
ooc: bool = False,
host_spec=None,
):
"""Predict modal-solve cost without running it.
Returns predicted ``(wall_s, peak_rss_mb)`` for a modal solve
on this model and machine, without actually running it.
Wraps :class:`~femorph_solver.estimators.Estimator` — reads
training rows from any ``femorph-benchmark-*.json`` files in
the current working directory and fits per-(host, backend)
log-log power laws on the fly. No training data → falls
back to a shape-of-universe prior tuned from the repo's own
14900KF benchmarks; the returned ``p95`` band then sits at
2× ``p50`` so callers can tell they're looking at a prior,
not a calibrated prediction.
Parameters
----------
n_modes : int
Modes requested (matches :meth:`solve_modal`).
linear_solver, eigen_solver : str
Backend names — picks which per-bucket fit applies.
ooc : bool, default False
``True`` applies the measured OOC multipliers
(``+3.5×`` wall, ``−22 %`` peak) on top of the in-core
prediction; useful when ``modal_solve`` would otherwise
exceed RAM.
host_spec : HostSpec or None
Override host detection (useful for "what would this
take on a different box?" queries). ``None`` → auto-
detect.
Returns
-------
:class:`femorph_solver.estimators.Estimate`
``p50`` / ``p95`` bands + the training-bucket diagnostic
the :class:`Estimator` picked.
"""
from femorph_solver.estimators import ( # noqa: PLC0415
Estimator,
HostSpec,
ProblemSpec,
load_training_rows,
)
problem = ProblemSpec.from_model(
self,
n_modes=n_modes,
linear_solver=linear_solver,
eigen_solver=eigen_solver,
ooc=ooc,
)
host = host_spec or HostSpec.auto()
est = Estimator.fit(load_training_rows())
return est.predict(problem, host=host)
[docs]
def solve_modal(
self,
n_modes: int = 10,
lumped: bool = False,
*,
eigen_solver: str = "auto",
linear_solver: str = "auto",
sigma: float = 0.0,
tol: float = 1e-12,
thread_limit: int | None = None,
release_inputs: bool = True,
mixed_precision: bool | None = False,
v0: np.ndarray | None = None,
progress: bool | Callable[[dict[str, Any]], None] = True,
) -> ModalResult:
"""Run a linear modal analysis.
Solves the generalised eigenproblem
:math:`K\\,\\varphi = \\omega^2 M\\,\\varphi` with the
Dirichlet BCs registered through :meth:`fix` applied as a
free-DOF reduction, and returns the lowest ``n_modes`` modes
ordered by ascending frequency with mass-normalised mode
shapes (:math:`\\varphi_i^\\top M \\varphi_j = \\delta_{ij}`).
Parameters
----------
n_modes : int, default 10
Number of eigenpairs requested.
lumped : bool, default False
Use a row-summed lumped mass instead of consistent mass.
Lumped masses break exact mass-orthogonality with the
consistent stiffness — use the consistent variant for
anything benchmarked against analytical natural-frequency
references.
eigen_solver : str, default ``"auto"``
Sparse eigenbackend identifier — one of ``"arpack"``,
``"lobpcg"``, ``"primme"``, ``"dense"``. See
:func:`femorph_solver.solvers.eigen.list_eigen_solvers` for
the full registered set.
linear_solver : str, default ``"auto"``
Inner shift-invert factoriser identifier — see
:meth:`solve_static` for the dispatch order.
sigma : float, default 0.0
Shift used by the shift-invert ARPACK driver (rad²/s²).
Defaults to zero for SPD problems; raise above the lowest
expected eigenvalue when the model is grounded with very
stiff BCs and the factoriser would otherwise see a near-
singular ``K - σ M``.
tol : float, default 1e-12
Convergence tolerance forwarded to the eigenbackend.
v0 : numpy.ndarray or None, optional
Starting Lanczos vector for ARPACK. Default ``None``
seeds from a random vector. Pass a previous
``ModalResult.mode_shapes[:, k]`` (full-system length
``N``) to warm-start parametric sweeps - 1.5-3x fewer
Lanczos iterations when the new eigenvectors are close
to the old ones (material sensitivity studies, mistuning
identification, blade-to-blade modal sweeps). Ignored
by non-ARPACK eigen backends.
release_inputs : bool, default True
Drop the model's cached full ``K`` / ``M`` once the
BC-reduced ``K_ff`` / ``M_ff`` are built — on a 1.2 M-DOF
HEX20 problem this saves ~2 GB of peak RSS. Pass ``False``
for parametric sweeps that re-call ``modal_solve`` on the
same model so the warm K / M cache survives.
mixed_precision : bool or None, default False
Inner Pardiso factor precision (ignored by non-Pardiso
backends). ``False`` factors in double precision. ``True``
factors in single precision and refines with double-
precision residuals — halves the factor footprint when MKL
honours the request. ``None`` lets femorph-solver decide
(opts in for SPD factorisation above ~50 k free DOFs).
progress : bool or callable, default False
Same contract as :meth:`solve_static`'s ``progress`` argument.
Returns
-------
femorph_solver.solvers.modal.ModalResult
In-memory result with ``omega_sq``, ``frequency``, and the
mass-normalised ``mode_shapes`` matrix. Call
``result.save(path, model)`` to persist as a disk-backed
:class:`~femorph_solver.result.modal.ModalResultDisk` handle.
See Also
--------
femorph_solver.CyclicModel.solve_modal : sector-reduced
cyclic-symmetry modal analysis (wraps a single-sector
``Model`` and solves the harmonic-decomposed eigenproblem).
harmonic_solve : modal-superposition frequency response.
estimate_solve : predict ``modal_solve`` cost without running it.
Notes
-----
Mixed-precision Pardiso depends on MKL exposing ``iparm(28)``
through pypardiso; some MKL builds silently no-op the
request. Check ``lu._solver.get_iparm(28)`` after a factorize
if you need to confirm the request was honoured.
Examples
--------
Lowest four natural frequencies of the bladed-rotor sector
with the hub face clamped.
>>> import numpy as np
>>> import femorph_solver as fs
>>> model = fs.Model.from_pv(fs.examples.cyclic_bladed_rotor_sector_path())
>>> pts = np.asarray(model.grid.points)
>>> model.fix(where=pts[:, 2] < pts[:, 2].min() + 1e-6, dof=["UX", "UY", "UZ"])
>>> result = model.solve_modal(n_modes=4)
>>> result.frequency.shape
(4,)
>>> bool(np.all(np.diff(result.frequency) >= 0))
True
"""
from femorph_solver._progress import ProgressReporter # noqa: PLC0415
from femorph_solver._threads import limit as _thread_limit # noqa: PLC0415
with _thread_limit(thread_limit), ProgressReporter(enabled=progress) as report:
if release_inputs:
return self._modal_solve_release(
n_modes=n_modes,
lumped=lumped,
eigen_solver=eigen_solver,
linear_solver=linear_solver,
sigma=sigma,
tol=tol,
mixed_precision=mixed_precision,
v0=v0,
report=report,
)
from femorph_solver.solvers.modal import solve_modal as _solve_modal # noqa: PLC0415
with report.stage(f"assemble K ({self.grid.n_cells:,} elements)"):
K = self.stiffness_matrix()
with report.stage(
f"assemble M ({self.grid.n_cells:,} elements, lumped={'yes' if lumped else 'no'})"
):
M = self.mass_matrix(lumped=lumped)
prescribed = self._prescribed_dofs()
with report.stage(
f"reduce BCs + eigensolve "
f"(K={K.shape[0]:,}x{K.shape[0]:,}, nnz={K.nnz:,}, "
f"prescribed={len(prescribed)}, n_modes={n_modes}, "
f"eigen={eigen_solver}, linear={linear_solver}, "
f"sigma={sigma:g})"
):
return _solve_modal(
K,
M,
prescribed,
n_modes=n_modes,
eigen_solver=eigen_solver,
linear_solver=linear_solver,
sigma=sigma,
tol=tol,
mixed_precision=mixed_precision,
v0=v0,
)
def _modal_solve_release(
self,
*,
n_modes: int,
lumped: bool,
eigen_solver: str,
linear_solver: str,
sigma: float = 0.0,
tol: float,
mixed_precision: bool | None = False,
v0: np.ndarray | None = None,
report: object | None = None,
) -> ModalResult:
"""Peak-RSS-minimising modal solve — see ``modal_solve(release_inputs=True)``.
Goes directly from elements to the BC-reduced ``K_ff`` / ``M_ff``
via a single element-level scatter pass per matrix (see
:meth:`_bc_reduce_and_release`). The full ``K`` / ``M`` are
never materialised, so peak assembly RSS is bounded by
``K_ff + M_ff`` rather than ``K + M + K_ff + M_ff``.
When the resolved linear-solver backend is Pardiso, K_ff is
assembled as upper-triangular only — halves its CSR storage
and skips the duplicate ``sp.triu`` extraction Pardiso would
otherwise make. Shift-invert ARPACK at σ=0 never calls
``K @ x`` (verified via scipy's internal path), so the triu
storage doesn't break the eigensolve.
"""
from femorph_solver._progress import ProgressReporter # noqa: PLC0415
from femorph_solver.solvers.linear import select_default_linear_solver # noqa: PLC0415
from femorph_solver.solvers.modal import solve_modal_reduced # noqa: PLC0415
_null = ProgressReporter(enabled=False)
report = report if report is not None else _null
prescribed = self._prescribed_dofs()
# Decide whether to assemble K_ff as triu-only. Needs:
# - the linear solver to accept upper-triangular SPD input (only
# Pardiso currently — CHOLMOD's wrapper does its own sp.tril);
# - the eigensolver to be an ARPACK-flavour shift-invert at σ=0
# (the only mode that doesn't call ``K @ x``).
_, _, N = self._dof_layout()
n_free_est = N - len(prescribed) # upper bound; rigid-body reduction may shrink
resolved_linear = (
select_default_linear_solver(spd=True, n_dof=n_free_est)
if linear_solver == "auto"
else linear_solver
)
triu_k = (
resolved_linear == "pardiso"
and eigen_solver in ("auto", "arpack")
and sigma == 0.0 # σ != 0 calls ``K @ x``, needs full symmetric K
)
# M-as-triu is orthogonal to K — it only needs the eigen backend
# to route M @ x through ``_maybe_parallel_matvec`` (which
# detects ``femorph_triu`` and applies the symmetric wrapper).
# The scipy dense path in ``solve_modal_reduced`` also honours
# the tag via ``la.eigh(..., lower=False)``. Safe for every
# eigen backend we ship.
triu_m = eigen_solver in ("auto", "arpack")
with report.stage(
f"assemble K_ff + M_ff (N={N:,} DOFs, "
f"prescribed={len(prescribed)}, "
f"lumped={'yes' if lumped else 'no'}, "
f"triu_K={'yes' if triu_k else 'no'}, "
f"triu_M={'yes' if triu_m else 'no'})"
):
K_ff, M_ff, free = self._bc_reduce_and_release(
prescribed, lumped=lumped, triu_k=triu_k, triu_m=triu_m
)
with report.stage(
f"factor + eigensolve "
f"(K_ff={K_ff.shape[0]:,}x{K_ff.shape[0]:,}, nnz={K_ff.nnz:,}, "
f"n_modes={n_modes}, eigen={eigen_solver}, "
f"linear={resolved_linear}, sigma={sigma:g})"
):
return solve_modal_reduced(
K_ff,
M_ff,
free,
n_modes=n_modes,
eigen_solver=eigen_solver,
linear_solver=linear_solver,
sigma=sigma,
tol=tol,
mixed_precision=mixed_precision,
v0=v0,
)
def _bc_reduce_and_release(
self,
prescribed: dict[int, float],
*,
lumped: bool,
triu_k: bool = False,
triu_m: bool = False,
) -> tuple[_sp.csr_array, _sp.csr_array, np.ndarray]:
"""Assemble BC-reduced K_ff / M_ff directly from element contributions.
Builds the reduced matrices in one element-level scatter pass each —
the full N×N K / M are never materialised, so peak RSS during
assembly is roughly ``K_ff + M_ff`` instead of
``K + M + K_ff + M_ff``. On a 500 k-DOF plate that saves ~700 MB
of peak.
``triu_k=True`` assembles K_ff as upper-triangular only (diagonal +
strictly upper). Halves K's CSR footprint and lets the Pardiso
SPD path skip its own ``sp.triu`` extraction.
``triu_m=True`` assembles M_ff as upper-triangular only too.
Halves M's CSR footprint. Callers that need ``M @ x`` (ARPACK
generalized shift-invert) must route through
``_maybe_parallel_matvec`` which detects the tag and applies
``y = M @ x + M.T @ x - diag(M) * x`` — the symmetric product,
no lower-triangular mirror allocation. Only safe when M is
truly symmetric (all consistent / lumped element-mass matrices
on this codebase are).
Rigid-body DOFs (zero diagonal stiffness, e.g. TRUSS2 transverse)
are caught post-assembly via ``K_ff.diagonal()`` and dropped with a
second ``csr_submatrix_free`` pass — the common case (HEX20 /
HEX8 etc.) has none so the extra pass is skipped entirely.
"""
_, _layout, N = self._dof_layout()
# Drop any stale cached K / M so the element-level build below
# doesn't double-allocate alongside them.
self.release_matrix_cache()
mask_fixed = np.zeros(N, dtype=bool)
for idx in prescribed:
mask_fixed[int(idx)] = True
free_pre = ~mask_fixed
n_free_pre = int(free_pre.sum())
if n_free_pre == 0 or N == 0 or self._grid.n_cells == 0:
empty = _sp.csr_array((0, 0), dtype=np.float64)
return empty, empty, free_pre
full_to_free = np.full(N, -1, dtype=np.int32)
full_to_free[free_pre] = np.arange(n_free_pre, dtype=np.int32)
K_ff = self._assemble("K", full_to_free=full_to_free, triu_only=triu_k)
# K and M share element connectivity + the reduced-DOF mask, so
# their sparsity patterns (row_ptr + col_idx) are identical when
# both are built with the same ``triu_only`` flag. Pass K's
# pattern arrays into M's build so M aliases them instead of
# rebuilding its own copies — saves ~(n_free + nnz) × 4 B
# (≈30 MB on a 200×200×2 triu HEX8 plate).
shared_pattern = (K_ff.indptr, K_ff.indices) if triu_k == triu_m else None
M_ff = self._assemble(
"M",
lumped=lumped,
full_to_free=full_to_free,
triu_only=triu_m,
pattern=shared_pattern,
)
# Rigid-body pass — drop rows whose K diagonal is below a relative
# pivot tolerance. Matches the tolerance from the old post-hoc
# ``_bc_reduce`` path so parity with existing tests holds. The
# diagonal is present in both full and triu CSR so this check works
# regardless of ``triu_k``.
k_diag = K_ff.diagonal()
scale = float(np.max(np.abs(k_diag))) if k_diag.size else 1.0
tol_pivot = 1e-12 * scale if scale > 0 else 1e-30
rigid = np.abs(k_diag) <= tol_pivot
if rigid.any():
n_free = int(n_free_pre - rigid.sum())
reduce_map = np.full(n_free_pre, -1, dtype=np.int32)
reduce_map[~rigid] = np.arange(n_free, dtype=np.int32)
K_ff = self._csr_submatrix(K_ff, reduce_map, n_free)
M_ff = self._csr_submatrix(M_ff, reduce_map, n_free)
if triu_k:
# csr_submatrix_free preserves the triu-ness of the input
# (it only picks rows/cols, no transpose), but the tagging
# doesn't survive the construction of the new csr_array.
K_ff.femorph_triu = True
if triu_m:
M_ff.femorph_triu = True
# Propagate the rigid drop back into the full-DOF mask so the
# free-mask returned to ``solve_modal_reduced`` reflects both
# prescribed + rigid-body removals.
pre_free_indices = np.where(free_pre)[0]
mask_fixed[pre_free_indices[rigid]] = True
# Drop the per-node DOF layout caches before the caller
# allocates the Pardiso factor. These are pure memoization
# (``{node: (offset, labels)}`` dicts, ~O(n_nodes) with dict
# overhead) reconstructable from ``self._grid`` /
# ``self._etypes`` in a few ms. At 100×100×2 HEX8 they
# hold ~18 MB; at 300×300×2, ~160 MB. Holding them through
# the 1-3 GB Pardiso factor + eigsh shift-invert is dead
# weight — the assembly that needed them is finished, and
# neither Pardiso nor eigsh touches them. A subsequent
# ``modal_solve`` on the same Model rebuilds the caches on
# demand; the only cost is re-walking the grid once per
# solve, which is a tiny fraction of a second.
self._layout_cache = None
self._node_dof_labels_cache = None
# glibc arena hygiene before the caller (typically
# ``solve_modal_reduced``) allocates the 1-3 GB Pardiso
# Cholesky factor on top. The large transient scratches in
# assembly (``ke_batch`` / ``me_batch`` at hundreds of MB,
# ``_build_csr_pattern`` scratch at tens of MB) go through
# ``mmap`` directly — those are already returned to the OS
# on ``free()`` and ``malloc_trim`` can't touch them. What
# IS sometimes reclaimable: smaller allocations (coords_arr
# copies, dof-map scratch, scipy CSR temporaries) that land
# in glibc's arena and sit there as cached chunks. On a
# 200×200×2 HEX8 plate this typically reclaims 5-15 MB
# post-assembly — a small win but no-cost on platforms
# where ``malloc_trim`` exists (Linux/glibc) and a no-op
# everywhere else. Kept here as cheap hygiene, not a
# headline optimisation: the real peak-RSS driver is the
# Pardiso factor itself, which is attacked separately.
from femorph_solver._malloc import trim_allocator # noqa: PLC0415
trim_allocator()
return K_ff, M_ff, ~mask_fixed
@staticmethod
def _csr_submatrix(A: _sp.csr_array, full_to_free: np.ndarray, n_free: int) -> _sp.csr_array:
"""Extract ``A[keep][:, keep]`` in CSR using the C++ parallel path."""
ip, ix, d = _core.csr_submatrix_free(
np.ascontiguousarray(A.indptr, dtype=np.int32),
np.ascontiguousarray(A.indices, dtype=np.int32),
np.ascontiguousarray(A.data, dtype=np.float64),
full_to_free,
n_free,
)
return _sp.csr_array((d, ix, ip), shape=(n_free, n_free))
[docs]
def rayleigh_damping(
self,
alpha: float,
beta: float,
*,
lumped: bool = False,
) -> _sp.csr_array:
"""Assemble the Rayleigh damping matrix ``α M + β K``.
Convenience wrapper that pulls already-assembled ``K`` and ``M``
off this model and returns the canonical ``αM + βK`` form
:meth:`solve_transient` accepts as ``damping=``.
Parameters
----------
alpha : float
Mass-proportional coefficient ``α`` [1/s].
beta : float
Stiffness-proportional coefficient ``β`` [s].
lumped : bool, default False
Forwarded to :meth:`mass_matrix` so the M used here matches
the M the transient integrator will assemble.
Returns
-------
scipy.sparse.csr_array
``α M + β K`` with the same DOF layout as :meth:`dof_map`.
See Also
--------
transient_solve : accepts the returned matrix as ``damping=`` or
takes ``rayleigh=(α, β)`` to build it internally without the
second K / M assembly.
Examples
--------
Build a small Rayleigh damping matrix on the bundled sector.
>>> import femorph_solver as fs
>>> model = fs.Model.from_pv(fs.examples.cyclic_bladed_rotor_sector_path())
>>> C = model.rayleigh_damping(alpha=1.0, beta=1e-6)
>>> C.shape == model.stiffness_matrix().shape
True
"""
K = self.stiffness_matrix()
M = self.mass_matrix(lumped=lumped)
return (float(alpha) * M + float(beta) * K).tocsr()
[docs]
def solve_transient(
self,
*,
dt: float,
n_steps: int,
load: Callable[[float], np.ndarray] | np.ndarray | None = None,
lumped: bool = False,
damping: _sp.csr_array | None = None,
rayleigh: tuple[float, float] | None = None,
u0: np.ndarray | None = None,
v0: np.ndarray | None = None,
beta: float = 0.25,
gamma: float = 0.5,
thread_limit: int | None = None,
linear_solver: str = "auto",
mixed_precision: bool | None = False,
release_inputs: bool = True,
progress: bool | Callable[[dict[str, Any]], None] = True,
) -> TransientResult:
"""Run a linear transient analysis.
Integrates ``M ü + C u̇ + K u = F(t)`` with Newmark-β. The load
``F(t)`` defaults to the F-staged nodal forces held constant over
the interval; pass a callable ``t -> (N,)`` for time-varying loads
or a fixed array to override the staged values.
Damping accepts two mutually-exclusive forms:
* ``damping`` — pre-assembled ``(N, N)`` sparse damping matrix.
* ``rayleigh=(α, β)`` — Rayleigh form
:math:`C = \\alpha M + \\beta K` built inline from the same
``K`` / ``M`` the integrator already assembles.
Parameters
----------
dt : float
Time-step size [s].
n_steps : int
Number of time steps to integrate.
load : ndarray, callable, or None, optional
``(N,)`` constant load vector or a callable
``t -> (N,)`` for time-varying loads. ``None`` (default)
uses the F-staged nodal forces from :meth:`apply_force`,
held constant over the integration window.
lumped : bool, default False
Forwarded to :meth:`mass_matrix`.
damping : scipy.sparse.csr_array or None, optional
Pre-assembled damping matrix. Mutually exclusive with
``rayleigh``.
rayleigh : tuple of (float, float) or None, optional
``(α, β)`` Rayleigh damping coefficients. Mutually
exclusive with ``damping``.
u0, v0 : ndarray or None, optional
Initial displacement / velocity ``(N,)``. ``None`` is
zero.
beta : float, default 0.25
Newmark-β parameter; default is the average-acceleration
(unconditionally stable) value.
gamma : float, default 0.5
Newmark-γ parameter; default is the second-order-accurate
value.
release_inputs : bool, default True
Drop the model's cached full ``K`` / ``M`` once the
Newmark integrator has built the effective-stiffness
factor. Pass ``False`` for parametric sweeps that
re-call ``transient_solve`` on the same model so the
warm K / M cache survives. Same contract as
:meth:`solve_modal`'s ``release_inputs``.
progress : bool or callable, default False
Same contract as :meth:`solve_static`'s ``progress`` argument.
Returns
-------
femorph_solver.solvers.transient.TransientResult
In-memory time-history result with ``time``,
``displacement``, ``velocity``, ``acceleration``. Call
``result.save(path, model)`` to persist as a disk-backed
:class:`~femorph_solver.result.transient.TransientResultDisk`
handle.
See Also
--------
rayleigh_damping : assemble :math:`\\alpha M + \\beta K` separately.
Examples
--------
Integrate one period of free vibration on the bladed-rotor
sector with a small Rayleigh damping ratio.
>>> import numpy as np
>>> import femorph_solver as fs
>>> model = fs.Model.from_pv(fs.examples.cyclic_bladed_rotor_sector_path())
>>> pts = np.asarray(model.grid.points)
>>> model.fix(where=pts[:, 2] < pts[:, 2].min() + 1e-6, dof=["UX", "UY", "UZ"])
>>> result = model.solve_transient(
... dt=1e-5, n_steps=4, rayleigh=(1.0, 1e-6),
... )
>>> result.displacement.shape[0]
5
"""
from femorph_solver._progress import ProgressReporter # noqa: PLC0415
from femorph_solver._threads import limit as _thread_limit # noqa: PLC0415
from femorph_solver.solvers.transient import (
solve_transient as _solve_transient, # noqa: PLC0415
)
if damping is not None and rayleigh is not None:
raise ValueError("pass at most one of damping= or rayleigh=, not both")
with _thread_limit(thread_limit), ProgressReporter(enabled=progress) as report:
with report.stage("assemble K/M"):
K = self.stiffness_matrix()
M = self.mass_matrix(lumped=lumped)
if rayleigh is not None:
alpha, beta_r = (float(rayleigh[0]), float(rayleigh[1]))
damping = (alpha * M + beta_r * K).tocsr()
with report.stage("build load vector"):
prescribed = self._prescribed_dofs()
F = self._load_vector() if load is None else load
with report.stage("integrate (Newmark-β)"):
result = _solve_transient(
K,
M,
F,
dt=dt,
n_steps=n_steps,
prescribed=prescribed,
C=damping,
u0=u0,
v0=v0,
beta=beta,
gamma=gamma,
linear_solver=linear_solver,
mixed_precision=mixed_precision,
)
if release_inputs:
del K, M
self._release_K_M_cache()
return result
[docs]
def solve_harmonic(
self,
*,
frequencies: np.ndarray | Sequence[float],
n_modes: int = 10,
load: np.ndarray | None = None,
modal_damping_ratio: float | Sequence[float] | np.ndarray | None = None,
rayleigh: tuple[float, float] | None = None,
lumped: bool = False,
eigen_solver: str = "auto",
linear_solver: str = "auto",
tol: float = 1e-12,
thread_limit: int | None = None,
release_inputs: bool = True,
mixed_precision: bool | None = False,
modal_result: ModalResult | None = None,
progress: bool | Callable[[dict[str, Any]], None] = True,
) -> HarmonicResult:
"""Run a frequency-response (harmonic) analysis by modal superposition.
Solves ``(K - ω² M + i ω C) u(ω) = F`` at each excitation
frequency by projecting onto the lowest ``n_modes`` mass-
orthonormal eigenmodes of the undamped system. ``C`` is
introduced through per-mode damping ratios — see
:func:`femorph_solver.solvers.harmonic.solve_harmonic`
for the full superposition / damping math.
Parameters
----------
frequencies
``(n_freq,)`` excitation frequencies [Hz].
n_modes
Number of modes retained in the superposition. Ignored
when ``modal_result`` is supplied.
load
``(N,)`` complex (or real) DOF-indexed load. Defaults to
the F-staged nodal forces from :meth:`apply_force`.
modal_damping_ratio
Scalar (uniform) or per-mode damping ratio ζ_n ∈ [0, 1).
rayleigh
``(α, β)`` Rayleigh constants such that
``ζ_n = α/(2 ω_n) + β ω_n / 2``. May be combined with
``modal_damping_ratio``.
lumped, eigen_solver, linear_solver, tol
Forwarded to the underlying :meth:`solve_modal` when
``modal_result`` is not supplied.
release_inputs : bool, default True
Drop the model's cached full ``K`` / ``M`` once the
harmonic solve has consumed them. Pass ``False`` for
parametric forcing sweeps that re-call ``harmonic_solve``
on the same model so the warm K / M cache survives.
Same contract as :meth:`solve_modal`'s ``release_inputs``.
mixed_precision : bool or None, default False
Forwarded to the underlying :meth:`solve_modal` when
``modal_result`` is not supplied. Same contract as
:meth:`solve_modal`'s ``mixed_precision``. Ignored when
``modal_result`` is supplied (the precomputed factor
already determined precision).
modal_result
Reuse a precomputed :class:`ModalResult` instead of
recomputing — useful for parametric forcing sweeps where
the eigenproblem doesn't change.
progress : bool or callable, default True
Same contract as :meth:`solve_static` — ``True`` prints a stage
ticker, ``False`` is silent, a callable receives per-stage
``{stage, t_ms, ...}`` events.
Returns
-------
HarmonicResult
Frequency-domain response with ``.displacement`` shaped
``(n_freq, N)`` complex.
See Also
--------
modal_solve : the eigenproblem driver harmonic_solve relies on.
Examples
--------
Sweep five frequencies through the lowest natural-frequency
band of the bladed-rotor sector.
>>> import numpy as np
>>> import femorph_solver as fs
>>> model = fs.Model.from_pv(fs.examples.cyclic_bladed_rotor_sector_path())
>>> pts = np.asarray(model.grid.points)
>>> model.fix(where=pts[:, 2] < pts[:, 2].min() + 1e-6, dof=["UX", "UY", "UZ"])
>>> for n in model.node_numbers[pts[:, 2] > pts[:, 2].max() - 1e-6]:
... model.apply_force(int(n), fz=1.0)
>>> result = model.solve_harmonic(
... frequencies=np.linspace(100.0, 2000.0, 5),
... n_modes=4,
... modal_damping_ratio=0.02,
... )
>>> result.displacement.shape[0]
5
"""
from femorph_solver._progress import ProgressReporter # noqa: PLC0415
from femorph_solver._threads import limit as _thread_limit # noqa: PLC0415
from femorph_solver.solvers.harmonic import solve_harmonic as _solve # noqa: PLC0415
with _thread_limit(thread_limit), ProgressReporter(enabled=progress) as report:
with report.stage(f"assemble K ({self.grid.n_cells:,} elements)"):
K = self.stiffness_matrix()
with report.stage(
f"assemble M ({self.grid.n_cells:,} elements, lumped={'yes' if lumped else 'no'})"
):
M = self.mass_matrix(lumped=lumped)
prescribed = self._prescribed_dofs()
F = self._load_vector() if load is None else load
n_freq = len(frequencies) if hasattr(frequencies, "__len__") else 0
with report.stage(
f"harmonic solve "
f"(K={K.shape[0]:,}x{K.shape[0]:,}, n_modes={n_modes}, "
f"n_freq={n_freq})"
):
result = _solve(
K,
M,
F,
frequencies=frequencies,
n_modes=n_modes,
prescribed=prescribed,
modal_damping_ratio=modal_damping_ratio,
rayleigh=rayleigh,
eigen_solver=eigen_solver,
linear_solver=linear_solver,
tol=tol,
mixed_precision=mixed_precision,
modal_result=modal_result,
)
if release_inputs:
del K, M
self._release_K_M_cache()
return result
# ---------------------------------------------------------------------
# Deprecated method aliases (PR-K, #750)
# ---------------------------------------------------------------------
#
# Pre-PR-K naming: ``solve``, ``modal_solve``, ``transient_solve``,
# ``harmonic_solve``. Post-PR-K every analysis driver is named
# ``solve_<analysis>``: ``solve_static``, ``solve_modal``,
# ``solve_transient``, ``solve_harmonic``. The aliases below keep
# the old names working with a ``DeprecationWarning`` for one minor
# release; the next minor release drops them.
[docs]
def solve(self, *args: Any, **kwargs: Any) -> StaticResult:
"""Deprecated alias for :meth:`solve_static`."""
import warnings # noqa: PLC0415
warnings.warn(
"Model.solve() is deprecated; use Model.solve_static() instead. "
"The verb-noun naming (`solve_<analysis>`) is consistent across "
"every Model.*_solve entry point as of PR-K (#750).",
DeprecationWarning,
stacklevel=2,
)
return self.solve_static(*args, **kwargs)
[docs]
def modal_solve(self, *args: Any, **kwargs: Any) -> ModalResult:
"""Deprecated alias for :meth:`solve_modal`."""
import warnings # noqa: PLC0415
warnings.warn(
"Model.solve_modal() is deprecated; use Model.solve_modal() instead. "
"The verb-noun naming (`solve_<analysis>`) is consistent across "
"every Model.*_solve entry point as of PR-K (#750).",
DeprecationWarning,
stacklevel=2,
)
return self.solve_modal(*args, **kwargs)
[docs]
def transient_solve(self, *args: Any, **kwargs: Any) -> TransientResult:
"""Deprecated alias for :meth:`solve_transient`."""
import warnings # noqa: PLC0415
warnings.warn(
"Model.solve_transient() is deprecated; use Model.solve_transient() "
"instead. The verb-noun naming (`solve_<analysis>`) is consistent "
"across every Model.*_solve entry point as of PR-K (#750).",
DeprecationWarning,
stacklevel=2,
)
return self.solve_transient(*args, **kwargs)
[docs]
def harmonic_solve(self, *args: Any, **kwargs: Any) -> HarmonicResult:
"""Deprecated alias for :meth:`solve_harmonic`."""
import warnings # noqa: PLC0415
warnings.warn(
"Model.solve_harmonic() is deprecated; use Model.solve_harmonic() "
"instead. The verb-noun naming (`solve_<analysis>`) is consistent "
"across every Model.*_solve entry point as of PR-K (#750).",
DeprecationWarning,
stacklevel=2,
)
return self.solve_harmonic(*args, **kwargs)