# Source code for femorph_solver.mapdl_api.dat

"""Native MAPDL ``.dat`` ASCII input-deck reader.

Mirrors :func:`femorph_solver.mapdl_api.cdb.from_cdb` but for the text
input-deck format Ansys ships with the Verification Manual
(``vm2025r1_mapdl/vm*.dat``).  No MAPDL invocation; the reader walks
the deck verb-by-verb and replays each pre-processor command against a
fresh :class:`~femorph_solver.Model` via the existing :class:`APDL`
shim.

Phase 1 verb coverage
---------------------

The reader supports the verbs the simpler statics / modal VMs need:

* **Phase markers / control (no-op):** ``/PREP7``, ``/SOLU``,
  ``/POST1``, ``FINISH``, ``/OUT``, ``/VERIFY``, ``/TITLE``, ``/COM``,
  ``/SHOW``, ``/UNITS`` (``/UNITS`` is honoured), ``OUTPR``, ``SOLVE``,
  ``NSUBST``, ``ANTYPE`` (recorded informationally).
* **Pre-processor verbs:** ``ET``, ``TYPE``, ``REAL``, ``MAT``,
  ``MP``, ``R``, ``SECTYPE`` / ``SECT``, ``SECDATA`` / ``SECD``,
  ``KEYOPT``, ``N``, ``E``, ``EGEN``, ``FILL``, ``D``, ``F``,
  ``CP``, ``BFUNIF``, ``TREF``, ``SFBEAM``.
* **APDL parameter assignments:** ``L = 15*12``, ``A = 2*L*COS(THETA)``,
  including ``*AFUN,DEG`` / ``*AFUN,RAD`` for trig units.
* **Comments:** ``!`` end-of-line, ``C***`` whole-line, ``/COM`` lines,
  ``$``-separated multi-statement lines.

Verbs the reader explicitly *recognises but skips* (post-processing
macros — the verification harness does its own assertions):
``*GET``, ``*SET``, ``*VWRITE``, ``*VFILL``, ``*DIM``, ``ETABLE``,
``NSEL``, ``ESEL``, ``NSLN``, ``ESLN``, ``FSUM``, ``PRNSOL``,
``PLDISP``, ``PLNSOL``, ``SET``, ``NALL``, ``EALL``.

Anything else is logged at WARNING level and skipped — the reader
never silently drops an unrecognised pre-processor verb.

Out of scope (file follow-up issues)
------------------------------------

* ``*DO`` / ``*ENDDO`` macro loops, ``*IF`` blocks, ``*USE`` /
  ``*ULIB`` user-macro libraries.
* ``MPDATA`` table-form material properties.
* Non-structural physics (``ETCHG``, thermal/EM analyses).

References
----------

* Ansys, Inc., *Mechanical APDL 2025R1 Verification Manual*, public
  documentation: <https://ansyshelp.ansys.com/...>.  The reader
  targets the deck format these manuals ship in.
* MAPDL command grammar: *Mechanical APDL Command Reference* —
  individual verb pages (``N``, ``E``, ``EGEN``, ``D``, ``F``, ...).
"""

from __future__ import annotations

import math
import re
from collections.abc import Callable
from pathlib import Path
from typing import TYPE_CHECKING, Any

import numpy as np

from femorph_solver._log import get_logger
from femorph_solver.interop.mapdl._apdl_dialect import APDL
from femorph_solver.labels import unit_system_from_mapdl

if TYPE_CHECKING:
    from femorph_solver.model import Model


_LOG = get_logger(__name__)


__all__ = ["from_dat", "parse_dat_with_meta"]


# Container the dispatcher fills with deck-level metadata extracted
# during the parse — analysis type, mode count, etc.  Used by
# ``run_input_deck`` to drive the matching solve automatically.
class _DeckMeta:
    """Parser-side capture of solve-driving deck directives."""

    def __init__(self) -> None:
        # Default to "STATIC" so a deck with no ANTYPE behaves like a
        # static analysis (the most common silent case).
        self.antype: str = "STATIC"
        # MODOPT,LANB,n  → record n.  Default 10 (MAPDL's MODOPT default).
        self.n_modes: int = 10
        # True iff the deck explicitly set MODOPT (we want to know
        # whether the user asked for modal even when ANTYPE later
        # reverted to STATIC for a perturbation pre-pass).
        self.has_modopt: bool = False
        # True iff any ANTYPE line had ``,PERTURB`` or
        # ``,RESTART,...,PERTURB`` — signals a linear-perturbation
        # multi-stage solve (static pre-stress → modal/buckle in the
        # perturbed configuration).  Combined with ``has_modopt`` the
        # dispatcher routes the FINAL solve to modal even though the
        # last ``meta.antype`` is STATIC.
        self.has_perturb: bool = False
        # Sub-step / load-step settings (NSUBST,NSBSTP, …) — informational.
        self.nsubst: int | None = None
        # Transient time integration: TIME,t_end and DELTIM,Δt.
        self.t_end: float | None = None
        self.deltim: float | None = None
        # Harmonic frequency sweep: HARFRQ,f_start,f_end,...
        self.harfrq_start: float | None = None
        self.harfrq_end: float | None = None
        self.nsubst_harm: int | None = None


# ---------------------------------------------------------------------
# Tokeniser
# ---------------------------------------------------------------------


def _strip_inline_comment(line: str) -> str:
    """Drop everything from the first un-quoted ``!`` to end of line."""
    bang = line.find("!")
    if bang == -1:
        return line
    return line[:bang]


def _split_statements(line: str) -> list[str]:
    """Split a line on ``$`` — MAPDL's multi-statement separator."""
    return [s for s in line.split("$") if s.strip()]


def _tokenise_deck(text: str) -> list[str]:
    """Return one logical statement per list element, comments stripped.

    ``*DO`` blocks are out of scope — their lines pass through
    verbatim and the dispatcher ignores them.

    Workbench-style multi-line blocks (``NBLOCK`` / ``EBLOCK`` /
    ``CMBLOCK``) collapse into one composite token of the form::

        BLOCKVERB::<header line>::<format line>::<data lines joined by \\n>::-1

    so the dispatcher loop never has to track multi-line state; it
    splits the composite on ``::`` to recover the pieces.
    """
    block_verbs = {"NBLOCK", "EBLOCK", "CMBLOCK"}
    statements: list[str] = []
    lines = text.splitlines()
    total = len(lines)
    idx = 0
    while idx < total:
        current = lines[idx]
        # Block headers always open the line and are case-insensitive
        # ("nblock" and "NBLOCK" both match).
        trimmed = current.lstrip()
        verb = trimmed.upper().split(",")[0].strip()
        if verb in block_verbs:
            header = trimmed.rstrip()
            # The line right after the header is the Fortran format string.
            fmt = lines[idx + 1].strip() if idx + 1 < total else ""
            idx += 2
            body: list[str] = []
            # Swallow data rows up to the ``-1`` terminator, or EOF as
            # a defensive fallback.  Blank rows inside the block are
            # unusual but kept rather than truncating.
            while idx < total:
                row = lines[idx]
                idx += 1
                if row.strip() == "-1":
                    break
                body.append(row.rstrip())
            statements.append(f"{verb}::{header}::{fmt}::" + "\n".join(body) + "::-1")
            continue
        stripped = _strip_inline_comment(current).rstrip()
        idx += 1
        if stripped.strip():
            statements.extend(part.strip() for part in _split_statements(stripped))
    return statements


# ---------------------------------------------------------------------
# APDL parameter expression evaluator
# ---------------------------------------------------------------------


# An APDL identifier: leading letter, then letters / digits / underscore.
# NOTE(review): not referenced in this portion of the file — presumably
# the dispatcher further down uses it for parameter substitution;
# confirm before removing.
_IDENT_RE = re.compile(r"\b([A-Z_][A-Z0-9_]*)\b", re.IGNORECASE)
# Detect a parameter assignment: ``NAME = expr`` (whitespace around =
# allowed).  Reject the right-hand-side ``=`` of "FUNC(A,B)=value" by
# requiring the LHS to be a bare identifier (anchored at line start).
_ASSIGN_RE = re.compile(
    r"^\s*([A-Z_][A-Z0-9_]*)\s*=\s*(.+?)\s*$",
    re.IGNORECASE,
)


class _ParamTable:
    """Symbol table + safe-eval engine for APDL parameter expressions.

    Honours ``*AFUN,DEG`` / ``*AFUN,RAD`` so trig functions in the
    expressions return the correct value when the deck declared a
    different angle convention.  Implemented by passing a per-table
    ``COS`` / ``SIN`` / ``TAN`` / ``ATAN`` etc. into the eval namespace.
    """

    def __init__(self) -> None:
        self.params: dict[str, float] = {}
        self.afun: str = "RAD"

    def set_afun(self, mode: str) -> None:
        m = mode.strip().upper()
        if m not in ("DEG", "RAD"):
            raise ValueError(f"*AFUN: unknown angle mode {mode!r}; expected DEG or RAD")
        self.afun = m

    def assign(self, name: str, expr: str) -> float:
        value = self.evaluate(expr)
        self.params[name.upper()] = value
        return value

    def evaluate(self, expr: str) -> float:
        """Evaluate ``expr`` to a float using the current parameter values."""
        # APDL expressions allow caret ``^`` for power; map it to Python ``**``.
        expr = expr.replace("^", "**")
        ns: dict[str, Any] = dict(self.params)
        # Numeric / math built-ins MAPDL exposes (subset).
        ns.update(
            ABS=abs,
            SQRT=math.sqrt,
            EXP=math.exp,
            LOG=math.log,
            LOG10=math.log10,
            SIN=self._sin,
            COS=self._cos,
            TAN=self._tan,
            ASIN=self._asin,
            ACOS=self._acos,
            ATAN=self._atan,
            ATAN2=self._atan2,
            PI=math.pi,
            E=math.e,
        )
        # Use eval with a restricted globals.  The expression text comes
        # from a trusted local file — and the namespace contains only
        # numeric primitives — but we still pin builtins to {} so a
        # bad expression doesn't stumble into ``open`` / ``__import__``.
        try:
            return float(eval(expr, {"__builtins__": {}}, ns))  # noqa: S307
        except (TypeError, ValueError, NameError, SyntaxError) as exc:
            raise ValueError(f"could not evaluate APDL expression {expr!r}: {exc}") from exc

    # -- trig wrappers honouring *AFUN ----------------------------------

    def _sin(self, x: float) -> float:
        return math.sin(self._to_rad(x))

    def _cos(self, x: float) -> float:
        return math.cos(self._to_rad(x))

    def _tan(self, x: float) -> float:
        return math.tan(self._to_rad(x))

    def _asin(self, x: float) -> float:
        return self._from_rad(math.asin(x))

    def _acos(self, x: float) -> float:
        return self._from_rad(math.acos(x))

    def _atan(self, x: float) -> float:
        return self._from_rad(math.atan(x))

    def _atan2(self, y: float, x: float) -> float:
        return self._from_rad(math.atan2(y, x))

    def _to_rad(self, angle: float) -> float:
        return math.radians(angle) if self.afun == "DEG" else angle

    def _from_rad(self, rad: float) -> float:
        return math.degrees(rad) if self.afun == "DEG" else rad


# ---------------------------------------------------------------------
# Field resolution
# ---------------------------------------------------------------------


def _resolve_field(field: str, params: _ParamTable) -> Any:
    """Resolve a single comma-separated field to a Python value.

    * Empty field → ``None`` (caller decides what default to use).
    * Pure numeric → ``float`` / ``int`` as appropriate.
    * Identifier in symbol table → its value.
    * Anything else → evaluated as an arithmetic expression.
    """
    field = field.strip()
    if not field:
        return None
    upper = field.upper()
    if upper in params.params:
        return params.params[upper]
    # Bare numeric?
    try:
        # int first so node / element numbers don't drift to float.
        return int(field)
    except ValueError:
        pass
    try:
        return float(field)
    except ValueError:
        pass
    # Fall back: evaluate as expression.
    return params.evaluate(field)


def _to_int(field: str, params: _ParamTable) -> int:
    """Resolve *field* and coerce to ``int``; an empty field is an error."""
    resolved = _resolve_field(field, params)
    if resolved is None:
        raise ValueError("expected an integer field, got empty")
    return int(resolved)


def _to_float(field: str, params: _ParamTable) -> float:
    """Resolve *field* and coerce to ``float``; empty fields default to 0.0."""
    resolved = _resolve_field(field, params)
    return 0.0 if resolved is None else float(resolved)


# ---------------------------------------------------------------------
# Verb handlers
# ---------------------------------------------------------------------


class _NodeHistory:
    """Trailing pair of node IDs from the last two ``N`` commands.

    The MAPDL deck grammar lets ``FILL`` be called with no arguments,
    in which case the verb fills nodes between the most recent pair
    of ``N`` definitions.  The bare verbatim VM5 deck uses this form:
    ``N,1,...; N,7,...; FILL`` ⇒ fill nodes 2..6 by linear interp.
    """

    __slots__ = ("last", "prev")

    def __init__(self) -> None:
        self.prev: int | None = None
        self.last: int | None = None

    def push(self, node_id: int) -> None:
        self.prev = self.last
        self.last = node_id


def _h_n(args: list[str], apdl: APDL, params: _ParamTable, history: _NodeHistory) -> None:
    """``N, num, x, y, z`` — node creation.  Missing coordinates default to 0."""
    num = _to_int(args[0], params)
    coords = [
        _to_float(args[slot], params) if len(args) > slot else 0.0
        for slot in (1, 2, 3)
    ]
    apdl.n(num, *coords)
    history.push(num)


# ---------------------------------------------------------------------
# Workbench-style multi-line block handlers
# ---------------------------------------------------------------------


# One Fortran edit descriptor: optional repeat count, a type letter
# (I = integer; E / F / D = float), the field width, and an optional
# precision / exponent suffix (e.g. ``.9e3``) that is matched but
# otherwise ignored.
_FORTRAN_FIELD_RE = re.compile(
    r"(\d*)\s*([IEFD])\s*(\d+)(?:\.\d+(?:[eEdD][+-]?\d+)?)?",
    re.IGNORECASE,
)


def _parse_fortran_format(fmt: str) -> list[tuple[str, int]]:
    """Parse a Fortran format string like ``(1i9,3e20.9e3)``.

    Returns a list of ``(typecode, width)`` pairs in column order,
    where ``typecode`` is ``'I'`` (integer) or ``'F'`` (float; covers
    ``E`` / ``D`` / ``F``).  Repeat counts are expanded.

    Examples
    --------
    ``(1i9,3e20.9e3)`` → ``[('I', 9), ('F', 20), ('F', 20), ('F', 20)]``
    ``(19i9)`` → ``[('I', 9)] * 19``
    """
    body = fmt.strip()
    if body.startswith("(") and body.endswith(")"):
        body = body[1:-1]
    out: list[tuple[str, int]] = []
    for part in body.split(","):
        m = _FORTRAN_FIELD_RE.match(part.strip())
        if not m:
            raise ValueError(f"unrecognised Fortran format field: {part!r}")
        repeat = int(m.group(1)) if m.group(1) else 1
        kind = m.group(2).upper()
        width = int(m.group(3))
        # Map E/D (exponential) and F (fixed) all to "F" — we don't
        # care about exponent-digit count, just that it's a float.
        type_code = "I" if kind == "I" else "F"
        out.extend((type_code, width) for _ in range(repeat))
    return out


def _read_fixed_width(line: str, fields: list[tuple[str, int]]) -> list[Any]:
    """Slice a fixed-width Fortran data line by ``fields``.

    Trailing missing fields are tolerated (Fortran lets the writer
    omit zero-valued trailing fields).
    """
    values: list[Any] = []
    pos = 0
    for kind, width in fields:
        chunk = line[pos : pos + width]
        pos += width
        token = chunk.strip()
        if not token:
            # Trailing missing field — Fortran behaviour is "leave at
            # the previous value or zero".  Use 0 for our purposes.
            values.append(0 if kind == "I" else 0.0)
            continue
        if kind == "I":
            values.append(int(token))
        else:
            values.append(float(token))
    return values


def _h_nblock(composite: str, apdl: APDL, history: _NodeHistory) -> None:
    """``NBLOCK`` — replay a Workbench fixed-width node table.

    ``composite`` is the token built by ``_tokenise_deck``:
    ``NBLOCK::<header>::<format>::<data-lines>::-1``.  Only the
    format and data sections matter here.  Every parsed row is routed
    through :meth:`APDL.n` so book-keeping (node history, dof_layout
    invalidation) stays identical to hand-written ``N`` commands.
    """
    pieces = composite.split("::")
    layout = _parse_fortran_format(pieces[2])
    # Column 0 carries the node id; columns 1-3 are X / Y / Z, with
    # missing trailing coordinates defaulting to 0.
    for row in pieces[3].splitlines():
        if not row.strip():
            continue
        parsed = _read_fixed_width(row, layout)
        if not parsed:
            continue
        nid = int(parsed[0])
        coords = [float(parsed[c]) if len(parsed) > c else 0.0 for c in (1, 2, 3)]
        apdl.n(nid, *coords)
        history.push(nid)


def _h_eblock(composite: str, apdl: APDL) -> None:
    """``EBLOCK`` — Workbench-style fixed-width element table.

    Composite token format (set by ``_tokenise_deck``):
    ``EBLOCK::<header>::<format>::<data-lines>::-1``.

    The MAPDL EBLOCK format is::

        eblock,19,SOLID,,n_elems
        (19i9)
        <mat>     <type>    <real>    <secnum>  <coord_sys>  ...
                 <element_id>  <node1> <node2> ... <nodeN>

    The first 11 fields are book-keeping (mat, type, real, secnum,
    coord_sys, death, solid_model, shape, n_nodes_in_elem,
    not_used, elem_id).  Fields 12+ are the connectivity nodes.
    Long elements (>8 nodes) wrap onto continuation lines.
    """
    parts = composite.split("::")
    header = parts[1]
    fmt = parts[2]
    data = parts[3]

    # Honour the SOLID / non-SOLID switch in the header.  SOLID-form
    # blocks have the 11 book-keeping fields; non-SOLID is a
    # connectivity-only short form that's much rarer and not yet
    # supported here.
    is_solid = "SOLID" in header.upper()
    if not is_solid:
        _LOG.warning(
            "from_dat: EBLOCK without SOLID flag is not yet supported; skipping (header=%r)",
            header,
        )
        return

    fields = _parse_fortran_format(fmt)
    raw_lines = [line for line in data.splitlines() if line.strip()]
    i = 0
    while i < len(raw_lines):
        # Read a fixed-width row's worth of fields.
        line = raw_lines[i]
        values = _read_fixed_width(line, fields)
        i += 1
        if not values:
            continue
        # MAPDL SOLID EBLOCK header layout (1-indexed in docs, 0-indexed here):
        # 0=mat, 1=type, 2=real, 3=secnum, 4=esys, 5=birth_death,
        # 6=solid_model, 7=shape, 8=n_nodes, 9=unused, 10=elem_id,
        # 11..=node_ids
        mat = int(values[0])
        et_id = int(values[1])
        real_id = int(values[2])
        # NOTE(review): secnum (values[3]) and esys (values[4]) are
        # currently dropped — confirm no corpus deck relies on them.
        n_nodes = int(values[8])
        # elem_id at index 10; we let APDL.e auto-assign so we don't
        # collide with prior element creation.
        nodes = [int(v) for v in values[11 : 11 + n_nodes]]
        # If the element wraps to continuation lines, read more.
        while len(nodes) < n_nodes and i < len(raw_lines):
            cont = _read_fixed_width(raw_lines[i], fields)
            i += 1
            # The ``if v`` filter drops the zero padding that
            # _read_fixed_width substitutes for blank trailing fields
            # (0 is never a valid node id).
            nodes.extend(int(v) for v in cont if v)
            nodes = nodes[:n_nodes]  # don't overshoot
        if len(nodes) != n_nodes:
            _LOG.warning(
                "from_dat: EBLOCK row had %d nodes; expected %d. Skipping element.",
                len(nodes),
                n_nodes,
            )
            continue
        # Replay through the APDL shim so ``type`` / ``mat`` / ``real``
        # are set per-element.  This matches how MAPDL actually
        # processes EBLOCK rows.
        apdl.type(et_id)
        apdl.mat(mat)
        if real_id > 0:
            apdl.real(real_id)
        apdl.e(*nodes)

def _h_cmblock(composite: str, _apdl: APDL) -> None:
    """``CMBLOCK`` — component (named node/element set) definition.

    For Phase 1 we recognise + skip them; named selections are a
    pre-processor convenience that don't affect the K matrix.  A
    follow-up PR can wire them into the Model's ``components`` table
    once we ship one.

    Parameters
    ----------
    composite:
        The ``CMBLOCK::…`` token built by ``_tokenise_deck``; its
        contents are deliberately ignored here.
    """
    # INFO (not WARNING): skipping is a documented design decision,
    # not a parse failure.
    _LOG.info("from_dat: CMBLOCK skipped (component definitions not modelled)")


def _h_e(args: list[str], apdl: APDL, params: _ParamTable) -> None:
    """``E, n1, n2, …, n8`` — element creation under current TYPE/MAT/REAL."""
    node_ids = [_to_int(tok, params) for tok in args if tok.strip()]
    if not node_ids:
        raise ValueError("E: no node numbers given")
    apdl.e(*node_ids)


def _h_egen(args: list[str], apdl: APDL, params: _ParamTable) -> None:
    """``EGEN, ITIME, NINC, IEL1, IEL2, IEINC, MINC, TINC, RINC, CINC, SINC, DX, DY, DZ``.

    Forms supported:

    * Single-source: ``EGEN, ITIME, NINC, IEL1`` — repeat element
      ``IEL1`` ``ITIME`` total times with successive node-id offsets of
      ``NINC``.
    * Range source: ``EGEN, ITIME, NINC, IEL1, IEL2, IEINC`` — repeat
      every element in ``IEL1..IEL2`` (stride ``IEINC``).
    * Auto-node-generation: when ``DX`` / ``DY`` / ``DZ`` (positional
      slots 12–14) are non-zero, EGEN also creates the offset nodes
      by translating the source-element nodes — VM25 uses
      ``EGEN,14,8,1,,,,,,,,4/14`` to lay down 14 axisymmetric
      elements + their nodes in one shot.
    """
    if len(args) < 3:
        raise ValueError(f"EGEN: expected at least 3 fields, got {len(args)}")
    itime = _to_int(args[0], params)
    ninc = _to_int(args[1], params)
    iel1 = _to_int(args[2], params)
    iel2 = _to_int(args[3], params) if len(args) > 3 and args[3].strip() else iel1
    ieinc = _to_int(args[4], params) if len(args) > 4 and args[4].strip() else 1

    # MAPDL: ``IEL1 = -k`` means "the k-th-most-recently-defined element".
    # The most common form is ``EGEN,...,-1`` (just-created element).
    # Python's negative indexing into the ordered element-number list
    # implements exactly that convention.
    if iel1 < 0 or iel2 < 0:
        existing = list(apdl.model.element_numbers)
        if iel1 < 0:
            if -iel1 > len(existing):
                _LOG.warning(
                    "EGEN: IEL1=%d but only %d elements exist; skipping", iel1, len(existing)
                )
                return
            iel1 = int(existing[iel1])
        if iel2 < 0:
            if -iel2 > len(existing):
                _LOG.warning(
                    "EGEN: IEL2=%d but only %d elements exist; skipping", iel2, len(existing)
                )
                return
            iel2 = int(existing[iel2])

    def _opt_float(idx: int) -> float:
        # Optional positional field: blank, absent, or unparseable → 0.0.
        if len(args) > idx and args[idx].strip():
            try:
                return _to_float(args[idx], params)
            except (ValueError, TypeError):
                return 0.0
        return 0.0

    # MAPDL EGEN's spatial-increment slots have shifted between
    # versions: pre-2020 decks (the entire VM corpus) put DX/DY/DZ
    # at indices 10/11/12, post-2020 inserts SINC ahead of them at
    # index 10 so DX/DY/DZ live at 11/12/13.  Choose by total
    # field count (≤13 → old, 14 → new).
    if len(args) <= 13:
        dx = _opt_float(10)
        dy = _opt_float(11)
        dz = _opt_float(12)
    else:
        dx = _opt_float(11)
        dy = _opt_float(12)
        dz = _opt_float(13)
    auto_nodes = (dx != 0.0) or (dy != 0.0) or (dz != 0.0)

    model = apdl.model
    # max(ieinc, 1) guards a zero / negative stride field.
    src_ids = list(range(iel1, iel2 + 1, max(ieinc, 1)))

    # Pre-collect source elements + (optionally) source-node coords.
    # NOTE(review): a missing source element id propagates whatever
    # ``model.element_info`` raises — confirm that is the desired
    # failure mode for malformed decks.
    src_info: list[tuple[list[int], dict[int, tuple[float, float, float]]]] = []
    for src_id in src_ids:
        nns_src, _et, _mat, _real = model.element_info(src_id)
        src_nodes = [int(n) for n in nns_src]
        coords: dict[int, tuple[float, float, float]] = {}
        if auto_nodes:
            for nn in src_nodes:
                try:
                    p = model.node_coord(nn)
                    coords[nn] = (float(p[0]), float(p[1]), float(p[2]))
                except KeyError:
                    pass
        src_info.append((src_nodes, coords))

    # k runs 1..itime-1: the source elements themselves count as copy
    # 0, giving ITIME elements total per source.
    for k in range(1, itime):
        for src_nodes, coords in src_info:
            if auto_nodes:
                for nn, (x, y, z) in coords.items():
                    new_id = nn + k * ninc
                    apdl.n(new_id, x + k * dx, y + k * dy, z + k * dz)
            nodes_k = [n + k * ninc for n in src_nodes]
            apdl.e(*nodes_k)


def _h_ngen(args: list[str], apdl: APDL, params: _ParamTable) -> None:
    """``NGEN, ITIME, INC, NODE1, NODE2, NINC, DX, DY, DZ, SPACE``.

    Generate ``ITIME-1`` additional sets of nodes by copying the
    ``NODE1..NODE2`` range (stride ``NINC``, default 1) with each
    successive set offset by ``INC`` in node-id and ``(DX, DY, DZ)``
    in space.  ``SPACE`` is the geometric scaling factor (Phase 1
    treats it as 1.0 — only multiplicative-factor support is rare in
    the corpus and arrives separately).

    The new node positions are read from the source nodes' current
    coordinates; we round-trip through ``apdl.n`` so book-keeping
    stays consistent with the rest of the parser.
    """
    if not args:
        return
    itime = max(1, _to_int(args[0], params))
    inc = _to_int(args[1], params) if len(args) > 1 and args[1].strip() else 1
    node1 = _to_int(args[2], params) if len(args) > 2 and args[2].strip() else 1
    node2 = _to_int(args[3], params) if len(args) > 3 and args[3].strip() else node1
    ninc = _to_int(args[4], params) if len(args) > 4 and args[4].strip() else 1
    dx = _to_float(args[5], params) if len(args) > 5 and args[5].strip() else 0.0
    dy = _to_float(args[6], params) if len(args) > 6 and args[6].strip() else 0.0
    dz = _to_float(args[7], params) if len(args) > 7 and args[7].strip() else 0.0

    # Read existing node coords from BOTH the staged-nodes dict
    # AND the already-baked grid.  Node materialisation can happen
    # mid-parse when downstream verbs trigger a grid rebuild
    # (``E`` after ``N`` calls clears ``_staged_nodes`` into the
    # grid), so the source-set we need to copy lives in two places.
    model = apdl._model  # noqa: SLF001
    nodes: dict[int, tuple[float, float, float]] = {}
    if model._grid.n_points > 0:
        # ``np`` is the module-level numpy import; the previous
        # function-scope re-import was redundant and has been removed.
        pts = np.asarray(model._grid.points, dtype=np.float64)
        nums = np.asarray(model._grid.point_data["ansys_node_num"], dtype=np.int32)
        for nn, pt in zip(nums, pts, strict=False):
            nodes[int(nn)] = (float(pt[0]), float(pt[1]), float(pt[2]))
    # Staged nodes win over baked ones for duplicate ids (most recent
    # definition is authoritative).
    nodes.update(getattr(model, "_staged_nodes", {}))
    if not nodes:
        return

    if ninc <= 0:
        ninc = 1
    src_ids = list(range(node1, node2 + 1, ninc))
    # set_idx runs 1..itime-1: the source set itself is copy 0.
    for set_idx in range(1, itime):
        for src in src_ids:
            if src not in nodes:
                continue
            x, y, z = nodes[src]
            new_id = src + inc * set_idx
            apdl.n(new_id, x + dx * set_idx, y + dy * set_idx, z + dz * set_idx)


def _h_fill(args: list[str], apdl: APDL, params: _ParamTable, history: _NodeHistory) -> None:
    """``FILL, NODE1, NODE2, NFILL, NSTRT, NINC, ITIME, INC, SPACE``.

    Forms the verbatim VM corpus uses:

    * ``FILL, n1, n2`` — explicit endpoints, default fill count
      (``abs(n2-n1)-1``) between consecutive node numbers.
    * ``FILL`` (no args) — endpoints default to the last two ``N``
      calls.
    * ``FILL, n1, n2, NFILL, NSTRT, NINC`` — explicit fill count with
      an arbitrary starting node number / stride (used heavily by the
      VM decks; e.g. ``FILL,1,12,1,23`` puts a single midpoint at
      node 23).
    * ``FILL,,,,,,,,SPACE`` — geometric spacing bias (ratio of last
      segment to first), handled below.

    ``ITIME`` (repetition count) is not yet supported — repetition
    would need explicit endpoints per iteration which the verb
    doesn't provide.
    """
    have_n1 = len(args) > 0 and args[0].strip()
    have_n2 = len(args) > 1 and args[1].strip()
    if not have_n1 and not have_n2:
        if history.prev is None or history.last is None:
            raise ValueError("FILL with no arguments requires two prior N commands; none on record")
        n1, n2 = history.prev, history.last
    elif have_n1 and have_n2:
        n1 = _to_int(args[0], params)
        n2 = _to_int(args[1], params)
    else:
        raise ValueError(f"FILL: pass either both NODE1+NODE2 or neither (got args={args!r})")

    span = abs(n2 - n1)

    n_fill: int
    if len(args) > 2 and args[2].strip():
        try:
            n_fill = max(int(_to_int(args[2], params)), 0)
        except ValueError:
            n_fill = max(span - 1, 0)
    else:
        n_fill = max(span - 1, 0)
    if n_fill == 0:
        return

    if len(args) > 3 and args[3].strip():
        try:
            n_start = int(_to_int(args[3], params))
        except ValueError:
            n_start = min(n1, n2) + 1
    else:
        n_start = min(n1, n2) + 1

    if len(args) > 4 and args[4].strip():
        try:
            n_inc = int(_to_int(args[4], params))
        except ValueError:
            n_inc = 1
    else:
        n_inc = 1

    # ``SPACE`` (8th positional, index 7) is the ratio of the last
    # spacing to the first.  ``SPACE > 1`` clusters nodes near
    # ``n1``; ``SPACE < 1`` clusters near ``n2``; defaults to 1
    # (uniform spacing).  vm32 uses ``FILL,,,,,,,,2`` to bias the
    # axisym mesh toward the centerline.  Parse through ``_to_float``
    # — routing through ``_to_int`` truncated fractional ratios
    # (``FILL,...,0.5`` became SPACE=0 and degenerated the geometric
    # progression below).
    space = 1.0
    if len(args) > 7 and args[7].strip():
        try:
            space = _to_float(args[7], params)
        except (ValueError, TypeError):
            space = 1.0
    if space <= 0.0:
        # A non-positive ratio breaks the geometric-progression maths;
        # fall back to uniform spacing rather than emitting collapsed
        # nodes.
        _LOG.warning("FILL: non-positive SPACE=%g ignored; using uniform spacing", space)
        space = 1.0

    model = apdl.model
    p1 = model.node_coord(n1)
    p2 = model.node_coord(n2)

    # Compute the cumulative parametric positions ``t_k ∈ (0, 1)`` for
    # the fill nodes.  Uniform: ``t_k = k / (n_fill + 1)``.  Geometric
    # bias with ratio ``r`` such that the segment-length ratio
    # last/first equals ``space``: ``r = space ** (1 / n_fill)``.
    if abs(space - 1.0) < 1e-12 or n_fill == 0:
        ts = [k / (n_fill + 1) for k in range(1, n_fill + 1)]
    else:
        r = space ** (1.0 / n_fill)
        # Sum of n_fill+1 segments in geometric progression:
        #   total = a * (r^(n_fill+1) - 1) / (r - 1) = 1 (parametric)
        a = (r - 1.0) / (r ** (n_fill + 1) - 1.0)
        ts = []
        cum = 0.0
        for k in range(1, n_fill + 1):
            cum += a * r ** (k - 1)
            ts.append(cum)
    for k, t in enumerate(ts, start=1):
        x = (1.0 - t) * p1[0] + t * p2[0]
        y = (1.0 - t) * p1[1] + t * p2[1]
        z = (1.0 - t) * p1[2] + t * p2[2]
        apdl.n(n_start + (k - 1) * n_inc, x, y, z)


def _h_et(args: list[str], apdl: APDL, params: _ParamTable) -> None:
    """``ET, ET_ID, NAME_OR_NUM, KOP1, KOP2, KOP3, ...`` — declare an element type.

    ``NAME_OR_NUM`` may be either the MAPDL element-name keyword
    (``LINK180``) or its numeric type code (``180``).  Numeric codes
    are translated through the same table the CDB reader uses.
    Trailing positional ``KOP1..KOPN`` slots are forwarded to
    ``APDL.et`` and through to ``KEYOPT`` mapping.
    """
    if len(args) < 2:
        raise ValueError(f"ET: expected at least 2 fields, got {len(args)}")
    et_id = _to_int(args[0], params)
    name_or_num = args[1].strip().upper()

    # Parse trailing KEYOPT slots — empty fields stay None so ``APDL.et``
    # records them as the MAPDL default (KEYOPT = 0).
    kops: list[int | None] = []
    for raw in args[2:]:
        if not raw or not raw.strip():
            kops.append(None)
            continue
        try:
            kops.append(_to_int(raw, params))
        except ValueError:
            kops.append(None)

    # Keep the try body minimal: only the int() parse may legitimately
    # raise ValueError here.  Previously the numeric-code branch's
    # ``apdl.et`` call (and the table import) sat inside the try, so a
    # ValueError raised by the shim fell into the except branch and
    # re-dispatched the numeric code as a keyword — a silent
    # double-call bug.
    try:
        num = int(name_or_num)
    except ValueError:
        # Not numeric — pass the keyword through (APDL.et translates).
        apdl.et(et_id, name_or_num, *kops)
        return

    # ``ET,1,180`` ≡ ``ET,1,LINK180``: translate the MAPDL type number
    # through the CDB reader's table.
    from femorph_solver.mapdl_api.cdb import _MAPDL_NUM_TO_NAME  # noqa: PLC0415

    neutral = _MAPDL_NUM_TO_NAME.get(num)
    if neutral is None:
        _LOG.warning("ET: unrecognised MAPDL element-type number %d; skipping", num)
        return
    apdl.et(et_id, neutral, *kops)


def _h_type(args: list[str], apdl: APDL, params: _ParamTable) -> None:
    """``TYPE, ITYPE`` - set the active element-type pointer."""
    itype = _to_int(args[0], params)
    apdl.type(itype)


def _h_real(args: list[str], apdl: APDL, params: _ParamTable) -> None:
    """``REAL, NSET`` - set the active real-constant set pointer."""
    nset = _to_int(args[0], params)
    apdl.real(nset)


def _h_mat(args: list[str], apdl: APDL, params: _ParamTable) -> None:
    """``MAT, MAT`` - set the active material pointer."""
    mat_id = _to_int(args[0], params)
    apdl.mat(mat_id)


# MAPDL ships two interchangeable spellings for Poisson's ratio:
# ``PRXY`` (preferred since 2010+ docs) and ``NUXY`` (older, still
# used verbatim in the bundled VM corpus).  Translate the foreign
# spelling at the deck-reader boundary so the canonical material
# property table stays single-spelling.  Consumed by ``_h_mp`` via
# ``_MP_PROP_ALIASES.get(prop, prop)``; unknown labels pass through.
_MP_PROP_ALIASES: dict[str, str] = {"NUXY": "PRXY"}


def _h_mp(args: list[str], apdl: APDL, params: _ParamTable) -> None:
    """``MP, PROP, MAT, VALUE`` - material property.

    The MAT field is optional - when blank MAPDL defaults to the
    currently active material (set by ``MAT, n``); if no MAT command
    has been issued the active material is 1.  vm16 / vm10 use the
    blank-MAT form (``MP, PRXY,, 0.3``) and exercise this path.

    Raises
    ------
    ValueError
        If fewer than three fields are supplied.
    """
    if len(args) < 3:
        raise ValueError(f"MP: expected 3 fields, got {len(args)}")
    prop = args[0].strip().upper()
    # Normalise legacy property spellings (NUXY -> PRXY).
    prop = _MP_PROP_ALIASES.get(prop, prop)
    if not args[1].strip():
        # Blank MAT field: fall back to the active material, else 1.
        # Consistency fix: go through the public ``apdl.model`` accessor
        # like every other handler in this module (previously this was
        # the only site reading the private ``apdl._model`` attribute).
        mat_id = int(getattr(apdl.model, "_current_mat", 1) or 1)
    else:
        mat_id = _to_int(args[1], params)
    value = _to_float(args[2], params)
    apdl.mp(prop, mat_id, value)


def _h_r(args: list[str], apdl: APDL, params: _ParamTable) -> None:
    """``R, REAL_ID, V1, V2, …`` — real constants.

    Empty positional slots are preserved as zeros so MAPDL's
    skip-leading-fields convention round-trips correctly: vm48 ships
    ``R, 2, , , , 31E-3`` to put rotary inertia at slot 3 (IXX); if
    the empty fields are filtered out, the 31E-3 lands at slot 0
    (MASSX) and the modal solve hits a singular mass matrix.
    """
    real_id = _to_int(args[0], params)
    values: list[float] = []
    # Drop trailing empties so a deck that only sets the first few
    # slots doesn't bloat the real-constant array; preserve interior
    # empties as 0.0 so positional indexing is honoured.
    raw = list(args[1:])
    while raw and not raw[-1].strip():
        raw.pop()
    for a in raw:
        values.append(_to_float(a, params) if a.strip() else 0.0)

    # Element-specific R-format translation: PIPE18 / PIPE16 / PIPE17
    # used a 3-value real layout ``(OD, wall, radius)`` that maps to
    # the BEAM2 (A, IZZ, IYY, J) tuple via thin-tube algebra.  vm18's
    # first analysis ships ``ET,1,PIPE18`` + ``R,1,2,1,100``.
    # NOTE(review): the et-name map is looked up with *real_id*, which
    # assumes the deck uses matching ET/R ids (true for vm18) — confirm
    # against decks where the ids diverge.
    mapdl_name = getattr(apdl, "_mapdl_et_name", {}).get(real_id)
    if mapdl_name in ("PIPE16", "PIPE17", "PIPE18") and len(values) >= 2:
        od = float(values[0])
        wall = float(values[1])
        if od > 0.0 and wall > 0.0 and 2.0 * wall <= od:
            import math  # noqa: PLC0415

            ro = od / 2.0
            ri = ro - wall
            A = math.pi * (ro * ro - ri * ri)
            Iz = math.pi * (ro**4 - ri**4) / 4.0
            apdl.r(real_id, A, Iz, Iz, 2.0 * Iz)
            # Bug fix: the PIPE early-return previously skipped the
            # ``_last_r_id`` bookkeeping below, so a following RMORE
            # silently no-oped for pipe real-sets.
            apdl.model._last_r_id = int(real_id)  # type: ignore[attr-defined]
            return

    apdl.r(real_id, *values)
    # Track the most-recently-defined real id so a subsequent RMORE
    # (with no id of its own) can pick up where R left off.  See
    # ``_h_rmore``.
    apdl.model._last_r_id = int(real_id)  # type: ignore[attr-defined]


def _h_rmore(args: list[str], apdl: APDL, params: _ParamTable) -> None:
    """``RMORE, V7, V8, ..., V12`` - extend the active real-constants array.

    MAPDL ``R, id, V1..V6`` fills the first 6 slots; each subsequent
    ``RMORE`` advances by 6 slots and fills whatever values are
    supplied.  Empty fields become 0.0 (so positional indexing is
    preserved); a bare ``RMORE`` with no values still advances 6
    slots (writing zeros).

    The active real id is whichever ``R, id`` last ran - tracked on
    the model as ``_last_r_id``.  vm23 / vm29 / vm38 use this form
    for contact / surface-element real-constant tables that exceed
    six slots.
    """
    real_id = getattr(apdl.model, "_last_r_id", None)
    if real_id is None:
        # No prior R - silently no-op rather than raising.  The deck-
        # trace records RMORE as handled, so the gap surfaces only if
        # downstream code reads a slot that was supposed to be set.
        return
    import numpy as np  # noqa: PLC0415

    # NOTE(review): reads (and below, writes) the model's private
    # ``_real_constants`` dict directly, while ``_h_rmodif`` goes
    # through the public ``real_constants`` accessor — confirm both
    # name the same underlying mapping.
    existing = apdl.model._real_constants.get(int(real_id), np.empty(0, dtype=np.float64))
    existing = list(np.asarray(existing, dtype=np.float64))
    # The R handler trims trailing empty fields to keep arrays
    # compact; pad the existing array up to the next 6-slot boundary
    # so RMORE writes into slot 6 / 12 / 18 / ... regardless of how
    # many trailing zeros R dropped.  This matches MAPDL's slot
    # semantics: R always conceptually fills 6 slots, even if some
    # are zero, and RMORE's first value lands at slot 7 (1-based).
    n_existing = len(existing)
    # Round up to the next multiple of 6 (zero-pad).  An empty array
    # (``R, id`` with no values) still pads to a full first bank of 6.
    aligned_target = ((n_existing + 5) // 6) * 6 if n_existing > 0 else 6
    if aligned_target > n_existing:
        existing.extend([0.0] * (aligned_target - n_existing))
    # Decode RMORE's positional args: empties become zeros, trailing
    # values are appended in order.  RMORE always advances 6 slots
    # regardless of how many args were supplied.  Fields beyond the
    # sixth (V12) are ignored, matching the command's fixed arity.
    chunk: list[float] = []
    for a in args[:6]:
        if a.strip():
            chunk.append(_to_float(a, params))
        else:
            chunk.append(0.0)
    while len(chunk) < 6:
        chunk.append(0.0)
    new_reals = existing + chunk
    apdl.model._real_constants[int(real_id)] = np.asarray(new_reals, dtype=np.float64)
    # Real constants feed the assembled matrices; drop any cached ones.
    apdl.model._invalidate_matrix_cache()


def _h_d(args: list[str], apdl: APDL, params: _ParamTable) -> None:
    """``D, NODE, LAB, VALUE, VALUE2, NEND, NINC, LAB2, ..., LAB6``.

    Positional fields (``args[0]`` is NODE):

    * NODE — node ID or ``ALL`` (every node in the active selection)
    * Lab  — DOF label or ``ALL`` (expanded by ``apdl.d`` itself)
    * VALUE — constraint value (blank -> 0.0)
    * VALUE2 — imaginary component (harmonic only; not honoured)
    * NEND / NINC — repeat the constraint over a node range
    * Lab2..Lab6 — extra DOF labels, all constrained to VALUE

    vm10 ships the packed-label ``D,ALL,UZ,,,,,ROTX,ROTY`` form.
    """
    if len(args) < 2:
        raise ValueError(f"D: expected at least 2 fields, got {len(args)}")
    primary = args[1].strip().upper()
    value = _to_float(args[2], params) if len(args) > 2 and args[2].strip() else 0.0
    # args[3] (VALUE2, harmonic imaginary part) is intentionally skipped.
    labels = [primary] + [a.strip().upper() for a in args[6:11] if a.strip()]

    if args[0].strip().upper() == "ALL":
        # Every node of the active NSEL selection; with no selection the
        # shim yields the full node list, matching MAPDL's D,ALL.
        for nid in apdl._iter_selected_nodes():  # noqa: SLF001
            for lab in labels:
                apdl.d(int(nid), lab, value)
        return

    node = _to_int(args[0], params)
    n_end = _to_int(args[4], params) if len(args) > 4 and args[4].strip() else node
    n_inc = _to_int(args[5], params) if len(args) > 5 and args[5].strip() else 1
    if n_end == node:
        for lab in labels:
            apdl.d(node, lab, value)
        return
    step = n_inc if n_end > node else -n_inc
    if step == 0:
        return  # zero stride iterates nothing (same as the while form)
    stop = n_end + 1 if step > 0 else n_end - 1
    for nid in range(node, stop, step):
        for lab in labels:
            apdl.d(nid, lab, value)


def _h_save(_args: list[str], apdl: APDL, _params: _ParamTable) -> None:
    """``SAVE`` — snapshot the current model state for a later RESUME.

    Where MAPDL writes the whole database to disk, we keep the
    snapshot in memory on the APDL shim.  Multi-prep decks (vm33:
    SAVE → /CLEAR → RESUME → ET,1,SOLID226) round-trip through this
    so a second analysis can reuse the first one's mesh under a
    different element type.
    """
    import copy  # noqa: PLC0415

    model = apdl.model
    dc = copy.deepcopy
    grid_copy = model._grid.copy(deep=True) if model._grid.n_points > 0 else None
    snapshot = (
        dc(model._etypes),
        dc(dict(model._materials)),
        dc(dict(model._real_constants)),
        dc(dict(model._staged_nodes)),
        dc(dict(model._staged_elements)),
        grid_copy,
        dc(getattr(apdl, "_mapdl_et_name", {})),
    )
    apdl._save_snapshot = snapshot  # type: ignore[attr-defined]  # noqa: SLF001


def _h_resume(_args: list[str], apdl: APDL, _params: _ParamTable) -> None:
    """``RESUME`` — restore the most recent SAVE snapshot.

    No-op when no SAVE has been called.  Restores etypes, materials,
    real constants, staged nodes / elements, the grid, and the
    parser-side ``_mapdl_et_name`` mapping.
    """
    snap = getattr(apdl, "_save_snapshot", None)
    if snap is None:
        return
    etypes, materials, reals, staged_nodes, staged_elements, grid, et_names = snap
    model = apdl.model
    # Each of these is a live container on the model: clear in place and
    # refill from the snapshot so external references stay valid.
    for live, saved in (
        (model._etypes, etypes),
        (model._materials, materials),
        (model._real_constants, reals),
        (model._staged_nodes, staged_nodes),
        (model._staged_elements, staged_elements),
    ):
        live.clear()
        live.update(saved)
    if grid is not None:
        model._grid = grid
    apdl._mapdl_et_name.clear()
    apdl._mapdl_et_name.update(et_names)
    model._invalidate_topology_caches()
    model._dirty = True


def _h_rmodif(args: list[str], apdl: APDL, params: _ParamTable) -> None:
    """``RMODIF, NSET, KEY, V1, V2, ...`` — modify individual real-constant slots.

    ``vm41`` uses RMODIF to populate the upper-triangular slots of a
    MATRIX27 element-stiffness matrix one entry at a time::

        R,1                  ! initialise real-set 1 (no values yet)
        RMODIF,1,51,10000    ! set slot 51 (matrix entry A_{10,6})

    Phase 1 supports the single-key form (``RMODIF,N,KEY,VALUE``);
    multi-value variants are tracked as a follow-up.
    """
    if len(args) < 3:
        return
    nset = _to_int(args[0], params)
    key = _to_int(args[1], params)
    value = _to_float(args[2], params)
    # Fetch the current set (empty when R declared no values), grow it
    # to cover slot ``key``, write the slot, and push the whole array
    # back through ``apdl.r`` so downstream caches invalidate normally.
    slots = apdl.model.real_constants.get(int(nset))
    if slots is None:
        slots = np.zeros(0, dtype=np.float64)
    if slots.size < key:
        grown = np.zeros(key, dtype=np.float64)
        if slots.size:
            grown[: slots.size] = np.asarray(slots, dtype=np.float64)
        slots = grown
    slots[key - 1] = float(value)
    apdl.r(int(nset), *slots.tolist())


def _h_secnum(args: list[str], apdl: APDL, params: _ParamTable) -> None:
    """``SECNUM, SECID`` — set the active section number.

    For BEAM / PIPE / SHELL elements MAPDL associates the next-emitted
    element with a section via SECNUM; the SECDATA-derived properties
    live at ``real_constants[secid]``, so SECNUM is routed through the
    same ``apdl.real`` pointer that REAL drives.
    """
    first = args[0].strip() if args else ""
    if not first:
        return
    apdl.real(_to_int(first, params))


def _h_ddele(args: list[str], apdl: APDL, params: _ParamTable) -> None:
    """``DDELE, NODE, Lab, NEND, NINC`` — release a previously-applied D BC.

    Supported shapes:

    * ``DDELE, NODE, Lab`` — one node, one label.
    * ``DDELE, ALL, Lab`` — every node in the active selection.
    * ``DDELE, NODE, ALL`` — every DOF on one node (blank Lab also
      defaults to ``ALL``).
    * ``DDELE, n1, Lab, n2, ninc`` — node range ``n1..n2`` with
      stride ``ninc`` (default 1), ascending or descending.
    """
    if not args:
        return
    label = args[1].strip().upper() if len(args) > 1 and args[1].strip() else "ALL"
    if args[0].strip().upper() == "ALL":
        apdl.ddele("ALL", label)
        return
    n_start = _to_int(args[0], params)
    n_end = _to_int(args[2], params) if len(args) > 2 and args[2].strip() else n_start
    n_inc = _to_int(args[3], params) if len(args) > 3 and args[3].strip() else 1
    if n_end == n_start:
        apdl.ddele(n_start, label)
        return
    step = n_inc if n_end > n_start else -n_inc
    if step == 0:
        return  # zero stride iterates nothing (same as the while form)
    stop = n_end + 1 if step > 0 else n_end - 1
    for nid in range(n_start, stop, step):
        apdl.ddele(nid, label)


def _h_f(args: list[str], apdl: APDL, params: _ParamTable) -> None:
    """``F, NODE, LAB, VALUE[, VALUE2[, NEND[, NINC]]]`` - applied force.

    Field map:
    * ``args[0]``  NODE  - first node to apply at
    * ``args[1]``  Lab   - DOF label (FX / FY / FZ / MX / MY / MZ)
    * ``args[2]``  VALUE - force / moment magnitude (blank -> 0,
                          equivalent to MAPDL's "remove force" form)
    * ``args[3]``  VALUE2 - imaginary component (harmonic only; not honoured)
    * ``args[4]``  NEND  - if set, apply over the node range NODE..NEND
    * ``args[5]``  NINC  - step size for the NODE..NEND iteration

    vm16 case 2 uses ``F,6,FX,,,16,10`` (clear FX at nodes 6 and 16)
    and ``F,6,FY,150,,16,10`` (apply FY=150 at nodes 6 and 16).

    Raises
    ------
    ValueError
        If fewer than two fields are supplied.
    """
    if len(args) < 2:
        raise ValueError(f"F: expected at least 2 fields, got {len(args)}")
    node = _to_int(args[0], params)
    lab = args[1].strip().upper()
    # Blank VALUE means "set to zero" (MAPDL's force-remove form).
    val_str = args[2].strip() if len(args) > 2 else ""
    value = _to_float(val_str, params) if val_str else 0.0
    nend_str = args[4].strip() if len(args) > 4 else ""
    ninc_str = args[5].strip() if len(args) > 5 else ""
    if not nend_str:
        apdl.f(int(node), lab, value)
        return
    try:
        nend = _to_int(nend_str, params)
        ninc = _to_int(ninc_str, params) if ninc_str else 1
    except (ValueError, TypeError):
        nend, ninc = node, 1
    # Walk the range in either direction, mirroring _h_d / _h_ddele.
    # Previously ``range(node, nend + 1, ninc)`` with a positive-forced
    # stride silently applied nothing when NEND < NODE.
    stride = max(abs(int(ninc)), 1)
    step = stride if int(nend) >= int(node) else -stride
    stop = int(nend) + (1 if step > 0 else -1)
    for n in range(int(node), stop, step):
        apdl.f(int(n), lab, value)


_MAPDL_BEAM_FACE_TO_KERNEL: dict[int, int] = {1: 4, 2: 3, 3: 1, 4: 2}
"""MAPDL BEAM188 face id -> solver-native Beam2 face id.

MAPDL BEAM188 face 1 is the -z surface (a positive PRES pushes the
beam in -z); the Beam2 kernel uses face 4 for the same direction.
The remap lives at this dat-loader boundary so downstream code -
including the programmatic ``APDL.sfbeam`` API - is solver-native.
"""


def _h_sfbeam(args: list[str], apdl: APDL, params: _ParamTable) -> None:
    """``SFBEAM, Elem, LKEY, Lab, VAL1`` - distributed beam surface load.

    ``Elem='ALL'`` targets every element of the active ESEL selection
    (vm21 form); a numeric ``Elem`` targets a single element.  Phase 1
    honours ``Lab='PRES'``.  The MAPDL ``LKEY`` face id (1-4) is
    remapped here to the kernel-native Beam2 face convention; the
    programmatic ``APDL.sfbeam`` takes kernel-native ids directly.
    Malformed fields make the handler return without applying anything.
    """
    if len(args) < 4:
        return
    elem_field = args[0].strip().upper()
    try:
        mapdl_face = _to_int(args[1], params)
    except ValueError:
        return
    face = _MAPDL_BEAM_FACE_TO_KERNEL.get(mapdl_face, mapdl_face)
    label = args[2].strip().upper()
    raw_val = args[3].strip()
    if not raw_val:
        return
    try:
        magnitude = _to_float(raw_val, params)
    except ValueError:
        return
    if elem_field == "ALL":
        targets = list(apdl._iter_selected_elements())  # noqa: SLF001
    else:
        try:
            targets = [_to_int(elem_field, params)]
        except ValueError:
            return
    for eid in targets:
        apdl.sfbeam(int(eid), face, label, magnitude)


def _h_dk(args: list[str], apdl: APDL, params: _ParamTable, geom: _GeometryBuilder) -> None:
    """``DK, KPOI, Lab[, VAL][, VAL2][, KEXPND][, Lab2..6]`` - keypoint constraint.

    Resolves the keypoint to its meshed node (the LMESH driver places
    a node at every keypoint coord) and forwards each label through
    ``APDL.d``.  ``KPOI = 'ALL'`` constrains every defined keypoint.
    Up to six DOF labels may be packed into one DK call (Lab + Lab2..6).
    """
    if len(args) < 2:
        return
    kpoi = args[0].strip().upper()
    if kpoi == "ALL":
        keypoints = list(geom.keypoints)
    else:
        try:
            keypoints = [_to_int(kpoi, params)]
        except (ValueError, TypeError):
            return
    primary = args[1].strip().upper()
    # Blank VAL defaults to 0.0 (fixed constraint).
    val = _to_float(args[2], params) if len(args) > 2 and args[2].strip() else 0.0
    # Lab2..6 sit in slots 5, 6, 7, 8, 9 (1-based 6..10) - i.e. positions
    # 5..9 of args[]; the in-between slots (VAL2, KEXPND) are skipped.
    # NOTE(review): VAL2 and KEXPND are silently ignored — confirm no
    # deck in the corpus relies on keypoint-expansion (KEXPND) semantics.
    extra_labels = [a.strip().upper() for a in args[5:10] if a and a.strip()]
    labels = [primary, *extra_labels]
    # Find the model node closest to each keypoint coord.  K/L/LMESH
    # places the keypoint endpoints as the *first two* generated nodes
    # of each line, so coordinate matching is exact.
    import numpy as np  # noqa: PLC0415

    if apdl.model.n_nodes == 0:
        return
    pts = np.asarray(apdl.model.grid.points, dtype=np.float64)
    nums = np.asarray(apdl.model.grid.point_data["ansys_node_num"], dtype=np.int64)
    for kpt in keypoints:
        coord = geom.keypoints.get(int(kpt))
        if coord is None:
            continue
        target = np.array(coord, dtype=np.float64)
        d = np.linalg.norm(pts - target, axis=1)
        idx = int(np.argmin(d))
        # Relative tolerance: 1e-8 scaled by (1 + |target|) so keypoints
        # far from the origin tolerate proportionally larger float noise.
        if d[idx] > 1e-8 * (1.0 + np.linalg.norm(target)):
            continue  # no matching node - skip rather than constrain wrong one
        node_id = int(nums[idx])
        for lab in labels:
            apdl.d(node_id, lab, val)


def _h_dsym(args: list[str], apdl: APDL, params: _ParamTable) -> None:
    """``DSYM, Lab, Normal[, KCN]`` - symmetry / antisymmetry constraint.

    Acts on the active NSEL selection.  ``Lab='SYMM'`` pins the
    face-normal translation plus the two in-plane rotations;
    ``Lab='ASYM'`` pins the two in-plane translations plus the
    out-of-plane rotation.  ``Normal`` is X/Y/Z of coordinate system
    ``KCN`` (default 0 = global).
    """
    if len(args) < 2:
        return
    lab, normal = args[0].strip().upper(), args[1].strip().upper()
    kcn = 0
    if len(args) > 2 and args[2].strip():
        kcn = _to_int(args[2], params)
    apdl.dsym(lab, normal, kcn)


def _h_cp(args: list[str], apdl: APDL, params: _ParamTable) -> None:
    """``CP, SET_ID, Lab, NODE1, NODE2, ...`` - couple shared DOF across nodes."""
    if len(args) < 3:
        raise ValueError(f"CP: expected at least 3 fields, got {len(args)}")
    set_id = _to_int(args[0], params)
    lab = args[1].strip().upper()
    # Blank trailing node fields (slot-fill padding) are dropped.
    node_ids = [_to_int(field, params) for field in args[2:] if field.strip()]
    apdl.cp(set_id, lab, *node_ids)


def _h_tref(args: list[str], apdl: APDL, params: _ParamTable) -> None:
    """``TREF, TREF`` - stress-free reference temperature for thermal pre-strain."""
    if not args:
        raise ValueError("TREF: expected reference temperature")
    reference_temp = _to_float(args[0], params)
    apdl.tref(reference_temp)


def _h_bfunif(args: list[str], apdl: APDL, params: _ParamTable) -> None:
    """``BFUNIF, LAB, VALUE`` - uniform body force (TEMP for thermal pre-strain)."""
    if len(args) < 2:
        raise ValueError(f"BFUNIF: expected 2 fields, got {len(args)}")
    label = args[0].strip().upper()
    magnitude = _to_float(args[1], params)
    apdl.bfunif(label, magnitude)


def _h_bf(args: list[str], apdl: APDL, params: _ParamTable) -> None:
    """``BF, NODE, LAB, VALUE`` - per-node body force (TEMP for thermal pre-strain)."""
    if len(args) < 3:
        raise ValueError(f"BF: expected 3 fields, got {len(args)}")
    node = _to_int(args[0], params)
    label = args[1].strip().upper()
    magnitude = _to_float(args[2], params)
    apdl.bf(node, label, magnitude)


def _h_nsel(args: list[str], apdl: APDL, params: _ParamTable) -> None:
    """``NSEL, Type, Item, Comp, VMIN, VMAX, VINC, KABS``.

    Routes to ``apdl.nsel`` so the APDL-shim's selection state stays
    authoritative.  Only the minimum form the VM corpus uses
    (S/R/A/U/ALL/NONE/INVE × LOC/NODE) is wired up; unsupported items
    fall through to ``apdl.nsel`` itself which raises a clear error.
    """
    type_ = "S"
    if args and args[0].strip():
        type_ = args[0].strip().upper()
    if type_ in {"ALL", "NONE", "INVE"}:
        apdl.nsel(type_)
        return
    item = "NODE"
    if len(args) > 1 and args[1].strip():
        item = args[1].strip().upper()
    comp = args[2].strip().upper() if len(args) > 2 and args[2].strip() else None

    def _maybe_float(idx: int) -> float | None:
        # Blank / unparsable fields read as "not supplied".
        if idx < len(args) and args[idx].strip():
            try:
                return _to_float(args[idx], params)
            except (ValueError, TypeError):
                return None
        return None

    vmin = _maybe_float(3)
    vmax = _maybe_float(4)
    raw_vinc = _maybe_float(5)
    vinc = None if raw_vinc is None else int(raw_vinc)
    # MAPDL convention: ``NSEL,S,LOC,X`` (no VMIN) defaults VMIN to 0
    # — the deck author's "select nodes at the X = 0 face" idiom.
    # vm100 ships ``NSEL,S,LOC,X`` then ``SF,ALL,CONV,12,100`` to put
    # the convection on the inner-radius surface.
    if item == "LOC" and vmin is None:
        vmin = 0.0
    apdl.nsel(type_, item, comp, vmin, vmax, vinc)


def _h_allsel(args: list[str], apdl: APDL, params: _ParamTable) -> None:  # noqa: ARG001
    """``ALLSEL`` / ``NALL`` — reset the node selection to the full model."""
    # Arguments are ignored; both verbs simply clear the selection.
    apdl.nsel("ALL")


def _h_sf(args: list[str], apdl: APDL, params: _ParamTable) -> None:
    """``SF, Nlist, Lab, VAL1, VAL2`` — surface load on a node-set.

    Honoured forms (thermal pillar):

    * ``SF, ALL, CONV, h, T_inf`` — surface convection on every face
      whose endpoint nodes are entirely in the active NSEL selection.
    * ``SF, ALL, HFLUX, q``       — applied surface heat flux.

    ``Nlist=ALL`` consumes the active selection (the VM-corpus form);
    a single numeric Nlist is forwarded as a one-node list, anything
    else is a soft skip.
    """
    if len(args) < 2:
        return
    nlist = args[0].strip().upper() or "ALL"
    lab = args[1].strip().upper()

    def _value_at(idx: int) -> float:
        # Blank / missing value fields default to 0.0.
        if len(args) > idx and args[idx].strip():
            return _to_float(args[idx], params)
        return 0.0

    val1, val2 = _value_at(2), _value_at(3)
    if nlist == "ALL":
        apdl.sf(apdl._iter_selected_nodes(), lab, val1, val2)  # noqa: SLF001
        return
    # Explicit node-list form — fall back to letting the APDL shim
    # handle it (will error today, surfacing the gap clearly).
    try:
        node = _to_int(args[0], params)
    except ValueError:
        return
    apdl.sf([node], lab, val1, val2)


def _h_esel(args: list[str], apdl: APDL, params: _ParamTable) -> None:
    """``ESEL, Action, Item, Comp, VMIN, VMAX, VINC`` - element selection.

    Honoured forms: ``ESEL,ALL`` (clear the selection) and
    ``ESEL,S|A,ELEM,,vmin[,vmax[,vinc]]`` (select / add an element-id
    range).  Unparsable numeric fields make the handler a soft no-op.
    """
    action = args[0].strip().upper() if args else "S"
    if action == "ALL":
        apdl.esel("ALL")
        return

    def _field(idx: int) -> str:
        return args[idx].strip() if len(args) > idx else ""

    item, comp = _field(1), _field(2)
    try:
        lo = _to_int(_field(3), params) if _field(3) else 1
        hi = _to_int(_field(4), params) if _field(4) else lo
        stride = _to_int(_field(5), params) if _field(5) else 1
    except (ValueError, TypeError):
        return
    apdl.esel(action, item, comp, lo, hi, stride)


def _h_bfe(args: list[str], apdl: APDL, params: _ParamTable) -> None:
    """``BFE, Elem, Lab, STLOC, VAL1[, VAL2[, VAL3[, VAL4]]]``.

    Element-level body force.  Supported forms:

    * ``BFE,ALL,HGEN,STLOC,VAL`` - heat generation on every element of
      the active ``ESEL`` selection.
    * ``BFE,<elem_id>,HGEN,STLOC,VAL`` - one explicit element.

    ``STLOC`` is parsed for legacy parity but the thermal-load
    assembler ignores it (the VM corpus only ships STLOC=0 / blank).
    """
    if len(args) < 4:
        return
    elem_field = args[0].strip().upper()
    lab = args[1].strip().upper()
    stloc_raw, val_raw = args[2].strip(), args[3].strip()
    if not val_raw:
        return
    try:
        stloc = _to_int(stloc_raw, params) if stloc_raw else 0
        val1 = _to_float(val_raw, params)
    except (ValueError, TypeError):
        return
    if elem_field == "ALL":
        targets = apdl._iter_selected_elements()  # noqa: SLF001
    else:
        try:
            targets = [_to_int(elem_field, params)]
        except (ValueError, TypeError):
            return
    apdl.bfe(targets, lab, stloc, val1)


class _SectionState:  # noqa: PLR0903 — light scratch container, not a real class
    """Sticky state between ``SECTYPE`` and the following ``SECDATA``.

    APDL's deck grammar pairs them: a ``SECTYPE`` line opens a section
    record, the next ``SECDATA`` (often the very next line) supplies
    the geometric values.  The reader keeps the open record here so
    handlers stay pure-functional (no ``Model``-side scratch attributes).
    """

    __slots__ = ("active_id", "active_subtype", "active_type")

    def __init__(self) -> None:
        self.active_id: int | None = None
        self.active_type: str = ""
        self.active_subtype: str = ""


class _GeometryBuilder:
    """1D geometry-builder state for ``K`` / ``L`` / ``LESIZE`` / ``LMESH``.

    Decks like vm50, vm51, vm57, vm60, vm61 build a single line between
    two keypoints, set the number of divisions with ``LESIZE``, and let
    ``LMESH`` create the nodes + elements.  The reader tracks each piece
    here and materialises nodes / elements on ``LMESH``.

    Only the 1D-line subset is implemented.  ``A`` / ``V`` (areas /
    volumes) and ``AMESH`` / ``VMESH`` are still gaps.

    Attributes
    ----------
    keypoints : dict[int, tuple[float, float, float]]
        ``{kp_id: (x, y, z)}``.
    lines : dict[int, tuple[int, int]]
        ``{line_id: (kp_a, kp_b)}``.  Line ids are assigned in
        deck-order starting at 1 to match MAPDL's auto-numbering.
    next_line_id : int
        Next line id to assign.
    divisions : dict[int | str, int]
        Per-line division count from ``LESIZE,<line>,...``.  The key
        ``"ALL"`` is the default for ``LESIZE,ALL,...`` and applies to
        any line without an explicit override.
    """

    __slots__ = ("divisions", "keypoints", "lines", "next_line_id")

    def __init__(self) -> None:
        self.keypoints: dict[int, tuple[float, float, float]] = {}
        self.lines: dict[int, tuple[int, int]] = {}
        self.next_line_id: int = 1
        self.divisions: dict[int | str, int] = {}


def _h_k(args: list[str], apdl: APDL, params: _ParamTable, geom: _GeometryBuilder) -> None:
    """``K, n, x, y, z`` — define keypoint ``n`` at ``(x, y, z)``.

    The keypoint is recorded both in the legacy ``_GeometryBuilder``
    dicts (still the source of truth for the 1-D mesh path until PR-B)
    and mirrored into ``model.geometry`` (the new
    :class:`femorph_solver.geometry.GeometryRegistry`).  Blank
    coordinate fields default to 0.0.
    """
    if not args:
        return
    kp_id = _to_int(args[0], params)
    coords: list[float] = []
    for slot in (1, 2, 3):
        if len(args) > slot and args[slot].strip():
            coords.append(_to_float(args[slot], params))
        else:
            coords.append(0.0)
    x, y, z = coords
    geom.keypoints[int(kp_id)] = (float(x), float(y), float(z))
    apdl.model.geometry.point(x, y, z, id=int(kp_id))


def _h_l(args: list[str], apdl: APDL, params: _ParamTable, geom: _GeometryBuilder) -> None:
    """``L, k1, k2`` — define a line between two keypoints.

    The new line id is implicit: ``geom.next_line_id`` auto-numbers
    exactly the way MAPDL does.  The line is also mirrored into
    ``model.geometry`` alongside the legacy ``_GeometryBuilder`` dicts.
    """
    if len(args) < 2:
        return
    kp_a = _to_int(args[0], params)
    kp_b = _to_int(args[1], params)
    assigned = geom.next_line_id
    geom.lines[assigned] = (int(kp_a), int(kp_b))
    geom.next_line_id = assigned + 1
    # A line may reference a keypoint not yet in the registry (e.g.
    # KGEN-generated keypoints, handled in PR-B).  Skip the registry
    # mirror then; the legacy 1-D mesh path stays correct via geom.lines.
    import contextlib  # noqa: PLC0415

    with contextlib.suppress(KeyError):
        apdl.model.geometry.line(int(kp_a), int(kp_b), id=int(assigned))


def _h_lesize(args: list[str], _apdl: APDL, params: _ParamTable, geom: _GeometryBuilder) -> None:
    """``LESIZE, NL1, SIZE, ANGSIZ, NDIV, ...`` — set divisions on line ``NL1``.

    Only the ``NDIV`` (4th positional) field is honoured today;
    ``SIZE`` (target element length) and the higher-order refinement
    fields would need geometry-aware sizing that 1D lines don't need.
    ``NL1 = "ALL"`` becomes the default for any line without a
    per-line override.
    """
    if not args:
        return
    target = args[0].strip().upper()
    ndiv = None
    if len(args) > 3 and args[3].strip():
        ndiv = _to_int(args[3], params)
    if ndiv is None or ndiv <= 0:
        # SIZE-only forms still parse — record nothing rather than
        # dropping a stale entry.
        return
    if target == "ALL":
        geom.divisions["ALL"] = int(ndiv)
        return
    try:
        line_id = int(_to_int(target, params))
    except (ValueError, TypeError):
        return
    geom.divisions[line_id] = int(ndiv)


def _h_esize(args: list[str], _apdl: APDL, params: _ParamTable, geom: _GeometryBuilder) -> None:
    """``ESIZE, SIZE, NDIV`` — global default element division count.

    Only the second positional (``NDIV``) is honoured; it lands under
    the same ``"ALL"`` key as ``LESIZE,ALL,,,N`` so the LMESH dispatch
    treats both defaults identically.  ``SIZE`` (target element
    length) isn't used for the 1D-line subset.
    """
    if not args:
        return
    if len(args) < 2 or not args[1].strip():
        return
    ndiv = _to_int(args[1], params)
    if ndiv > 0:
        geom.divisions["ALL"] = int(ndiv)


def _h_a(args: list[str], apdl: APDL, params: _ParamTable) -> None:
    """``A, K1, K2, ..., KN`` - define an area by an ordered loop of keypoints.

    Builds the bounding lines implicitly: every consecutive keypoint
    pair (Ki, Ki+1), plus the closing pair (KN, K1), is resolved to an
    existing line or a freshly created one, and the resulting line
    loop is registered as an Area.
    """
    if len(args) < 3:
        return
    loop: list[int] = []
    for raw in args:
        token = raw.strip()
        if not token:
            continue
        try:
            loop.append(_to_int(token, params))
        except (ValueError, TypeError):
            continue
    if len(loop) < 3:
        return
    # MAPDL's 4-arg slot-fill form repeats the closing point for a
    # triangle (A,1,2,3,3); strip such trailing duplicates.
    while len(loop) > 3 and loop[-1] == loop[-2]:
        del loop[-1]
    geometry = apdl.model.geometry
    boundary: list[int] = []
    count = len(loop)
    for idx in range(count):
        start = loop[idx]
        end = loop[(idx + 1) % count]
        # Re-scan on every edge: lines created earlier in this loop
        # must be discoverable for later duplicate pairs.
        match = next(
            (
                ln.id
                for ln in geometry.lines()
                if (ln.start_id, ln.end_id) in ((start, end), (end, start))
            ),
            None,
        )
        if match is None:
            try:
                match = geometry.line(start, end).id
            except KeyError:
                return  # missing keypoint; surface as a soft skip
        boundary.append(match)
    geometry.area(*boundary)


def _h_aesize(args: list[str], apdl: APDL, params: _ParamTable) -> None:
    """``AESIZE, ANUM, SIZE`` - record per-area mesh-size hint."""
    if len(args) < 2:
        return
    target = args[0].strip().upper()
    try:
        size = _to_float(args[1], params)
    except (ValueError, TypeError):
        return
    geometry = apdl.model.geometry
    if target == "ALL":
        for area in geometry.areas():
            geometry.set_area_size(area, size)
        return
    try:
        area_id = _to_int(target, params)
    except (ValueError, TypeError):
        return
    import contextlib  # noqa: PLC0415

    with contextlib.suppress(KeyError):
        geometry.set_area_size(area_id, size)


def _h_amesh(args: list[str], apdl: APDL, params: _ParamTable) -> None:
    """``AMESH, NA1[, NA2[, NINC]]`` - mesh areas using the active TYPE's kernel.

    The active TYPE id is read off the model state and its
    solver-native kernel name is used as the meshing kernel.  When no
    TYPE is set (or it doesn't map to a meshable kernel) the call
    silently no-ops; the result-side comparison surfaces the gap
    downstream.
    """
    if not args:
        return
    from femorph_solver.geometry import mesh_area  # noqa: PLC0415

    geometry = apdl.model.geometry
    first = args[0].strip().upper()
    if first == "ALL":
        area_ids = [area.id for area in geometry.areas()]
    else:
        try:
            start = _to_int(first, params)
        except (ValueError, TypeError):
            return
        stop = _to_int(args[1], params) if len(args) > 1 and args[1].strip() else start
        step = _to_int(args[2], params) if len(args) > 2 and args[2].strip() else 1
        area_ids = list(range(start, stop + 1, max(int(step), 1)))

    active = getattr(apdl.model, "_current_type", None)
    if active is None or active not in apdl.model._etypes:
        # Deck skipped TYPE: default to the most-recently-allocated ET,
        # matching MAPDL's defaulting behaviour.
        if not apdl.model._etypes:
            return
        active = max(apdl.model._etypes)
    kernel = apdl.model._etypes[active]

    for area_id in area_ids:
        try:
            target = geometry.area_by_id(area_id)
        except KeyError:
            continue
        try:
            mesh_area(apdl.model, target, kernel=kernel, n_div_per_edge=4)
        except (ValueError, ImportError, OSError):
            # Kernel missing from the gmsh shape map, or gmsh isn't
            # installed / can't dlopen its C library here.  Skip
            # silently - the result-side comparison surfaces the gap.
            continue


def _h_lmesh(args: list[str], apdl: APDL, params: _ParamTable, geom: _GeometryBuilder) -> None:
    """``LMESH, NL1[, NL2[, NINC]]`` — mesh lines into the active ET.

    Places ``NDIV + 1`` evenly spaced nodes between the two keypoint
    endpoints and joins them with ``NDIV`` 2-node elements under the
    active ``TYPE`` / ``MAT`` / ``REAL``.  ``NL1 = "ALL"`` meshes
    every defined line.

    Node and element ids continue from the model's current maximum
    (no user-supplied numbering — MAPDL's ``NSTRT`` / ``ESTRT``
    start-id arguments aren't wired yet).
    """
    if not args:
        return
    first = args[0].strip().upper()
    if first == "ALL":
        targets = sorted(geom.lines)
    else:
        try:
            targets = [int(_to_int(first, params))]
        except (ValueError, TypeError):
            return
    for line_id in targets:
        pair = geom.lines.get(line_id)
        if pair is None:
            continue
        a_xyz = geom.keypoints.get(pair[0])
        b_xyz = geom.keypoints.get(pair[1])
        if a_xyz is None or b_xyz is None:
            continue
        ndiv = geom.divisions.get(line_id) or geom.divisions.get("ALL", 1)
        existing = list(apdl.model.node_numbers) if apdl.model.n_nodes else []
        base = (max(existing) + 1) if existing else 1
        # MAPDL LMESH numbering gives the keypoint endpoints the *first*
        # two new node ids, with interior nodes following.  vm57's
        # BEAM189 section relies on this so ``E,2`` (mass element)
        # lands at the K2 endpoint instead of the second interior node.
        node_a = base
        node_b = base + 1
        apdl.n(node_a, a_xyz[0], a_xyz[1], a_xyz[2])
        apdl.n(node_b, b_xyz[0], b_xyz[1], b_xyz[2])
        # ``ndiv - 1`` interior nodes, evenly spaced (none for ndiv == 1).
        interior: list[int] = []
        for step in range(1, ndiv):
            t = step / ndiv
            node_id = base + 1 + step
            apdl.n(
                node_id,
                a_xyz[0] + t * (b_xyz[0] - a_xyz[0]),
                a_xyz[1] + t * (b_xyz[1] - a_xyz[1]),
                a_xyz[2] + t * (b_xyz[2] - a_xyz[2]),
            )
            interior.append(node_id)
        # Chain a -> i_1 -> ... -> i_n -> b with 2-node elements.
        path = [node_a, *interior, node_b]
        for left, right in zip(path, path[1:]):
            apdl.e(left, right)


def _beam_real_from_secdata(
    sec_type: str, values: list[float]
) -> tuple[float, float, float, float] | None:
    """Convert ``SECDATA`` values to ``(AREA, IZZ, IYY, J)`` for a beam section.

    Returns ``None`` for sections we don't yet understand — the caller
    leaves the real-constant table empty so a subsequent ``solve``
    raises a clear "BEAM2 requires REAL[…]" error instead of silently
    using zero stiffness.

    Supported subtypes:

    * ``CSOLID``  — solid circular bar.  SECDATA = ``[radius, n_div]``;
      ``A = π R²``, ``Iz = Iy = π R⁴/4``, ``J = π R⁴/2``.
    * ``RECT``    — solid rectangular bar.  SECDATA = ``[h, b, ...]``;
      ``A = h b``, ``Iz = b h³/12``, ``Iy = h b³/12``, ``J ≈ Iz + Iy``
      (the thin-walled torsion approximation; good enough for the
      modal / eigenfrequency VMs in the corpus).
    * ``CTUBE``   — hollow circular tube.  SECDATA = ``[Ri, Ro, n_div]``;
      ``A = π (Ro² - Ri²)``, ``Iz = Iy = π (Ro⁴ - Ri⁴)/4``,
      ``J = π (Ro⁴ - Ri⁴)/2``.
    """
    import math  # noqa: PLC0415

    if not values:
        return None
    sec = sec_type.strip().upper()
    if sec == "CSOLID":
        R = float(values[0])
        if R <= 0.0:
            return None
        A = math.pi * R * R
        Iz = math.pi * R**4 / 4.0
        return A, Iz, Iz, 2.0 * Iz
    if sec == "RECT":
        if len(values) < 2:
            return None
        h = float(values[0])
        b = float(values[1])
        if h <= 0.0 or b <= 0.0:
            return None
        A = h * b
        Iz = b * h**3 / 12.0
        Iy = h * b**3 / 12.0
        return A, Iz, Iy, Iz + Iy
    if sec == "CTUBE":
        if len(values) < 2:
            return None
        Ri = float(values[0])
        Ro = float(values[1])
        # Allow Ri == 0 (solid rod) but reject negative or inverted.
        if Ro <= 0.0 or Ri < 0.0 or Ro < Ri:
            return None
        if Ro == Ri:
            return None
        A = math.pi * (Ro * Ro - Ri * Ri)
        Iz = math.pi * (Ro**4 - Ri**4) / 4.0
        return A, Iz, Iz, 2.0 * Iz
    if sec == "ASEC":
        # SECDATA, A, Iyy, Iyz, Izz, Iw, J, CGy, CGz, SHy, SHz, TKz, TKy
        # Direct arbitrary section — pull (A, Iyy, Izz, J).
        if len(values) < 6:
            return None
        A = float(values[0])
        Iyy = float(values[1])
        Izz = float(values[3])
        J = float(values[5])
        if A <= 0.0:
            return None
        return A, Izz, Iyy, J
    if sec == "I":
        # SECDATA, W1, W2, W3, t1, t2, t3
        # W1, W2 = top/bottom flange widths; W3 = total height;
        # t1, t2 = flange thicknesses; t3 = web thickness.
        # Symmetric form (W1=W2, t1=t2) gets the textbook I-beam props;
        # unsymmetric uses the general parallel-axis split.
        if len(values) < 6:
            return None
        W1 = float(values[0])
        W2 = float(values[1])
        H = float(values[2])
        t1 = float(values[3])
        t2 = float(values[4])
        tw = float(values[5])
        if min(W1, W2, H, t1, t2, tw) <= 0.0:
            return None
        web_h = H - t1 - t2
        if web_h <= 0.0:
            return None
        A_top = W1 * t1
        A_bot = W2 * t2
        A_web = web_h * tw
        A = A_top + A_bot + A_web
        # Centroid (measured from bottom of bottom flange).
        y_top = H - t1 / 2.0
        y_bot = t2 / 2.0
        y_web = t2 + web_h / 2.0
        y_bar = (A_top * y_top + A_bot * y_bot + A_web * y_web) / A
        # Iz about horizontal centroidal axis (bending in vertical plane).
        Iz = (
            W1 * t1**3 / 12.0
            + A_top * (y_top - y_bar) ** 2
            + W2 * t2**3 / 12.0
            + A_bot * (y_bot - y_bar) ** 2
            + tw * web_h**3 / 12.0
            + A_web * (y_web - y_bar) ** 2
        )
        # Iy about vertical centroidal axis (bending out of plane).
        Iy = t1 * W1**3 / 12.0 + t2 * W2**3 / 12.0 + web_h * tw**3 / 12.0
        # Saint-Venant torsion (thin-walled approximation).
        J = (W1 * t1**3 + W2 * t2**3 + web_h * tw**3) / 3.0
        return A, Iz, Iy, J
    if sec == "T":
        # SECDATA, W1, W2, T1, T2
        # W1 = flange width; W2 = web height (excluding flange);
        # T1 = flange thickness; T2 = web thickness.
        if len(values) < 4:
            return None
        W1 = float(values[0])
        W2 = float(values[1])
        T1 = float(values[2])
        T2 = float(values[3])
        if min(W1, W2, T1, T2) <= 0.0:
            return None
        A_flange = W1 * T1
        A_web = W2 * T2
        A = A_flange + A_web
        # Centroid measured from web bottom (web sits below flange).
        y_flange = W2 + T1 / 2.0
        y_web = W2 / 2.0
        y_bar = (A_flange * y_flange + A_web * y_web) / A
        Iz = (
            W1 * T1**3 / 12.0
            + A_flange * (y_flange - y_bar) ** 2
            + T2 * W2**3 / 12.0
            + A_web * (y_web - y_bar) ** 2
        )
        Iy = T1 * W1**3 / 12.0 + W2 * T2**3 / 12.0
        J = (W1 * T1**3 + W2 * T2**3) / 3.0
        return A, Iz, Iy, J
    if sec == "CHAN":
        # SECDATA, W1, W2, W3, t1, t2, t3
        # W1, W2 = top/bottom flange widths; W3 = total channel height;
        # t1, t2 = flange thicknesses; t3 = web thickness.
        # Same numerical structure as I but the Iy is dominated by the
        # web offset (asymmetric); we use a simpler thin-wall closed
        # form that's good enough for the corpus's modal/static cases.
        if len(values) < 6:
            return None
        W1 = float(values[0])
        W2 = float(values[1])
        H = float(values[2])
        t1 = float(values[3])
        t2 = float(values[4])
        tw = float(values[5])
        if min(W1, W2, H, t1, t2, tw) <= 0.0:
            return None
        web_h = H - t1 - t2
        if web_h <= 0.0:
            return None
        A = W1 * t1 + W2 * t2 + web_h * tw
        # Approximate Iz / Iy / J via the same centroid split as I.
        y_top = H - t1 / 2.0
        y_bot = t2 / 2.0
        y_web = t2 + web_h / 2.0
        y_bar = (W1 * t1 * y_top + W2 * t2 * y_bot + web_h * tw * y_web) / A
        Iz = (
            W1 * t1**3 / 12.0
            + W1 * t1 * (y_top - y_bar) ** 2
            + W2 * t2**3 / 12.0
            + W2 * t2 * (y_bot - y_bar) ** 2
            + tw * web_h**3 / 12.0
            + web_h * tw * (y_web - y_bar) ** 2
        )
        # Iy with the channel offset (web one side, flanges the other).
        Iy = t1 * W1**3 / 12.0 + t2 * W2**3 / 12.0 + web_h * tw**3 / 12.0
        J = (W1 * t1**3 + W2 * t2**3 + web_h * tw**3) / 3.0
        return A, Iz, Iy, J
    return None


def _h_sectype(args: list[str], _apdl: APDL, params: _ParamTable, section: _SectionState) -> None:
    """``SECTYPE, SECID, TYPE, SUBTYPE, NAME`` — open a section record.

    Phase 1 records only the section ID + type/subtype so the
    following ``SECDATA`` / ``SECD`` lands in the right place; the
    actual cross-section data is collected via :func:`_h_secdata` and
    currently translated only for the ``LINK`` family (single-area
    cross section), which is the form VM1 uses.  Other types
    (``BEAM``, ``SHELL``) are tracked as kernel-side gaps (see #515).
    """
    if not args:
        return

    def _field(idx: int) -> str:
        return args[idx].strip().upper() if len(args) > idx else ""

    section.active_id = _to_int(args[0], params)
    section.active_type = _field(1)
    section.active_subtype = _field(2)


def _h_secdata(args: list[str], apdl: APDL, params: _ParamTable, section: _SectionState) -> None:
    """``SECDATA, V1, V2, ...`` — section data for the open SECTYPE.

    For LINK / TRUSS sections the single field is cross-section
    area, landed verbatim as ``real[0]``.  For BEAM sections we
    translate the SECDATA fields plus the SECTYPE *sub-type*
    (``CSOLID`` / ``RECT`` / ``CTUBE``) into the ``(AREA, IZZ,
    IYY, J)`` tuple Beam2 expects in ``real[0..3]``.  See
    :func:`_beam_real_from_secdata` for the conversion table.

    SHELL sections accumulate per-layer thicknesses into ``real[0]``;
    PIPE sections translate ``(OD, wall)`` through the shim (falling
    back to the CTUBE beam-real table).  Other section types fall
    through with a warning.
    """
    if section.active_id is None:
        _LOG.warning("SECDATA without preceding SECTYPE — skipping")
        return
    # Preserve positional indexing for SECDATA: empty interior slots
    # become 0.0 so vm41's ``SECD,100,1000,1,1000,,1`` (BEAM,ASEC)
    # lands the trailing ``1`` at slot 5 rather than collapsing to
    # slot 4 and tripping the "len(values) >= 6" gate.  Non-numeric
    # trailing fields (vm78 layer names like "LAYER1") still skip.
    raw = list(args)
    # Trailing empty slots carry no information — drop them first so
    # they don't pad ``values`` with spurious 0.0 entries.
    while raw and not raw[-1].strip():
        raw.pop()
    values: list[float] = []
    for a in raw:
        if not a.strip():
            values.append(0.0)
            continue
        try:
            values.append(_to_float(a, params))
        except (ValueError, TypeError):
            continue
    # LINK / TRUSS: the single SECDATA field is the cross-section area.
    if section.active_type in ("LINK", "TRUSS"):
        apdl.r(section.active_id, *values)
        return
    if section.active_type == "BEAM":
        # Route through APDL.secdata so the full BeamSectionProperties
        # (incl. extreme-fiber distances c_top / c_bot for fiber-stress
        # recovery) lands on the model.  The shim sets active_section
        # internally and computes both the (A, Iz, Iy, J) reals and
        # the auxiliary section metadata.
        try:
            apdl.sectype(section.active_id, section.active_type, section.active_subtype)
            apdl.secdata(*values)
            return
        except (ValueError, NotImplementedError, KeyError):
            pass
        # Fallback to the simpler beam-real translation when the shim
        # rejects (subtypes the shim doesn't handle but our table does).
        beam_real = _beam_real_from_secdata(section.active_subtype, values)
        if beam_real is not None:
            apdl.r(section.active_id, *beam_real)
            return
    if section.active_type == "SHELL" and values:
        # SECDATA, t, ... — first slot is the layer thickness.
        # Multi-layer composite shells (vm78 SHELL281 with 4 layers of
        # 0.5 each) accumulate via repeated SECDATA calls; sum the
        # thicknesses into ``real[0]`` so the kernel sees the total
        # cross-section thickness.  Per-layer treatment is a separate
        # follow-up.
        existing = apdl.model.real_constants.get(int(section.active_id))
        prior_t = float(existing[0]) if existing is not None and len(existing) > 0 else 0.0
        apdl.r(section.active_id, prior_t + float(values[0]))
        return
    if section.active_type == "PIPE" and len(values) >= 2:
        # SECDATA, OD, wall, ... → BEAM (A, Iz, Iy, J) for a tube.
        # Route through APDL.secdata so the full BeamSectionProperties
        # (incl. cardinal extreme-fiber distances c_top / c_bot /
        # c_left / c_right = ±OD/2) lands on the model for downstream
        # PLNSOL principal-stress fiber sampling.
        od = float(values[0])
        wall = float(values[1])
        if od > 0.0 and wall > 0.0 and wall <= od / 2.0:
            try:
                apdl.sectype(section.active_id, "PIPE")
                apdl.secdata(od, wall)
                return
            except (ValueError, NotImplementedError, KeyError):
                pass
            # Fallback path used when the shim doesn't accept the OD/wall
            # combo (e.g. a future variant we haven't taught).  Land the
            # CTUBE-equivalent (A, Iz, Iy, J) but lose the c_top / c_bot
            # bookkeeping — fiber-stress probes against this element will
            # silently return None.
            ro = od / 2.0
            ri = ro - wall
            if ri == ro:
                return
            tube_real = _beam_real_from_secdata("CTUBE", [ri, ro])
            if tube_real is not None:
                apdl.r(section.active_id, *tube_real)
                return
    _LOG.warning(
        "SECDATA: section type %r (subtype %r) is not yet supported; "
        "kernel-side gap tracked at #515 (BEAM I) / future issues — skipping",
        section.active_type,
        section.active_subtype,
    )


def _h_keyopt(args: list[str], apdl: APDL, params: _ParamTable) -> None:
    """``KEYOPT, ITYPE, KNUM, VALUE`` — set a single KEYOPT slot.

    Forwards the slot to :meth:`APDL.keyopt`, which maps recognised
    combinations onto kernel material flags (PLANE182 KEYOPT(1)=2 →
    enhanced strain; PLANE182 KEYOPT(3)=3 → plane strain; …) and
    silently records the rest.

    Raises ``ValueError`` when fewer than three fields are given.
    """
    if len(args) < 3:
        raise ValueError(f"KEYOPT: expected ITYPE,KNUM,VALUE, got {args}")
    et_id, knum, value = (_to_int(a, params) for a in args[:3])
    apdl.keyopt(et_id, knum, value)


def _h_tb(args: list[str], apdl: APDL, params: _ParamTable) -> None:
    """``TB, Lab, MAT, NTEMP, NPTS, TBOPT`` — open a material data table.

    Raises ``ValueError`` when the label field is missing entirely.
    """
    if not args:
        raise ValueError("TB: expected at least Lab")

    def _int_field(idx: int, default: int) -> int:
        # Empty / absent slots fall back to MAPDL's defaults.
        if len(args) > idx and args[idx].strip():
            return _to_int(args[idx], params)
        return default

    apdl.tb(
        args[0].strip(),
        _int_field(1, 1),
        _int_field(2, 1),
        _int_field(3, 0),
        args[4].strip() if len(args) > 4 else "",
    )


def _h_tbtemp(args: list[str], apdl: APDL, params: _ParamTable) -> None:
    """``TBTEMP, TEMP, ...`` — temperature row marker for the open table."""
    if not args:
        return
    apdl.tbtemp(_to_float(args[0], params))


def _h_tbdata(args: list[str], apdl: APDL, params: _ParamTable) -> None:
    """``TBDATA, START, V1, V2, ...`` — populate the open table.

    Raises ``ValueError`` unless both START and at least one value are given.
    """
    if len(args) < 2:
        raise ValueError(f"TBDATA: expected at least START + 1 value, got {args}")
    start = _to_int(args[0], params)
    apdl.tbdata(start, *[_to_float(a, params) for a in args[1:] if a.strip()])


def _h_skip(args: list[str], apdl: APDL, _params: _ParamTable) -> None:  # noqa: ARG001
    """Do nothing — shared handler for verbs we recognise yet deliberately ignore."""


def _record_skipped(model: Model, verb: str, line: str, reason: str) -> None:
    """Append an intentionally-skipped verb to ``model._deck_skipped``.

    The attribute is a lazily-created list of ``(verb, line, reason)``
    tuples.  Downstream tools (``convert_to_script``, the
    ``compare_first100`` harness) read it to emit ``# SKIPPED ...``
    comments or to audit which deck commands were dropped.
    """
    if getattr(model, "_deck_skipped", None) is None:
        model._deck_skipped = []  # type: ignore[attr-defined]
    model._deck_skipped.append((verb, line.strip(), reason))


def _record_unknown(model: Model, verb: str, line: str) -> None:
    """Record a verb the parser doesn't recognise.

    An unknown verb is one absent from both the handler dispatch and
    the curated skip tables (``_SKIP_VERBS`` and the slash no-op
    table) - a hard signal of missing dialect coverage.  Each one is
    appended to ``model._deck_unknown`` (a lazily-created list of
    ``(verb, line)`` tuples) so the ``compare_first100`` harness and
    other VM-side checkers can fail loudly when a deck triggers any.

    Parsing still continues - some VM decks contain unknowns the probe
    of interest doesn't depend on - but the harness flags the VM as
    failing if its ``_deck_unknown`` list is non-empty.
    """
    if getattr(model, "_deck_unknown", None) is None:
        model._deck_unknown = []  # type: ignore[attr-defined]
    model._deck_unknown.append((verb, line.strip()))
    _LOG.warning("from_dat: unknown verb %r in line %r", verb, line.strip())


def _h_units(args: list[str], apdl: APDL, _params: _ParamTable) -> None:
    """``/UNITS, TOKEN`` — stamp the deck's unit system onto the Model."""
    if not args:
        return
    apdl.model.set_unit_system(unit_system_from_mapdl(args[0].strip()))


# Verbs we recognise but intentionally skip - mapping verb -> reason.
# Keys are the upper-case verb spellings as they appear after the deck
# line is split (including any leading ``*`` or ``/``).
# The reason text is shown when ``convert_to_script`` emits the line as
# a comment, and recorded on ``model._deck_skipped`` at parse time so
# downstream tooling can audit what was dropped.  Anything not in this
# table and not in the verb-handler dispatch is reported as an unknown
# verb (a hard signal that some physics may be missing) - see the
# ``model._deck_unknown`` list and the ``compare_first100`` harness.
_SKIP_VERBS: dict[str, str] = {
    # Post-processing macros: the test harness reads results from the
    # solver's Result object directly, so these deck-side commands are
    # observed only for traceability.
    "*GET": "post-processing query - harness reads probe values directly off the Result",
    "*SET": "post-processing query - same as *GET",
    "*VWRITE": "post-processing report formatter",
    "*VFILL": "post-processing array fill",
    "*DIM": "post-processing array allocation",
    "*STATUS": "post-processing diagnostic dump",
    "*MSG": "post-processing message print",
    "ETABLE": "post-processing element table - harness reads element-level fields off the Result",
    "PRNSOL": "post-processing print - we expose this via Result accessors",
    "PLDISP": "post-processing plot - GUI-only",
    "PLNSOL": "post-processing plot - GUI-only",
    "PLESOL": "post-processing plot - GUI-only",
    "PRESOL": "post-processing print - we expose this via Result accessors",
    "PRRSOL": "post-processing reaction print",
    "PRRFOR": "post-processing reaction print",
    "*LIST": "post-processing array list",
    "SET": "post-processing result-set selector - harness picks set/sbstep itself",
    "*VOPER": "post-processing array op",
    # Selection / database queries.  NSEL / ALLSEL / NALL are handled
    # via dispatch (see ``_h_nsel`` / ``_h_allsel``); the rest below
    # have no model-state effect once the load step finishes parsing.
    "ESEL": "element selection - the harness selects elements directly",
    "NSLN": "selection helper (nodes-on-line)",
    "ESLN": "selection helper (elements-on-node)",
    "FSUM": "diagnostic print of summed forces",
    "EALL": "select-all-elements convenience",
    "KSEL": "keypoint selection",
    "LSEL": "line selection",
    "ASEL": "area selection",
    "VSEL": "volume selection",
    # Solver / output flow.
    "OUTPR": "print-output control - we always compute the full result set",
    "OUTRES": "result-output control - we always retain the full result set",
    "SOLVE": "explicit solve trigger - run_input_deck dispatches solves itself",
    "TRNOPT": "transient analysis option (FULL/MSUP/REDUC) - we run FULL transient regardless",
    "HROPT": "harmonic analysis option (FULL/MSUP/REDUC) - we run FULL harmonic regardless",
    "DERIV": "POST26 numeric derivative - we recompute time-series operations off the Result",
    # NB: ANTYPE / NSUBST / MODOPT are NOT in the skip set - the
    # dispatcher consumes them into ``_DeckMeta`` so ``run_input_deck``
    # can pick the matching solve verb automatically.
    "FINISH": "module phase boundary (/PREP7 -> /SOLU -> /POST1)",
    "FINI": "module phase boundary (FINISH alias)",
    # Plot prefs - GUI-only.
    "JPGPRF": "JPEG output preference",
    "/EFACET": "element-facet plot pref",
    "/PNUM": "plot numbering pref",
    "/VIEW": "viewport pref",
    "/REPLOT": "plot refresh",
    # Workbench dialect - export-only verbs that don't change the model.
    "SHPP": "shape-checking preference (Workbench export)",
    "ETCON": "automatic KEYOPT control - explicit KEYOPT in the deck overrides anyway",
    "NUMOFF": "numbering offset for Workbench-internal book-keeping",
    "TOFFST": "absolute-zero temperature offset (read by /SOLU; we don't model)",
    "CSYS": "active coordinate system - we always work in global",
    "RSYS": "results coordinate system (post-only)",
    "DSYS": "display coordinate system",
    "ESYS": "element coordinate system per-element (uncommon)",
    "LOCAL": "define local CSYS - see CSYS",
    "DESIZE": "default mesh sizing (Workbench-internal)",
    "MSHAPE": "mesh shape pref - deck is already meshed",
    "MSHKEY": "mesh key pref - deck is already meshed",
    "MOPT": "meshing option - deck is already meshed",
    "/INPUT": "nested input file - we don't follow includes yet",
    "/SYS": "OS shell-out",
    "*USE": "macro library invocation",
    "*ULIB": "macro library load",
    "*CFOPEN": "*CFOPEN / *CFWRITE / *CFCLOSE write to file",
    "*CFWRITE": "*CFWRITE / *CFCLOSE / *CFOPEN write to file",
    "*CFCLOSE": "*CFCLOSE / *CFWRITE / *CFOPEN write to file",
    "_PRMETH": "internal scratch parameter (Workbench)",
    "_PRTYPE": "internal scratch parameter (Workbench)",
    # POST26 time-history macros - the harness regexes the deck text
    # for NSOL / ESOL declarations and reads the corresponding result
    # series off the Result object directly, so the deck-side macros
    # never need to execute against the model.
    "NSOL": "POST26 nodal-solution variable - the harness reads result series off the Result",
    "ESOL": "POST26 element-solution variable - the harness reads result series off the Result",
    "PRVAR": "POST26 print variable - we expose this via Result accessors",
    "PLVAR": "POST26 plot variable - GUI-only",
    "STORE": "POST26 store-from-result-set marker",
    "FILE": "POST26 result-file selector - we always read the active result",
    "PRETAB": "post-processing print of the active ETABLE",
    "PRSECT": "post-processing print of section results",
    "*VFUN": "post-processing array-function macro",
    "PATH": "post-processing path definition for PRPATH/PLPATH",
    "PLPATH": "post-processing plot of a path series",
    "PRPATH": "post-processing print of a path series",
    "AVPRIN": "post-processing principal-stress averaging pref",
    # Modal solve options.  We always expand and retain every requested
    # mode, so MXPAND control is informational only.
    "MXPAND": "modal mode-expansion control - we always expand all requested modes",
    # Mesh preferences that don't change the meshed model (the deck has
    # already meshed by the time these run).
    "SMRT": "smart-meshing preference - deck is already meshed",
    "NUMSTR": "default numbering offset for generation commands - non-physics",
    "EMUNIT": "electromagnetic unit-system pref - irrelevant for structural / thermal",
    "NSORT": "post-processing sort-by-value pref",
    # Load-step kind.  KBC chooses ramped (default) vs stepped loading
    # within a load step; for a single-step linear analysis the deck-
    # side preference has no effect on the result.  Decks that require
    # a multi-step ramp (NLGEOM / time-history) are surfaced separately
    # by NLGEOM / TRNOPT staying as unknowns - skipping KBC alone does
    # not mask those gaps.
    "KBC": "load-step kind (ramped/stepped) - immaterial for single-step linear analyses",
    # Automatic time stepping.  Only meaningful inside a nonlinear or
    # transient solve loop - which we don't run.  Decks that need NLGEOM
    # or TRNOPT are surfaced as unknowns separately, so AUTOTS by itself
    # is just informational.
    "AUTOTS": "auto time-stepping pref - only matters inside nonlinear / transient solves we don't run",
    # Convergence tolerance for nonlinear solves.  Same logic as AUTOTS:
    # if NLGEOM / TRNOPT are off (which is our linear-only path) the
    # CNVTOL setting is unreachable.  When we add nonlinear support we
    # MUST move this out of the skip set.
    "CNVTOL": "nonlinear-convergence tolerance - only matters inside nonlinear solves we don't run",
    # Transient time-integration parameters.  Same logic as AUTOTS /
    # CNVTOL - only matters inside transient solves we don't run.
    "TINTP": "transient time-integration parameters - only matters inside transient solves we don't run",
    # Output-flow / parameter persistence.  These manage the deck's
    # interaction with the .parm / .rdb files MAPDL spawns.  We don't
    # checkpoint to disk between solves; the in-memory Model carries
    # the state across the deck regardless.
    "PARSAV": "save parameters to .parm file - in-memory state already carries across the deck",
    "PARRES": "restore parameters from .parm file - in-memory state already carries across the deck",
    "RESCONTROL": "restart-file write control - we don't checkpoint to disk between solves",
    # Harmonic-analysis output controls.  The solver-mode-selecting
    # verb (HROPT) stays unknown so the gap surfaces; only the
    # *output* control verb is safe to skip.
    "HROUT": "harmonic-analysis output control - immaterial when not running harmonic",
    # More post-processing prints / aggregations.  Same justification
    # as the existing PRNSOL / FSUM skips: the harness reads result
    # values off the Result object directly.
    "PRNLD": "post-processing print of nodal loads",
    "PRTIME": "post-processing print of analysis time",
    "SSUM": "post-processing sum of element-table values",
    "MSHA": "mesh-shape preference (MSHAPE alias) - deck is already meshed",
}


# Slash verbs we recognise as no-ops or info-only.  Same shape as
# ``_SKIP_VERBS`` (verb -> reason) so ``convert_to_script`` can emit
# a ``# SKIPPED <verb>: <reason>`` comment and the parser can record
# the skip on ``model._deck_skipped`` for traceability.  Keys keep
# their leading slash, matching the verb as split from the deck line.
_SLASH_NOOP_GUI: dict[str, str] = {
    # GUI annotation / plot prefs that have no model-state effect.
    "/ANUM": "GUI annotation number",
    "/AXLAB": "GUI plot axis label",
    "/WINDOW": "GUI plot window pref",
    "/LINE": "GUI line-plot pref",
    "/LSPEC": "GUI line-symbol pref",
    "/LSYMBOL": "GUI line-symbol pref",
    "/CTYPE": "GUI contour type",
    "/FACET": "GUI plot facet pref",
    "/ERASE": "GUI plot erase",
    "/XRANGE": "GUI plot X range",
    "/YRANGE": "GUI plot Y range",
    "/CLABEL": "GUI contour label",
    "/GTHK": "GUI plot line-thickness pref",
    "/FILNAM": "deck file-naming preference",
    "/GRID": "GUI plot grid pref",
    "/ESHAPE": "GUI element-shape display pref",
    "/GRAPHICS": "GUI graphics-mode pref (FULL / POWER / etc)",
}


def _build_verb_table(
    section: _SectionState,
    history: _NodeHistory,
    geom: _GeometryBuilder | None = None,
) -> dict[str, Callable[[list[str], APDL, _ParamTable], None]]:
    """Build the verb dispatch table.

    Section verbs share a :class:`_SectionState`; ``N`` / ``FILL`` share
    a :class:`_NodeHistory` so the bare-``FILL`` form can recover its
    endpoints; the 1D geometry-builder verbs (``K`` / ``L`` / ``LESIZE``
    / ``LMESH``) share a :class:`_GeometryBuilder` so ``LMESH`` can
    resolve the keypoint coords + division count its earlier siblings
    recorded.
    """
    if geom is None:
        geom = _GeometryBuilder()

    def bind(handler, state):
        # Close over ``state`` so shared-state handlers fit the uniform
        # (args, apdl, params) dispatch signature the table promises.
        return lambda args, apdl, params: handler(args, apdl, params, state)

    table: dict[str, Callable[[list[str], APDL, _ParamTable], None]] = {
        # Node / element creation (N and FILL share the node history).
        "N": bind(_h_n, history),
        "E": _h_e,
        "EGEN": _h_egen,
        "FILL": bind(_h_fill, history),
        "NGEN": _h_ngen,
        # Element-type / attribute pointers and material / real data.
        "ET": _h_et,
        "TYPE": _h_type,
        "REAL": _h_real,
        "MAT": _h_mat,
        "MP": _h_mp,
        "R": _h_r,
        "RMORE": _h_rmore,
        # Constraints / loads / couplings.
        "D": _h_d,
        "DDELE": _h_ddele,
        "F": _h_f,
        "TREF": _h_tref,
        "BFUNIF": _h_bfunif,
        "BF": _h_bf,
        "CP": _h_cp,
        "DSYM": _h_dsym,
        "SFBEAM": _h_sfbeam,
        "SECNUM": _h_secnum,
        "SECN": _h_secnum,
        "RMODIF": _h_rmodif,
        "SAVE": _h_save,
        "RESUME": _h_resume,
        # Selection verbs.
        "NSEL": _h_nsel,
        "ESEL": _h_esel,
        "ALLSEL": _h_allsel,
        "NALL": _h_allsel,
        "EALL": lambda args, apdl, params: apdl.esel("ALL"),
        # Surface / body-force loads.
        "SF": _h_sf,
        "SFE": _h_sf,
        "BFE": _h_bfe,
        # Sections (full names plus 4-char aliases share one state).
        "SECTYPE": bind(_h_sectype, section),
        "SECT": bind(_h_sectype, section),
        "SECDATA": bind(_h_secdata, section),
        "SECD": bind(_h_secdata, section),
        "KEYOPT": _h_keyopt,
        "TB": _h_tb,
        "TBTEMP": _h_tbtemp,
        "TBDATA": _h_tbdata,
        # 1D geometry builder (shared _GeometryBuilder).
        "K": bind(_h_k, geom),
        "L": bind(_h_l, geom),
        "LESIZE": bind(_h_lesize, geom),
        "ESIZE": bind(_h_esize, geom),
        "LMESH": bind(_h_lmesh, geom),
        # 2D geometry.
        "A": _h_a,
        "AESIZE": _h_aesize,
        "AMESH": _h_amesh,
        "DK": bind(_h_dk, geom),
    }
    return table


# ---------------------------------------------------------------------
# Slash-verb / star-verb handlers
# ---------------------------------------------------------------------


# Slash verbs we recognise as no-ops or info-only.  Same shape as
# ``_SKIP_VERBS`` (verb -> reason) so ``convert_to_script`` can emit
# a ``# SKIPPED <verb>: <reason>`` comment and the parser can record
# the skip on ``model._deck_skipped`` for traceability.
_SLASH_NOOP: dict[str, str] = {
    # Module phase markers — the reader replays verbs statelessly, so
    # entering a processor module needs no action of its own.
    "/PREP7": "module phase marker (preprocessor)",
    "/SOLU": "module phase marker (solver)",
    "/POST1": "module phase marker (general post)",
    "/POST26": "module phase marker (time-history post)",
    "/AUX2": "module phase marker (binary file inspection)",
    "/AUX12": "module phase marker (radiation matrix)",
    "/AUX15": "module phase marker (legacy IO)",
    # Printed-output / listing controls.
    "/OUT": "redirect printed output to a file",
    "/VERIFY": "VM-corpus verification banner",
    "/TITLE": "deck title for printouts",
    "/COM": "deck comment line",
    "/SHOW": "graphics output device pref",
    "/NOPR": "suppress printed output",
    "/NOLIST": "suppress deck listing",
    "/GO": "resume printed output",
    "/GOPR": "resume printed output",
    # NOTE(review): /EOF is recorded as a plain skip like any other
    # no-op — ``_dispatch_slash`` / ``from_dat`` do NOT stop reading at
    # it.  Confirm no deck relies on junk after /EOF being ignored.
    "/EOF": "end-of-file marker (stop reading further verbs)",
    "/CONFIG": "build/config flag",
    "/BATCH": "batch-mode marker",
    # Workbench dialect markers - ``/wb,...`` is Workbench's internal
    # phase tracking (``/wb,elem,start`` etc.); harmless.
    "/WB": "Workbench-internal phase tracking",
    "/UIS": "interactive UI setting",
    "/MSTART": "post-processing start marker",
    # GUI / plot preferences with no model-state effect.
    "/REPLOT": "GUI plot refresh",
    "/PSF": "plot surface load pref",
    "/PBC": "plot boundary-condition pref",
    "/PNUM": "plot numbering pref",
    "/EFACET": "element-facet plot pref",
    "/VIEW": "viewport pref",
    "/COLOR": "color pref",
    "/CONTOUR": "contour pref",
    "/PLOPTS": "plot options",
    "/AUTO": "automatic plot scaling",
    "/USER": "user-defined plot pref",
    "/FOCUS": "viewport focus point",
    "/DSCALE": "displacement-plot scaling",
    "/SCALE": "plot scaling",
    "/RESET": "reset plot defaults",
    "/DELETE": "delete temporary file",
    "/RGB": "color preference",
    "/PSPEC": "plot symbol pref",
    "/PMOR": "modal plot pref",
    # Report / session housekeeping.
    "/HEADER": "report header pref",
    "/FORMAT": "report format pref",
    "/INQUIRE": "filesystem query",
    "/EDGE": "plot edge pref",
    "/MENU": "GUI menu pref",
    "/PSTATUS": "plot status pref",
    "/STITLE": "subtitle pref",
    "/UCMD": "user command alias",
    "/EXIT": "MAPDL session exit - we don't model session state",
}
# Fold in the GUI-only subset so lookups hit a single table.
_SLASH_NOOP.update(_SLASH_NOOP_GUI)


def _dispatch_slash(line: str, apdl: APDL, _params: _ParamTable) -> None:
    """Handle ``/<VERB>, ARG1, ARG2, …`` (or ``/COM <text>``)."""
    # MAPDL ``/COM`` accepts both comma-separated (``/COM, text``) and
    # space-separated (``/COM REFERENCE: ...``) forms.  Without this
    # special case the whole space-separated comment would be parsed as
    # one giant verb name and every reference line would register as an
    # "unknown verb" hit.
    if line.upper().startswith("/COM ") or line.strip().upper() == "/COM":
        _record_skipped(apdl._model, "/COM", line, _SLASH_NOOP["/COM"])
        return

    pieces = [piece.strip() for piece in line.split(",")]
    verb = pieces[0].upper()
    args = pieces[1:]

    if verb == "/UNITS":
        # The one slash verb that is honoured rather than skipped.
        _h_units(args, apdl, _params)
    elif verb == "/CLEAR":
        # Multi-analysis decks (e.g. vm81) reuse a single input file
        # for two independent prep+solve passes separated by /CLEAR.
        # Reset the model so leftover ETs / elements / nodes from the
        # first pass don't contaminate the second.
        apdl._model._reset_preprocessor_state()
    elif verb in _SLASH_NOOP:
        _record_skipped(apdl._model, verb, line, _SLASH_NOOP[verb])
    else:
        _record_unknown(apdl._model, verb, line)


def _dispatch_star(line: str, apdl: APDL, params: _ParamTable) -> None:
    """Handle ``*<VERB>, ARG1, …`` — most are post-processing macros.

    ``*AFUN`` is the only star verb that changes parser state (it sets
    the trig-unit mode on the parameter table); every other recognised
    star verb is a record-skip, and anything else is recorded unknown.

    Fix: dropped the stale ``# noqa: ARG001`` — all three parameters
    are used (``line`` is split, ``apdl._model`` is passed to the
    recorders, ``params.set_afun`` is called).
    """
    parts = [p.strip() for p in line.split(",")]
    verb = parts[0].upper()
    if verb == "*AFUN":
        # Bare ``*AFUN`` with no unit argument: nothing to set.
        if len(parts) < 2:
            return
        params.set_afun(parts[1])
        return
    if verb in _SKIP_VERBS:
        _record_skipped(apdl._model, verb, line, _SKIP_VERBS[verb])
        return
    _record_unknown(apdl._model, verb, line)


# ---------------------------------------------------------------------
# Public entry point
# ---------------------------------------------------------------------


def from_dat(path: str | Path) -> Model:
    """Read a MAPDL ASCII input deck (``.dat``) into a Model.

    Mirrors :func:`femorph_solver.mapdl_api.from_cdb` — the deck's
    pre-processor block is replayed against a fresh
    :class:`~femorph_solver.Model` via the :class:`APDL` shim, and the
    populated Model is returned.  The reader stops short of running
    ``SOLVE``; the caller is responsible for invoking
    :meth:`Model.solve_static` (or one of the other solve verbs).

    Parameters
    ----------
    path
        Path to the ``.dat`` file.

    Returns
    -------
    femorph_solver.Model
        Pre-processor block of the deck, fully applied.

    Notes
    -----
    Phase 1 verb coverage and limitations live at the top of this
    module's docstring.  Pre-processor verbs the reader doesn't
    recognise emit a ``logging.WARNING`` and are skipped — they never
    raise and they never silently drop.
    """
    # Hoisted out of the parse loop: previously re-imported inside five
    # separate meta-capture branches on every matching statement.
    import contextlib  # noqa: PLC0415

    from femorph_solver.model import Model  # noqa: PLC0415

    text = Path(path).read_text(encoding="utf-8", errors="replace")
    model = Model()
    # Initialise the deck-trace lists eagerly so callers can read them
    # even when the deck has zero skips / unknowns (an empty list is
    # easier to introspect than an attribute that may or may not exist).
    model._deck_skipped = []  # type: ignore[attr-defined]
    model._deck_unknown = []  # type: ignore[attr-defined]
    apdl = APDL(model)
    params = _ParamTable()
    section = _SectionState()
    history = _NodeHistory()
    meta = _DeckMeta()
    verb_handlers = _build_verb_table(section, history)

    # APDL block-skipper.  Three nestable opener -> closer pairs whose
    # bodies we skip at parse time:
    #
    #   *CREATE        -> *END     macro definition (body executes only
    #                              via *USE, which we already skip)
    #   *IF, ...,THEN  -> *ENDIF   conditional (most VM-corpus uses are
    #                              post-processing report formatting)
    #   *DO / *DOWHILE -> *ENDDO   parameter loops (mostly post-
    #                              processing iteration; vm24's solve-
    #                              driving form is a known limitation
    #                              -- the deck flags itself with a
    #                              followup unknown if the body matters)
    #
    # State is a stack of expected closers so nested blocks (e.g. *IF
    # inside *DO) close cleanly.  *ELSE / *ELSEIF / *CYCLE / *EXIT
    # are recorded as in-block skips but don't change the stack.
    block_close_stack: list[str] = []
    block_opener_to_closer: dict[str, str] = {
        "*CREATE": "*END",
        "*IF": "*ENDIF",
        "*DO": "*ENDDO",
        "*DOWHILE": "*ENDDO",
    }
    # MAPDL accepts STATIC / MODAL / HARMIC / TRANS / BUCKLE plus
    # integer aliases (0=STATIC, 2=MODAL, 3=HARMIC, 4=TRANS, 1=BUCKLE).
    # Hoisted so the table isn't rebuilt on every ANTYPE statement.
    antype_num_to_name = {
        "0": "STATIC",
        "2": "MODAL",
        "3": "HARMIC",
        "4": "TRANS",
        "1": "BUCKLE",
    }
    for stmt in _tokenise_deck(text):
        # Skip C*** / C-prefixed comment lines.
        if stmt.upper().startswith("C***") or stmt.upper().startswith("C "):
            continue
        upper_stmt_first = stmt.upper().strip().split(",", 1)[0].strip()
        # Single-line *IF (``*IF, A, OP, B, action``) doesn't open a
        # block - only the ``,THEN`` form does.  Distinguish here.
        is_block_if = upper_stmt_first == "*IF" and ",THEN" in stmt.upper().replace(" ", "")
        if block_close_stack:
            if upper_stmt_first == block_close_stack[-1]:
                closer = block_close_stack.pop()
                _record_skipped(model, closer, stmt, "APDL block terminator - body was skipped")
                continue
            # Nested opener inside an already-open block: push another
            # closer so the inner end doesn't pop the outer.
            if upper_stmt_first in block_opener_to_closer:
                if upper_stmt_first == "*IF" and not is_block_if:
                    # Single-line *IF inside an already-skipped block
                    # is just another in-body line.  Pass through.
                    pass
                else:
                    block_close_stack.append(block_opener_to_closer[upper_stmt_first])
            _record_skipped(
                model,
                upper_stmt_first or "<block-body>",
                stmt,
                "inside an APDL *CREATE / *IF / *DO block - body skipped",
            )
            continue
        if upper_stmt_first == "*IF" and not is_block_if:
            # Single-line *IF outside any block: record as skipped (we
            # don't evaluate the conditional, but the action wouldn't
            # change model state for anything in our supported subset
            # anyway).
            _record_skipped(
                model,
                "*IF",
                stmt,
                "single-line *IF conditional - condition not evaluated",
            )
            continue
        if upper_stmt_first in block_opener_to_closer:
            block_close_stack.append(block_opener_to_closer[upper_stmt_first])
            _record_skipped(
                model,
                upper_stmt_first,
                stmt,
                f"APDL block opener - body skipped until matching {block_opener_to_closer[upper_stmt_first]}",
            )
            continue
        # Capture solve-driving directives into the meta record so
        # ``run_input_deck`` can dispatch to the right solve verb.
        upper = stmt.upper().strip()
        if upper.startswith("ANTYPE,"):
            value = upper.split(",", 1)[1].strip().split(",")[0].strip()
            if value:
                # Normalise both the name form and the integer alias.
                meta.antype = antype_num_to_name.get(value, value)
            # Detect linear-perturbation flag — ANTYPE,STATIC,RESTART,,,PERTURB
            # signals a multi-stage solve where the FINAL stage is whatever
            # MODOPT / BUCOPT was set up for (modal, buckle, etc.) even
            # though ANTYPE itself stays STATIC.
            if "PERTURB" in upper:
                meta.has_perturb = True
            continue
        if upper.startswith("MODOPT,"):
            # MODOPT, METHOD, NMODES, ...
            meta.has_modopt = True
            tail = [p.strip() for p in upper.split(",")[1:]]
            if len(tail) >= 2 and tail[1]:
                with contextlib.suppress(ValueError):
                    meta.n_modes = int(tail[1])
            continue
        if upper.startswith("NSUBST,"):
            tail = [p.strip() for p in upper.split(",")[1:]]
            if tail and tail[0]:
                with contextlib.suppress(ValueError):
                    meta.nsubst = int(tail[0])
            continue
        if upper.startswith("TIME,"):
            # TIME,t_end — transient end time.
            tail = [p.strip() for p in upper.split(",")[1:]]
            if tail and tail[0]:
                with contextlib.suppress(ValueError):
                    meta.t_end = float(tail[0])
            continue
        if upper.startswith("DELTIM,"):
            # DELTIM,Δt[,dt_min,dt_max] — transient time-step.
            tail = [p.strip() for p in upper.split(",")[1:]]
            if tail and tail[0]:
                with contextlib.suppress(ValueError):
                    meta.deltim = float(tail[0])
            continue
        if upper.startswith("HARFRQ,"):
            # HARFRQ,f_start[,f_end] — harmonic frequency sweep.
            tail = [p.strip() for p in upper.split(",")[1:]]
            if tail and tail[0]:
                with contextlib.suppress(ValueError):
                    meta.harfrq_start = float(tail[0])
            if len(tail) > 1 and tail[1]:
                with contextlib.suppress(ValueError):
                    meta.harfrq_end = float(tail[1])
            continue
        # Workbench-style multi-line block: tokeniser emits as a
        # ``BLOCKVERB::header::format::data::-1`` composite.  Route to
        # the matching block-handler before line-oriented dispatch.
        if stmt.startswith(("NBLOCK::", "EBLOCK::", "CMBLOCK::")):
            block_verb = stmt.split("::", 1)[0]
            try:
                if block_verb == "NBLOCK":
                    _h_nblock(stmt, apdl, history)
                elif block_verb == "EBLOCK":
                    _h_eblock(stmt, apdl)
                elif block_verb == "CMBLOCK":
                    _h_cmblock(stmt, apdl)
            except (ValueError, KeyError) as exc:
                _LOG.warning("from_dat: %s block parse failed (%s); skipped", block_verb, exc)
            continue
        # FORTRAN FORMAT continuation line for the preceding ``*VWRITE``
        # — e.g. ``(1X,A8,A8,F10.3,...)``.  The deck uses these solely to
        # render the result-comparison table; nothing physical depends on
        # them.  Record-skip so the scoreboard's unknown-verb flag stays
        # clean.
        if stmt.startswith("(") and stmt.endswith(")"):
            _record_skipped(model, "(format)", stmt, "*VWRITE FORTRAN format continuation")
            continue
        # APDL array-element assignment: ``LABEL(i,j) = '...'`` etc.
        # The comma inside the parenthesised index list breaks the plain
        # comma-split verb dispatch (verb becomes ``LABEL(1``); these
        # assignments populate I/O-only CHAR/REAL arrays the deck uses to
        # print the comparison block.  Record-skip rather than warn.
        if "=" in stmt and "(" in stmt.split("=", 1)[0]:
            _record_skipped(model, "(array=)", stmt, "I/O-only array element assignment")
            continue
        # APDL parameter assignment: ``NAME = expr``.
        # Handle this *before* the slash/star/verb dispatch so a
        # parameter named ``E`` (the elastic-modulus convention) doesn't
        # get rerouted into the ``E`` (element-create) verb.
        m = _ASSIGN_RE.match(stmt)
        if m:
            name, expr = m.group(1), m.group(2)
            try:
                params.assign(name, expr)
            except ValueError as exc:
                _LOG.warning("from_dat: parameter assignment failed: %s", exc)
            continue
        if stmt.startswith("/"):
            _dispatch_slash(stmt, apdl, params)
            continue
        if stmt.startswith("*"):
            _dispatch_star(stmt, apdl, params)
            continue
        # Plain verb: ``VERB, A, B, C``.
        parts = [p.strip() for p in stmt.split(",")]
        verb = parts[0].upper()
        args = parts[1:]
        handler = verb_handlers.get(verb)
        if handler is not None:
            try:
                handler(args, apdl, params)
            except (ValueError, NotImplementedError, KeyError) as exc:
                # KeyError fires when a verb references an unregistered
                # element type / material / real-set — typically because
                # an upstream ``ET`` declared an element kernel we don't
                # ship a kernel for (e.g., ``SOLSH190`` solid-shell).
                # Logging-and-skipping here lets the rest of the deck
                # continue parsing; subsequent verbs that depend on the
                # missing kernel will log their own warning and skip.
                _LOG.warning("from_dat: %s, %s — skipped (%s)", verb, ",".join(args), exc)
            continue
        if verb in _SKIP_VERBS:
            _record_skipped(model, verb, stmt, _SKIP_VERBS[verb])
            continue
        _record_unknown(model, verb, stmt)
    # Stash the parser-side meta on the Model under a private
    # attribute so ``run_input_deck`` can pick it up without a
    # second parse pass.  Hidden under ``_`` so we don't commit to
    # it as a public Model attribute.
    model._deck_meta = meta  # noqa: SLF001
    return model
def parse_dat_with_meta(path: str | Path) -> tuple[Model, _DeckMeta]:
    """Parse a deck and return ``(Model, _DeckMeta)``.

    Same parser ``from_dat`` runs, but the analysis-driving metadata
    (``ANTYPE`` / ``MODOPT`` / ``NSUBST``) is returned explicitly so a
    higher-level caller can dispatch the matching solve verb without
    poking at the Model's private attributes.  Used by
    :func:`femorph_solver.interop.mapdl.run_input_deck`.
    """
    model = from_dat(path)
    # ``from_dat`` always sets ``_deck_meta`` just before returning.
    return model, model._deck_meta  # noqa: SLF001