# Source code for femorph_solver.solvers.eigen._lobpcg

"""LOBPCG (Locally Optimal Block Preconditioned Conjugate Gradient).

Iterative eigensolver — works well when an efficient preconditioner is
available (e.g. AMG).  Without one it generally loses to shift-invert
Lanczos on structural problems; the preconditioner is the whole game.

This wrapper offers three preconditioner modes:

- ``"factor"`` (default) — run a full direct solver on ``K`` and use
  the factor as a preconditioner.  Convergent but has the same memory
  footprint as the shift-invert path; useful when the problem is too
  ill-conditioned for a cheaper preconditioner but you need LOBPCG's
  convergence pattern on clustered eigenvalues.
- ``"jacobi"`` — diagonal Jacobi preconditioner (``M = diag(K)``).
  Free.  Memory-friendly: storage is ``O(n)`` floats, no factor.  On
  well-conditioned structural problems it typically converges in
  2–5× more iterations than ``"factor"`` but uses ~10× less memory
  at the factor stage.  The right default when ``K`` is too big to
  factor.
- ``"none"`` — no preconditioner.  Occasionally wins on tiny problems
  where the preconditioner setup dominates; mostly for diagnostics.

Pass an explicit :class:`scipy.sparse.linalg.LinearOperator` via the
``preconditioner=`` argument to override entirely.

References
----------
- Knyazev, A. V.  "Toward the Optimal Preconditioned Eigensolver:
  Locally Optimal Block Preconditioned Conjugate Gradient Method."
  SIAM J. Sci. Comput. 23 (2), 517-541 (2001).  [the LOBPCG
  algorithm implemented by ``scipy.sparse.linalg.lobpcg``]
- Knyazev, A. V., Argentati, M. E., Lashuk, I., & Ovtchinnikov,
  E. E.  "Block Locally Optimal Preconditioned Eigenvalue Xolvers
  (BLOPEX) in hypre and PETSc."  SIAM J. Sci. Comput. 29 (5),
  2224-2239 (2007).  [block-variant stability fixes carried
  forward into scipy's implementation]
"""

from __future__ import annotations

from typing import ClassVar

import numpy as np
import scipy.sparse as sp
import scipy.sparse.linalg as spla

from ..linear import get_linear_solver
from ._base import EigenSolverBase


def _jacobi_preconditioner(K: sp.spmatrix | sp.sparray) -> spla.LinearOperator:
    """Diagonal-Jacobi preconditioner ``M⁻¹ = 1 / diag(K)``.

    Skips zeros on the diagonal (rigid-body DOFs) so the multiplication
    is well-defined; the zeroed-out inverse means those rows pass
    through unchanged, which the outer LOBPCG iteration handles.
    """
    diag = np.asarray(K.diagonal(), dtype=np.float64)
    inv = np.zeros_like(diag)
    nz = diag != 0.0
    inv[nz] = 1.0 / diag[nz]
    n = K.shape[0]

    def _matvec(x: np.ndarray) -> np.ndarray:
        x = np.ascontiguousarray(x)
        if x.ndim == 1:
            return inv * x
        # (n, k) block — LOBPCG passes multi-column blocks during inner
        # iterations.  Broadcast over columns.
        return inv[:, None] * x

    return spla.LinearOperator((n, n), matvec=_matvec, dtype=np.float64)


class LobpcgSolver(EigenSolverBase):
    """``scipy.sparse.linalg.lobpcg`` with pluggable preconditioner."""

    name: ClassVar[str] = "lobpcg"
    kind: ClassVar[str] = "sparse-iterative"
    spd_only: ClassVar[bool] = True
    install_hint: ClassVar[str] = "bundled with SciPy — no install needed"

    @staticmethod
    def solve(
        K: sp.spmatrix | sp.sparray,
        M: sp.spmatrix | sp.sparray,
        n_modes: int,
        *,
        sigma: float = 0.0,
        linear_solver: str = "auto",
        tol: float = 1e-8,
        maxiter: int = 400,
        preconditioner: str | spla.LinearOperator = "factor",
    ) -> tuple[np.ndarray, np.ndarray]:
        """Solve the generalised eigenproblem ``K φ = ω² M φ`` with LOBPCG.

        Parameters
        ----------
        K, M : sparse matrices
            Stiffness and mass matrices.  ``K`` must be SPD
            (``spd_only`` is set on the class).
        n_modes : int
            Number of lowest eigenpairs to compute.
        sigma : float, optional
            Unused by this solver (LOBPCG needs no shift); presumably
            kept for signature parity with the shift-invert solvers —
            confirm against the other ``EigenSolverBase`` subclasses.
        linear_solver : str, optional
            Direct-solver backend used only by the ``"factor"``
            preconditioner mode; ignored otherwise.
        tol, maxiter :
            Forwarded to :func:`scipy.sparse.linalg.lobpcg`.
        preconditioner : ``"factor"`` (default), ``"jacobi"``,
            ``"none"``, or an explicit
            :class:`scipy.sparse.linalg.LinearOperator`.  ``"factor"``
            reproduces the historical behaviour (full direct factor of
            K).  ``"jacobi"`` is the memory-friendly path — diagonal
            inverse of K, no factor, ``O(n)`` storage.  ``"none"``
            disables preconditioning.  When an explicit LinearOperator
            is supplied ``linear_solver`` is ignored.

        Returns
        -------
        (w, v) : tuple of ndarray
            Eigenvalues sorted ascending and the matching eigenvector
            columns.
        """
        n = K.shape[0]
        # Deterministic random start block of exactly n_modes columns.
        # (The historical expression max(n_modes, 2*n_modes//2 + 1)
        # always evaluated to n_modes + 1 for integer n_modes, and the
        # extra column was sliced away before use — dead work, removed.)
        rng = np.random.default_rng(0)
        X = rng.standard_normal((n, n_modes))

        if isinstance(preconditioner, spla.LinearOperator):
            # Caller-supplied operator wins outright.
            M_op: spla.LinearOperator | None = preconditioner
        elif preconditioner == "factor":
            # K is SPD; use the same factor as preconditioner.  Has the
            # same peak-RSS cost as the shift-invert Lanczos path.
            Solver = get_linear_solver(linear_solver)
            P = Solver(K, assume_spd=True)

            def _apply_factor(x: np.ndarray) -> np.ndarray:
                # The factor solves one RHS at a time; apply it
                # column-by-column for LOBPCG's block vectors.
                if x.ndim == 1:
                    return P.solve(x)
                return np.column_stack(
                    [P.solve(x[:, j]) for j in range(x.shape[1])]
                )

            M_op = spla.LinearOperator(
                K.shape, matvec=_apply_factor, dtype=np.float64
            )
        elif preconditioner == "jacobi":
            M_op = _jacobi_preconditioner(K)
        elif preconditioner == "none":
            M_op = None
        else:
            raise ValueError(
                f"preconditioner must be 'factor', 'jacobi', 'none', or a "
                f"LinearOperator instance; got {preconditioner!r}"
            )

        w, v = spla.lobpcg(
            K,
            X,
            B=M,
            M=M_op,
            tol=tol,
            largest=False,  # smallest eigenvalues = lowest modes
            maxiter=maxiter,
        )
        # lobpcg does not guarantee ordered output; sort ascending.
        order = np.argsort(w)
        return w[order], v[:, order]