pub trait Lapack: Scalar {
// Required methods
fn eig(
calc_v: bool,
l: MatrixLayout,
a: &mut [Self],
) -> Result<(Vec<Self::Complex>, Vec<Self::Complex>), Error>;
fn eigh(
calc_eigenvec: bool,
layout: MatrixLayout,
uplo: UPLO,
a: &mut [Self],
) -> Result<Vec<Self::Real>, Error>;
fn eigh_generalized(
calc_eigenvec: bool,
layout: MatrixLayout,
uplo: UPLO,
a: &mut [Self],
b: &mut [Self],
) -> Result<Vec<Self::Real>, Error>;
fn householder(l: MatrixLayout, a: &mut [Self]) -> Result<Vec<Self>, Error>;
fn q(l: MatrixLayout, a: &mut [Self], tau: &[Self]) -> Result<(), Error>;
fn qr(l: MatrixLayout, a: &mut [Self]) -> Result<Vec<Self>, Error>;
fn svd(
l: MatrixLayout,
calc_u: bool,
calc_vt: bool,
a: &mut [Self],
) -> Result<SvdOwned<Self>, Error>;
fn svddc(
layout: MatrixLayout,
jobz: JobSvd,
a: &mut [Self],
) -> Result<SvdOwned<Self>, Error>;
fn least_squares(
a_layout: MatrixLayout,
a: &mut [Self],
b: &mut [Self],
) -> Result<LeastSquaresOwned<Self>, Error>;
fn least_squares_nrhs(
a_layout: MatrixLayout,
a: &mut [Self],
b_layout: MatrixLayout,
b: &mut [Self],
) -> Result<LeastSquaresOwned<Self>, Error>;
fn lu(l: MatrixLayout, a: &mut [Self]) -> Result<Vec<i32>, Error>;
fn inv(l: MatrixLayout, a: &mut [Self], p: &Vec<i32>) -> Result<(), Error>;
fn solve(
l: MatrixLayout,
t: Transpose,
a: &[Self],
p: &Vec<i32>,
b: &mut [Self],
) -> Result<(), Error>;
fn bk(
l: MatrixLayout,
uplo: UPLO,
a: &mut [Self],
) -> Result<Vec<i32>, Error>;
fn invh(
l: MatrixLayout,
uplo: UPLO,
a: &mut [Self],
ipiv: &Vec<i32>,
) -> Result<(), Error>;
fn solveh(
l: MatrixLayout,
uplo: UPLO,
a: &[Self],
ipiv: &Vec<i32>,
b: &mut [Self],
) -> Result<(), Error>;
fn cholesky(
l: MatrixLayout,
uplo: UPLO,
a: &mut [Self],
) -> Result<(), Error>;
fn inv_cholesky(
l: MatrixLayout,
uplo: UPLO,
a: &mut [Self],
) -> Result<(), Error>;
fn solve_cholesky(
l: MatrixLayout,
uplo: UPLO,
a: &[Self],
b: &mut [Self],
) -> Result<(), Error>;
fn rcond(
l: MatrixLayout,
a: &[Self],
anorm: Self::Real,
) -> Result<Self::Real, Error>;
fn opnorm(t: NormType, l: MatrixLayout, a: &[Self]) -> Self::Real;
fn solve_triangular(
al: MatrixLayout,
bl: MatrixLayout,
uplo: UPLO,
d: Diag,
a: &[Self],
b: &mut [Self],
) -> Result<(), Error>;
fn lu_tridiagonal(
a: Tridiagonal<Self>,
) -> Result<LUFactorizedTridiagonal<Self>, Error>;
fn rcond_tridiagonal(
lu: &LUFactorizedTridiagonal<Self>,
) -> Result<Self::Real, Error>;
fn solve_tridiagonal(
lu: &LUFactorizedTridiagonal<Self>,
bl: MatrixLayout,
t: Transpose,
b: &mut [Self],
) -> Result<(), Error>;
}
Trait for primitive types which implement LAPACK subroutines
Required Methods
fn eig(
calc_v: bool,
l: MatrixLayout,
a: &mut [Self],
) -> Result<(Vec<Self::Complex>, Vec<Self::Complex>), Error>
Compute the right eigenvalues and eigenvectors of a general matrix
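A minimal usage sketch (assuming the trait is consumed via the lax crate and that MatrixLayout::C { row, lda } is the row-major layout constructor; the matrix values are illustrative):

```rust
use lax::{layout::MatrixLayout, Lapack};

fn main() {
    // 2x2 row-major matrix [[2, 1], [1, 2]] stored as a flat slice.
    let mut a = vec![2.0_f64, 1.0, 1.0, 2.0];
    let layout = MatrixLayout::C { row: 2, lda: 2 };
    // `calc_v = true` requests the right eigenvectors along with the eigenvalues.
    let (eigenvalues, eigenvectors) = f64::eig(true, layout, &mut a).unwrap();
    // Both come back as complex numbers even for a real input matrix.
    println!("eigenvalues = {:?}", eigenvalues);
    println!("eigenvectors (flat) = {:?}", eigenvectors);
}
```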
fn eigh(
calc_eigenvec: bool,
layout: MatrixLayout,
uplo: UPLO,
a: &mut [Self],
) -> Result<Vec<Self::Real>, Error>
Compute the right eigenvalues and eigenvectors of a symmetric or Hermitian matrix
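A sketch along the same lines (lax import paths assumed, and assuming that, as with LAPACK's *syev family, the eigenvectors overwrite a when calc_eigenvec is true):

```rust
use lax::{layout::MatrixLayout, Lapack, UPLO};

fn main() {
    // Symmetric 2x2 matrix; with UPLO::Upper only the upper triangle is referenced.
    let mut a = vec![2.0_f64, 1.0, 1.0, 3.0];
    let layout = MatrixLayout::C { row: 2, lda: 2 };
    let eigenvalues = f64::eigh(true, layout, UPLO::Upper, &mut a).unwrap();
    // Eigenvalues of a symmetric matrix are real; `a` now holds the eigenvectors.
    println!("eigenvalues = {:?}", eigenvalues);
}
```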
fn eigh_generalized(
calc_eigenvec: bool,
layout: MatrixLayout,
uplo: UPLO,
a: &mut [Self],
b: &mut [Self],
) -> Result<Vec<Self::Real>, Error>
Compute the right eigenvalues and eigenvectors for the generalized eigenvalue problem with symmetric or Hermitian matrices $A$ and $B$
fn householder(l: MatrixLayout, a: &mut [Self]) -> Result<Vec<Self>, Error>
Execute Householder reflection as the first step of QR-decomposition
For a C-contiguous array, this will call the LQ-decomposition of the transposed matrix $ A^T = LQ^T $
fn q(l: MatrixLayout, a: &mut [Self], tau: &[Self]) -> Result<(), Error>
Reconstruct Q-matrix from Householder-reflectors
fn qr(l: MatrixLayout, a: &mut [Self]) -> Result<Vec<Self>, Error>
Execute QR-decomposition at once
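The two-step path (Lapack::householder followed by Lapack::q) and the one-shot Lapack::qr can be compared in the sketch below; it assumes lax import paths, that MatrixLayout::F { col, lda } is the column-major constructor, and that qr leaves Q in the input buffer while returning R:

```rust
use lax::{layout::MatrixLayout, Lapack};

fn main() {
    // 3x2 column-major matrix.
    let layout = MatrixLayout::F { col: 2, lda: 3 };

    // Two-step path: compute the Householder reflectors, then rebuild Q.
    let mut a = vec![1.0_f64, 2.0, 3.0, 4.0, 5.0, 6.0];
    let tau = f64::householder(layout, &mut a).unwrap();
    f64::q(layout, &mut a, &tau).unwrap(); // `a` now holds Q

    // One-shot path.
    let mut b = vec![1.0_f64, 2.0, 3.0, 4.0, 5.0, 6.0];
    let r = f64::qr(layout, &mut b).unwrap();
    println!("R (flat) = {:?}", r);
}
```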
fn svd(
l: MatrixLayout,
calc_u: bool,
calc_vt: bool,
a: &mut [Self],
) -> Result<SvdOwned<Self>, Error>
Compute singular-value decomposition (SVD)
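A sketch of a call requesting both factors (lax import paths assumed; the returned SvdOwned bundles the singular values and the optional U / V^T factors, whose exact field names are not shown here):

```rust
use lax::{layout::MatrixLayout, Lapack};

fn main() {
    // 3x2 column-major matrix with singular values 2 and 1.
    let mut a = vec![1.0_f64, 0.0, 0.0, 0.0, 2.0, 0.0];
    let layout = MatrixLayout::F { col: 2, lda: 3 };
    // `calc_u` and `calc_vt` control whether U and V^T are computed.
    let result = f64::svd(layout, true, true, &mut a).unwrap();
    let _ = result; // singular values and optional factors live in `result`
}
```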
fn svddc(
layout: MatrixLayout,
jobz: JobSvd,
a: &mut [Self],
) -> Result<SvdOwned<Self>, Error>
Compute the singular value decomposition (SVD) using a divide-and-conquer algorithm
fn least_squares(
a_layout: MatrixLayout,
a: &mut [Self],
b: &mut [Self],
) -> Result<LeastSquaresOwned<Self>, Error>
Compute a vector $x$ which minimizes the Euclidean norm $\Vert Ax - b \Vert$ for a given matrix $A$ and a vector $b$.
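A sketch for an overdetermined system (lax import paths assumed; as with LAPACK's *gelsd, the solution is assumed to overwrite the leading entries of b):

```rust
use lax::{layout::MatrixLayout, Lapack};

fn main() {
    // 3x2 column-major matrix: rows [1, 0], [1, 1], [1, 2].
    let mut a = vec![1.0_f64, 1.0, 1.0, 0.0, 1.0, 2.0];
    let mut b = vec![1.0_f64, 2.0, 4.0];
    let layout = MatrixLayout::F { col: 2, lda: 3 };
    let result = f64::least_squares(layout, &mut a, &mut b).unwrap();
    // The first n entries of `b` hold the minimizer x; `result` carries
    // rank and singular-value information.
    println!("x = {:?}", &b[..2]);
    let _ = result;
}
```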
fn least_squares_nrhs(
a_layout: MatrixLayout,
a: &mut [Self],
b_layout: MatrixLayout,
b: &mut [Self],
) -> Result<LeastSquaresOwned<Self>, Error>
Solve least-squares problems $\operatorname{argmin}_X \Vert AX - B \Vert$
fn lu(l: MatrixLayout, a: &mut [Self]) -> Result<Vec<i32>, Error>
Computes the LU decomposition of a general $m \times n$ matrix with partial pivoting with row interchanges.
For a given matrix $A$, LU decomposition is described as $A = PLU$ where:
- $L$ is a lower triangular matrix
- $U$ is an upper triangular matrix
- $P$ is a permutation matrix represented by Pivot
This is designed as a two-step computation according to the LAPACK API:
- Factorize input matrix $A$ into $L$, $U$, and $P$.
- Solve linear equation $Ax = b$ by Lapack::solve or compute inverse matrix $A^{-1}$ by Lapack::inv using the output of LU decomposition.
Output
- $U$ and $L$ are stored in a after the LU decomposition has succeeded.
- $P$ is returned as Pivot
Error
- if the matrix is singular
  - In this case, return_code in Error::LapackComputationalFailure means that the return_code-th diagonal element of $U$ is exactly zero.
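The two-step workflow might look like the sketch below (lax import paths, the MatrixLayout::C { row, lda } constructor, and Transpose::No are assumptions; values illustrative):

```rust
use lax::{layout::MatrixLayout, Lapack, Transpose};

fn main() {
    let layout = MatrixLayout::C { row: 2, lda: 2 };

    // Step 1: factorize A in place; `a` now holds L and U, `pivot` holds P.
    let mut a = vec![4.0_f64, 3.0, 6.0, 3.0];
    let pivot = f64::lu(layout, &mut a).unwrap();

    // Step 2a: solve A x = b in place using the factors.
    let mut b = vec![10.0_f64, 12.0];
    f64::solve(layout, Transpose::No, &a, &pivot, &mut b).unwrap();
    println!("x = {:?}", b);

    // Step 2b: or compute A^{-1} from the same factors.
    f64::inv(layout, &mut a, &pivot).unwrap();
    println!("A^-1 (flat) = {:?}", a);
}
```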
fn inv(l: MatrixLayout, a: &mut [Self], p: &Vec<i32>) -> Result<(), Error>
Compute inverse matrix $A^{-1}$ from the output of LU-decomposition
fn solve(
l: MatrixLayout,
t: Transpose,
a: &[Self],
p: &Vec<i32>,
b: &mut [Self],
) -> Result<(), Error>
Solve linear equations $Ax = b$ using the output of LU-decomposition
fn bk(l: MatrixLayout, uplo: UPLO, a: &mut [Self]) -> Result<Vec<i32>, Error>
Factorize a symmetric/Hermitian matrix using the Bunch-Kaufman diagonal pivoting method
For a given symmetric matrix $A$, this method factorizes $A = U^T D U$ or $A = L D L^T$ where
- $U$ (or $L$) is a product of permutation and unit upper (lower) triangular matrices
- $D$ is symmetric and block diagonal with 1-by-1 and 2-by-2 diagonal blocks.
This takes a two-step approach based on LAPACK:
- Factorize the given matrix $A$ into upper ($U$) or lower ($L$) form with the block diagonal matrix $D$
- Then solve linear equation $Ax = b$, and/or calculate inverse matrix $A^{-1}$
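A corresponding sketch for the symmetric indefinite case (lax import paths and the MatrixLayout::C { row, lda } constructor are assumptions):

```rust
use lax::{layout::MatrixLayout, Lapack, UPLO};

fn main() {
    let layout = MatrixLayout::C { row: 2, lda: 2 };

    // Step 1: Bunch-Kaufman factorization; `a` holds the factors, `ipiv` the pivots.
    let mut a = vec![0.0_f64, 1.0, 1.0, 0.0]; // symmetric but indefinite
    let ipiv = f64::bk(layout, UPLO::Upper, &mut a).unwrap();

    // Step 2a: solve A x = b with the factorization.
    let mut b = vec![2.0_f64, 3.0];
    f64::solveh(layout, UPLO::Upper, &a, &ipiv, &mut b).unwrap();
    println!("x = {:?}", b);

    // Step 2b: or invert A in place.
    f64::invh(layout, UPLO::Upper, &mut a, &ipiv).unwrap();
}
```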
fn invh(
l: MatrixLayout,
uplo: UPLO,
a: &mut [Self],
ipiv: &Vec<i32>,
) -> Result<(), Error>
Compute inverse matrix $A^{-1}$ using the result of Lapack::bk
fn solveh(
l: MatrixLayout,
uplo: UPLO,
a: &[Self],
ipiv: &Vec<i32>,
b: &mut [Self],
) -> Result<(), Error>
Solve symmetric/Hermitian linear equation $Ax = b$ using the result of Lapack::bk
fn cholesky(l: MatrixLayout, uplo: UPLO, a: &mut [Self]) -> Result<(), Error>
Solve symmetric/Hermitian positive-definite linear equations using Cholesky decomposition
For a given positive definite matrix $A$, Cholesky decomposition is described as $A = U^T U$ or $A = LL^T$ where
- $L$ is a lower triangular matrix
- $U$ is an upper triangular matrix
This is designed as a two-step computation according to the LAPACK API
- Factorize input matrix $A$ into $L$ or $U$
- Solve linear equation $Ax = b$ by Lapack::solve_cholesky or compute inverse matrix $A^{-1}$ by Lapack::inv_cholesky
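A sketch of that workflow (lax import paths and the MatrixLayout::C { row, lda } constructor are assumptions; values illustrative):

```rust
use lax::{layout::MatrixLayout, Lapack, UPLO};

fn main() {
    let layout = MatrixLayout::C { row: 2, lda: 2 };

    // Step 1: Cholesky factorization of a positive-definite matrix.
    let mut a = vec![4.0_f64, 2.0, 2.0, 3.0];
    f64::cholesky(layout, UPLO::Upper, &mut a).unwrap(); // `a` holds the factor

    // Step 2a: solve A x = b using the factor.
    let mut b = vec![6.0_f64, 5.0];
    f64::solve_cholesky(layout, UPLO::Upper, &a, &mut b).unwrap();
    println!("x = {:?}", b);

    // Step 2b: or compute A^{-1} from the same factor.
    f64::inv_cholesky(layout, UPLO::Upper, &mut a).unwrap();
}
```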
fn inv_cholesky(
l: MatrixLayout,
uplo: UPLO,
a: &mut [Self],
) -> Result<(), Error>
Compute inverse matrix $A^{-1}$ using $U$ or $L$ calculated by Lapack::cholesky
fn solve_cholesky(
l: MatrixLayout,
uplo: UPLO,
a: &[Self],
b: &mut [Self],
) -> Result<(), Error>
Solve linear equation $Ax = b$ using $U$ or $L$ calculated by Lapack::cholesky
fn rcond(
l: MatrixLayout,
a: &[Self],
anorm: Self::Real,
) -> Result<Self::Real, Error>
Estimates the reciprocal of the condition number of the matrix in 1-norm. anorm should be the 1-norm of the matrix a.
fn opnorm(t: NormType, l: MatrixLayout, a: &[Self]) -> Self::Real
Compute the norm of a matrix
For an $n \times m$ matrix $$ A = \begin{pmatrix} a_{11} & \cdots & a_{1m} \\ \vdots & \ddots & \vdots \\ a_{n1} & \cdots & a_{nm} \end{pmatrix} $$ LAPACK can compute three types of norms:
- Operator norm based on the 1-norm of its domain linear space: $$ \Vert A \Vert_1 = \sup_{\Vert x \Vert_1 = 1} \Vert Ax \Vert_1 = \max_{1 \le j \le m } \sum_{i=1}^n |a_{ij}| $$ where $\Vert x\Vert_1 = \sum_{j=1}^m |x_j|$ is the 1-norm of a vector $x$.
- Operator norm based on the $\infty$-norm of its domain linear space: $$ \Vert A \Vert_\infty = \sup_{\Vert x \Vert_\infty = 1} \Vert Ax \Vert_\infty = \max_{1 \le i \le n } \sum_{j=1}^m |a_{ij}| $$ where $\Vert x\Vert_\infty = \max_{1 \le j \le m} |x_j|$ is the $\infty$-norm of a vector $x$.
- Frobenius norm $$ \Vert A \Vert_F = \sqrt{\mathrm{Tr} \left(AA^\dagger\right)} = \sqrt{\sum_{i=1}^n \sum_{j=1}^m |a_{ij}|^2} $$
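Lapack::opnorm pairs naturally with Lapack::rcond: the 1-norm computed here is the anorm that rcond expects. The sketch below assumes lax import paths, NormType::One, and that (as with LAPACK's *gecon) rcond is given the LU-factorized matrix produced by Lapack::lu:

```rust
use lax::{layout::MatrixLayout, Lapack, NormType};

fn main() {
    let layout = MatrixLayout::C { row: 2, lda: 2 };
    let a = vec![1.0_f64, 2.0, 3.0, 4.0];

    // 1-norm of the original matrix A.
    let anorm = f64::opnorm(NormType::One, layout, &a);

    // Condition-number estimate from the LU factors of a copy of A.
    let mut lu = a.clone();
    let _pivot = f64::lu(layout, &mut lu).unwrap();
    let rcond = f64::rcond(layout, &lu, anorm).unwrap();
    println!("1-norm = {}, rcond = {}", anorm, rcond);
}
```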
fn solve_triangular(
al: MatrixLayout,
bl: MatrixLayout,
uplo: UPLO,
d: Diag,
a: &[Self],
b: &mut [Self],
) -> Result<(), Error>
Solve linear equations $Ax = b$ for a triangular matrix $A$
fn lu_tridiagonal(
a: Tridiagonal<Self>,
) -> Result<LUFactorizedTridiagonal<Self>, Error>
Computes the LU factorization of a tridiagonal $m \times n$ matrix a using partial pivoting with row interchanges.
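A sketch of the factorize-then-solve flow for the tridiagonal case, written as a helper so the construction of Tridiagonal (whose field layout is not shown here) stays out of the picture; the lax import paths, lax::error::Error, and Transpose::No are assumptions:

```rust
use lax::{layout::MatrixLayout, Lapack, Transpose, Tridiagonal};

// Factorize a tridiagonal matrix once, then solve A x = b in place for a
// single right-hand side stored column-major in `b`.
fn solve_tridiag(a: Tridiagonal<f64>, b: &mut [f64]) -> Result<(), lax::error::Error> {
    let lu = f64::lu_tridiagonal(a)?;
    let b_layout = MatrixLayout::F {
        col: 1,
        lda: b.len() as i32,
    };
    f64::solve_tridiagonal(&lu, b_layout, Transpose::No, b)?;
    Ok(())
}
```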