add: section normal and Ising MLE estimation

Daniel Kapla 2023-12-01 13:43:56 +01:00
parent 39555ec1fa
commit e5bc0bdf57
13 changed files with 764 additions and 255 deletions

View File

@ -270,3 +270,97 @@
year = {2021},
publisher = {Heldermann Verlag}
}
@article{TensorNormalMLE-ManceurDutilleul2013,
title = {Maximum likelihood estimation for the tensor normal distribution: Algorithm, minimum sample size, and empirical bias and dispersion},
author = {Ameur M. Manceur and Pierre Dutilleul},
journal = {Journal of Computational and Applied Mathematics},
volume = {239},
pages = {37-49},
year = {2013},
issn = {0377-0427},
doi = {10.1016/j.cam.2012.09.017},
url = {https://www.sciencedirect.com/science/article/pii/S0377042712003810}
}
@article{StatIdentTensorGaussian-DeesMandic2019,
title = {A Statistically Identifiable Model for Tensor-Valued Gaussian Random Variables},
author = {Bruno Scalzo Dees and Danilo P. Mandic},
journal = {ArXiv},
year = {2019},
volume = {abs/1911.02915},
url = {https://api.semanticscholar.org/CorpusID:207847615}
}
@article{Ising-Ising1924,
author = {Ising, Ernst},
title = {{Beitrag zur Theorie des Ferromagnetismus}},
journal = {Zeitschrift f\"ur Physik},
pages = {253-258},
volume = {31},
number = {1},
year = {1924},
month = {2},
issn = {0044-3328},
doi = {10.1007/BF02980577}
}
# TODO: Fix the following!!!
@book{GraphicalModels-Whittaker2009,
author = {J. Whittaker},
title = {Graphical Models in Applied Multivariate Statistics},
publisher = {Wiley},
year = {2009}
}
@article{MVB-Dai2012,
author = {Dai, Bin},
title = {Multivariate {B}ernoulli distribution models},
year = {2012}
}
@article{MVB-DaiDingWahba2013,
author = {Dai, Bin and Ding, Shilin and Wahba, Grace},
title = {Multivariate {B}ernoulli distribution},
journal = {Bernoulli},
year = {2013}
}
@article{sdr-mixedPredictors-BuraForzaniEtAl2022,
author = {Bura, Efstathia and Forzani, Liliana and Garc\'ia Arancibia, Rodrigo and Llop, Pamela and Tomassi, Diego},
title = {Sufficient reductions in regression with mixed predictors},
journal = {Journal of Machine Learning Research},
volume = {23},
year = {2022}
}
@article{sparseIsing-ChengEtAt2014,
author = {Cheng, Jie and Levina, Elizaveta and Wang, Pei and Zhu, Ji},
title = {A sparse Ising model with covariates},
journal = {Biometrics},
year = {2014},
doi = {10.1111/biom.12202}
}
@book{deeplearningbook-GoodfellowEtAl2016,
title = {Deep Learning},
author = {Ian Goodfellow and Yoshua Bengio and Aaron Courville},
publisher = {MIT Press},
url = {http://www.deeplearningbook.org},
year = {2016}
}
@misc{rmsprop-Hinton2012,
title = {Neural networks for machine learning},
author = {Hinton, G.},
year = {2012}
}
@article{self-kapla2019,
title = {Comparison of Different Word Embeddings and Neural Network Types for Sentiment Analysis of German Political Speeches},
author = {Kapla, Daniel},
year = {2019}
}

View File

@ -98,6 +98,7 @@
\newcommand{\ternary}[3]{{#2}{\ \mathrm{if}\ }{#1}{\ \mathrm{else}\ }{#3}}
% Define math macros
\renewcommand{\hat}{\widehat}
% \newcommand*{\ten}[1]{\mathcal{#1}}
\DeclareMathOperator{\sym}{sym}
\renewcommand*{\vec}{\operatorname{vec}}
@ -142,9 +143,10 @@
\newcommand{\SkewSymMat}[1]{\mathrm{Skew}^{{#1}\times {#1}}}
\newcommand{\SymPosDefMat}[1]{\mathrm{Sym}_{++}^{{#1}\times {#1}}}
\newcommand{\SymDiagZeroMat}[1]{\mathrm{Sym}_{\diag=0}^{{#1}\times {#1}}}
\newcommand{\SymBand}[2]{\mathrm{SymBand}^{{#1}\times {#1}}_{#2}}
\newcommand{\todo}[1]{{\color{red}TODO: #1}}
\newcommand{\efi}[1]{{\color{blue}Effie: #1}}
% \newcommand{\todo}[1]{}
% \newcommand{\efi}[1]{}
@ -444,9 +446,6 @@ Distributions within the quadratic exponential family include the \emph{tensor n
\section{The Generalized Multi-Linear Model}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
In model \eqref{eq:quad-density}, the relationship of $\ten{X}$ and $Y$ is absorbed in $\mat{\eta}_y$, and $\mat{t}(\ten{X})$ is the minimal sufficient statistic for the \textit{pseudo}-parameter\footnote{$\mat{\eta}_y$ is a function of the response $Y$, thus it is not a parameter in the formal statistical sense. It is considered as a parameter when using the equivalence in \eqref{eq:inverse-regression-sdr} and view $Y$ as a parameter as a device to derive the sufficient reduction from the inverse regression.} $\mat{\eta}_y = (\mat{\eta}_{1y}, \mat{\eta}_{2y})$ with
\begin{align}\label{eq:t-stat}
\mat{t}(\ten{X}) &= (\mat{t}_1(\ten{X}),\mat{t}_2(\ten{X}))=(\vec{\ten{X}}, \mat{T}_2\vech((\vec\ten{X})\t{(\vec\ten{X})})),
@ -455,7 +454,6 @@ where the $d\times p(p + 1) / 2$ dimensional matrix $\mat{T}_2$ with $p = \prod_
We can reexpress the exponent in \eqref{eq:quad-density} as
\begin{align*}
\t{\mat{\eta}_y} \mat{t}(\ten{X})
&= \langle \vec \ten{X}, \mat{\eta}_{1y} \rangle + \langle \mat{T}_2\vech(\ten{X}\circ\ten{X}), \mat{\eta}_{2y} \rangle = \langle \vec \ten{X}, \mat{\eta}_{1y} \rangle + \langle \vec(\ten{X}\circ\ten{X}), \t{(\mat{T}_2\pinv{\mat{D}_p})}\mat{\eta}_{2y} \rangle
\end{align*}
@ -465,26 +463,25 @@ where $\mat{D}_p$ is the \emph{duplication matrix} from \textcite[Ch.~11]{Matrix
\end{equation}
The exponential family in \eqref{eq:quadratic-exp-fam} is easily generalizable to any order. This, though, would result in the number of parameters becoming prohibitive to estimate. This is also the reason why we opted for the second order exponential family in our formulation.
By the equivalence in \eqref{eq:inverse-regression-sdr}, in order to find the sufficient reduction $\ten{R}(\ten{X})$ we need to infer $\mat{\eta}_{1y}$ and $\mat{\eta}_{2y}$. This is reminiscent of generalized linear modeling, which we extend to a multi-linear formulation next.
Suppose $\ten{F}_y$ is a known mapping of $y$ with zero expectation $\E_Y\ten{F}_Y = 0$. We assume the dependence of $\ten{X}$ and $Y$ is reflected only in the first parameter and let
\begin{align}
\mat{\eta}_{1y} &= \vec{\overline{\ten{\eta}}} + \mat{B}\vec\ten{F}_y, \label{eq:eta1-manifold} \\
\mat{\eta}_{2} &= \t{(\pinv{(\mat{T}_2\pinv{\mat{D}_p})})}\vec(c\,\mat{\Omega}), \label{eq:eta2-manifold}
\end{align}
where $\overline{\ten{\eta}}\in\mathbb{R}^{p_1\times\ldots\times p_r}$, $\mat{\Omega} \in \mathbb{R}^{p \times p}$ is positive definite with $p = \prod_{j = 1}^{r} p_j$, and $c\in\mathbb{R}$ is a known constant determined by the distribution to ease modeling. That is, we assume that only $\mat{\eta}_{1y}$ depends on $Y$ through $\mat{B}$. The second parameter $\mat{\eta}_2$ captures the second order interaction structure of $\ten{X}$, which we assume not to depend on the response $Y$. In order to relate individual modes of $\ten{X}$ to the response, allowing flexibility in modeling, we assume $\ten{F}_y$ takes values in $\mathbb{R}^{q_1\times ...\times q_r}$; that is, $\ten{F}_y$ is a tensor valued independent variable in the inverse regression setting. This, in turn, leads to imposing corresponding tensor structure to the regression parameter $\mat{B}$. Thus, \eqref{eq:eta1-manifold} becomes
\begin{align}
\mat{\eta}_{1y} &=
\vec\biggl(\overline{\ten{\eta}} + \ten{F}_y\mlm_{j = 1}^{r}\mat{\beta}_j\biggr), \label{eq:eta1}
\end{align}
where $\mat{B} = \bigotimes_{j = r}^{1}\mat{\beta}_j$ and the component matrices $\mat{\beta}_j\in\mathbb{R}^{p_j\times q_j}$ are of known rank for $j = 1, \ldots, r$. Given the high potential value of $p$, we further assume that
\begin{align}
\t{(\mat{T}_2\pinv{\mat{D}_p})}\mat{\eta}_{2y}= \t{(\mat{T}_2\pinv{\mat{D}_p})}\mat{\eta}_{2} &= \vec\biggl(c\bigotimes_{j = r}^{1}\mat{\Omega}_j\biggr). \label{eq:eta2}
\end{align}
where $\mat{\Omega}_j\in\mathbb{R}^{p_j\times p_j}$ are symmetric positive definite matrices for $j = 1, \ldots, r$. That is, we require $\mat{\Omega} = \bigotimes_{j = r}^{1}\mat{\Omega}_j$, which substantially reduces the number of parameters to estimate in $\mat{\Omega}$. The assumption that the $\mat{\Omega}_j$'s be positive definite is possible due to the constant $c$.
Equation \eqref{eq:eta2} is underdetermined since $\t{(\mat{T}_2\pinv{\mat{D}_p})}$ has full column rank $d < p^2$ (with a non-strict inequality if $\ten{X}$ is univariate) but $\mat{\eta}_2$ is uniquely determined given any $\mat{\Omega}$ as $\t{(\pinv{(\mat{T}_2\pinv{\mat{D}_p})})}$ has full row rank. We let $\mat{\xi} = (\vec{\overline{\ten{\eta}}}, \vec{\mat{B}}, \vech{\mat{\Omega}})$ be a $p(p + 2 q + 3) / 2$-parameter vector and $\mat{\theta} = (\vec{\overline{\ten{\eta}}}, \vec{\mat{B}}, \vech{\mat{\Omega}})$ be the constrained parameter vector, where $\mat{B}=\bigotimes_{j = r}^{1}\mat{\beta}_j$ and $\mat{\Omega} = \bigotimes_{j = r}^{1}\mat{\Omega}_j$. We also let $\Xi$ and $\Theta$ denote the unconstrained and constrained parameter spaces, with $\mat{\xi}$ and $\mat{\theta}$ varying in $\Xi$ and $\Theta$, respectively. The parameter space $\Xi$ is an open subset of $\mathbb{R}^{p(p + 2 q + 3) / 2}$ so that \eqref{eq:quadratic-exp-fam} is a proper density. Later, we will relax the assumptions for $\mat{\beta}_k$ and $\mat{\Omega}_k$ as a consequence of \cref{thm:param-manifold}, which is the subject of \cref{sec:kron-manifolds,sec:matrix-manifolds}.
% \todo{Maybe already here introduce the ``constraint'' set of $\Omega$'s allowed as $\{ \Omega\in\mathbb{R}_{++}^{p\times p} : \vec{\Omega} = \t{(\pinv{(\mat{T}_2\pinv{\mat{D}_p})}\mat{T}_2\pinv{\mat{D}_p})}\vec{\Omega} \}$}
In a classical \emph{generalized linear model} (GLM), the link function connecting the natural parameters to the expectation of the sufficient statistic $\mat{\eta}_y = \mat{g}(\E[\mat{t}(\ten{X}) \mid Y = y])$ is invertible. Such a link may not exist in our setting, but for our purpose what we call the ``inverse'' link suffices. The ``inverse'' link $\widetilde{\mat{g}}$ exists as the natural parameters fully describe the distribution. As in the non-minimal formulation \eqref{eq:quadratic-exp-fam}, we define the ``inverse'' link through its tensor valued components
\begin{align}
@ -495,7 +492,7 @@ as $\widetilde{\mat{g}}(\mat{\eta}_y) = (\vec\ten{g}_1(\mat{\eta}_y), \vec\ten{g
Under the quadratic exponential family model \eqref{eq:quadratic-exp-fam}, a sufficient reduction for the regression of $Y$ on $\ten{X}$ is given in \cref{thm:sdr}.
\begin{theorem}[SDR]\label{thm:sdr}
A sufficient reduction for the regression $Y\mid \ten{X}$ under the quadratic exponential family inverse regression model \eqref{eq:quadratic-exp-fam} with natural parameters \eqref{eq:eta1} and \eqref{eq:eta2} is given by
\begin{align}\label{eq:sdr}
\ten{R}(\ten{X})
% &= (\ten{X} - \E\ten{X})\times\{ \t{\mat{\beta}_1}, \ldots, \t{\mat{\beta}_r} \}.
@ -569,7 +566,7 @@ A straightforward and general method for parameter estimation is \emph{gradient
If $\mat{T}_2$ is the identity matrix $\mat{I}_{p(p + 1) / 2}$, then $\ten{G}_2(\mat{\eta}_y) = \ten{g}_2(\mat{\eta}_y)$.
\end{theorem}
Although the general case of any GMLM model can be fitted via gradient descent using \cref{thm:grad}, this may be very inefficient. In the case of the tensor normal distribution, an iterative cyclic updating scheme is derived in \cref{sec:tensor_normal_estimation}. This method has much faster convergence, is stable, and does not require any hyperparameters. On the other hand, the Ising model does not allow such a scheme. There we need to use a gradient based method, which is the subject of \cref{sec:ising_estimation}.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Tensor Normal}\label{sec:tensor_normal_estimation}
@ -578,7 +575,7 @@ With $\ten{X}\mid Y = y$ following a tensor normal distribution, its density, wi
\begin{displaymath}
f_{\mat{\theta}}(\ten{X}\mid Y = y) = (2\pi)^{-p / 2}\prod_{k = 1}^{r}\det(\mat{\Sigma}_k)^{-p / (2 p_k)}\exp\left( -\frac{1}{2}\left\langle\ten{X} - \ten{\mu}_y, (\ten{X} - \ten{\mu}_y)\mlm_{k = 1}^{r}\mat{\Sigma}_k^{-1} \right\rangle \right).
\end{displaymath}
We assume the distribution is non-degenerate, which means that the covariances $\mat{\Sigma}_k$ are symmetric positive definite matrices. For the sake of simplicity, we assume without loss of generality that $\ten{X}$ has marginal expectation equal to zero, $\E\ten{X} = 0$. Rewriting this in the quadratic exponential family form \eqref{eq:quadratic-exp-fam} determines the scaling constant $c = -1/2$, and gives the relation to the GMLM parameters $\overline{\ten{\eta}}, \mat{\beta}_k$ and $\mat{\Omega}_k$, for $k = 1, \ldots, r$, as
\begin{displaymath}
\ten{\mu}_y = \ten{F}_y\mlm_{k = 1}^{r}\mat{\Omega}_k^{-1}\mat{\beta}_k, \qquad
\mat{\Omega}_k = \mat{\Sigma}_k^{-1}
@ -606,7 +603,7 @@ The initial estimates are then set to
\end{displaymath}
The initial values of $\mat{\Omega}_k$ are set to the identity $\mat{\Omega}_k^{(0)} = \mat{I}_{p_k}$.
Given any estimates $\hat{\mat{\beta}}_1, \ldots, \hat{\mat{\beta}}_r, \hat{\mat{\Omega}}_1, \ldots, \hat{\mat{\Omega}}_r$, we take the gradient $\nabla_{\mat{\beta}_j}l_n$ of the log-likelihood $l_n$ in \eqref{eq:log-likelihood} for the tensor normal distribution from \cref{thm:grad} and keep all other parameters except $\mat{\beta}_j$ fixed. Then, $\nabla_{\mat{\beta}_j}l_n = 0$ has the closed form solution
\begin{equation}\label{eq:tensor_normal_beta_solution}
\t{\mat{\beta}_j} = \biggl(
\sum_{i = 1}^{n}
@ -638,22 +635,113 @@ Solving for the scaling value gives
\end{displaymath}
resulting in the estimates $\hat{\mat{\Omega}}_j = (\tilde{s}\tilde{\mat{\Sigma}}_j)^{-1}$.
Estimation is then performed by updating the estimates $\hat{\mat{\beta}}_j$ for $j = 1, \ldots, r$ via \eqref{eq:tensor_normal_beta_solution}, then recomputing the $\hat{\mat{\Omega}}_j$ estimates simultaneously while keeping the $\hat{\mat{\beta}}_j$'s fixed. This procedure is repeated until convergence. % Convergence is very fast, experiments showed that convergence usually occurs in less than $10$ iterations.
A technical detail for numerical stability is to ensure that the scaled intermediate values $\tilde{s}\tilde{\mat{\Sigma}}_j$, assumed to be symmetric and positive definite, are well conditioned. Otherwise, the procedure can become numerically unstable when attempting to compute $\hat{\mat{\Omega}}_j = (\tilde{s}\tilde{\mat{\Sigma}}_j)^{-1}$. Therefore, we estimate the condition number of $\tilde{s}\tilde{\mat{\Sigma}}_j$ before computing the inverse. If it is ill conditioned, we regularize and use $\hat{\mat{\Omega}}_j = (\tilde{s}\tilde{\mat{\Sigma}}_j + 0.2 \lambda_{1}(\tilde{s}\tilde{\mat{\Sigma}}_j)\mat{I}_{p_j})^{-1}$ instead, where $\lambda_{1}(\tilde{s}\tilde{\mat{\Sigma}}_j)$ is the largest eigenvalue. Experiments showed that this regularization is usually only required in the first few iterations.
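A minimal R sketch of this safeguard (illustrative only; the threshold `tol` and the helper name `safe_inv` are our choices), where `S` stands for the scaled intermediate matrix $\tilde{s}\tilde{\mat{\Sigma}}_j$:
# Minimal R sketch (illustrative): invert the scaled intermediate matrix
# S = s * Sigma_j, regularizing first if it is ill conditioned.
safe_inv <- function(S, tol = 1e-10) {
    if (rcond(S) < tol) {
        # shift the spectrum by 20% of the largest eigenvalue
        lambda1 <- max(eigen(S, symmetric = TRUE, only.values = TRUE)$values)
        S <- S + 0.2 * lambda1 * diag(nrow(S))
    }
    solve(S)   # the Omega_j estimate
}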
Furthermore, we may assume that the parameter space follows a more general setting as in \cref{thm:param-manifold}. In this case, updating may produce estimates outside of the parameter space. A simple and efficient remedy is to project every updated estimate onto the corresponding manifold.
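For instance, projection onto the manifold of matrices of rank at most $r$ (used, among others, for the rank $1$ $\mat{\beta}$'s in the simulations) can be performed via a truncated singular value decomposition; a minimal R sketch (the function name `proj_rank` is ours):
# Minimal R sketch (illustrative): project a matrix A onto the set of
# matrices of rank (at most) r via a truncated SVD.
proj_rank <- function(A, r = 1L) {
    s <- svd(A, nu = r, nv = r)
    s$u %*% diag(s$d[seq_len(r)], r, r) %*% t(s$v)
}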
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Ising Model}\label{sec:ising_estimation}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
The general distribution of a binary vector is modeled by the \emph{multivariate Bernoulli distribution} (see \textcite{GraphicalModels-Whittaker2009, MVB-Dai2012, MVB-DaiDingWahba2013}). The \emph{Ising model} \parencite{Ising-Ising1924} is a special case, considering only two-way interactions. Its probability mass function (PMF) for a binary random vector $X\in\{ 0, 1 \}^p$ with natural parameters $\mat{\gamma}\in\mathbb{R}^{p(p + 1) / 2}$ is given by
\begin{displaymath}
P_{\mat{\gamma}}(\mat{x}) = p_0(\mat{\gamma})\exp(\t{\vech(\mat{x}\t{\mat{x}})}\mat{\gamma}).
\end{displaymath}
The scaling factor $p_0(\mat{\gamma})\in\mathbb{R}_{+}$ ensures that $P_{\mat{\gamma}}$ is a PMF and equals the probability of the zero event, $P(X = \mat{0}) = p_0(\mat{\gamma})$. The reciprocal of $p_0$, more commonly known as the \emph{partition function}, is given by
\begin{equation}\label{eq:ising-partition-function}
p_0(\mat{\gamma})^{-1} = \sum_{\mat{x}\in\{0, 1\}^p}\exp(\t{\vech(\mat{x}\t{\mat{x}})}\mat{\gamma}).
\end{equation}
By an abuse of notation, let $\mat{\gamma}_{j l}$ address the element of $\mat{\gamma}$ corresponding to $\mat{x}_j\mat{x}_l$ in $\vech(\mat{x}\t{\mat{x}})$\footnote{Specifically, the element $\mat{\gamma}_{j l}$ of $\mat{\gamma}$ is a shorthand for $\mat{\gamma}_{\iota(j, l)}$ with $\iota(j, l) = (\min(j, l) - 1)(2 p - \min(j, l)) / 2 + \max(j, l)$ mapping the matrix row index $j$ and column index $l$ to the corresponding half vectorization index $\iota(j, l)$.}. The ``diagonal'' parameter $\mat{\gamma}_{j j}$ expresses the conditional log odds of $X_j = 1\mid X_{-j} = \mat{0}$, where the negative subscript in $X_{-j}$ denotes the $p - 1$ dimensional vector $X$ with the $j$-th element removed. The ``off diagonal'' parameters $\mat{\gamma}_{j l}$, for $j\neq l$, are equal to the conditional log odds of the simultaneous occurrence $X_j = 1, X_l = 1 \mid X_{-j, -l} = \mat{0}$. More precisely, for $j\neq l$, the conditional probabilities and the natural parameters are related by
\begin{align}
\mat{\gamma}_{j j} &= \log\frac{P_{\mat{\gamma}}(X_j = 1\mid X_{-j} = \mat{0})}{1 - P_{\mat{\gamma}}(X_j = 1\mid X_{-j} = \mat{0})}, \nonumber \\
\mat{\gamma}_{j l} &= \log\frac{1 - P_{\mat{\gamma}}(X_j = 1\mid X_{-j} = \mat{0})P_{\mat{\gamma}}(X_l = 1\mid X_{-l} = \mat{0})}{P_{\mat{\gamma}}(X_j = 1\mid X_{-j} = \mat{0})P_{\mat{\gamma}}(X_l = 1\mid X_{-l} = \mat{0})}\frac{P_{\mat{\gamma}}(X_j = 1, X_l = 1\mid X_{-j, -l} = \mat{0})}{1 - P_{\mat{\gamma}}(X_j = 1, X_l = 1\mid X_{-j, -l} = \mat{0})} \label{eq:ising-two-way-log-odds}.
\end{align}
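For concreteness, the index mapping $\iota(j, l)$ from the footnote above can be written as a small helper function; a minimal R sketch (the name `vech_index` is ours and not part of any package):
# Minimal R sketch (illustrative): map matrix indices (j, l) to the
# corresponding half-vectorization index iota(j, l) of a p x p matrix.
vech_index <- function(j, l, p) {
    m <- min(j, l)  # column of the lower triangle
    M <- max(j, l)  # row of the lower triangle
    (m - 1) * (2 * p - m) / 2 + M
}
# Example: for p = 3 the vech order is (1,1),(2,1),(3,1),(2,2),(3,2),(3,3),
# hence vech_index(2, 3, 3) == 5.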
Conditional Ising models, incorporating the information of covariates $Y$ into the model, have also been considered \parencite{sparseIsing-ChengEtAt2014, sdr-mixedPredictors-BuraForzaniEtAl2022}. The direct approach is to parameterize $\mat{\gamma} = \mat{\gamma}_y$ by the covariate $Y = y$ to model the conditional distribution $P_{\mat{\gamma}_y}(\mat{x}\mid Y = y)$.
We extend the conditional PMF by allowing the binary variables to be tensor valued; that is, for $\ten{X}\in\{ 0, 1 \}^{p_1\times\cdots\times p_r}$ we set $\mat{x} = \vec{\ten{X}}$, with dimension $p = \prod_{k = 1}^{r}p_k$. The tensor structure of $\ten{X}$ is accounted for by imposing Kronecker product constraints on the parameter vector $\mat{\gamma}_y$, in a similar fashion as for the tensor normal model. This means that we relate the PMF $P_{\mat{\gamma}_y}(\vec{\ten{X}} | Y = y)$ to the quadratic exponential family \eqref{eq:quadratic-exp-fam} with the natural parameters modeled by \eqref{eq:eta1} and \eqref{eq:eta2}. A detail to be considered is that the diagonal of $(\vec{\ten{X}})\t{(\vec{\ten{X}})}$ is equal to $\vec{\ten{X}}$. This gives the GMLM model as
\begin{align}
P_{\mat{\gamma}_y}(\ten{X} \mid Y = y)
&= p_0(\mat{\gamma}_y)\exp(\t{\vech((\vec{\ten{X}})\t{(\vec{\ten{X}})})}\mat{\gamma}_y) \nonumber \\
&= p_0(\mat{\gamma}_y)\exp\Bigl(\Bigl\langle \ten{X}, \ten{F}_y\mlm_{k = 1}^{r}\mat{\beta}_k \Bigr\rangle + \Bigl\langle\ten{X}\mlm_{k = 1}^{r}\mat{\Omega}_k, \ten{X}\Bigr\rangle\Bigr)
\end{align}
where we set $\overline{\ten{\eta}} = 0$ and $\mat{T}_2$ to the identity. This is an additional constraint on the model; the reason is that the diagonal elements of $\mat{\Omega} = \bigkron_{k = r}^{1}\mat{\Omega}_k$ take the role of $\overline{\ten{\eta}}$, although not fully. Having both the diagonal of $\mat{\Omega}$ and $\overline{\ten{\eta}}$ handle the self interaction effects might lead to interference in the optimization routine. Another approach would be to use the $\mat{T}_2$ matrix to set the corresponding diagonal elements of $\mat{\Omega}$ to zero and let $\overline{\ten{\eta}}$ handle the self interaction effects. All of these approaches, namely setting $\overline{\ten{\eta}} = 0$, keeping $\overline{\ten{\eta}}$, or using $\mat{T}_2$, are theoretically sound and compatible with \cref{thm:grad,thm:param-manifold,thm:asymptotic-normality-gmlm}, assuming all axis dimensions $p_k$ are non-degenerate, that is $p_k > 1$ for all $k = 1, \ldots, r$. Regardless, under our modeling choice we obtain the relation between the natural parameters $\mat{\gamma}_y$ of the conditional Ising model and the GMLM parameters $\mat{\beta}_k$ and $\mat{\Omega}_k$ as
\begin{equation}\label{eq:ising-natural-params}
% \t{\pinv{\mat{D}_p}}\mat{\gamma}_y
% = \vec(\mat{\Omega} + \diag(\mat{B}\vec{\ten{F}_y}))
% = \vec\Biggl(\bigkron_{k = r}^{1}\mat{\Omega}_k + \diag\biggl(\vec\Bigl(\ten{F}_y\mlm_{k = 1}^{r}\mat{\beta}_k\Bigr)\biggr)\Biggr).
\mat{\gamma}_y
= \t{\mat{D}_p}\vec(\mat{\Omega} + \diag(\mat{B}\vec{\ten{F}_y}))
= \t{\mat{D}_p}\vec\Biggl(\bigkron_{k = r}^{1}\mat{\Omega}_k + \diag\biggl(\vec\Bigl(\ten{F}_y\mlm_{k = 1}^{r}\mat{\beta}_k\Bigr)\biggr)\Biggr).
\end{equation}
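To illustrate \eqref{eq:ising-natural-params}, the following minimal R sketch (base R only; the function name is ours) assembles the symmetric $p\times p$ matrix $\mat{\Omega} + \diag(\mat{B}\vec{\ten{F}_y})$ from given lists `Omegas` and `betas` and a covariate tensor `Fy`; $\mat{\gamma}_y$ is then obtained from this matrix via the duplication matrix, which doubles the off diagonal entries in the half vectorization.
# Minimal R sketch (illustrative): the p x p parameter matrix of the
# conditional Ising model given GMLM components Omegas, betas and a
# covariate tensor Fy. gamma_y is the half vectorization of this matrix
# with the off-diagonal entries counted twice.
ising_param_matrix <- function(Fy, betas, Omegas) {
    Omega <- Reduce(kronecker, rev(Omegas))        # bigkron_k Omega_k
    B     <- Reduce(kronecker, rev(betas))         # bigkron_k beta_k
    Omega + diag(as.vector(B %*% as.vector(Fy)))   # Omega + diag(B vec(Fy))
}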
In contrast to the tensor normal GMLM, the matrices $\mat{\Omega}_k$ are only required to be symmetric. More specifically, we require $\mat{\Omega}_k$, for $k = 1, \ldots, r$, to be elements of an embedded submanifold of $\SymMat{p_k}$ (see \cref{sec:kron-manifolds,sec:matrix-manifolds}). The mode wise reduction matrices $\mat{\beta}_k$ need to be elements of an embedded submanifold of $\mathbb{R}^{p_k\times q_k}$. Common choices are listed in \cref{sec:matrix-manifolds}. \todo{check if we need to exclude zero here!}
To solve the optimization problem \eqref{eq:mle}, given a data set $(\ten{X}_i, y_i)$, for $i = 1, \ldots, n$, we use a variation of gradient descent.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Initial Values}
The first step is to get reasonable starting values. Experiments showed that a good starting value for $\mat{\beta}_k$, for $k = 1, \ldots, r$, is the tensor normal estimate from \cref{sec:tensor_normal_estimation}, interpreting $\ten{X}_i$ as continuous. For the initial values of $\mat{\Omega}_k$, a different approach is required. The uninformed initial value $\mat{\Omega}_k = \mat{0}$, corresponding to conditional log odds of $1:1$ for every component and every pairwise interaction, is not feasible, since $\mat{0}$ is a stationary point of the log-likelihood. This is directly observed from the partial gradients of the log-likelihood in \cref{thm:grad}. Instead, we use a crude heuristic which treats every mode separately and ignores any relation to the covariates. It is computationally cheap and performed better than any of the alternatives we considered. For every $k = 1, \ldots, r$, let the $k$-th mode second moment estimate be
\begin{equation}\label{eq:ising-mode-moments}
\hat{\mat{M}}_{2(k)} = \frac{p_k}{n p}\sum_{i = 1}^n (\ten{X}_i)_{(k)}\t{(\ten{X}_i)_{(k)}}
\end{equation}
which contains the $k$-th mode first moment estimate on its diagonal, $\hat{\mat{M}}_{1(k)} = \diag\hat{\mat{M}}_{2(k)}$. Here, every column of the matricized observation $(\ten{X}_i)_{(k)}$ is treated as a $p_k$ dimensional observation in its own right, giving a total of $n \prod_{j\neq k}p_j$ artificially generated observations. Let $Z_k$ denote the random variable those artificial observations are realizations of. Then, we can interpret the elements $(\hat{\mat{M}}_{1(k)})_{j}$ as estimates of the probability $P((Z_k)_j = 1)$, that is, the marginal probability of the $j$-th element of $Z_k$ being $1$. Similarly, for $l \neq j$, $(\hat{\mat{M}}_{2(k)})_{j l}$ estimates $P((Z_k)_j = 1, (Z_k)_l = 1)$, the marginal probability of the two-way interaction. % Without any regard of accuracy ...
Now, we set the diagonal elements of $\mat{\Omega}_k$ to zero. For the off diagonal elements of $\mat{\Omega}_k$, we equate the conditional probabilities $P((Z_k)_j = 1 \mid (Z_k)_{-j} = \mat{0})$ and $P((Z_k)_j = 1, (Z_k)_l = 1\mid (Z_k)_{-j, -l} = \mat{0})$ with the marginal probability estimates $(\hat{\mat{M}}_{1(k)})_{j}$ and $(\hat{\mat{M}}_{2(k)})_{j l}$, respectively. Using \eqref{eq:ising-two-way-log-odds} then gives the initial estimates $\hat{\mat{\Omega}}_k^{(0)}$, component wise for $j \neq l$,
\begin{equation}\label{eq:ising-init-Omegas}
(\hat{\mat{\Omega}}_k^{(0)})_{j j} = 0,
\qquad
(\hat{\mat{\Omega}}_k^{(0)})_{j l} = \log\frac{1 - (\hat{\mat{M}}_{1(k)})_{j}(\hat{\mat{M}}_{1(k)})_{l}}{(\hat{\mat{M}}_{1(k)})_{j}(\hat{\mat{M}}_{1(k)})_{l}}\frac{(\hat{\mat{M}}_{2(k)})_{j l}}{1 - (\hat{\mat{M}}_{2(k)})_{j l}}.
\end{equation}
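A minimal R sketch of this initialization heuristic (illustrative only; `X` is assumed to be a binary array with the sample axis last, and degenerate entries are handled separately as described in \cref{sec:ising-small-data-sets}):
# Minimal R sketch (illustrative): initial Omega_k estimate from the k-th
# mode moment estimates; X is a binary array with dims c(p_1, ..., p_r, n).
init_Omega <- function(X, k) {
    dims <- dim(X)
    r <- length(dims) - 1L
    n <- dims[r + 1L]
    p <- prod(dims[seq_len(r)])
    p_k <- dims[k]
    # mode-k matricization of the whole data array (columns are mode-k fibers)
    X_k <- matrix(aperm(X, c(k, seq_along(dims)[-k])), nrow = p_k)
    M2 <- (p_k / (n * p)) * tcrossprod(X_k)   # second moment estimate
    M1 <- diag(M2)                            # first moment estimate (diagonal)
    # two-way log odds off the diagonal, zero on the diagonal
    Omega0 <- log(((1 - outer(M1, M1)) / outer(M1, M1)) * (M2 / (1 - M2)))
    diag(Omega0) <- 0
    Omega0
}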
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Gradient Optimization}
Given initial values, the gradients provided by \cref{thm:grad} can be evaluated for the Ising model. The first step is therefore to determine the values of the inverse link components $\ten{g}_1(\mat{\gamma}_y) = \E[\ten{X} \mid Y = y]$ and $\ten{G}_2(\mat{\gamma}_y) = \ten{g}_2(\mat{\gamma}_y) = \E[\ten{X}\circ\ten{X} \mid Y = y]$. An immediate simplification is that the first moment is a part of the second moment. Its values are determined via $\vec(\E[\ten{X} \mid Y = y]) = \diag(\E[\ten{X}\circ\ten{X} \mid Y = y]_{(1, \ldots, r)})$. This means only the second moment needs to be computed, or estimated (see \cref{sec:ising-bigger-dim}) in the case of slightly bigger $p$. For the Ising model, the conditional second moment with parameters $\mat{\gamma}_y$ is given by the matricized relation
\begin{equation}\label{eq:ising-m2}
\ten{g}_2(\mat{\gamma}_y)_{(1, \ldots, r)} = \E[(\vec{\ten{X}})\t{(\vec{\ten{X}})}\mid Y = y] = p_0(\mat{\gamma}_y)\sum_{\mat{x}\in\{0, 1\}^{p}}\mat{x}\t{\mat{x}}\exp(\t{\vech(\mat{x}\t{\mat{x}})}\mat{\gamma}_y).
\end{equation}
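For small $p$, \eqref{eq:ising-m2} can be evaluated by brute force enumeration of all $2^p$ binary vectors. A minimal R sketch, parameterized by the symmetric interaction matrix $\mat{M} = \mat{\Omega} + \diag(\mat{B}\vec{\ten{F}_y})$ from \eqref{eq:ising-natural-params}, for which the exponent simplifies to $\t{\mat{x}}\mat{M}\mat{x}$:
# Minimal R sketch (illustrative): exact conditional second moment of the
# Ising model by enumerating all 2^p binary configurations; M is the
# symmetric p x p interaction matrix, i.e. the exponent is x^T M x.
ising_m2_exact <- function(M) {
    p <- nrow(M)
    grid <- as.matrix(expand.grid(rep(list(0:1), p)))  # 2^p rows
    lp <- rowSums((grid %*% M) * grid)                 # x^T M x for every row
    w <- exp(lp - max(lp))                             # stabilized weights
    w <- w / sum(w)                                    # normalization (p_0)
    crossprod(grid * sqrt(w))                          # sum_x P(x) x x^T
}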
The natural parameter $\mat{\gamma}_y$ is evaluated via \eqref{eq:ising-natural-params}, enabling us to compute the partial gradients of the log-likelihood $l_n$ \eqref{eq:log-likelihood} for the Ising model by \cref{thm:grad} for the GMLM parameters $\mat{\beta}_k$ and $\mat{\Omega}_k$, $k = 1, \ldots, r$, at the current iterate $\mat{\theta}^{(I)} = (\mat{\beta}_1^{(I)}, \ldots, \mat{\beta}_r^{(I)}, \mat{\Omega}_1^{(I)}, \ldots, \mat{\Omega}_r^{(I)})$. Using classic gradient ascent for maximizing the log-likelihood, we have to specify a learning rate $\lambda\in\mathbb{R}_{+}$, usually of the order of $10^{-3}$. The update rule is
\begin{displaymath}
\mat{\theta}^{(I + 1)} = \mat{\theta}^{(I)} + \lambda\nabla_{\mat{\theta}} l_n(\mat{\theta})\bigr|_{\mat{\theta} = \mat{\theta}^{(I)}},
\end{displaymath}
which is iterated until convergence. In practice, iteration is performed until either a maximum number of iterations is exhausted or some break condition is satisfied. A proper choice of the learning rate is needed, as a too large learning rate $\lambda$ causes instabilities, while a too small learning rate requires an enormous amount of iterations. Generically, there are two approaches to avoid the need of manually determining a proper learning rate. First, \emph{line search methods} determine an appropriate step size in every iteration. This works well if the evaluation of the objective function (the log-likelihood) is cheap, which is not the case in our setting, see \cref{sec:ising-bigger-dim}. The second approach is an \emph{adaptive learning rate}. The basic idea is to track specific statistics while optimizing and dynamically adapt the learning rate via well tested heuristics using the gathered knowledge from past iterations. We opted for an adaptive learning rate approach, which not only alleviates the need to determine an appropriate learning rate, but also accelerates learning.
Our method of choice is RMSprop, which stands for \emph{root mean squared propagation} \parencite{rmsprop-Hinton2012}. This is a well known method in machine learning for training neural networks. It is a variation of gradient descent with a per-parameter adaptive learning rate. It tracks a moving average of the element wise squared gradient $\mat{g}_2^{(I)}$, which is then used to scale (element wise) the gradient in the update rule. See \textcite{rmsprop-Hinton2012,deeplearningbook-GoodfellowEtAl2016} among others. The update rule using RMSprop for maximization\footnote{Instead of the more common minimization, therefore $+$ in the update of $\mat{\theta}$.} is
\begin{align*}
\mat{g}_2^{(I + 1)} &= \nu \mat{g}_2^{(I)} + (1 - \nu)\nabla l_n(\mat{\theta}^{(I)})\odot\nabla l_n(\mat{\theta}^{(I)}), \\
\mat{\theta}^{(I + 1)} &= \mat{\theta}^{(I)} + \frac{\lambda}{\sqrt{\mat{g}_2^{(I + 1)}} + \epsilon}\odot\nabla l_n(\mat{\theta}^{(I)}).
\end{align*}
The parameters $\nu = 0.9$, $\lambda = 10^{-3}$ and $\epsilon\approx 1.49\cdot 10^{-8}$ are fixed, and the initial value is $\mat{g}_2^{(0)} = \mat{0}$. The symbol $\odot$ denotes the Hadamard product, that is, the element wise multiplication, and the division and square root operations are performed element wise as well. According to our experiments, RMSprop requires in the range of $50$ to $1000$ iterations until convergence, while gradient ascent with a learning rate of $10^{-3}$ requires in the range of $1000$ to $10000$ iterations. \todo{check this!}
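A minimal R sketch of a single RMSprop ascent step as described above (the function name is ours; `grad` is the stacked gradient $\nabla_{\mat{\theta}} l_n(\mat{\theta}^{(I)})$ and `theta`, `g2` the current parameter and moving average vectors):
# Minimal R sketch (illustrative): one RMSprop step for maximization.
rmsprop_step <- function(theta, g2, grad,
                         nu = 0.9, lambda = 1e-3, epsilon = 1.49e-8) {
    g2 <- nu * g2 + (1 - nu) * grad^2               # moving average of squared gradient
    theta <- theta + lambda * grad / (sqrt(g2) + epsilon)
    list(theta = theta, g2 = g2)                    # both carried to the next iteration
}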
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Small Data Sets}\label{sec:ising-small-data-sets}
In the case of a finite number of observations, specifically in data sets with a small number of observations $n$, the situation where one component is always either zero or one can occur. It is also possible to observe two mutually exclusive components. Such a ``degenerate'' data set needs to be safeguarded against in practice. Working with parameters on a log scale, it yields estimates of $\pm\infty$, which are outside the parameter space and break our optimization algorithm.
The first situation where this needs to be addressed is in \eqref{eq:ising-init-Omegas}, where we set the initial estimates of $\mat{\Omega}_k$. To avoid division by zero as well as evaluating the log of zero, we adapt the mode wise moment estimates $\hat{\mat{M}}_{2(k)}$ in \eqref{eq:ising-mode-moments}. A simple method is to replace the ``degenerate'' components, that is, entries with value $0$ or $1$, with the smallest positive estimate of exactly one occurrence, $p_k / (n p)$, or of all but one occurrence, $1 - p_k / (n p)$, respectively.
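A minimal R sketch of this replacement applied to a mode wise moment estimate $\hat{\mat{M}}_{2(k)}$ (the helper name is ours):
# Minimal R sketch (illustrative): replace "degenerate" entries of the
# mode-k moment estimate M2 (values 0 or 1) by the smallest positive
# estimate p_k / (n p) and its counterpart 1 - p_k / (n p).
clamp_moments <- function(M2, p_k, n, p) {
    eps <- p_k / (n * p)
    M2[M2 <= 0] <- eps
    M2[M2 >= 1] <- 1 - eps
    M2
}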
The same problem arises in the gradient optimization. Therefore, before starting the optimization, we detect degenerate combinations and compute upper and lower bounds for the ``degenerate'' elements in the Kronecker product $\hat{\mat{\Omega}} = \bigkron_{k = r}^{1}\hat{\mat{\Omega}}_k$. After every gradient update, we check if any of the ``degenerate'' elements fall outside of their bounds. In that case, we adjust all the elements of the Kronecker component estimates $\hat{\mat{\Omega}}_k$ corresponding to the ``degenerate'' element of their Kronecker product to fall inside the precomputed bounds. While doing so, we alter every component as little as possible to ensure that the non-degenerate elements of $\hat{\mat{\Omega}}$, affected by this change due to the Kronecker structure, are changed as little as possible. The exact details are technically cumbersome while providing little insight. \todo{For more details we refer the reader to the source code provided with the supplemental material.}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Slightly Bigger Dimensions}\label{sec:ising-bigger-dim}
A big challenge with the Ising model is its high computational complexity, caused by the sum over all binary vectors of length $p = \prod_{k = 1}^{r}p_k$ in the partition function \eqref{eq:ising-partition-function}. Computing the partition function exactly requires summing over all $2^p$ binary vectors. For small dimensions, say $p\approx 10$, this is easily computed. Increasing the dimension beyond $20$ becomes extremely expensive, and for dimensions bigger than $30$ it is computationally infeasible. Trying to avoid the evaluation of the log-likelihood and only computing its partial gradients via \cref{thm:grad} does not resolve the issue. The gradients require the inverse link, in other words the second moment \eqref{eq:ising-m2}, which, even after dropping the scaling factor $p_0$, still involves a sum of $2^p$ terms. For our model, this means that optimizing the Ising model using exactly computed gradients is infeasible for moderately sized problems.
For the estimation of dimensions $p$ bigger than $20$, we use a Monte-Carlo method to estimate the second moment \eqref{eq:ising-m2}, which is required to compute the partial gradients of the log-likelihood. Specifically, we use a Gibbs sampler to sample from the conditional distribution and approximate the second moment in an importance sampling framework. This can be implemented quite efficiently, and the estimation accuracy of the second moment, evaluated experimentally, appears to be very reliable. Simultaneously, we use the same approach to estimate the partition function. This estimate, however, is comparatively inaccurate and may only be used to get a rough idea of the log-likelihood. Regardless, our method only requires the gradient for the optimization, where appropriate break conditions, not based on the likelihood, lead to a working method for MLE estimation.
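As an illustration of the sampling approach, a minimal R sketch of a plain Gibbs sampler for the Ising model with interaction matrix $\mat{M}$ as above, combined with a crude Monte-Carlo estimate of the second moment; the actual implementation uses importance sampling and warm starts and is more elaborate, so this is only a sketch:
# Minimal R sketch (illustrative): Gibbs sampling from the Ising model with
# interaction matrix M (exponent x^T M x) and a plain Monte-Carlo estimate
# of the conditional second moment E[x x^T | Y = y].
ising_m2_mc <- function(M, nr_samples = 1000L, burn_in = 100L) {
    p <- nrow(M)
    x <- rbinom(p, 1L, 0.5)                # random initial configuration
    m2 <- matrix(0, p, p)
    for (s in seq_len(nr_samples + burn_in)) {
        for (j in seq_len(p)) {
            # conditional log odds of x_j = 1 given all other components
            log_odds <- M[j, j] + sum(M[j, -j] * x[-j]) + sum(M[-j, j] * x[-j])
            x[j] <- rbinom(1L, 1L, 1 / (1 + exp(-log_odds)))
        }
        if (s > burn_in) m2 <- m2 + outer(x, x)
    }
    m2 / nr_samples
}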
\begin{figure}
\centering
\includegraphics[]{plots/sim-ising-perft-m2.pdf}
\caption{\label{fig:ising-m2-perft}Performance test for computing/estimating the second moment of the Ising model of dimension $p$ using either the exact method or a Monte-Carlo simulation.}
\end{figure}
\newpage
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Statistical Properties}
\subsection{Kronecker Product Manifolds}\label{sec:kron-manifolds}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\cref{thm:sdr} identifies the sufficient reduction for the regression of $Y$ on $\ten{X}$ in the population. Any estimation of the sufficient reduction requires application of some optimality criterion. As we operate within the framework of the exponential family, we opted for maximum likelihood estimation (MLE). For the unconstrained problem, where the parameters are simply $\mat{B}$ and $\mat{\Omega}$ in \eqref{eq:eta1-manifold}, maximizing the likelihood of $\ten{X} \mid Y$ is straightforward and yields well-defined MLEs of both parameters. Our setting, though, requires the constrained optimization of the $\ten{X} \mid Y$ likelihood subject to $\mat{B} = \bigotimes_{j = r}^{1}\mat{\beta}_j$ and $\mat{\Omega}=\bigkron_{j = r}^{1}\mat{\Omega}_j$. \Cref{thm:kron-manifolds,thm:param-manifold} provide the setting for which the MLE of the constrained parameter $\mat{\theta}$ is well-defined, which in turn leads to the derivation of its asymptotic normality.
@ -716,18 +804,49 @@ As a basis to ensure that the constraint parameter space $\Theta$ is a manifold,
\end{theorem}
\subsection{Matrix Manifolds}\label{sec:matrix-manifolds}
A powerful side effect of \cref{thm:param-manifold} is the modeling flexibility it provides. For example, we can perform low rank regression, or we may constrain two-way interactions between direct axis neighbors by using band matrices for the $\mat{\Omega}_k$'s, among others.
This flexibility derives from the many different matrix manifolds that can be used as building blocks $\manifold{B}_k$ and $\manifold{O}_k$ of the parameter space $\Theta$ in \cref{thm:param-manifold}. A list of possible choices, among others, is given in \cref{tab:matrix-manifolds}. As long as the parameters in $\Theta$ are a valid parameterization of a density (or PMF) of \eqref{eq:quadratic-exp-fam} subject to \eqref{eq:eta1-manifold} and \eqref{eq:eta2-manifold}, one may choose any of the manifolds listed in \cref{tab:matrix-manifolds}, which are either cones or spherical. We also included an example which is neither a sphere nor a cone. Such manifolds may also be valid building blocks, but require more work as they do not directly lead to a parameter manifold by \cref{thm:param-manifold}. In case one can show that the resulting parameter space $\Theta$ is an embedded manifold, the asymptotic theory of \cref{sec:asymtotics} is applicable.
\begin{table}
\centering
\begin{tabular}{l | l | c c c}
Symbol & Description & C & S & Dimension\\ \hline
$\mathbb{R}^{p\times q}$ & All matrices of dimension $p\times q$ &
\checkmark & \xmark & $p q$ \\ \hline
$\StiefelNonCompact{p}{q}$ & Full rank $p\times q$ matrices &
\checkmark & \xmark & $p q$ \\ \hline
$\mathrm{St}(p, q)$ & \emph{Stiefel Manifold}, $\{ \mat{U}\in\mathbb{R}^{p\times q} : \t{\mat{U}}\mat{U} = \mat{I}_q \}$ for $q\leq p$ &
\xmark & \checkmark & $p q - q (q + 1) / 2$ \\ \hline
$\mathcal{S}^{p - 1}$ & Unit sphere in $\mathbb{R}^p$, special case $\Stiefel{p}{1}$ &
\xmark & \checkmark & $p - 1$ \\ \hline
$O(p)$ & Orthogonal Group, special case $\Stiefel{p}{p}$ &
\xmark & \checkmark & $p (p - 1) / 2$ \\ \hline
$SO(p)$ & Special Orthogonal Group $\{ \mat{U}\in O(p) : \det{\mat{U}} = 1 \}$ &
\xmark & \checkmark & $p (p - 1) / 2$ \\ \hline
& Matrices of known rank $r > 0$, generalizes $\StiefelNonCompact{p}{q}$ &
\checkmark & \xmark & $r(p + q - r)$ \\ \hline
$\SymMat{p}$ & Symmetric matrices &
\checkmark & \xmark & $\frac{1}{2}p(p+1)$ \\ \hline
$SPD(p)$ & Symmetric Positive Definite matrices &
\checkmark & \xmark & $\frac{1}{2}p(p+1)$ \\ \hline
& Scaled Identity $\{ a\mat{I}_p : a\in\mathbb{R}_{+} \}$ &
\checkmark & \xmark & $1$ \\ \hline
& Symmetric $r$-band matrices (includes diagonal) &
\checkmark & \xmark & $(2 p - r) (r + 1) / 2$ \\
& $\qquad\{ \mat{A}\in\mathbb{R}^{p\times p} : \mat{A} = \t{\mat{A}}\land \mat{A}_{i j} = 0\ \forall |i - j| > r \}$ \\ \hline
& Autocorrelation type matrices $\{ \mat{A}\in\mathbb{R}^{p\times p} : \mat{A}_{i j} = \rho^{|i - j|}, \rho\in(0, 1) \}$ &
\xmark & \xmark & $1$ \\ \hline
\end{tabular}
\caption{\label{tab:matrix-manifolds}Examples of embedded matrix manifolds. ``Symbol'': a (more or less) common notation for the matrix manifold, if one exists. ``C'' stands for \emph{cone}, meaning the manifold is scale invariant. ``S'' means \emph{spherical}, that is, of constant Frobenius norm.}
\end{table}
\begin{remark}
The \emph{Grassmann Manifold} of $q$ dimensional subspaces in $\mathbb{R}^p$ is not listed in \cref{tab:matrix-manifolds} since it is not embedded in $\mathbb{R}^{p \times q}$.
\end{remark}
\subsection{Asymptotics}\label{sec:asymtotics}
Let $Z$ be a random variable distributed according to a parameterized probability distribution with density $f_{\mat{\theta_0}}\in\{ f_{\mat{\theta}} : \mat{\theta}\in\Theta \}$, where $\Theta$ is a subset of a Euclidean space. We want to estimate the parameter ${\mat{\theta}}_0$ using $n$ i.i.d. (independent and identically distributed) copies of $Z$. We assume a known, real-valued and measurable function $z\mapsto m_{\mat{\theta}}(z)$ for every $\mat{\theta}\in\Theta$, and that ${\mat{\theta}}_0$ uniquely maximizes the map $\mat{\theta}\mapsto M(\mat{\theta}) = \E m_{\mat{\theta}}(Z)$. For the estimation we maximize the empirical version
\begin{align}\label{eq:Mn}
@ -828,6 +947,12 @@ We compair our method with a few other methods for the tensor normal and the Isi
\end{figure}
\begin{figure}
\centering
\includegraphics[width = \textwidth]{plots/sim-tsir.pdf}
\caption{\label{fig:sim-tsir}Simulation to investigate the unexpected failure of TSIR in simulation 1c.}
\end{figure}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Ising Model}
@ -1020,7 +1145,7 @@ as well as for any tensor $\ten{A}$ of even order $2 r$ and matching square matr
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Proofs}\label{app:B}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{proof}[Proof of \cref{thm:sdr}]\label{proof:sdr}
A direct implication of \textcite[Theorem~1]{sdr-BuraDuarteForzani2016} is that, under the exponential family \eqref{eq:quadratic-exp-fam} with natural statistic \eqref{eq:t-stat},

View File

@ -54,7 +54,8 @@
\begin{tikzpicture}[>=latex]
\begin{axis}[
name=sim-2a,
xticklabel=\empty
]
\addplot[color = gmlm, line width=1pt] table[x = sample.size, y = dist.subspace.gmlm] {aggr-1a-normal.csv};
\addplot[color = pca] table[x = sample.size, y = dist.subspace.pca] {aggr-2a-ising.csv};
@ -67,6 +68,56 @@
\node[anchor = base west, yshift = 0.3em] at (sim-2a.north west) {
a: small
};
\begin{axis}[
name=sim-2b,
anchor=north west, at={(sim-2a.right of north east)}, xshift=0.1cm,
xticklabel=\empty,
ylabel near ticks, yticklabel pos=right
]
\addplot[color = gmlm, line width=1pt] table[x = sample.size, y = dist.subspace.gmlm] {aggr-1a-normal.csv};
\addplot[color = pca] table[x = sample.size, y = dist.subspace.pca] {aggr-2b-ising.csv};
\addplot[color = hopca] table[x = sample.size, y = dist.subspace.hopca] {aggr-2b-ising.csv};
\addplot[color = tsir] table[x = sample.size, y = dist.subspace.tsir] {aggr-2b-ising.csv};
\addplot[color = mgcca] table[x = sample.size, y = dist.subspace.mgcca] {aggr-2b-ising.csv};
\addplot[color = lpca] table[x = sample.size, y = dist.subspace.lpca] {aggr-2b-ising.csv};
\addplot[color = clpca] table[x = sample.size, y = dist.subspace.clpca] {aggr-2b-ising.csv};
\end{axis}
\node[anchor = base west, yshift = 0.3em] at (sim-2b.north west) {
b:
};
\begin{axis}[
name=sim-2c,
anchor=north west, at={(sim-2a.below south west)}, yshift=-.8em,
xticklabel=\empty
]
\addplot[color = gmlm, line width=1pt] table[x = sample.size, y = dist.subspace.gmlm] {aggr-1a-normal.csv};
\addplot[color = pca] table[x = sample.size, y = dist.subspace.pca] {aggr-2c-ising.csv};
\addplot[color = hopca] table[x = sample.size, y = dist.subspace.hopca] {aggr-2c-ising.csv};
\addplot[color = tsir] table[x = sample.size, y = dist.subspace.tsir] {aggr-2c-ising.csv};
\addplot[color = mgcca] table[x = sample.size, y = dist.subspace.mgcca] {aggr-2c-ising.csv};
\addplot[color = lpca] table[x = sample.size, y = dist.subspace.lpca] {aggr-2c-ising.csv};
\addplot[color = clpca] table[x = sample.size, y = dist.subspace.clpca] {aggr-2c-ising.csv};
\end{axis}
\node[anchor = base west, yshift = 0.3em] at (sim-2c.north west) {
c:
};
\begin{axis}[
name=sim-2d,
anchor=north west, at={(sim-2c.right of north east)}, xshift=0.1cm,
ylabel near ticks, yticklabel pos=right
]
\addplot[color = gmlm, line width=1pt] table[x = sample.size, y = dist.subspace.gmlm] {aggr-1a-normal.csv};
\addplot[color = pca] table[x = sample.size, y = dist.subspace.pca] {aggr-2d-ising.csv};
\addplot[color = hopca] table[x = sample.size, y = dist.subspace.hopca] {aggr-2d-ising.csv};
\addplot[color = tsir] table[x = sample.size, y = dist.subspace.tsir] {aggr-2d-ising.csv};
\addplot[color = mgcca] table[x = sample.size, y = dist.subspace.mgcca] {aggr-2d-ising.csv};
\addplot[color = lpca] table[x = sample.size, y = dist.subspace.lpca] {aggr-2d-ising.csv};
\addplot[color = clpca] table[x = sample.size, y = dist.subspace.clpca] {aggr-2d-ising.csv};
\end{axis}
\node[anchor = base west, yshift = 0.3em] at (sim-2d.north west) {
d:
};
% \begin{axis}[
% name=sim-1b,
@ -145,7 +196,7 @@
\node[anchor = south, rotate = 90] at (current bounding box.west) {Subspace Distance $d(\boldsymbol{B}, \hat{\boldsymbol{B}})$};
\node[anchor = south, rotate = 270] at (current bounding box.east) {\phantom{Subspace Distance $d(\boldsymbol{B}, \hat{\boldsymbol{B}})$}};
\node[anchor = south, font=\large] at (current bounding box.north) {Ising Simulation};
\end{tikzpicture}
\end{document}

View File

@ -29,6 +29,7 @@
\definecolor{clpca}{RGB}{213,94,0}
\pgfplotsset{
compat=newest,
every axis/.style={
xtick={100,200,300,500,750},
ymin=-0.05, ymax=1.05,
@ -77,7 +78,7 @@
\addplot[color = hopca] table[x = sample.size, y = dist.subspace.hopca] {aggr-1b-normal.csv};
\addplot[color = tsir] table[x = sample.size, y = dist.subspace.tsir] {aggr-1b-normal.csv};
\addplot[color = mgcca] table[x = sample.size, y = dist.subspace.mgcca] {aggr-1b-normal.csv};
\addplot[color = gmlm, line width=1pt] table[x = sample.size, y = dist.subspace.gmlm] {aggr-1b-normal.csv};
\end{axis}
\node[anchor = base west, yshift = 0.3em] at (sim-1b.north west) {
b: cubic dependence on $y$
@ -92,7 +93,7 @@
\addplot[color = hopca] table[x = sample.size, y = dist.subspace.hopca] {aggr-1c-normal.csv};
\addplot[color = tsir] table[x = sample.size, y = dist.subspace.tsir] {aggr-1c-normal.csv};
\addplot[color = mgcca] table[x = sample.size, y = dist.subspace.mgcca] {aggr-1c-normal.csv};
\addplot[color = gmlm, line width=1pt] table[x = sample.size, y = dist.subspace.gmlm] {aggr-1c-normal.csv};
\end{axis}
\node[anchor = base west, yshift = 0.3em] at (sim-1c.north west) {
c: rank $1$ $\boldsymbol{\beta}$'s

sim/sim-tsir.R (new file, 260 lines)
View File

@ -0,0 +1,260 @@
library(tensorPredictors)
# Data set sample size in every simulation
sample.size <- 500L
# Nr. of per simulation replications
reps <- 100L
# number of observation/response axes (order of the tensors)
orders <- c(2L, 3L, 4L)
# auto correlation coefficient for the mode-wise auto scatter matrices `Omegas`
rhos <- seq(0, 0.8, by = 0.1)
setwd("~/Work/tensorPredictors/sim/")
base.name <- format(Sys.time(), "failure_of_tsir-%Y%m%dT%H%M")
# data sampling routine
sample.data <- function(sample.size, betas, Omegas) {
dimF <- mapply(ncol, betas)
# response is a standard normal variable
y <- rnorm(sample.size)
y.pow <- Reduce(function(a, b) outer(a, b, `+`), Map(seq, 0L, len = dimF))
F <- t(outer(y, as.vector(y.pow), `^`))
dim(F) <- c(dimF, sample.size)
# sample predictors from tensor normal X | Y = y (last axis is sample axis)
sample.axis <- length(betas) + 1L
Sigmas <- Map(solve, Omegas)
mu_y <- mlm(F, Map(`%*%`, Sigmas, betas))
X <- mu_y + rtensornorm(sample.size, 0, Sigmas, sample.axis)
list(X = X, F = F, y = y, sample.axis = sample.axis)
}
# Create a CSV logger to write simulation results to
log.file <- paste(base.name, "csv", sep = ".")
logger <- CSV.logger(
file.name = log.file,
header = c("rho", "order", "sample.size", "rep", "beta.version", outer(
"dist.subspace", c("gmlm", "tsir", "sir"),
paste, sep = "."
))
)
for (order in orders) {
# setup problem dimensions given the tensor order
dimX <- rep(2L, order)
dimF <- rep(2L, order)
# as well as the projections onto rank 1 matrices
proj.betas <- Map(.projRank, rep(1L, order))
for (rho in rhos) {
# Scatter matrices (Omegas) given as autoregression structure `AR(rho)`
Omegas <- Map(function(p) rho^abs(outer(1:p, 1:p, `-`)), dimX)
# Version 1 betas (reductions)
beta.version <- 1L
betas <- Map(function(nr, nc) {
`[<-`(matrix(0, nr, nc), 1, , 1)
}, dimX, dimF)
B.true <- Reduce(kronecker, rev(betas))
# `(B.min %*% vec(X)) | Y = y` proportional to `(1 + y)^order`
B.min.true <- B.true[, 1L, drop = FALSE]
# Version 1: repeated simulations
for (rep in seq_len(reps)) {
# Sample training data
c(X, F, y, sample.axis) %<-% sample.data(sample.size, betas, Omegas)
# Fit models to provided data
fit.gmlm <- gmlm_tensor_normal(X, F, sample.axis = sample.axis, proj.betas = proj.betas)
fit.tsir <- TSIR(X, y, d = rep(1L, order), sample.axis = sample.axis)
fit.sir <- SIR(mat(X, sample.axis), y, d = 1L)
# Extract minimal reduction matrices from fitted models
B.gmlm <- qr.Q(qr(Reduce(kronecker, rev(fit.gmlm$betas))))[, 1L, drop = FALSE]
B.tsir <- Reduce(kronecker, rev(fit.tsir))
B.sir <- fit.sir
# Compute estimation to true minimal `B` distance
dist.subspace.gmlm <- dist.subspace(B.min.true, B.gmlm, normalize = TRUE)
dist.subspace.tsir <- dist.subspace(B.min.true, B.tsir, normalize = TRUE)
dist.subspace.sir <- dist.subspace(B.min.true, B.sir, normalize = TRUE)
# Write to simulation log file (CSV file)
logger()
# and print progress
cat(sprintf("order %d, rho %.2f, version %d: rep: %d/%d\n",
order, rho, beta.version, rep, reps))
}
# Version 2 betas (reductions)
beta.version <- 2L
betas <- Map(function(nr, nc) {
tcrossprod((-1)^seq_len(nr), (-1)^seq_len(nc))
}, dimX, dimF)
B.true <- Reduce(kronecker, rev(betas))
# `(B.min %*% vec(X)) | Y = y` proportional to `(1 - y)^order`
B.min.true <- B.true[, 1L, drop = FALSE]
# Version 2: repeated simulations (identical to Version 1)
for (rep in seq_len(reps)) {
# Sample training data
c(X, F, y, sample.axis) %<-% sample.data(sample.size, betas, Omegas)
# Fit models to provided data
fit.gmlm <- gmlm_tensor_normal(X, F, sample.axis = sample.axis, proj.betas = proj.betas)
fit.tsir <- TSIR(X, y, d = rep(1L, order), sample.axis = sample.axis)
fit.sir <- SIR(mat(X, sample.axis), y, d = 1L)
# Extract minimal reduction matrices from fitted models
B.gmlm <- qr.Q(qr(Reduce(kronecker, rev(fit.gmlm$betas))))[, 1L, drop = FALSE]
B.tsir <- Reduce(kronecker, rev(fit.tsir))
B.sir <- fit.sir
# Compute estimation to true minimal `B` distance
dist.subspace.gmlm <- dist.subspace(B.min.true, B.gmlm, normalize = TRUE)
dist.subspace.tsir <- dist.subspace(B.min.true, B.tsir, normalize = TRUE)
dist.subspace.sir <- dist.subspace(B.min.true, B.sir, normalize = TRUE)
# Write to simulation log file (CSV file)
logger()
# and print progress
cat(sprintf("order %d, rho %.2f, version %d: rep: %d/%d\n",
order, rho, beta.version, rep, reps))
}
}
}
### read simulation results and generate plots
if (!interactive()) { pdf(file = paste(base.name, "pdf", sep = ".")) }
# Read simulation results back from log file
sim <- read.csv(log.file)
# Source the utility functions used in most simulations (extracted for convenience)
source("./sim_utils.R")
# aggregate results
grouping <- sim[c("rho", "order", "beta.version")]
sim.dist <- sim[startsWith(names(sim), "dist")]
aggr <- aggregate(sim.dist, grouping, mean)
# reset (possibly lost) config
orders <- sort(unique(aggr$order))
rhos <- sort(unique(aggr$rho))
# plot layout: one panel per (order, beta version) plus a shared legend row
layout(rbind(
matrix(seq_len(2 * length(orders)), ncol = 2),
2 * length(orders) + 1
), heights = c(rep(6L, length(orders)), 1L))
for (group in split(aggr, aggr[c("order", "beta.version")])) {
order <- group$order[[1]]
beta.version <- group$beta.version[[1]]
rho <- group$rho
plot(range(rho), 0:1, main = sprintf("V%d, order %d", beta.version, order),
type = "n", bty = "n", axes = FALSE, xlab = expression(rho), ylab = "Subspace Distance")
axis(1, at = rho)
axis(2, at = seq(0, 1, by = 0.2))
with(group, {
lines(rho, dist.subspace.gmlm, col = col.methods["gmlm"], lwd = 3, type = "b", pch = 16)
lines(rho, dist.subspace.tsir, col = col.methods["tsir"], lwd = 2, type = "b", pch = 16)
lines(rho, dist.subspace.sir, col = col.methods["sir"], lwd = 2, type = "b", pch = 16)
})
if (order == 3L && beta.version == 2L) {
abline(v = 0.5, lty = "dotted", col = "black")
abline(h = group$dist.subspace.tsir[which(rho == 0.5)],
lty = "dotted", col = "black")
}
}
methods <- c("GMLM", "TSIR", "SIR")
restor.par <- par(
fig = c(0, 1, 0, 1),
oma = c(0, 0, 0, 0),
mar = c(1, 0, 0, 0),
new = TRUE
)
plot(0, type = "n", bty = "n", axes = FALSE, xlab = "", ylab = "")
legend("bottom", col = col.methods[tolower(methods)], legend = methods,
horiz = TRUE, lty = 1, bty = "n", lwd = c(3, 2, 2), pch = 16)
par(restor.par)
# plot layout: one panel per (rho, beta version) plus a shared legend row
layout(rbind(
matrix(seq_len(2 * 3), ncol = 2),
2 * 3 + 1
), heights = c(rep(6L, 3), 1L))
for (group in split(aggr, aggr[c("rho", "beta.version")])) {
rho <- group$rho[[1]]
beta.version <- group$beta.version[[1]]
if (!(rho %in% c(0, .5, .8))) { next }
order <- group$order
plot(range(order), 0:1, main = sprintf("V%d, rho %.1f", beta.version, rho),
type = "n", bty = "n", axes = FALSE, xlab = expression(order), ylab = "Subspace Distance")
axis(1, at = order)
axis(2, at = seq(0, 1, by = 0.2))
with(group, {
lines(order, dist.subspace.gmlm, col = col.methods["gmlm"], lwd = 3, type = "b", pch = 16)
lines(order, dist.subspace.tsir, col = col.methods["tsir"], lwd = 2, type = "b", pch = 16)
lines(order, dist.subspace.sir, col = col.methods["sir"], lwd = 2, type = "b", pch = 16)
})
if (rho == 0.5 && beta.version == 2L) {
abline(v = 3, lty = "dotted", col = "black")
abline(h = group$dist.subspace.tsir[which(order == 3L)],
lty = "dotted", col = "black")
}
}
methods <- c("GMLM", "TSIR", "SIR")
restor.par <- par(
fig = c(0, 1, 0, 1),
oma = c(0, 0, 0, 0),
mar = c(1, 0, 0, 0),
new = TRUE
)
plot(0, type = "n", bty = "n", axes = FALSE, xlab = "", ylab = "")
legend("bottom", col = col.methods[tolower(methods)], legend = methods,
horiz = TRUE, lty = 1, bty = "n", lwd = c(3, 2, 2), pch = 16)
par(restor.par)
### Version 1
# 2'nd order
# 1 + 2 y + y^2
# (1 + y)^2
# 3'rd order
# 1 + 3 y + 3 y^2 + y^3
# (1 + y)^3
# 4'th order
# 1 + 4 y + 6 y^2 + 4 y^3 + y^4
# (1 + y)^4
### Version 2
# 2'nd order
# 1 - 2 y + y^2
# (1 - y)^2 = 1 - 2 y + y^2
# 3'rd order
# 1 - 3 y + 3 y^2 - y^3
# -(y - 1)^3
# 4'th order
# 1 - 4 y + 6 y^2 - 4 y^3 + y^4
# (y - 1)^4
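### (quick numeric sanity check of the expansions above; illustration only)
# local({
#     y <- 1.7  # arbitrary test value
#     stopifnot(
#         all.equal((1 + y)^3, sum(c(1, 3, 3, 1) * y^(0:3))),
#         all.equal((1 - y)^4, sum(c(1, -4, 6, -4, 1) * y^(0:4)))
#     )
# })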

View File

@ -25,15 +25,22 @@ dimX <- c(2, 3, 5) # predictor `X` dimension
dimF <- rep(2, length(dimX)) # "function" `F(y)` of response `y` dimension
# setup true model parameters (rank 1 betas)
betas <- Map(function(nr, nc) {
tcrossprod((-1)^seq_len(nr), (-1)^seq_len(nc))
}, dimX, dimF)
betas <- list(
`[<-`(matrix(0, dimX[1], dimF[1]), 1, , c(1, 1)),
`[<-`(matrix(0, dimX[2], dimF[2]), 2, , c(1, 0)),
`[<-`(matrix(0, dimX[3], dimF[3]), 3, , c(1, 2))
)
# invisible(Map(print.table, betas, zero.print = "."))
Omegas <- Map(function(pj) 0.5^abs(outer(1:pj, 1:pj, `-`)), dimX) # AR(0.5)
eta1 <- 0
# True (full) reduction matrix to compare against
B.true <- as.matrix(as.numeric(15 == (1:30)))
# define projections onto rank 1 matrices for betas
proj.betas <- Map(.projRank, rep(1L, length(betas)))
# data sampling routine
sample.data <- function(sample.size, eta1, betas, Omegas) {
# response is a standard normal variable
@ -63,8 +70,45 @@ logger <- CSV.logger(
))
)
# compute true (full) model parameters to compare estimates against
B.true <- Reduce(`%x%`, rev(betas))[, 1L, drop = FALSE]
# # true reduction (1D, select first component)
# B.true <- as.matrix(as.numeric(1 == (1:30)))
# B.true <- Reduce(`%x%`, rev(betas)) #[, 1L, drop = FALSE]
# print.table(B.true, zero.print = ".")
# print.table(Reduce(kronecker, rev(betas)), zero.print = ".")
# mu1 <- function(y) 1 + 3 * y + y^2
# matX <- mat(X, 1:3)
# x1 <- matX[1, ]
# x2 <- matX[2, ]
# plot(mu1(y), x1, col = cut(y, 10L))
# plot(y, x1, col = cut(y, 10L))
# plot(mu1(y), x2, col = cut(y, 10L))
# plot(y, matX[1, ], col = cut(y, 10L))
# plot(y, matX[10, ], col = cut(y, 10L))
# c(X, F, y, sample.axis) %<-% sample.data(sample.size, eta1, betas, Omegas)
# colnames(B.true) <- c("1", "y", "y", "y^2", "y", "y^2", "y^2", "y^3")
# mu_y <- function(y) {
# B.min <- cbind(
# B.true[, 1],
# rowSums(B.true[, c(2:3, 5)]),
# rowSums(B.true[, c(4, 6:7)]),
# B.true[, 8]
# )
# B.min %*% rbind(1, y, y^2, y^3)
# }
# plot((mat(X, 4) %*% B.min)[, 2], y)
### for each sample size
for (sample.size in sample.sizes) {

View File

@ -6,6 +6,7 @@ export("%x_2%")
export("%x_3%")
export("%x_4%")
export(.projBand)
export(.projMaskedMean)
export(.projPSD)
export(.projRank)
export(.projSymBand)
@ -15,6 +16,7 @@ export(Dup)
export(Dup.pinv)
export(GMLM)
export(GMLM.default)
export(HOCCA)
export(HOPCA)
export(HOPIR)
export(HOSVD)
@ -31,6 +33,7 @@ export(POI)
export(RMReg)
export(RMap)
export(S)
export(SIR)
export(TSIR)
export(approx.kron)
export(approx.kronecker)

View File

@ -3,8 +3,8 @@
#' @todo TODO: Add beta and Omega projections
#'
#' @export
gmlm_ising <- function(X, F, sample.axis = length(dim(X)),
# proj.betas = ..., proj.Omegas = ..., # TODO: this
gmlm_ising <- function(X, F, y = NULL, sample.axis = length(dim(X)),
proj.betas = NULL, proj.Omegas = NULL,
max.iter = 1000L,
eps = sqrt(.Machine$double.eps),
step.size = 1e-3,
@ -12,15 +12,23 @@ gmlm_ising <- function(X, F, sample.axis = length(dim(X)),
patience = 3L,
nr.slices = 20L, # only for univariate `F(y) = y`
slice.method = c("cut", "ecdf", "none"), # only for univariate `F(y) = y` and `y` is a factor or integer
use_MC = 20L <= prod(dim(X)[-sample.axis]),
nr_threads = 8L, # ignored if `use_MC` is `FALSE`
logger = function(...) { }
) {
# Get problem dimensions
dimX <- dim(X)[-sample.axis]
# treat scalar `F` as a tensor
if (is.null(dim(F))) {
if (is.function(F)) {
# compute `F(y)`, replace function `F` with its tensor result
F <- F(y)
dimF <- dim(F)[-sample.axis]
} else if (is.null(dim(F))) {
# treat scalar `F` as a tensor
dimF <- rep(1L, length(dimX))
dim(F) <- ifelse(seq_along(dim(X)) == sample.axis, dim(X)[sample.axis], 1L)
} else {
# `F` already provided as tensor
dimF <- dim(F)[-sample.axis]
}
sample.size <- dim(X)[sample.axis]
@ -34,39 +42,71 @@ gmlm_ising <- function(X, F, sample.axis = length(dim(X)),
}
modes <- seq_along(dimX)
# Special case for univariate response `vec F(y) = y`
# Due to high computational costs we use slicing
slice.method <- match.arg(slice.method)
slices.ind <- if ((slice.method != "none") && (length(F) == prod(dim(F)))) {
y <- as.vector(F)
if (!(is.factor(y) || is.integer(y))) {
slice.method <- match.arg(slice.method)
if (slice.method == "ecdf") {
y <- cut(ecdf(y)(y), nr.slices)
} else {
y <- cut(y, nr.slices)
}
}
split(seq_len(sample.size), y, drop = TRUE)
} else {
seq_len(sample.size)
# Ensure the Omega and beta projection arguments are lists of per-mode
# projection functions (or NULL entries)
if (!is.list(proj.Omegas)) {
proj.Omegas <- vector("list", length(modes))
}
if (!is.list(proj.betas)) {
proj.betas <- vector("list", length(modes))
}
# Special case for univariate response `y` or univariate `F = F(y)`
# Due to high computational costs we use slicing
slice.method <- match.arg(slice.method)
if (slice.method == "none") {
# slicing "turned off"
slices.ind <- seq_len(sample.size)
} else {
# get the slicing variable, either from `y` (if provided) or from `F` if it is univariate
y <- if (length(y) == sample.size) {
as.vector(y)
} else if (length(F) == sample.size) {
as.vector(F)
} else {
NULL
}
if (is.null(y)) {
# no univariate slicing variable found
slices.ind <- seq_len(sample.size)
} else {
# compute slice indices depending on type
if (!(is.factor(y) || is.integer(y))) {
if (slice.method == "ecdf") {
y <- cut(ecdf(y)(y), nr.slices)
} else {
y <- cut(y, nr.slices)
}
}
slices.ind <- split(seq_len(sample.size), y, drop = TRUE)
}
}
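# (illustration of the slicing: for a numeric `y` of length `sample.size` and
#  `nr.slices = 20`, `slices.ind` is a list of index vectors, one per non-empty
#  interval of `cut(y, 20)`; factor or integer responses are used as-is)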
# initialize betas with a tensor normal estimate (ignoring that the data are binary)
fit_normal <- gmlm_tensor_normal(X, F, sample.axis = length(dim(X)))
# (do NOT use the Omega projections; the tensor normal `Omegas` do not match
# the interpretation of the Ising model `Omegas`)
fit_normal <- gmlm_tensor_normal(X, F, sample.axis = length(dim(X)),
proj.betas = proj.betas)
betas <- fit_normal$betas
Omegas <- Omegas.init <- Map(function(mode) {
n <- prod(dim(X)[-mode])
prob2 <- mcrossprod(X, mode = mode) / n
prob2[prob2 == 0] <- 1 / n
prob2[prob2 == 1] <- (n - 1) / n
prob1 <- diag(prob2)
`prob1^2` <- outer(prob1, prob1)
`diag<-`(log(((1 - `prob1^2`) / `prob1^2`) * prob2 / (1 - prob2)), 0)
}, modes)
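# (The off-diagonal entries above are the log odds ratios of the observed
#  two-way probabilities `prob2` against the independence products `prob1^2`,
#  i.e. log[(prob2 / (1 - prob2)) / (prob1^2 / (1 - prob1^2))], with the
#  diagonal set to zero; a heuristic initializer, not an Ising MLE.)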
# Project `Omegas` onto their respective manifolds (`betas` already handled)
for (j in modes) {
if (is.function(proj_j <- proj.Omegas[[j]])) {
Omegas[[j]] <- proj_j(Omegas[[j]])
}
}
# Determine degenerate combinations, that is, variables which are mutually
# exclusive in the data set
matX <- mat(X, sample.axis)
@ -137,11 +177,11 @@ gmlm_ising <- function(X, F, sample.axis = length(dim(X)),
# slice size (nr. of objects in the slice)
n_i <- length(i)
sumF_i <- rowSums(eval(`F[..., i]`), dims = length(dimF))
sumF_i <- `dim<-`(rowSums(eval(`F[..., i]`), dims = length(dimF)), dimF)
diag_params_i <- mlm(sumF_i / n_i, betas)
params_i <- Omega + diag(as.vector(diag_params_i))
m2_i <- ising_m2(params_i)
m2_i <- ising_m2(params_i, use_MC = use_MC, nr_threads = nr_threads)
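# (`m2_i` estimates the matrix of second moments E[X_i X_j] under the
#  slice-specific Ising parameters; its "prob_0" attribute carries the
#  estimated zero-event probability, see `ising_m2`)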
# accumulate loss
matX_i <- mat(eval(`X[..., i]`), modes)
@ -196,6 +236,16 @@ gmlm_ising <- function(X, F, sample.axis = length(dim(X)),
Omega + (step.size / (sqrt(m2) + eps)) * grad
}, Omegas, grad_Omegas, grad2_Omegas)
# Project Parameters onto their manifolds
for (j in modes) {
if (is.function(proj_j <- proj.betas[[j]])) {
betas[[j]] <- proj_j(betas[[j]])
}
if (is.function(proj_j <- proj.Omegas[[j]])) {
Omegas[[j]] <- proj_j(Omegas[[j]])
}
}
# Enforce degeneracy parameter constraints
if (any(degen.mask)) {
# Extract parameters corresponding to degenerate interactions
@ -221,186 +271,7 @@ gmlm_ising <- function(X, F, sample.axis = length(dim(X)),
list(eta1 = array(0, dimX), betas = betas, Omegas = Omegas),
tensor_normal = fit_normal,
Omegas.init = Omegas.init,
degen.mask = degen.mask
degen.mask = degen.mask,
iter = iter
)
}
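## Example call (sketch only; assumes a binary tensor `X` with two modes and a
## univariate response `y`, as generated in the development block below):
# fit <- gmlm_ising(X, F = function(y) array(y, c(1, 1, length(y))), y = y,
#     proj.betas = Map(.projRank, rep(1L, 2)))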
################################################################################
### Development Interactive Block (Delete / Make sim / TODO: ...) ###
################################################################################
if (FALSE) { # interactive()
par(bg = "#1d1d1d",
fg = "lightgray",
col = "#d5d5d5",
col.axis = "#d5d5d5",
col.lab = "#d5d5d5",
col.main = "#d5d5d5",
col.sub = "#d5d5d5", # col.sub = "#2467d0"
pch = 16
)
cex <- 1.25
col <- colorRampPalette(c("#f15050", "#1d1d1d", "#567DCA"))(256)
.logger <- function() {
iter <- 0L
assign("log", data.frame(
iter = rep(NA_integer_, 100000),
loss = rep(NA_real_, 100000),
dist.B = rep(NA_real_, 100000),
dist.Omega = rep(NA_real_, 100000),
norm.grad.B = rep(NA_real_, 100000),
norm.grad.Omega = rep(NA_real_, 100000)
), envir = .GlobalEnv)
assign("B.gmlm", NULL, .GlobalEnv)
assign("Omega.gmlm", NULL, .GlobalEnv)
function(it, loss, betas, Omegas, grad_betas, grad_Omegas) {
# Store in the global environment (allows stopping early and still retrieving the results)
B.gmlm <- Reduce(kronecker, rev(betas))
assign("B.gmlm", B.gmlm, .GlobalEnv)
Omega.gmlm <- Reduce(kronecker, rev(Omegas))
assign("Omega.gmlm", Omega.gmlm, .GlobalEnv)
dist.B <- dist.subspace(B.true, B.gmlm, normalize = TRUE)
dist.Omega <- norm(Omega.true - Omega.gmlm, "F")
norm.grad.B <- sqrt(sum(mapply(norm, grad_betas, "F")^2))
norm.grad.Omega <- sqrt(sum(mapply(norm, grad_Omegas, "F")^2))
log[iter <<- iter + 1L, ] <<- list(
it, loss, dist.B, dist.Omega, norm.grad.B, norm.grad.Omega
)
cat(sprintf("\r%3d - d(B): %.3f, d(O): %.3f, |g(B)|: %.3f, |g(O)|: %.3f, loss: %.3f\033[K",
it, dist.B, dist.Omega, norm.grad.B, norm.grad.Omega, loss))
}
}
sample.size <- 1000
dimX <- c(2, 3) # predictor `X` dimension
dimF <- rep(1, length(dimX)) # "function" `F(y)` of response `y` dimension
betas <- Map(diag, 1, dimX, dimF)
Omegas <- list(toeplitz(c(0, -2)), toeplitz(seq(1, 0, by = -0.5)))
B.true <- Reduce(kronecker, rev(betas))
Omega.true <- Reduce(kronecker, rev(Omegas))
# data sampling routine
c(X, F, y, sample.axis) %<-% (sample.data <- function(sample.size, betas, Omegas) {
dimX <- mapply(nrow, betas)
dimF <- mapply(ncol, betas)
# generate response (sample axis is last axis)
y <- runif(prod(sample.size, dimF), -2, 2)
F <- array(y, dim = c(dimF, sample.size)) # ~ U[-2, 2]
Omega <- Reduce(kronecker, rev(Omegas))
X <- apply(F, length(dim(F)), function(Fi) {
dim(Fi) <- dimF
params <- diag(as.vector(mlm(Fi, betas))) + Omega
tensorPredictors::ising_sample(1, params)
})
dim(X) <- c(dimX, sample.size)
list(X = X, F = F, y = y, sample.axis = length(dim(X)))
})(sample.size, betas, Omegas)
local({
X.proto <- array(seq_len(prod(dimX)), dimX)
interactions <- crossprod(mat(X, sample.axis))
dimnames(interactions) <- rep(list(
do.call(paste0, c("X", Map(slice.index, list(X.proto), seq_along(dimX))))
), 2)
cat("Sample Size: ", sample.size, "\n")
print.table(interactions, zero.print = ".")
})
# system.time({
# fit.gmlm <- gmlm_ising(X, y, logger = .logger())
# })
Rprof()
gmlm_ising(X, y)
Rprof(NULL)
summaryRprof()
B.gmlm <- Reduce(kronecker, rev(fit.gmlm$betas))
Omega.gmlm <- Reduce(kronecker, rev(fit.gmlm$Omegas))
B.normal <- Reduce(kronecker, rev(attr(fit.gmlm, "tensor_normal")$betas))
Omega.init <- Reduce(kronecker, rev(attr(fit.gmlm, "Omegas.init")))
degen.mask <- attr(fit.gmlm, "degen.mask")
local({
layout(matrix(c(
1, 2, 3, 3, 3,
1, 4, 5, 6, 7
), nrow = 2, byrow = TRUE), width = c(6, 3, 1, 1, 1))
with(na.omit(log), {
plot(range(iter), c(0, 1), type = "n", bty = "n",
xlab = "Iterations", ylab = "Distance")
lines(iter, dist.B, col = "red", lwd = 2)
lines(iter, dist.Omega / max(dist.Omega), col = "blue", lwd = 2)
lines(iter, (loss - min(loss)) / diff(range(loss)), col = "darkgreen", lwd = 2)
norm.grad <- sqrt(norm.grad.B^2 + norm.grad.Omega^2)
# Scale all gradient norms
norm.grad.B <- norm.grad.B / max(norm.grad)
norm.grad.Omega <- norm.grad.Omega / max(norm.grad)
norm.grad <- norm.grad / max(norm.grad)
lines(iter, norm.grad.B, lty = 2, col = "red")
lines(iter, norm.grad.Omega, lty = 2, col = "blue")
lines(iter, norm.grad, lty = 2, col = "darkgreen")
axis(4, at = c(
tail(dist.B, 1),
min(dist.B)
), labels = round(c(
tail(dist.B, 1),
min(dist.B)
), 2), col = NA, col.ticks = "red", las = 1)
axis(4, at = c(
1,
tail(dist.Omega, 1) / max(dist.Omega),
min(dist.Omega) / max(dist.Omega)
), labels = round(c(
max(dist.Omega),
tail(dist.Omega, 1),
min(dist.Omega)
), 2), col = NA, col.ticks = "blue", las = 1)
abline(h = c(tail(dist.B, 1), min(dist.B)),
lty = "dotted", col = "red")
abline(h = c(max(dist.Omega), tail(dist.Omega, 1), min(dist.Omega)) / max(dist.Omega),
lty = "dotted", col = "blue")
})
legend("topright", col = c("red", "blue", "darkgreen"), lty = 1, lwd = 2,
legend = c("dist.B", "dist.Omega", "loss"), bty = "n")
zlim <- max(abs(range(Omega.true, Omega.init, Omega.gmlm))) * c(-1, 1)
matrixImage(Omega.true, main = "true", zlim = zlim, add.values = TRUE, col = col, cex = cex)
matrixImage(round(Omega.init, 2), main = "init (cond. prob.)", zlim = zlim, add.values = TRUE, col = col, cex = cex)
mtext(round(norm(Omega.true - Omega.init, "F"), 3), 3)
matrixImage(round(Omega.gmlm, 2), main = "gmlm (ising)", zlim = zlim, add.values = TRUE, col = col, cex = cex,
col.values = c(par("col"), "red")[`[<-`(array(1, rep(prod(dim(X)[-sample.axis]), 2)), degen.mask, 2)])
mtext(round(norm(Omega.true - Omega.gmlm, "F"), 3), 3)
zlim <- max(abs(range(B.true, B.normal, B.gmlm))) * c(-1, 1)
matrixImage(B.true, main = "true",
zlim = zlim, add.values = TRUE, col = col, cex = cex)
matrixImage(round(B.normal, 2), main = "init (normal)",
zlim = zlim, add.values = TRUE, axes = FALSE, col = col, cex = cex)
mtext(round(dist.subspace(B.true, B.normal, normalize = TRUE), 3), 3)
matrixImage(round(B.gmlm, 2), main = "gmlm (ising)",
zlim = zlim, add.values = TRUE, axes = FALSE, col = col, cex = cex)
mtext(round(dist.subspace(B.true, B.gmlm, normalize = TRUE), 3), 3)
})
}

View File

@ -29,9 +29,9 @@ gmlm_tensor_normal <- function(X, F, sample.axis = length(dim(X)),
}
### Phase 1: Computing initial values
meanX <- rowMeans(X, dims = length(dimX))
meanF <- rowMeans(F, dims = length(dimF))
### Phase 1: Computing initial values (`dim<-` ensures we have an "array")
meanX <- `dim<-`(rowMeans(X, dims = length(dimX)), dimX)
meanF <- `dim<-`(rowMeans(F, dims = length(dimF)), dimF)
# center X and F
X <- X - as.vector(meanX)
@ -121,7 +121,7 @@ gmlm_tensor_normal <- function(X, F, sample.axis = length(dim(X)),
))
# check the break condition
if (abs(loss.last - loss) < eps * loss.last) {
if (abs(loss.last - loss) < eps * abs(loss.last)) {
break
}
}

View File

@ -16,3 +16,39 @@ ising_m2 <- function(
M2
}
# library(tensorPredictors)
# dimX <- c(3, 4)
# dimF <- rep(1L, length(dimX))
# betas <- Map(diag, 1, dimX, dimF)
# Omegas <- list(
# 1 - diag(dimX[1]),
# toeplitz(rev(seq(0, len = dimX[2])) / dimX[2])
# )
# Omega <- Reduce(kronecker, rev(Omegas))
# y <- array(1, dimF)
# params <- diag(as.vector(mlm(y, betas))) + Omega
# # params <- array(0, dim(Omega))
# (prob_0 <- attr(ising_m2(params), "prob_0"))
# (probs <- replicate(20, attr(ising_m2(params, use_MC = TRUE, nr_threads = 8), "prob_0")))[1]
# m <- mean(probs)
# s <- sd(probs)
# (prob_a <- (function(p, M2) {
# (1 + p * (p + 1) / 2 + 2 * sum(M2) - 2 * (p + 1) * sum(diag(M2))) / 2^p
# })(prod(dimX), ising_m2(params, use_MC = FALSE)))
# par(mar = c(1, 2, 1, 2) + 0.1)
# plot(probs, ylim = pmax(0, range(probs, prob_0, prob_a)), pch = 16, cex = 1,
# xaxt = "n", xlab = "", col = "gray", log = "y", bty = "n")
# lines(cumsum(probs) / seq_along(probs), lty = 2, lwd = 2)
# abline(h = c(m - s, m, m + s), lty = c(3, 2, 3), col = "red", lwd = 2)
# abline(h = c(prob_0, prob_a), lwd = 2)
# axis(4, at = prob_0, labels = sprintf("%.1e", prob_0))
# axis(4, at = prob_a, labels = sprintf("%.1e", prob_a))

View File

@ -31,7 +31,7 @@ Dup <- function(p) {
#'
#' @export
Dup.pinv <- function(p) {
Dp <- D(p)
Dp <- Dup(p)
solve(crossprod(Dp), t(Dp))
}
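# (sanity check, illustration only: the pseudo inverse is a left inverse of `Dup`)
# p <- 3
# stopifnot(all.equal(Dup.pinv(p) %*% Dup(p), diag(p * (p + 1) / 2)))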

View File

@ -161,6 +161,9 @@ double ising_m2_MC(
// Allocate memory to store Markov-Chain value
int* X = (int*)R_alloc(dim, sizeof(int));
// Accumulator for Monte-Carlo estimate for zero probability `P(X = 0)`
double accum = 0.0;
// Create/Update R's internal PRGN state
GetRNGstate();
@ -169,13 +172,17 @@ double ising_m2_MC(
// Draw random sample `X ~ Ising(params)`
ising_mcmc_vech(warmup, dim, params, X);
// Accumulate component counts
// Accumulate component counts and the log of the unscaled probability of `X`
uint64_t* count = counts;
double dot_X = 0.0;
for (size_t i = 0; i < dim; ++i) {
const size_t I = (i * (2 * dim - 1 - i)) / 2;
for (size_t j = i; j < dim; ++j) {
*(count++) += X[i] & X[j];
dot_X += (X[i] & X[j]) ? params[I + j] : 0.0;
}
}
accum += exp(-dot_X);
}
// Write R's internal PRNG state back (if needed)
@ -186,7 +193,8 @@ double ising_m2_MC(
M2[i] = (double)counts[i] / (double)(nr_samples);
}
return -1.0; // TODO: also compute prob_0
// Prob. of zero event (Ising p.m.f. scaling constant)
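// (Informal derivation: with P(X = x) = exp(<vech(x x^T), params>) / Z and
//  Z = sum_x exp(<vech(x x^T), params>), we have P(X = 0) = 1 / Z.  For
//  X ~ Ising(params), E[exp(-<vech(X X^T), params>)] = 2^dim / Z, so the
//  sample mean of `exp(-dot_X)` divided by 2^dim estimates P(X = 0).)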
return accum / (exp2((double)dim) * (double)nr_samples);
}
@ -202,7 +210,7 @@ typedef struct thrd_data {
const double* params; // Ising model parameters
int* X; // Working memory to store current binary sample
uint32_t* counts; // (output) count of single and two way interactions
double prob_0; // (output) zero event probability estimate
double accum; // (output) Monte-Carlo accumulator for `P(X = 0)`
} thrd_data_t;
// Worker thread function
@ -214,18 +222,25 @@ int thrd_worker(thrd_data_t* data) {
// Initialize counts to zero
(void)memset(data->counts, 0, (dim * (dim + 1) / 2) * sizeof(uint32_t));
// Init Monte-Carlo estimate for zero probability `P(X = 0)`
data->accum = 0.0;
// Spawn Monte-Carlo chains (one for every sample)
for (size_t sample = 0; sample < data->nr_samples; ++sample) {
// Draw random sample `X ~ Ising(params)`
ising_mcmc_vech_thrd(data->warmup, dim, data->params, X, data->seed);
// Accumulate component counts
// Accumulate component counts and the log of the unscaled probability of `X`
uint32_t* count = data->counts;
double dot_X = 0.0;
for (size_t i = 0; i < dim; ++i) {
const size_t I = (i * (2 * dim - 1 - i)) / 2;
for (size_t j = i; j < dim; ++j) {
*(count++) += X[i] & X[j];
dot_X += (X[i] & X[j]) ? data->params[I + j] : 0.0;
}
}
data->accum += exp(-dot_X);
}
return 0;
@ -258,7 +273,7 @@ double ising_m2_MC_thrd(
threads_data[tid].warmup = warmup;
threads_data[tid].dim = dim;
threads_data[tid].params = params;
// Every worker gets its disjint working memory
// Every worker gets disjoint working memory
threads_data[tid].X = &Xs[dim * tid];
threads_data[tid].counts = &counts[len * tid];
// dispatch the worker
@ -271,10 +286,13 @@ double ising_m2_MC_thrd(
}
// Accumulate worker results into the first (tid = 0) worker's counts
// and combine the per-worker Monte-Carlo accumulators into one
double accum = threads_data[0].accum;
for (size_t tid = 1; tid < nr_threads; ++tid) {
for (size_t i = 0; i < len; ++i) {
counts[i] += counts[tid * len + i];
}
accum += threads_data[tid].accum;
}
// convert discrete counts into means
@ -282,7 +300,8 @@ double ising_m2_MC_thrd(
M2[i] = (double)counts[i] / (double)nr_samples;
}
return -2.0; // TODO: also compute prob_0
// Prob. of zero event (Ising p.m.f. scaling constant)
return accum / (exp2((double)dim) * (double)nr_samples);
}
#endif /* !__STDC_NO_THREADS__ */

View File

@ -99,6 +99,11 @@ extern SEXP R_mlm(SEXP A, SEXP Bs, SEXP ms, SEXP ops) {
Rf_error("Dimension missmatch (Params length differ)");
}
// In case the RHS is empty (the number of B's is zero), this reduces to the identity
if (nrhs == 0) {
return A;
}
// Get modes and operations for Bs (transposed or not)
int* modes = (int*)R_alloc(nrhs, sizeof(int)); // 0-indexed version of `ms`
int* trans = INTEGER(ops);