"finishd" chess example,

changed fen2tensor to show the Vienna Game,
add: chess example to paper.tex
This commit is contained in:
Daniel Kapla 2023-12-30 10:01:12 +01:00
parent 85b6a2a12a
commit fa2a99f3f0
12 changed files with 445 additions and 465 deletions


@@ -38,13 +38,13 @@
\chessplane{10}{a8, h8}; \chessplane{10}{a8, h8};
\chessplane{9}{c8, f8}; \chessplane{9}{c8, f8};
\chessplane{8}{b8, g8}; \chessplane{8}{b8, g8};
\chessplane{7}{a7, b7, c7, d7, e7, f7, g7, h7}; \chessplane{7}{a7, b7, c7, d7, e5, f7, g7, h7};
\chessplane{6}{e1}; \chessplane{6}{e1};
\chessplane{5}{d1}; \chessplane{5}{d1};
\chessplane{4}{a1, h1}; \chessplane{4}{a1, h1};
\chessplane{3}{c1, f1}; \chessplane{3}{c1, f1};
\chessplane{2}{b1, g1}; \chessplane{2}{c3, g1};
\chessplane{1}{a2, b2, c2, d2, e2, f2, g2, h2}; \chessplane{1}{a2, b2, c2, d2, e4, f2, g2, h2};
\begin{scope}[canvas is yz plane at x={-1}, transform shape] \begin{scope}[canvas is yz plane at x={-1}, transform shape]
\node[anchor = south, rotate = 90] at (layer1.west) {Ranks / Axis 1}; \node[anchor = south, rotate = 90] at (layer1.west) {Ranks / Axis 1};
@@ -57,7 +57,7 @@
\begin{scope}[canvas is xz plane at y=\xoffset, transform shape, xscale=-1] \begin{scope}[canvas is xz plane at y=\xoffset, transform shape, xscale=-1]
\path (layer1.north west) -- (layer12.north west) node[ \path (layer1.north west) -- (layer12.north west) node[
pos = 0.5, anchor = south pos = 0.5, anchor = south
] {Pieces / Axis 3}; ] {Pieces / Mixture Components};
\end{scope} \end{scope}
\end{scope} \end{scope}
@@ -66,13 +66,13 @@
\node[shift = {(0, 0)}, anchor = east] (pos) at (current bounding box.west) {{ \node[shift = {(0, 0)}, anchor = east] (pos) at (current bounding box.west) {{
\setchessboard{linewidth = 0.1em, showmover = false, smallboard} \setchessboard{linewidth = 0.1em, showmover = false, smallboard}
\newgame \newgame
% The Vienna Game
\hidemoves{1. e4 e5 2.Nc3} % Like `\mainline` but does NOT show the PGN line
\chessboard{} \chessboard{}
}}; }};
\node[anchor = south] at (pos.center |- tensor north) {Start Position}; \node[anchor = south] at (pos.center |- tensor north) {Position};
\node[anchor = south] at (tensor north) {3D Tensor}; \node[anchor = south] at (tensor north) {Encoding};
\end{tikzpicture} \end{tikzpicture}
\end{document} \end{document}
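
For reference, the figure above encodes the Vienna Game position (after 1. e4 e5 2. Nc3) as a binary tensor with one plane per piece type. A minimal Python sketch of such an encoding, assuming a 12-plane one-hot layout with ranks and files as the first two axes (the repository's actual fen2tensor layout is not part of this diff):

# Hypothetical sketch of a FEN-to-tensor encoding as depicted above: 12 one-hot
# piece planes over an 8 x 8 board (plane order and axis layout are assumptions).
import numpy as np

PIECES = "KQRBNPkqrbnp"  # plane index 0..11: white K..P, then black k..p

def fen_to_tensor(fen):
    """Encode the board field of a FEN string as an 8 x 8 x 12 binary tensor."""
    board = fen.split()[0]
    tensor = np.zeros((8, 8, 12), dtype=np.int8)
    for rank, row in enumerate(board.split("/")):      # FEN lists rank 8 first
        file = 0
        for ch in row:
            if ch.isdigit():
                file += int(ch)                        # run of empty squares
            else:
                tensor[rank, file, PIECES.index(ch)] = 1
                file += 1
    return tensor

# Vienna Game position after 1. e4 e5 2. Nc3, as shown in the figure
vienna = "rnbqkbnr/pppp1ppp/8/4p3/4P3/2N5/PPPP1PPP/R1BQKBNR b KQkq - 1 2"
print(fen_to_tensor(vienna).sum())                     # 32 pieces on the board
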


@@ -1,91 +0,0 @@
\begin{tikzpicture}[x = 4pt, y = 1.3pt, scale = \tikzscale]
\path[use as bounding box] (12, 10) rectangle (181, 290);
\begin{scope}[inner sep = 0pt, outer sep = 0pt, line width = 0.4pt]
\draw ( 44.64, 73.03) -- (132.42, 73.03);
\draw ( 44.64, 73.03) -- ( 44.64, 68.05);
\draw ( 59.27, 73.03) -- ( 59.27, 68.05);
\draw ( 73.90, 73.03) -- ( 73.90, 68.05);
\draw ( 88.53, 73.03) -- ( 88.53, 68.05);
\draw (103.16, 73.03) -- (103.16, 68.05);
\draw (117.79, 73.03) -- (117.79, 68.05);
\draw (132.42, 73.03) -- (132.42, 68.05);
\node[anchor=base, scale = 0.83] at ( 44.64, 50) {100};
\node[anchor=base, scale = 0.83] at ( 73.90, 50) {300};
\node[anchor=base, scale = 0.83] at (103.16, 50) {500};
\node[anchor=base, scale = 0.83] at (132.42, 50) {700};
\draw (40.84, 79.52) -- (40.84, 241.75);
\draw (40.84, 79.52) -- (35.86, 79.52);
\draw (40.84, 111.97) -- (35.86, 111.97);
\draw (40.84, 144.42) -- (35.86, 144.42);
\draw (40.84, 176.86) -- (35.86, 176.86);
\draw (40.84, 209.31) -- (35.86, 209.31);
\draw (40.84, 241.75) -- (35.86, 241.75);
\node[anchor=west, scale = 0.83] at (26, 79.52) {0};
\node[anchor=west, scale = 0.83] at (26, 111.97) {0.2};
\node[anchor=west, scale = 0.83] at (26, 144.42) {0.4};
\node[anchor=west, scale = 0.83] at (26, 176.86) {0.6};
\node[anchor=west, scale = 0.83] at (26, 209.31) {0.8};
\node[anchor=west, scale = 0.83] at (26, 241.75) {1};
\end{scope}
\begin{scope}[inner sep = 0pt, outer sep = 0pt]
\node[anchor = base, scale = 1.00] at (92.19, 265.23) {{\bfseries Subspace Distance}};
\node[anchor = base, scale = 0.83] at (92.19, 20) {Sample Size};
\node[rotate = 90.00, anchor = base, scale = 0.83] at (20, 160.64) {$d(\mat{B}_0, \widehat{\mat{B}})$};
\end{scope}
\begin{scope}[xscale = 0.5, xshift = 800pt, yshift = 330pt]
\begin{scope}
\definecolor{drawColor}{RGB}{42,98,182}
\path[draw=drawColor,line width= 0.7pt,line join=round,line cap=round] ( 89.72, 14) -- (104.66, 14);
\node[text=black,anchor=base west,inner sep=0pt, outer sep=0pt, scale= 0.83] at (112.13, 8.26) {HOPCA};
\end{scope}
\begin{scope}[yshift = -3em]
\definecolor{drawColor}{RGB}{147,19,185}
\path[draw=drawColor,line width= 0.7pt,line join=round,line cap=round] ( 89.72, 14) -- (104.66, 14);
\node[text=black,anchor=base west,inner sep=0pt, outer sep=0pt, scale= 0.83] at (112.13, 8.26) {TSIR};
\end{scope}
\begin{scope}[yshift = -6em]
\definecolor{drawColor}{RGB}{36,116,7}
\path[draw=drawColor,line width= 0.7pt,line join=round,line cap=round] ( 89.72, 14) -- (104.66, 14);
\node[text=black,anchor=base west,inner sep=0pt, outer sep=0pt, scale= 0.83] at (112.13, 8.26) {\bf GMLM};
\end{scope}
\end{scope}
\begin{scope}
\definecolor{drawColor}{RGB}{36,116,7}
\path[draw=drawColor,line width= 0.7pt,line join=round,line cap=round] ( 44.64,119.77) -- ( 59.27,107.60) -- ( 73.90,103.65) -- (103.16, 97.80) -- (139.74, 95.44);
\definecolor{drawColor}{RGB}{42,98,182}
\path[draw=drawColor,line width= 0.7pt,line join=round,line cap=round] ( 44.64,229.99) -- ( 59.27,229.84) -- ( 73.90,229.81) -- (103.16,229.85) -- (139.74,229.94);
\definecolor{drawColor}{RGB}{147,19,185}
\path[draw=drawColor,line width= 0.7pt,line join=round,line cap=round] ( 44.64,157.45) -- ( 59.27,128.85) -- ( 73.90,116.33) -- (103.16,104.70) -- (139.74, 99.52);
\definecolor{fillColor}{RGB}{36,116,7}
\path[fill=fillColor,fill opacity=0.30] ( 44.64,127.46) -- ( 59.27,112.88) -- ( 73.90,107.67) -- (103.16,101.24) -- (139.74, 97.90) -- (139.74, 91.72) -- (103.16, 93.73) -- ( 73.90, 98.51) -- ( 59.27,101.87) -- ( 44.64,112.70) --
cycle;
\definecolor{fillColor}{RGB}{42,98,182}
\path[fill=fillColor,fill opacity=0.30] ( 44.64,230.71) -- ( 59.27,230.36) -- ( 73.90,230.07) -- (103.16,230.23) -- (139.74,230.15) -- (139.74,229.65) -- (103.16,229.53) -- ( 73.90,229.54) -- ( 59.27,229.25) -- ( 44.64,229.22) --
cycle;
\definecolor{fillColor}{RGB}{147,19,185}
\path[fill=fillColor,fill opacity=0.30] ( 44.64,172.90) -- ( 59.27,140.34) -- ( 73.90,122.60) -- (103.16,109.34) -- (139.74,103.80) -- (139.74, 95.11) -- (103.16, 99.59) -- ( 73.90,105.02) -- ( 59.27,114.84) -- ( 44.64,142.20) --
cycle;
\end{scope}
\end{tikzpicture}
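
The deleted figure reported a subspace distance $d(\mat{B}_0, \widehat{\mat{B}})$ over the sample size for HOPCA, TSIR, and GMLM. One common choice of such a distance, used here purely as an assumption since the paper's exact definition is not part of this diff, is the Frobenius distance between the orthogonal projections onto the two column spaces:

# Sketch of a projection-based subspace distance (an assumption; the paper may
# use a different, possibly normalized, definition).
import numpy as np

def subspace_distance(A, B):
    """Frobenius distance between orthogonal projections onto span(A) and span(B)."""
    proj = lambda M: M @ np.linalg.pinv(M)
    return np.linalg.norm(proj(A) - proj(B), ord="fro")

B0 = np.random.randn(20, 3)
print(subspace_distance(B0, B0 @ np.random.randn(3, 3)))  # ~0: same column space
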


@@ -1,3 +1,18 @@
@article{KoldaBader2009,
author = {Kolda, Tamara G. and Bader, Brett W.},
title = {Tensor Decompositions and Applications},
journal = {SIAM Review},
volume = {51},
number = {3},
pages = {455-500},
year = {2009},
doi = {10.1137/07070111X},
URL = {https://doi.org/10.1137/07070111X},
eprint = {https://doi.org/10.1137/07070111X},
abstract = { This survey provides an overview of higher-order tensor decompositions, their applications, and available software. A tensor is a multidimensional or N-way array. Decompositions of higher-order tensors (i.e., N-way arrays with \$N \geq 3\$) have applications in psycho-metrics, chemometrics, signal processing, numerical linear algebra, computer vision, numerical analysis, data mining, neuroscience, graph analysis, and elsewhere. Two particular tensor decompositions can be considered to be higher-order extensions of the matrix singular value decomposition: CANDECOMP/PARAFAC (CP) decomposes a tensor as a sum of rank-one tensors, and the Tucker decomposition is a higher-order form of principal component analysis. There are many other tensor decompositions, including INDSCAL, PARAFAC2, CANDELINC, DEDICOM, and PARATUCK2 as well as nonnegative variants of all of the above. The N-way Toolbox, Tensor Toolbox, and Multilinear Engine are examples of software packages for working with tensors. }
}
@article{RegMatrixReg-ZhouLi2014, @article{RegMatrixReg-ZhouLi2014,
author = {Zhou, Hua and Li, Lexin}, author = {Zhou, Hua and Li, Lexin},
title = {Regularized matrix regression}, title = {Regularized matrix regression},


@@ -142,13 +142,12 @@
\newcommand{\DiagZeroMat}[1]{\mathbb{R}_{\diag=0}^{{#1}\times {#1}}} \newcommand{\DiagZeroMat}[1]{\mathbb{R}_{\diag=0}^{{#1}\times {#1}}}
\newcommand{\SymMat}[1]{\mathrm{Sym}^{{#1}\times {#1}}} \newcommand{\SymMat}[1]{\mathrm{Sym}^{{#1}\times {#1}}}
\newcommand{\SkewSymMat}[1]{\mathrm{Skew}^{{#1}\times {#1}}} \newcommand{\SkewSymMat}[1]{\mathrm{Skew}^{{#1}\times {#1}}}
% \newcommand{\SymPosDefMat}[1]{\mathrm{Sym}_{++}^{{#1}\times {#1}}} \newcommand{\SymPosDefMat}[1]{\mathrm{Sym}_{++}^{{#1}\times {#1}}}
\newcommand{\SymPosDefMat}[1]{\mathrm{SPD}(#1)}
\newcommand{\SymDiagZeroMat}[1]{\mathrm{Sym}_{\diag=0}^{p\times p}} \newcommand{\SymDiagZeroMat}[1]{\mathrm{Sym}_{\diag=0}^{p\times p}}
\newcommand{\SymBand}[2]{\mathrm{SymBand}^{{#1}\times {#1}}_{#2}} \newcommand{\SymBand}[2]{\mathrm{SymBand}^{{#1}\times {#1}}_{#2}}
\newcommand{\UnitaryGrp}[1]{\mathrm{U}(#1)} \newcommand{\UnitaryGrp}[1]{\mathrm{U}(#1)}
\newcommand{\SpecialUnitaryGrp}[1]{\mathrm{SU}(#1)} \newcommand{\SpecialUnitaryGrp}[1]{\mathrm{SU}(#1)}
% \newcommand{\AR}[1]{\mathrm{AR}(#1)}
\newcommand{\todo}[1]{{\color{red}TODO: #1}} \newcommand{\todo}[1]{{\color{red}TODO: #1}}
\newcommand{\efi}[1]{{\color{blue}Effie: #1}} \newcommand{\efi}[1]{{\color{blue}Effie: #1}}
@@ -481,11 +480,13 @@ where $\overline{\ten{\eta}}\in\mathbb{R}^{p_1\times\ldots\times p_r}$, $\mat{\O
\end{align} \end{align}
where $\mat{B} = \bigotimes_{j = r}^{1}\mat{\beta}_j$ and the component matrices $\mat{\beta}_j\in\mathbb{R}^{p_j\times q_j}$ are of known rank for $j = 1, \ldots, r$. Given the high potential value of $p$, we further assume that where $\mat{B} = \bigotimes_{j = r}^{1}\mat{\beta}_j$ and the component matrices $\mat{\beta}_j\in\mathbb{R}^{p_j\times q_j}$ are of known rank for $j = 1, \ldots, r$. Given the high potential value of $p$, we further assume that
\begin{align} \begin{align}
\t{(\mat{T}_2\pinv{\mat{D}_p})}\mat{\eta}_{2y}= \t{(\mat{T}_2\pinv{\mat{D}_p})}\mat{\eta}_{2} &= \vec\biggl(c\bigotimes_{j = r}^{1}\mat{\Omega}_j\biggr). \label{eq:eta2} \t{(\mat{T}_2\pinv{\mat{D}_p})}\mat{\eta}_{2y}= \t{(\mat{T}_2\pinv{\mat{D}_p})}\mat{\eta}_{2} &= \vec\biggl(c\bigotimes_{j = r}^{1}\mat{\Omega}_j\biggr). \label{eq:eta2}
\end{align} \end{align}
where $\mat{\Omega}_j\in\mathbb{R}^{p_j\times p_j}$ are symmetric positive definite matrices for $j = 1, \ldots, r$. That is, we require $\mat{\Omega} = \bigotimes_{j = r}^{1}\mat{\Omega}_j$, which substantially reduces the number of parameters to estimate in $\mat{\Omega}$. The assumption that the $\mat{\Omega}_j$'s be positive definite is possible due to the constant $c$. where $\mat{\Omega}_j\in\mathbb{R}^{p_j\times p_j}$ are symmetric positive definite matrices for $j = 1, \ldots, r$. That is, we require $\mat{\Omega} = \bigotimes_{j = r}^{1}\mat{\Omega}_j$, which substantially reduces the number of parameters to estimate in $\mat{\Omega}$. The assumption that the $\mat{\Omega}_j$'s be positive definite is possible due to the constant $c$.
Equation \eqref{eq:eta2} is underdetermined since $\t{(\mat{T}_2\pinv{\mat{D}_p})}$ has full column rank $d < p^2$ (with a non-strict inequality if $\ten{X}$ is univariate) but $\mat{\eta}_2$ is uniquely determined given any $\mat{\Omega}$ as $\t{(\pinv{(\mat{T}_2\pinv{\mat{D}_p})})}$ has full row rank. We let $\mat{\xi} = (\vec{\overline{\ten{\eta}}}, \vec{\mat{B}}, \vech{\mat{\Omega}})$ be a $p(p + 2 q + 3) / 2$-parameter vector and $\mat{\theta} = (\vec{\overline{\ten{\eta}}}, \vec{\mat{B}}, \vech{\mat{\Omega}})$ be the constrained parameter vector, where $\mat{B}=\bigotimes_{j = r}^{1}\mat{\beta}_j$ and $\mat{\Omega} = \bigotimes_{j = r}^{1}\mat{\Omega}_j$. We also let $\Xi$ and $\Theta$ denote the unconstrained and constrained parameter spaces, with $\mat{\xi}$ and $\mat{\theta}$ varying in $\Xi$ and $\Theta$, respectively. The parameter space $\Xi$ is an open subset of $\mathbb{R}^{p(p + 2 q + 3) / 2}$ so that \eqref{eq:quadratic-exp-fam} is a proper density. Later, we relax the assumptions for $\mat{\beta}_k$ and $\mat{\Omega}_k$ as a consequence of \cref{thm:param-manifold} in \cref{sec:kron-manifolds}. Equation \eqref{eq:eta2} is underdetermined since $\t{(\mat{T}_2\pinv{\mat{D}_p})}$ has full column rank $d < p^2$ (with a non-strict inequality if $\ten{X}$ is univariate) but $\mat{\eta}_2$ is uniquely determined given any $\mat{\Omega}$ as $\t{(\pinv{(\mat{T}_2\pinv{\mat{D}_p})})}$ has full row rank. We let $\mat{\xi} = (\vec{\overline{\ten{\eta}}}, \vec{\mat{B}}, \vech{\mat{\Omega}})$ be a $p(p + 2 q + 3) / 2$-parameter vector and $\mat{\theta} = (\vec{\overline{\ten{\eta}}}, \vec{\mat{B}}, \vech{\mat{\Omega}})$ be the constrained parameter vector, where $\mat{B}=\bigotimes_{j = r}^{1}\mat{\beta}_j$ and $\mat{\Omega} = \bigotimes_{j = r}^{1}\mat{\Omega}_j$. We also let $\Xi$ and $\Theta$ denote the unconstrained and constrained parameter spaces, with $\mat{\xi}$ and $\mat{\theta}$ varying in $\Xi$ and $\Theta$, respectively. The parameter space $\Xi$ is an open subset of $\mathbb{R}^{p(p + 2 q + 3) / 2}$ so that \eqref{eq:quadratic-exp-fam} is a proper density. Later, we relax the assumptions for $\mat{\beta}_k$ and $\mat{\Omega}_k$ as a consequence of \cref{thm:param-manifold} in \cref{sec:kron-manifolds}.
% \todo{Maybe already here introduce the ``constraint'' set of $\Omega$'s allowed as $\{ \Omega\in\mathbb{R}_{++}^{p\times p} : \vec{\Omega} = \t{(\pinv{(\mat{T}_2\pinv{\mat{D}_p})}\mat{T}_2\pinv{\mat{D}_p})}\vec{\Omega} \}$}
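
To get a sense of the parameter reduction implied by $\mat{B} = \bigotimes_{j = r}^{1}\mat{\beta}_j$ and $\mat{\Omega} = \bigotimes_{j = r}^{1}\mat{\Omega}_j$, a small numpy sketch with made-up mode dimensions:

# Sketch: parameter savings from the Kronecker constraint B = beta_r (x) ... (x) beta_1.
# The dimensions below are illustrative only.
import numpy as np
from functools import reduce

p, q = [8, 8, 12], [2, 2, 2]                  # hypothetical p_j and q_j
betas = [np.random.randn(pj, qj) for pj, qj in zip(p, q)]
B = reduce(np.kron, reversed(betas))          # B = beta_r (x) ... (x) beta_1

print(B.shape)                                # (768, 8) = (prod p_j, prod q_j)
print(sum(b.size for b in betas), B.size)     # 56 free entries instead of 6144

The same counting argument applies to $\mat{\Omega}$, where each factor $\mat{\Omega}_j$ is only $p_j\times p_j$ instead of the full $p\times p$.
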
In a classical \emph{generalized linear model} (GLM), the link function connecting the natural parameters to the expectation of the sufficient statistic $\mat{\eta}_y = \mat{g}(\E[\mat{t}(\ten{X}) \mid Y = y])$ is invertible. Such a link may not exist in our setting, but for our purpose what we call the ``inverse'' link suffices. The ``inverse'' link $\widetilde{\mat{g}}$ exists as the natural parameters fully describe the distribution. As in the non-minimal formulation \eqref{eq:quadratic-exp-fam}, we define the ``inverse'' link through its tensor valued components In a classical \emph{generalized linear model} (GLM), the link function connecting the natural parameters to the expectation of the sufficient statistic $\mat{\eta}_y = \mat{g}(\E[\mat{t}(\ten{X}) \mid Y = y])$ is invertible. Such a link may not exist in our setting, but for our purpose what we call the ``inverse'' link suffices. The ``inverse'' link $\widetilde{\mat{g}}$ exists as the natural parameters fully describe the distribution. As in the non-minimal formulation \eqref{eq:quadratic-exp-fam}, we define the ``inverse'' link through its tensor valued components
\begin{align} \begin{align}
\ten{g}_1(\mat{\eta}_y) &= \E[\ten{X} \mid Y = y], \label{eq:inv-link1}\\ \ten{g}_1(\mat{\eta}_y) &= \E[\ten{X} \mid Y = y], \label{eq:inv-link1}\\
@@ -577,19 +578,20 @@ Suppose $\ten{X}\mid Y = y$ follows a tensor normal distribution with mean $\te
\begin{displaymath} \begin{displaymath}
f_{\mat{\theta}}(\ten{X}\mid Y = y) = (2\pi)^{-p / 2}\prod_{k = 1}^{r}\det(\mat{\Sigma}_k)^{-p / (2 p_k)}\exp\left( -\frac{1}{2}\left\langle\ten{X} - \ten{\mu}_y, (\ten{X} - \ten{\mu}_y)\mlm_{k = 1}^{r}\mat{\Sigma}_k^{-1} \right\rangle \right). f_{\mat{\theta}}(\ten{X}\mid Y = y) = (2\pi)^{-p / 2}\prod_{k = 1}^{r}\det(\mat{\Sigma}_k)^{-p / (2 p_k)}\exp\left( -\frac{1}{2}\left\langle\ten{X} - \ten{\mu}_y, (\ten{X} - \ten{\mu}_y)\mlm_{k = 1}^{r}\mat{\Sigma}_k^{-1} \right\rangle \right).
\end{displaymath} \end{displaymath}
For the sake of simplicity and w.l.o.g., we assume $\ten{X}$ has 0 marginal expectation; i.e., $\E\ten{X} = 0$. Rewriting this in the quadratic exponential family form \eqref{eq:quadratic-exp-fam}, determines the scaling constant $c = -1/2$. The relation to the GMLM parameters $\mat{\beta}_k$ and $\mat{\Omega}_k$, for $k = 1, \ldots, r$ is For the sake of simplicity and w.l.o.g., we assume $\ten{X}$ has 0 marginal expectation; i.e., $\E\ten{X} = 0$. Rewriting this in the quadratic exponential family form \eqref{eq:quadratic-exp-fam}, determines the scaling constant $c = -1/2$. The relation to the GMLM parameters $\overline{\ten{\eta}}, \mat{\beta}_k$ and $\mat{\Omega}_k$, for $k = 1, \ldots, r$ is
\begin{equation}\label{eq:tnormal_cond_params} \begin{displaymath}
\ten{\mu}_y = \ten{F}_y\mlm_{k = 1}^{r}\mat{\Omega}_k^{-1}\mat{\beta}_k, \qquad \ten{\mu}_y = \ten{F}_y\mlm_{k = 1}^{r}\mat{\Omega}_k^{-1}\mat{\beta}_k, \qquad
\mat{\Omega}_k = \mat{\Sigma}_k^{-1}, \mat{\Omega}_k = \mat{\Sigma}_k^{-1},
\end{equation} \end{displaymath}
where we used that $\overline{\ten{\eta}} = 0$ due to $0 = \E\ten{X} = \E\E[\ten{X}\mid Y] = \E\ten{\mu}_Y$ in combination with $\E\ten{F}_Y = 0$. Additionally, all the $\mat{\Omega}_k$'s are symmetric positive definite, because the $\mat{\Sigma}_k$'s are. This leads to another simplification since then $\mat{T}_2$ in \eqref{eq:t-stat} equals the identity. This also means that the gradients of the log-likelihood $l_n$ in \cref{thm:grad} are simpler. We obtain where we used that $\overline{\ten{\eta}} = 0$ due to $0 = \E\ten{X} = \E\E[\ten{X}\mid Y] = \E\ten{\mu}_Y$ in combination with $\E\ten{F}_Y = 0$. Additionally, all the $\mat{\Omega}_k$'s are symmetric positive definite, because the $\mat{\Sigma}_k$'s are. This leads to another simplification since then $\mat{T}_2$ in \eqref{eq:t-stat} equals the identity. This also means that the gradients of the log-likelihood $l_n$ in \cref{thm:grad} are simpler. We obtain
\begin{displaymath} \begin{displaymath}
\ten{g}_1(\mat{\eta}_y) = \E[\ten{X}\mid Y = y] = \ten{\mu}_y, \qquad \ten{g}_1(\mat{\eta}_y) = \E[\ten{X}\mid Y = y] = \ten{\mu}_y, \qquad
\ten{G}_2(\mat{\eta}_y) = \ten{g}_2(\mat{\eta}_y) = \E[\ten{X}\circ\ten{X}\mid Y = y] \equiv \bigkron_{k = r}^1\mat{\Sigma}_k + (\vec{\ten{\mu}}_y)\t{(\vec{\ten{\mu}}_y)}. \ten{G}_2(\mat{\eta}_y) = \ten{g}_2(\mat{\eta}_y) = \E[\ten{X}\circ\ten{X}\mid Y = y] \equiv \bigkron_{k = r}^1\mat{\Sigma}_k + (\vec{\ten{\mu}}_y)\t{(\vec{\ten{\mu}}_y)}.
\end{displaymath} \end{displaymath}
In practice, we assume we have a random sample of $n$ observations $(\ten{X}_i, \ten{F}_{y_i})$ from the joint distribution. We start the estimation process by demeaning them. Then, only the reduction matrices $\mat{\beta}_k$ and the scatter matrices $\mat{\Omega}_k$ need to be estimated. To solve the optimization problem \eqref{eq:mle}, with $\overline{\ten{\eta}} = 0$ we initialize the parameters using a simple heuristic approach. First, we compute moment based mode-wise marginal covariance estimates $\widehat{\mat{\Sigma}}_k(\ten{X})$ and $\widehat{\mat{\Sigma}}_k(\ten{F}_Y)$ as In practice, we assume we have a random sample of $n$ observations $(\ten{X}_i, \ten{F}_{y_i})$ from the joint distribution. We start the estimation process by demeaning them. Then, only the reduction matrices $\mat{\beta}_k$ and the scatter matrices $\mat{\Omega}_k$ need to be estimated. To solve the optimization problem \eqref{eq:mle}, with $\overline{\ten{\eta}} = 0$ we initialize the parameters using a simple heuristic approach. % For initial estimates $\hat{\mat{\beta}}_k^{(0)}$ we
First, we compute moment based mode-wise marginal covariance estimates $\widehat{\mat{\Sigma}}_k(\ten{X})$ and $\widehat{\mat{\Sigma}}_k(\ten{F}_Y)$ as
\begin{displaymath} \begin{displaymath}
\widehat{\mat{\Sigma}}_k(\ten{X}) = \frac{1}{n}\sum_{i = 1}^{n} (\ten{X}_i)_{(k)}\t{(\ten{X}_i)_{(k)}}, \quad \widehat{\mat{\Sigma}}_k(\ten{X}) = \frac{1}{n}\sum_{i = 1}^{n} (\ten{X}_i)_{(k)}\t{(\ten{X}_i)_{(k)}}, \qquad
\widehat{\mat{\Sigma}}_k(\ten{F}_Y) = \frac{1}{n}\sum_{i = 1}^{n} (\ten{F}_{y_i})_{(k)}\t{(\ten{F}_{y_i})_{(k)}}. \widehat{\mat{\Sigma}}_k(\ten{F}_Y) = \frac{1}{n}\sum_{i = 1}^{n} (\ten{F}_{y_i})_{(k)}\t{(\ten{F}_{y_i})_{(k)}}.
\end{displaymath} \end{displaymath}
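
A short numpy sketch of these moment-based estimates on stand-in random data, writing $(\ten{X}_i)_{(k)}$ as the mode-$k$ unfolding:

# Mode-wise marginal covariance estimates for a sample of tensors X_1, ..., X_n.
import numpy as np

def mode_unfold(A, k):
    """Mode-k unfolding (A)_(k): move axis k to the front and flatten the rest."""
    return np.moveaxis(A, k, 0).reshape(A.shape[k], -1)

def marginal_covariances(X):
    """X has shape (n, p_1, ..., p_r); returns the r mode-wise estimates."""
    n = X.shape[0]
    return [sum(mode_unfold(Xi, k) @ mode_unfold(Xi, k).T for Xi in X) / n
            for k in range(X.ndim - 1)]

X = np.random.randn(100, 4, 5, 6)                   # stand-in sample, n = 100
print([S.shape for S in marginal_covariances(X)])   # [(4, 4), (5, 5), (6, 6)]
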
Then, for every mode $k = 1, \ldots, r$, we compute the first $j = 1, \ldots, q_k$ eigenvectors $\mat{v}_j(\widehat{\mat{\Sigma}}_k(\ten{X}))$, $\mat{v}_j(\widehat{\mat{\Sigma}}_k(\ten{F}_Y))$ and eigenvalues $\lambda_j(\widehat{\mat{\Sigma}}_k(\ten{X}))$, $\lambda_j(\widehat{\mat{\Sigma}}_k(\ten{F}_Y))$ of the marginal covariance estimates. We set Then, for every mode $k = 1, \ldots, r$, we compute the first $j = 1, \ldots, q_k$ eigenvectors $\mat{v}_j(\widehat{\mat{\Sigma}}_k(\ten{X}))$, $\mat{v}_j(\widehat{\mat{\Sigma}}_k(\ten{F}_Y))$ and eigenvalues $\lambda_j(\widehat{\mat{\Sigma}}_k(\ten{X}))$, $\lambda_j(\widehat{\mat{\Sigma}}_k(\ten{F}_Y))$ of the marginal covariance estimates. We set
@@ -618,6 +620,7 @@ Given $\hat{\mat{\beta}}_1, \ldots, \hat{\mat{\beta}}_r, \hat{\mat{\Omega}}_1, \
\biggr) \biggr)
\hat{\mat{\Omega}}_j. \hat{\mat{\Omega}}_j.
\end{equation} \end{equation}
%For the scatter matrices $\mat{\Omega}_j$, we need to fudge a bit.
Equating the partial gradient of the $j$th scatter matrix $\mat{\Omega}_j$ in \cref{thm:grad} to zero ( $\nabla_{\mat{\Omega}_j}l_n = 0$) gives a quadratic matrix equation. This is due to the dependence of $\ten{\mu}_y$ on $\mat{\Omega}_j$. In practice though, it is faster, more stable, and equally accurate to use mode-wise covariance estimates via the residuals Equating the partial gradient of the $j$th scatter matrix $\mat{\Omega}_j$ in \cref{thm:grad} to zero ( $\nabla_{\mat{\Omega}_j}l_n = 0$) gives a quadratic matrix equation. This is due to the dependence of $\ten{\mu}_y$ on $\mat{\Omega}_j$. In practice though, it is faster, more stable, and equally accurate to use mode-wise covariance estimates via the residuals
\begin{displaymath} \begin{displaymath}
\hat{\ten{R}}_i = \ten{X}_i - \hat{\ten{\mu}}_{y_i} = \ten{X}_i - \ten{F}_{y_i}\mlm_{k = 1}^{r}\hat{\mat{\Omega}}_k^{-1}\hat{\mat{\beta}}_k. \hat{\ten{R}}_i = \ten{X}_i - \hat{\ten{\mu}}_{y_i} = \ten{X}_i - \ten{F}_{y_i}\mlm_{k = 1}^{r}\hat{\mat{\Omega}}_k^{-1}\hat{\mat{\beta}}_k.
@@ -637,36 +640,35 @@ so that
resulting in the estimates $\hat{\mat{\Omega}}_j = (\tilde{s}\tilde{\mat{\Sigma}}_j)^{-1}$. resulting in the estimates $\hat{\mat{\Omega}}_j = (\tilde{s}\tilde{\mat{\Sigma}}_j)^{-1}$.
Estimation is then performed by updating the estimates $\hat{\mat{\beta}}_j$ via \eqref{eq:tensor_normal_beta_solution} for $j = 1, \ldots, r$, and then recomputing the $\hat{\mat{\Omega}}_j$ estimates simultaneously, keeping the $\hat{\mat{\beta}}_j$'s fixed. This procedure is repeated until convergence. % Convergence is very fast, experiments showed that convergence occurs usually in less than $10$ iterations. Estimation is then performed by updating the estimates $\hat{\mat{\beta}}_j$ via \eqref{eq:tensor_normal_beta_solution} for $j = 1, \ldots, r$, and then recomputing the $\hat{\mat{\Omega}}_j$ estimates simultaneously, keeping the $\hat{\mat{\beta}}_j$'s fixed. This procedure is repeated until convergence. % Convergence is very fast, experiments showed that convergence occurs usually in less than $10$ iterations.
A technical detail for numerical stability is to ensure that the scaled values $\tilde{s}\tilde{\mat{\Sigma}}_j$, assumed to be symmetric and positive definite, are well conditioned. Thus, we estimate the condition number of $\tilde{s}\tilde{\mat{\Sigma}}_j$ prior to computing the inverse. In case of ill-conditioning, we use the regularized $\hat{\mat{\Omega}}_j = (\tilde{s}\tilde{\mat{\Sigma}}_j + 0.2 \lambda_{1}(\tilde{s}\tilde{\mat{\Sigma}}_j)\mat{I}_{p_j})^{-1}$ instead, where $\lambda_{1}(\tilde{s}\tilde{\mat{\Sigma}}_j)$ is the first (maximum) eigenvalue. Experiments showed that this regularization is usually only required in the first few iterations. A technical detail for numerical stability is to ensure that the scaled values $\tilde{s}\tilde{\mat{\Sigma}}_j$, assumed to be symmetric and positive definite, are well conditioned. Thus, we estimate the condition number of $\tilde{s}\tilde{\mat{\Sigma}}_j$ prior to computing the inverse. In case of ill-conditioning, we use the regularized $\hat{\mat{\Omega}}_j = (\tilde{s}\tilde{\mat{\Sigma}}_j + 0.2 \lambda_{1}(\tilde{s}\tilde{\mat{\Sigma}}_j)\mat{I}_{p_j})^{-1}$ instead, where $\lambda_{1}(\tilde{s}\tilde{\mat{\Sigma}}_j)$ is the first (maximum) eigenvalue. Experiments showed that this regularization is usually only required in the first few iterations.
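
A minimal sketch of this regularized inversion; the concrete condition-number threshold below is an assumption, as the text only states that the condition number is estimated before inverting:

# Regularized inverse: add 0.2 * lambda_max * I before inverting when the
# (estimated) condition number is too large. The threshold is a made-up value.
import numpy as np

def regularized_inverse(S, max_cond=1e6):
    """Invert the symmetric positive definite S, regularizing if ill conditioned."""
    eigvals = np.linalg.eigvalsh(S)            # ascending eigenvalues
    if eigvals[-1] > max_cond * max(eigvals[0], np.finfo(float).tiny):
        S = S + 0.2 * eigvals[-1] * np.eye(S.shape[0])
    return np.linalg.inv(S)
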
Furthermore, if the parameter space follows a more general setting as in \cref{thm:param-manifold}, updating may produce estimates outside the parameter space. A simple and efficient method is to project every updated estimate onto the corresponding manifold. Furthermore, if the parameter space follows a more general setting as in \cref{thm:param-manifold}, updating may produce estimates outside the parameter space. A simple and efficient method is to project every updated estimate onto the corresponding manifold.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Ising Model}\label{sec:ising_estimation} \subsection{Ising Model}\label{sec:ising_estimation}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
The general distribution of a binary vector is modeled by the \emph{multi-variate Bernoulli distribution} (see: \textcite{GraphicalModels-Whittaker2009, MVB-Dai2012, MVB-DaiDingWahba2013}). The \emph{Ising model} \textcite{Ising-Ising1924} is a special case, considering only two-way interactions. Its probability mass function (PMF) for a binary random vector $X\in\{ 0, 1 \}^p$ with natural parameters $\mat{\gamma}\in\mathbb{R}^{p(p + 1) / 2}$ is given by The general distribution of a binary vector is modeled by the \emph{multi-variate Bernoulli distribution} (\textcite{GraphicalModels-Whittaker2009, MVB-Dai2012, MVB-DaiDingWahba2013}). The \emph{Ising model} \textcite{Ising-Ising1924} is a special case, considering only two-way interactions. Its probability mass function (PMF) for a binary random vector $X\in\{ 0, 1 \}^p$ with natural parameters $\mat{\gamma}\in\mathbb{R}^{p(p + 1) / 2}$ is given by
\begin{displaymath} \begin{displaymath}
P_{\mat{\gamma}}(\mat{x}) = p_0(\mat{\gamma})\exp(\t{\vech(\mat{x}\t{\mat{x}})}\mat{\gamma}). P_{\mat{\gamma}}(\mat{x}) = p_0(\mat{\gamma})\exp(\t{\vech(\mat{x}\t{\mat{x}})}\mat{\gamma}).
\end{displaymath} \end{displaymath}
The scaling factor $p_0(\mat{\gamma})\in\mathbb{R}_{+}$ ensures that $P_{\mat{\gamma}}$ is a PMF. It is equal to the probability of the zero event $P(X = \mat{0}) = p_0(\mat{\gamma})$. More commonly known as the \emph{partition function}, the reciprocal of $p_0$, given by The scaling factor $p_0(\mat{\gamma})\in\mathbb{R}_{+}$ ensures that $P_{\mat{\gamma}}$ is a PMF. It is equal to the probability of the zero event $P(X = \mat{0}) = p_0(\mat{\gamma})$. More commonly known as the \emph{partition function}, the reciprocal of $p_0$, is given by
\begin{equation}\label{eq:ising-partition-function} \begin{equation}\label{eq:ising-partition-function}
p_0(\mat{\gamma})^{-1} = \sum_{\mat{x}\in\{0, 1\}^p}\exp(\t{\vech(\mat{x}\t{\mat{x}})}\mat{\gamma}). p_0(\mat{\gamma})^{-1} = \sum_{\mat{x}\in\{0, 1\}^p}\exp(\t{\vech(\mat{x}\t{\mat{x}})}\mat{\gamma}).
\end{equation} \end{equation}
By an abuse of notation, let $\mat{\gamma}_{j l}$ address the element of $\mat{\gamma}$ corresponding to $\mat{x}_j\mat{x}_l$ in $\vech(\mat{x}\t{\mat{x}})$\footnote{Specifically, the element $\mat{\gamma}_{j l}$ of $\mat{\gamma}$ is a short hand for $\mat{\gamma}_{\iota(j, l)}$ with $\iota(j, l) = (\min(j, l) - 1)(2 p - \min(j, l)) / 2 + \max(j, l)$ mapping the matrix row index $j$ and column index $l$ to the corresponding half vectorization indices $\iota(j, l)$.}. The ``diagonal'' parameter $\mat{\gamma}_{j j}$ expresses the conditional log odds of $X_j = 1\mid X_{-j} = \mat{0}$, where the negative subscript in $X_{-j}$ describes the $p - 1$ dimensional vector $X$ with the $j$'t element removed. The ``off diagonal'' parameters $\mat{\gamma}_{j l}$, for $j\neq l$, are equal to the conditional log odds of simultaneous occurrence $X_j = 1, X_l = 1 \mid X_{-j, -l} = \mat{0}$. More precisely, for $j\neq l$, the conditional probabilities and the natural parameters are related by By an abuse of notation, let $\mat{\gamma}_{j l}$ denote the element of $\mat{\gamma}$ corresponding to $\mat{x}_j\mat{x}_l$ in $\vech(\mat{x}\t{\mat{x}})$\footnote{Specifically, the element $\mat{\gamma}_{j l}$ of $\mat{\gamma}$ is a short hand for $\mat{\gamma}_{\iota(j, l)}$ with $\iota(j, l) = (\min(j, l) - 1)(2 p - \min(j, l)) / 2 + \max(j, l)$ mapping the matrix row index $j$ and column index $l$ to the corresponding half vectorization indices $\iota(j, l)$.}. The ``diagonal'' parameter $\mat{\gamma}_{j j}$ expresses the conditional log odds of $X_j = 1\mid X_{-j} = \mat{0}$, where the negative subscript in $X_{-j}$ describes the $p - 1$ dimensional vector $X$ with the $j$th element removed. The ``off diagonal'' parameters $\mat{\gamma}_{j l}$, for $j\neq l$, are equal to the conditional log odds of simultaneous occurrence $X_j = 1, X_l = 1 \mid X_{-j, -l} = \mat{0}$. More precisely, for $j\neq l$, the conditional probabilities and the natural parameters are related by
\begin{align} \begin{align}
\mat{\gamma}_{j j} &= \log\frac{P_{\mat{\gamma}}(X_j = 1\mid X_{-j} = \mat{0})}{1 - P_{\mat{\gamma}}(X_j = 1\mid X_{-j} = \mat{0})}, \nonumber \\ \mat{\gamma}_{j j} &= \log\frac{P_{\mat{\gamma}}(X_j = 1\mid X_{-j} = \mat{0})}{1 - P_{\mat{\gamma}}(X_j = 1\mid X_{-j} = \mat{0})}, \nonumber \\
\mat{\gamma}_{j l} &= \log\frac{1 - P_{\mat{\gamma}}(X_j = 1\mid X_{-j} = \mat{0})P_{\mat{\gamma}}(X_l = 1\mid X_{-l} = \mat{0})}{P_{\mat{\gamma}}(X_j = 1\mid X_{-j} = \mat{0})P_{\mat{\gamma}}(X_l = 1\mid X_{-l} = \mat{0})}\frac{P_{\mat{\gamma}}(X_j = 1, X_l = 1\mid X_{-j, -l} = \mat{0})}{1 - P_{\mat{\gamma}}(X_j = 1, X_l = 1\mid X_{-j, -l} = \mat{0})} \label{eq:ising-two-way-log-odds}. \mat{\gamma}_{j l} &= \log\frac{1 - P_{\mat{\gamma}}(X_j = 1\mid X_{-j} = \mat{0})P_{\mat{\gamma}}(X_l = 1\mid X_{-l} = \mat{0})}{P_{\mat{\gamma}}(X_j = 1\mid X_{-j} = \mat{0})P_{\mat{\gamma}}(X_l = 1\mid X_{-l} = \mat{0})}\frac{P_{\mat{\gamma}}(X_j = 1, X_l = 1\mid X_{-j, -l} = \mat{0})}{1 - P_{\mat{\gamma}}(X_j = 1, X_l = 1\mid X_{-j, -l} = \mat{0})} \label{eq:ising-two-way-log-odds}.
\end{align} \end{align}
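
For small $p$, the partition function \eqref{eq:ising-partition-function} can be evaluated by brute force, which also makes the exponential cost discussed in \cref{sec:ising-bigger-dim} tangible. A short Python sketch, with $\vech$ ordered to match the index map $\iota(j, l)$ from the footnote:

# Brute-force evaluation of p_0(gamma) = P(X = 0), feasible only for small p.
import itertools
import numpy as np

def vech(M):
    """Half-vectorization: stack the columns of the lower triangle of M."""
    return np.concatenate([M[j:, j] for j in range(M.shape[0])])

def p0(gamma, p):
    """Reciprocal of the partition function, summing over all 2^p binary vectors."""
    z = sum(np.exp(vech(np.outer(x, x)) @ gamma)
            for x in itertools.product([0, 1], repeat=p))
    return 1.0 / z

p = 5
gamma = np.random.randn(p * (p + 1) // 2)
print(p0(gamma, p))
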
Conditional Ising models, incorporating the information of covariates $Y$ into the model, have also been considered \textcite{sparseIsing-ChengEtAt2014, sdr-mixedPredictors-BuraForzaniEtAl2022}. The direct way is to parameterize $\mat{\gamma} = \mat{\gamma}_y$ by the covariate $Y = y$ to model a conditional distribution $P_{\mat{\gamma}_y}(\mat{x}\mid Y = y)$.
Conditional Ising models, incorporating the information of covariates $Y$ into the model, have also been considered \textcite{sparseIsing-ChengEtAt2014, sdr-mixedPredictors-BuraForzaniEtAl2022}. The direct way is to parameterize the parameter $\mat{\gamma} = \mat{\gamma}_y$ by the covariate $Y = y$ to model a conditional distribution $P_{\mat{\gamma}_y}(\mat{x}\mid Y = y)$. We extend the conditional PMF by allowing the binary variables to be tensor values, that is for $\ten{X}\in\{ 0, 1 \}^{p_1\times\cdots\times p_r}$ we set $\mat{x} = \vec{\ten{X}}$, with dimension $p = \prod_{k = 1}^{r}p_k$. Considering the tensor structure of $\ten{X}$ is done by assuming Kronecker product constraints to the parameter vector $\mat{\gamma}_y$ in a similar fashion as for the tensor normal model. This means that we compare the PMF $P_{\mat{\gamma}_y}(\vec{\ten{X}} | Y = y)$ to the quadratic exponential family \eqref{eq:quadratic-exp-fam} with the natural parameters modeled by \eqref{eq:eta1} and \eqref{eq:eta2}. A detail to be considered is that the diagonal of $(\vec{\ten{X}})\t{(\vec{\ten{X}})}$ is equal to $\vec{\ten{X}}$. This gives the GMLM model as
We extend the conditional PMF by allowing the binary variables to be tensor values, that is for $\ten{X}\in\{ 0, 1 \}^{p_1\times\cdots\times p_r}$ we set $\mat{x} = \vec{\ten{X}}$, with dimension $p = \prod_{k = 1}^{r}p_k$. Considering the tensor structure of $\ten{X}$ is done by assuming Kronecker product constraints to the parameter vector $\mat{\gamma}_y$ in a similar fashion as for the tensor normal model. This means that we compair the PMF $P_{\mat{\gamma}_y}(\vec{\ten{X}} | Y = y)$ to the quadratic exponential family \eqref{eq:quadratic-exp-fam} with the natural parameters modeled by \eqref{eq:eta1} and \eqref{eq:eta2}. A detail to be considered is that the diagonal of $(\vec{\ten{X}})\t{(\vec{\ten{X}})}$ is equal to $\vec{\ten{X}}$. This gives the GMLM model as
\begin{align} \begin{align}
P_{\mat{\gamma}_y}(\ten{X} \mid Y = y) P_{\mat{\gamma}_y}(\ten{X} \mid Y = y)
&= p_0(\mat{\gamma}_y)\exp(\t{\vech((\vec{\ten{X}})\t{(\vec{\ten{X}})})}\mat{\gamma}_y) \nonumber \\ &= p_0(\mat{\gamma}_y)\exp(\t{\vech((\vec{\ten{X}})\t{(\vec{\ten{X}})})}\mat{\gamma}_y) \nonumber \\
&= p_0(\mat{\gamma}_y)\exp\Bigl(\Bigl\langle \ten{X}, \ten{F}_y\mlm_{k = 1}^{r}\mat{\beta}_k \Bigr\rangle + \Bigl\langle\ten{X}\mlm_{k = 1}^{r}\mat{\Omega}_k, \ten{X}\Bigr\rangle\Bigr)\label{eq:ising-cond-prob} &= p_0(\mat{\gamma}_y)\exp\Bigl(\Bigl\langle \ten{X}, \ten{F}_y\mlm_{k = 1}^{r}\mat{\beta}_k \Bigr\rangle + \Bigl\langle\ten{X}\mlm_{k = 1}^{r}\mat{\Omega}_k, \ten{X}\Bigr\rangle\Bigr)\label{eq:ising-cond-prob}
\end{align} \end{align}
where we set $\overline{\ten{\eta}} = 0$ and $\mat{T}_2$ to the identity. This is an additional constraint to the model; the reason for $\overline{\ten{\eta}} = 0$ is that the diagonal elements of $\mat{\Omega} = \bigkron_{k = r}^{1}\mat{\Omega}_k$ take the role of $\overline{\ten{\eta}}$, although not fully. Having the diagonal of $\mat{\Omega}$ and $\overline{\ten{\eta}}$ handling the self interaction effects might lead to interference in the optimization routine. Another approach would be to use the $\mat{T}_2$ matrix to set the corresponding diagonal elements of $\mat{\Omega}$ to zero and let $\overline{\ten{\eta}}$ handle the self interaction effect. In \cref{sec:chess} we actually use $\mat{T}_2$ to enforce zero constants for specific elements of $\mat{\Omega}$. But for simplicity, we set $\mat{T}_2$ to the identity in the following. All of those approaches, namely setting $\overline{\ten{\eta}} = 0$, keeping $\overline{\ten{\eta}}$ or using $\mat{T}_2$, are theoretically solid and compatible with \cref{thm:grad,thm:param-manifold,thm:asymptotic-normality-gmlm}, assuming all axis dimensions $p_k$ are non-degenerate, that is $p_k > 1$ for all $k = 1, \ldots, r$. Regardless, under our modeling choice we get the relation between the natural parameters $\mat{\gamma}_y$ of the conditional Ising model and the GMLM parameters $\mat{\beta}_k$ and $\mat{\Omega}_k$ as where we set $\overline{\ten{\eta}} = 0$ and $\mat{T}_2$ to the identity. This is an additional constraint to the model; the reason is that the diagonal elements of $\mat{\Omega} = \bigkron_{k = r}^{1}\mat{\Omega}_k$ take the role of $\overline{\ten{\eta}}$, although not fully. Having the diagonal of $\mat{\Omega}$ and $\overline{\ten{\eta}}$ handling the self interaction effects might lead to interference in the optimization routine. Another approach would be to use the $\mat{T}_2$ matrix to set the corresponding diagonal elements of $\mat{\Omega}$ to zero and let $\overline{\ten{\eta}}$ handle the self interaction effect. All of those approaches, namely setting $\overline{\ten{\eta}} = 0$, keeping $\overline{\ten{\eta}}$ or using $\mat{T}_2$, are theoretically solid and compatible with \cref{thm:grad,thm:param-manifold,thm:asymptotic-normality-gmlm}, assuming all axis dimensions $p_k$ are non-degenerate, that is $p_k > 1$ for all $k = 1, \ldots, r$. Regardless, under our modeling choice we get the relation between the natural parameters $\mat{\gamma}_y$ of the conditional Ising model and the GMLM parameters $\mat{\beta}_k$ and $\mat{\Omega}_k$ as
\begin{equation}\label{eq:ising-natural-params} \begin{equation}\label{eq:ising-natural-params}
% \t{\pinv{\mat{D}_p}}\mat{\gamma}_y % \t{\pinv{\mat{D}_p}}\mat{\gamma}_y
% = \vec(\mat{\Omega} + \diag(\mat{B}\vec{\ten{F}_y})) % = \vec(\mat{\Omega} + \diag(\mat{B}\vec{\ten{F}_y}))
@@ -726,9 +728,10 @@ The same problem arives in gradient optimization. Therefore, before starting the
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Slightly Bigger Dimensions}\label{sec:ising-bigger-dim} \subsubsection{Slightly Bigger Dimensions}\label{sec:ising-bigger-dim}
A big challange with the Ising model is its high computational complexity. The reason is in the sum over all binary vectors of length $p = \prod_{k = 1}^{r}p_k$ in the partition function \eqref{eq:ising-partition-function}. Computing the partition function exactly requires to sum all $2^p$ binary vectors. Small dimensions, like $p\approx 10$, this is easily computed. Increasing the dimension bejond $20$ gets extremely expensive while for dimensions bigger than $30$ its absolutely impossible. Trying to avoid the evaluation of the log-likelihood and only computing its partial gradients via \cref{thm:grad} does not resolve the issue. The gradients require the inverse link, in other words the second moment \eqref{eq:ising-m2}, where, if dropping the scaling factor $p_0$, still involves to sum $2^p$ summands. Basically, with our model, this means that the optimization of the Ising model using exactly computed gradients is impossible for moderately sized problems. A big challenge for the Ising model is its high computational complexity as it involves summing over all binary vectors of length $p = \prod_{k = 1}^{r}p_k$ in the partition function \eqref{eq:ising-partition-function}. Computing the partition function exactly requires to sum all $2^p$ binary vectors. For small dimensions, say $p\approx 10$, this is easily computed. Increasing the dimension beyond $20$ becomes extremely expensive while it is %absolutely
impossible for dimensions bigger than $30$. Trying to avoid the evaluation of the log-likelihood and only computing its partial gradients via \cref{thm:grad} does not resolve the issue. The gradients require the inverse link, in other words the second moment \eqref{eq:ising-m2}, which, even after dropping the scaling factor $p_0$, still involves summing $2^p$ terms. Basically, with our model, this means that the optimization of the Ising model using exactly computed gradients is impossible for moderately sized problems.
For estimation of dimensions $p$ bigger than $20$, we use a Monte-Carlo method to estimate the second moment \eqref{eq:ising-m2}, required to compute the partial gradients of the log-likelihood. Specifically, we use a Gibbs-Sampler to sample from the conditional distribution and approximate the second moment in an importance sampling framework. This can be implemented quite efficiently while the estimation accuracy for the second moment is evaluated experimentaly which seems to be very reliable. Simultaniously, we use the same approach to estimate the partition function. %This though, is in comparison inaccurate, and may only be used to get a rough idea of the log-likelihood. Regardles, for our method, we only need the gradient for optimization where appropriate break conditions, not based on the likelihood, lead to a working method for MLE estimation. For estimation of dimensions $p$ bigger than $20$, we use a Monte-Carlo method to estimate the second moment \eqref{eq:ising-m2}, required to compute the partial gradients of the log-likelihood. Specifically, we use a Gibbs-Sampler to sample from the conditional distribution and approximate the second moment in an importance sampling framework. This can be implemented quite efficiently, and the estimation accuracy for the second moment, evaluated experimentally, seems to be very reliable. Simultaneously, we use the same approach to estimate the partition function. This, though, is comparatively inaccurate and may only be used to get a rough idea of the log-likelihood. Regardless, for our method, we only need the gradient for optimization where appropriate break conditions, not based on the likelihood, lead to a working method for MLE estimation.
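
A plain Gibbs sampler for the Ising PMF illustrates the idea; the importance sampling wrapper used in the paper is not reproduced here. The conditional log odds of $X_j$ given the remaining components is $\mat{\gamma}_{j j} + \sum_{l\neq j}\mat{\gamma}_{j l}X_l$, which is what the sweep below uses:

# Plain Gibbs sampling sketch for the Ising model (not the paper's importance
# sampling scheme). Gamma is the symmetric p x p matrix holding gamma_jl off the
# diagonal and gamma_jj on the diagonal.
import numpy as np

def gibbs_sweep(x, Gamma, rng):
    """One in-place sweep over all p binary components."""
    for j in range(x.size):
        logit = Gamma[j, j] + Gamma[j] @ x - Gamma[j, j] * x[j]
        x[j] = float(rng.random() < 1.0 / (1.0 + np.exp(-logit)))
    return x

def second_moment(Gamma, n_samples=2000, burn_in=200, rng=None):
    """Monte Carlo estimate of E[vec(X) vec(X)^T] under the Ising PMF."""
    rng = np.random.default_rng() if rng is None else rng
    x = rng.integers(0, 2, size=Gamma.shape[0]).astype(float)
    m2 = np.zeros_like(Gamma)
    for it in range(burn_in + n_samples):
        gibbs_sweep(x, Gamma, rng)
        if it >= burn_in:
            m2 += np.outer(x, x)
    return m2 / n_samples

Gamma = np.random.randn(10, 10)
Gamma = (Gamma + Gamma.T) / 2
print(np.round(second_moment(Gamma), 2))
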
\begin{figure} \begin{figure}
\centering \centering
@@ -746,16 +749,16 @@ For estimation of dimensions $p$ bigger than $20$, we use a Monte-Carlo method t
\cref{thm:sdr} identifies the sufficient reduction for the regression of $Y$ on $\ten{X}$ in the population. Any estimation of the sufficient reduction requires application of some optimality criterion. As we operate within the framework of the exponential family, we opted for maximum likelihood estimation (MLE). For the unconstrained problem, where the parameters are simply $\mat{B}$ and $\mat{\Omega}$ in \eqref{eq:eta1-manifold}, maximizing the likelihood of $\ten{X} \mid Y$ is straightforward and yields well-defined MLEs of both parameters. Our setting, though, requires the constrained optimization of the $\ten{X} \mid Y$ likelihood subject to $\mat{B} = \bigotimes_{j = r}^{1}\mat{\beta}_j$ and $\mat{\Omega}=\bigkron_{j = r}^{1}\mat{\Omega}_j$. \Cref{thm:kron-manifolds,thm:param-manifold} provide the setting for which the MLE of the constrained parameter $\mat{\theta}$ is well-defined, which in turn leads to the derivation of its asymptotic normality. \cref{thm:sdr} identifies the sufficient reduction for the regression of $Y$ on $\ten{X}$ in the population. Any estimation of the sufficient reduction requires application of some optimality criterion. As we operate within the framework of the exponential family, we opted for maximum likelihood estimation (MLE). For the unconstrained problem, where the parameters are simply $\mat{B}$ and $\mat{\Omega}$ in \eqref{eq:eta1-manifold}, maximizing the likelihood of $\ten{X} \mid Y$ is straightforward and yields well-defined MLEs of both parameters. Our setting, though, requires the constrained optimization of the $\ten{X} \mid Y$ likelihood subject to $\mat{B} = \bigotimes_{j = r}^{1}\mat{\beta}_j$ and $\mat{\Omega}=\bigkron_{j = r}^{1}\mat{\Omega}_j$. \Cref{thm:kron-manifolds,thm:param-manifold} provide the setting for which the MLE of the constrained parameter $\mat{\theta}$ is well-defined, which in turn leads to the derivation of its asymptotic normality.
The main problem to be solves for asymtotic results of the MLE of the constraint parameter $\mat{\theta} = (\overline{\ten{\eta}}, \vec\mat{B}, \vech\mat{\Omega})$ derives from the nature of the constraint. We assumed that $\mat{B} = \bigkron_{k = r}^{1}\mat{\beta}_k$, where the parameter $\mat{B}$ is identifiable. This means that different values of $\mat{B}$ lead to different densities $f_{\mat{\theta}}(\ten{X}\mid Y = y)$, a basic property needed to ensure consistency of parameter estimates, which in turn is needed for asymptotic normality. On the other hand, the components $\mat{\beta}_j$ for $j = 1, \ldots, r$ are \emph{not} identifyable. A direct consequence of the identity $\mat{\beta}_2\otimes\mat{\beta}_1 = (c\mat{\beta}_2)\otimes (c^{-1}\mat{\beta}_1)$ for every $c\neq 0$. This is the reason we formulated $\Theta$ as a constraint parameter space instead of parameterizing the densities of $\ten{X}\mid Y$ with respect to the components $\mat{\beta}_1, \ldots, \mat{\beta}_r$. The situation for $\mat{\Omega} = \bigkron_{k = r}^{1}\mat{\Omega}_k$ is the same. The main problem in obtaining asymptotic results for the MLE of the constrained parameter $\mat{\theta} = (\overline{\ten{\eta}}, \vec\mat{B}, \vech\mat{\Omega})$ stems from the nature of the constraint. We assumed that $\mat{B} = \bigkron_{k = r}^{1}\mat{\beta}_k$, where the parameter $\mat{B}$ is identifiable. This means that different values of $\mat{B}$ lead to different densities $f_{\mat{\theta}}(\ten{X}\mid Y = y)$, a basic property needed to ensure consistency of parameter estimates, which in turn is needed for asymptotic normality. On the other hand, the components $\mat{\beta}_j$, $j = 1, \ldots, r$, are \emph{not} identifiable, which is a direct consequence of the equality $\mat{\beta}_2\otimes\mat{\beta}_1 = (c\mat{\beta}_2)\otimes (c^{-1}\mat{\beta}_1)$ for every $c\neq 0$. This is the reason we formulated $\Theta$ as a constrained parameter space instead of parameterizing the densities of $\ten{X}\mid Y$ with respect to the components $\mat{\beta}_1, \ldots, \mat{\beta}_r$. The same is true for $\mat{\Omega} = \bigkron_{k = r}^{1}\mat{\Omega}_k$.
In addition to identifyable parameters, asymtotic normality given in \cref{thm:asymptotic-normality-gmlm} requires the notion of differentiation. Therefore, the space itself needs to be able to define differentiation, usually a vector space. This is a too strong assumption for our purposes. To weaken the vector space assumption we considure \emph{smooth manifolds}. Basically, spaces which localy look like Euclidean space and allow the notion of differentiation. The more general \emph{topological} manifolds are too weak for differentiation. To make matters worse, a smooth manifold only allows first derivatives. Without goint into any details, a \emph{Riemannian manifold} is the solution. Similar to an abstract \emph{smooth manifold}, those spaces are detached from our usual intuition as well as complicated to handle in an already complicated setting. This is where an \emph{embedded (sub)manifold} comes to rescue. Simply speaking, an embedded manifold is a manifold which is a subset of a manifold from which it inherits its properties. Now, if a manifold is embedded in Euclidean space, almost all the complication of the abstract manifold theory simplifies drastically. Moreover, since Euclidean space is itself a Riemannian manifold, we inherit the means for higher derivatives. Finally, smooth embedded submanifold structure for the parameter space maintains consistency with existing approaches and results for parameter sets with linear subspace structure. Those reasons are the argument to ensure that the constraint parameter space $\Theta$ is an \emph{smooth embedded submanifold} in an open subset $\Xi$ of Euclidean space. In addition to identifiable parameters, asymptotic normality obtained in \cref{thm:asymptotic-normality-gmlm} requires differentiation. Therefore, the space itself needs to admit defining differentiation, which is usually a vector space. This is too strong an assumption for our purposes. To weaken the vector space assumption we consider \emph{smooth manifolds}. The latter are spaces which look like Euclidean spaces locally and allow the notion of differentiation. The more general \emph{topological} manifolds are too weak for differentiation. To make matters worse, a smooth manifold only allows first derivatives. Without going into details, the solution is a \emph{Riemannian manifold}. Similar to an abstract \emph{smooth manifold}, Riemannian manifolds are detached from our usual intuition as well as complicated to handle in an already complicated setting. This is where an \emph{embedded (sub)manifold} comes to rescue. Simply speaking, an embedded manifold is a manifold which is a subset of a manifold from which it inherits its properties. If a manifold is embedded in a Euclidean space, almost all the complication of the abstract manifold theory simplifies drastically. Moreover, since a Euclidean space is itself a Riemannian manifold, we inherit the means for higher derivatives. Finally, smooth embedded submanifold structure for the parameter space maintains consistency with existing approaches and results for parameter sets with linear subspace structure. These reasons justify the constraint that the parameter space $\Theta$ be an \emph{smooth embedded submanifold} in an open subset $\Xi$ of a Euclidean space.
Now, we diretly define a \emph{smooth manifold} embedded in $\mathbb{R}^p$ without any detours to the more general theory. See for example \textcite{introToSmoothMani-Lee2012,introToRiemannianMani-Lee2018,optimMatrixMani-AbsilEtAl2007,aufbauAnalysis-kaltenbaeck2021} among others. Now, we directly define a \emph{smooth manifold} embedded in $\mathbb{R}^p$ without any detours to the more general theory. See for example \textcite{introToSmoothMani-Lee2012,introToRiemannianMani-Lee2018,optimMatrixMani-AbsilEtAl2007,aufbauAnalysis-kaltenbaeck2021} among others.
\begin{definition}[Manifolds]\label{def:manifold} \begin{definition}[Manifolds]\label{def:manifold}
A set $\manifold{A}\subseteq\mathbb{R}^p$ is an \emph{embedded smooth manifold} of dimension $d$ if for every $\mat{x}\in\manifold{A}$ there exists a smooth\footnote{Here \emph{smooth} means infinitely differentiable or $C^{\infty}$.} bi-continuous map $\varphi:U\cap\manifold{A}\to V$, called a \emph{chart}, with $\mat{x}\in U\subseteq\mathbb{R}^p$ open and $V\subseteq\mathbb{R}^d$ open. A set $\manifold{A}\subseteq\mathbb{R}^p$ is an \emph{embedded smooth manifold} of dimension $d$ if for every $\mat{x}\in\manifold{A}$ there exists a smooth\footnote{Here \emph{smooth} means infinitely differentiable or $C^{\infty}$.} bi-continuous map $\varphi:U\cap\manifold{A}\to V$, called a \emph{chart}, with $\mat{x}\in U\subseteq\mathbb{R}^p$ open and $V\subseteq\mathbb{R}^d$ open.
\end{definition} \end{definition}
We also need the concept of a \emph{tangent space} to formulate asymtotic normality in a way which is independent of a particular coordinate representation. Intuitively, the tangent space at a point $\mat{x}\in\manifold{A}$ of the manifold $\manifold{A}$ is the hyperspace of all velocity vectors $\t{\nabla\gamma(0)}$ of any curve $\gamma:(-1, 1)\to\manifold{A}$ passing through $\mat{x} = \gamma(0)$, see \cref{fig:torus}. Locally, at $\mat{x} = \gamma(0)$ with a chart $\varphi$ we can write $\gamma(t) = \varphi^{-1}(\varphi(\gamma(t)))$ which gives that $\Span\t{\nabla\gamma(0)} \subseteq \Span\t{\nabla\varphi^{-1}(\varphi(\mat{x}))}$. Taking the union over all smooth curves through $\mat{x}$ gives equality. The following definition leverages the simplified setup of smooth manifolds in Euclidean space. We also need the concept of a \emph{tangent space} to formulate asymptotic normality in a way which is independent of a particular coordinate representation. Intuitively, the tangent space at a point $\mat{x}\in\manifold{A}$ of the manifold $\manifold{A}$ is the hyperspace of all velocity vectors $\t{\nabla\gamma(0)}$ of any curve $\gamma:(-1, 1)\to\manifold{A}$ passing through $\mat{x} = \gamma(0)$, see \cref{fig:torus}. Locally, at $\mat{x} = \gamma(0)$ with a chart $\varphi$ we can write $\gamma(t) = \varphi^{-1}(\varphi(\gamma(t)))$ which gives that $\Span\t{\nabla\gamma(0)} \subseteq \Span\t{\nabla\varphi^{-1}(\varphi(\mat{x}))}$. Taking the union over all smooth curves through $\mat{x}$ gives equality. The following definition leverages the simplified setup of smooth manifolds in Euclidean space.
\begin{definition}[Tangent Space]\label{def:tangent-space} \begin{definition}[Tangent Space]\label{def:tangent-space}
Let $\manifold{A}\subseteq\mathbb{R}^p$ be an embedded smooth manifold and $\mat{x}\in\manifold{A}$. The \emph{tangent space} at $\mat{x}$ of $\manifold{A}$ is defined as Let $\manifold{A}\subseteq\mathbb{R}^p$ be an embedded smooth manifold and $\mat{x}\in\manifold{A}$. The \emph{tangent space} at $\mat{x}$ of $\manifold{A}$ is defined as
@@ -776,7 +779,7 @@ We also need the concept of a \emph{tangent space} to formulate asymtotic normal
As a basis to ensure that the constraint parameter space $\Theta$ is a manifold, which is the statement of \cref{thm:param-manifold}, we need \cref{thm:kron-manifolds}. Therefore, we need the notion of a \emph{spherical} set, which is a set $\manifold{A}$, on which the Frobenius norm is constant. That is, $\|\,.\,\|_F:\manifold{A}\to\mathbb{R}$ is constant. Furthermore, we call a scale invariant set $\manifold{A}$ a \emph{cone}, that is $\manifold{A} = \{ c \mat{A} : \mat{A}\in\manifold{A} \}$ for all $c > 0$. As a basis to ensure that the constrained parameter space $\Theta$ is a manifold, which is a requirement of \cref{thm:param-manifold}, we need \cref{thm:kron-manifolds}. Therefore, we need the notion of a \emph{spherical} set, which is a set $\manifold{A}$, on which the Frobenius norm is constant. That is, $\|\,.\,\|_F:\manifold{A}\to\mathbb{R}$ is constant. Furthermore, we call a scale invariant set $\manifold{A}$ a \emph{cone}, that is $\manifold{A} = \{ c \mat{A} : \mat{A}\in\manifold{A} \}$ for all $c > 0$.
\begin{theorem}[Kronecker Product Manifolds]\label{thm:kron-manifolds} \begin{theorem}[Kronecker Product Manifolds]\label{thm:kron-manifolds}
Let $\manifold{A}\subseteq\mathbb{R}^{p_1\times q_1}\backslash\{\mat{0}\}, \manifold{B}\subseteq\mathbb{R}^{p_2\times q_2}\backslash\{\mat{0}\}$ be smooth embedded submanifolds. Assume one of the following conditions holds. Let $\manifold{A}\subseteq\mathbb{R}^{p_1\times q_1}\backslash\{\mat{0}\}, \manifold{B}\subseteq\mathbb{R}^{p_2\times q_2}\backslash\{\mat{0}\}$ be smooth embedded submanifolds. Assume one of the following conditions holds.
@@ -796,7 +799,7 @@ As a basis to ensure that the constraint parameter space $\Theta$ is a manifold,
\quad\text{and}\quad \quad\text{and}\quad
\manifold{K}_{\mat{\Omega}} = \Bigl\{ \bigkron_{k = r}^{1}\mat{\Omega}_k : \mat{\Omega}_k\in\manifold{O}_k \Bigr\} \manifold{K}_{\mat{\Omega}} = \Bigl\{ \bigkron_{k = r}^{1}\mat{\Omega}_k : \mat{\Omega}_k\in\manifold{O}_k \Bigr\}
\end{displaymath} \end{displaymath}
where $\manifold{B}_k\subset\mathbb{R}^{p_k\times q_k}\backslash\{\mat{0}\}$ and $\manifold{O}_k\subset\mathbb{R}^{p_k\times p_k}\backslash\{\mat{0}\}$ are smooth embedded manifolds which are ether spheres or cones, for $k = 1, ..., r$. Furthermore, let where $\manifold{B}_k\subset\mathbb{R}^{p_k\times q_k}\backslash\{\mat{0}\}$ and $\manifold{O}_k\subset\mathbb{R}^{p_k\times p_k}\backslash\{\mat{0}\}$ are smooth embedded manifolds which are either spheres or cones, for $k = 1, ..., r$. Furthermore, let
\begin{displaymath} \begin{displaymath}
\manifold{CK}_{\mat{\Omega}} = \{ \vech{\mat{\Omega}} : \mat{\Omega}\in\manifold{K}_{\mat{\Omega}} \land \pinv{(\mat{T}_2\pinv{\mat{D}_p})}\mat{T}_2\pinv{\mat{D}_p}\vec{\mat{\Omega}} = \vec{\mat{\Omega}} \} \manifold{CK}_{\mat{\Omega}} = \{ \vech{\mat{\Omega}} : \mat{\Omega}\in\manifold{K}_{\mat{\Omega}} \land \pinv{(\mat{T}_2\pinv{\mat{D}_p})}\mat{T}_2\pinv{\mat{D}_p}\vec{\mat{\Omega}} = \vec{\mat{\Omega}} \}
\end{displaymath} \end{displaymath}
@@ -804,9 +807,9 @@ As a basis to ensure that the constraint parameter space $\Theta$ is a manifold,
\end{theorem} \end{theorem}
\subsection{Matrix Manifolds}\label{sec:matrix-manifolds} \subsection{Matrix Manifolds}\label{sec:matrix-manifolds}
A powerful side effect of \cref{thm:param-manifold} is the modeling flexibility it provides. For example, we can perform low rank regression. Or, we may constrain two-way interactions between direct axis neighbors by using band matrices for the $\mat{\Omega}_k$'s, among others. A powerful side effect of \cref{thm:param-manifold} is the modeling flexibility it provides. For example, we can perform low rank regression. Or, we may constrain two-way interactions between direct axis neighbors by using band matrices for the $\mat{\Omega}_k$'s, among others.
This flexibility derives from many different matrix manifolds that can be used as building blocks $\manifold{B}_k$ and $\manifold{O}_k$ of the parameter space $\Theta$ in \cref{thm:param-manifold}. A list of possible choices, among others, is given in \cref{tab:matrix-manifolds}. As long as parameters in $\Theta$ are valid parameterizations of a density (or PMF) of \eqref{eq:quadratic-exp-fam} subject to \eqref{eq:eta1-manifold} and \eqref{eq:eta2-manifold}, one may choose any of the manifolds listed in \cref{tab:matrix-manifolds} which are ether cones or spherical. We also included an example which is nether a sphere nor a cone. They may also be valid building blocks, but require more work as they are not directly leading to a parameter manifold by \cref{thm:param-manifold}. In case one can show the resulting parameter space $\Theta$ is an embedded manifold, the asymtotic theory of \cref{sec:asymtotics} is applicable. This flexibility derives from many different matrix manifolds that can be used as building blocks $\manifold{B}_k$ and $\manifold{O}_k$ of the parameter space $\Theta$ in \cref{thm:param-manifold}. A list of possible choices, among others, is given in \cref{tab:matrix-manifolds}. As long as parameters in $\Theta$ are valid parameterizations of a density (or PMF) of \eqref{eq:quadratic-exp-fam} subject to \eqref{eq:eta1-manifold} and \eqref{eq:eta2-manifold}, one may choose any of the manifolds listed in \cref{tab:matrix-manifolds} which are either cones or spherical. We also included an example which is neither a sphere nor a cone. They may also be valid building blocks, but require more work as they are not directly leading to a parameter manifold by \cref{thm:param-manifold}. In case one can show the resulting parameter space $\Theta$ is an embedded manifold, the asymptotic theory of \cref{sec:asymtotics} is applicable.
\begin{table}
\centering
@ -848,7 +851,7 @@ This flexibility derives from many different matrix manifolds that can be used a
\subsection{Asymptotics}\label{sec:asymtotics}
Let $Z$ be a random variable distributed according to a parameterized probability distribution with density $f_{\mat{\theta_0}}\in\{ f_{\mat{\theta}} : \mat{\theta}\in\Theta \}$ where $\Theta$ is a subset of a Euclidean space. We want to estimate the parameter ${\mat{\theta}}_0$ using $n$ i.i.d. (independent and identically distributed) copies of $Z$. We assume a known, real-valued and measurable function $z\mapsto m_{\mat{\theta}}(z)$ for every $\mat{\theta}\in\Theta$ and that ${\mat{\theta}}_0$ is the unique maximizer of the map $\mat{\theta}\mapsto M(\mat{\theta}) = \E m_{\mat{\theta}}(Z)$. For the estimation we maximize the empirical version
\begin{align}\label{eq:Mn}
M_n(\mat{\theta}) &= \frac{1}{n}\sum_{i = 1}^n m_{\mat{\theta}}(Z_i).
\end{align}
@ -856,8 +859,6 @@ An \emph{M-estimator} $\hat{\mat{\theta}}_n = \hat{\mat{\theta}}_n(Z_1, ..., Z_n
\begin{displaymath}
\hat{\mat{\theta}}_n = \argmax_{\mat{\theta}\in\Theta} M_n(\mat{\theta}).
\end{displaymath}
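As a purely illustrative sketch (not part of the paper's models), the empirical criterion $M_n$ and an M-estimator can be computed numerically in R, here with a Gaussian log-likelihood standing in for $m_{\mat{\theta}}$.

## Toy M-estimation sketch: maximize M_n(theta) = (1/n) sum_i m_theta(Z_i),
## with m_theta a Gaussian log-likelihood and theta = (mean, log sd).
set.seed(1)
Z <- rnorm(100, mean = 2, sd = 1.5)
m_theta <- function(theta, z) dnorm(z, mean = theta[1], sd = exp(theta[2]), log = TRUE)
M_n <- function(theta) mean(m_theta(theta, Z))
theta_hat <- optim(c(0, 0), function(theta) -M_n(theta))$par  # optim() minimizes, hence -M_n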
It is not necessary to have a perfect maximizer; as long as the objective has a finite supremum, it is sufficient to take an \emph{almost maximizer} $\hat{\mat{\theta}}_n$ as defined in the following:
\begin{definition}[weak and strong M-estimators]
@ -879,7 +880,11 @@ It is not necessary to have a perfect maximizer, as long as the objective has fi
\end{theorem}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Asymptotic Normality}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
The following is a reformulation of \textcite[Lemma~2.3]{asymptoticMLE-BuraEtAl2018} which assumes Condition~2.2 to hold. The existence of a mapping in Condition~2.2 is not needed for Lemma~2.3. It suffices that the restricted parameter space $\Theta$ is a subset of the unrestricted parameter space $\Xi$, which is trivially satisfied in our setting. Under this, \cref{thm:exists-strong-M-estimator-on-subsets} follows directly from \textcite[Lemma~2.3]{asymptoticMLE-BuraEtAl2018}.
\begin{theorem}[Existence of strong M-estimators on Subsets]\label{thm:exists-strong-M-estimator-on-subsets}
If there exists a (weak/strong) M-estimator $\hat{\mat{\xi}}_n$ for $M_n$ over $\Xi$, then there exists a strong M-estimator $\hat{\mat{\theta}}_n$ for $M_n$ over any non-empty $\Theta\subseteq\Xi$.
@ -887,16 +892,16 @@ The following is a reformulation of \textcite[Lemma~2.3]{asymptoticMLE-BuraEtAl2
\begin{theorem}[Existence and Consistency of M-estimators on Subsets]\label{thm:M-estimator-consistency-on-subsets}
Let $\Xi$ be a convex open subset of a Euclidean space and $\Theta\subseteq\Xi$ non-empty. Assume $\mat{\xi}\mapsto m_{\mat{\xi}}(z)$ is a strictly concave function on $\Xi$ for almost all $z$ and $z\mapsto m_{\mat{\xi}}(z)$ is measurable for all $\mat{\xi}\in\Xi$. Let $M(\mat{\xi}) = \E m_{\mat{\xi}}(Z)$ be a well defined function with a unique maximizer $\mat{\theta}_0\in\Theta\subseteq\Xi$; that is, $M(\mat{\theta}_0) > M(\mat{\xi})$ for all $\mat{\xi}\neq\mat{\theta}_0$. Also, assume
\begin{displaymath}
\E\sup_{\mat{\xi}\in K}|m_{\mat{\xi}}(Z)| < \infty,
\end{displaymath}
for every non-empty compact $K\subset\Xi$. Then, there exists a strong M-estimator $\hat{\mat{\theta}}_n$ of $M_n(\mat{\theta}) = \frac{1}{n}\sum_{i = 1}^{n} m_{\mat{\theta}}(Z_i)$ over the subset $\Theta$. Moreover, any strong M-estimator $\hat{\mat{\theta}}_n$ of $M_n$ over $\Theta$ converges in probability to $\mat{\theta}_0$, that is $\hat{\mat{\theta}}_n\xrightarrow{p}\mat{\theta}_0$.
\end{theorem}
\todo{The assumptions of the following can be a bit weakened, is this necessary? For example the Hessian can be singular but is non-singular constrained to the tangent space. We can also define $\mat{\theta}\mapsto m_{\mat{\theta}}$ only on the manifold which makes the statement much more technical, but way more general, while we need to ensure that every smooth local extension of $\mat{\theta}\mapsto m_{\mat{\theta}}$ yields the same statement, which it does, but then it gets more complicated! Maybe add these things as a remark? The even more general formulation for Riemannian Manifolds is definitely over the top!}
\begin{theorem}[Asymptotic Normality for M-estimators on Manifolds]\label{thm:M-estimator-asym-normal-on-manifolds}
Let $\Theta\subseteq\mathbb{R}^p$ be a smooth embedded manifold. For each $\mat{\theta}$ in a neighborhood in $\mathbb{R}^p$ of the true parameter $\mat{\theta}_0\in\Theta$ let $z\mapsto m_{\mat{\theta}}(z)$ be measurable and $\mat{\theta}\mapsto m_{\mat{\theta}}(z)$ be differentiable at $\mat{\theta}_0$ for almost all $z$. Assume also that there exists a measurable function $u$ such that $\E[u(Z)^2] < \infty$, and for almost all $z$ as well as all $\mat{\theta}_1, \mat{\theta}_2$ in a neighborhood of $\mat{\theta}_0$ such that
@ -921,15 +926,15 @@ The following is a reformulation of \textcite[Lemma~2.3]{asymptoticMLE-BuraEtAl2
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Simulations}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
In this section, we provide simulation results for the tensor normal as well as the Ising model, where different aspects of the GMLM model are compared against other methods. The comparison methods are tensor Sliced Inverse Regression (TSIR) \textcite{tsir-DingCook2015}, MGCCA \textcite{MGCCA-GirkaEtAl2024} and the Tucker decomposition, a higher-order form of principal component analysis (HOPCA) \textcite{KoldaBader2009}, for both continuous and binary data. For the latter, the binary values are simply treated as continuous. As a baseline we also include classic PCA on vectorized observations. \todo{check, fix, ...}
All experiments are performed with different sample sizes $n = 100, 200, 300, 500$ and $750$. Every experiment is repeated $100$ times.
We are interested in the quality of the estimate of the true sufficient reduction $\ten{R}(\ten{X})$ from \cref{thm:sdr}. Therefore, we compare with the true vectorized reduction matrix $\mat{B} = \bigkron_{k = r}^{1}\mat{\beta}_k$, as it is compatible with any linear reduction method. The distance $d(\mat{B}, \hat{\mat{B}})$ between $\mat{B}\in\mathbb{R}^{p\times q}$ and an estimate $\hat{\mat{B}}\in\mathbb{R}^{p\times \tilde{q}}$ is the \emph{subspace distance} which is proportional to
\begin{displaymath}
d(\mat{B}, \hat{\mat{B}}) \propto \| \mat{B}\pinv{(\t{\mat{B}}\mat{B})}\t{\mat{B}} - \hat{\mat{B}}\pinv{(\t{\hat{\mat{B}}}\hat{\mat{B}})}\t{\hat{\mat{B}}} \|_F,
\end{displaymath}
that is, the Frobenius norm of the difference between the projections onto the span of $\mat{B}$ and $\hat{\mat{B}}$. The proportionality constant\footnote{Depends on the row dimension $p$ and the ranks of $\mat{B}$ and $\hat{\mat{B}}$, given by $(\min(\rank\mat{B} + \rank\hat{\mat{B}}, 2 p - (\rank\mat{B} + \rank\hat{\mat{B}})))^{-1/2}$.} of $d(\mat{B}, \hat{\mat{B}})$ ensures that the subspace distance is in the interval $[0, 1]$. A distance of zero implies equality of the spans, a distance of one means that the subspaces are orthogonal.
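A direct R implementation of this distance, written from the formula above, could look as follows (a sketch; the implementation used for the reported numbers may differ).

## Subspace distance between span(B) and span(B_hat), scaled to [0, 1].
dist_subspace <- function(B, B_hat) {
    P  <- B %*% MASS::ginv(crossprod(B)) %*% t(B)              # projection onto span(B)
    Ph <- B_hat %*% MASS::ginv(crossprod(B_hat)) %*% t(B_hat)  # projection onto span(B_hat)
    r  <- qr(B)$rank + qr(B_hat)$rank
    norm(P - Ph, type = "F") / sqrt(min(r, 2 * nrow(B) - r))
}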
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Tensor Normal}\label{sec:sim-tensor-normal}
@ -1052,7 +1057,7 @@ The results of 1c) are surprising. The GMLM model behaves as expected, clearly b
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Data Analysis}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
In this section we perform two \todo{realy two!} applications of the GMLM model on real data. The first example is the tensor normal model applied to EEG data\todo{do this!}. Next, we perform a proof of concept data analysis for chess. The main purpose of choosing chess is twofold. First, we can illustrate an explicit use case for the (so far ignored) linear constraint matrix $\mat{T}_2$. Second, it is a personal interest of one of the authors.\todo{???}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@ -1060,9 +1065,9 @@ In this section be perform two \todo{realy two!} applications of the GMLM model
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Chess}\label{sec:chess}
The data set is provided by the \citetitle{lichess-database}\footnote{\fullcite{lichess-database}}. We downloaded the November 2023 database consisting of more than $92$ million games. We removed all games without position evaluations. The evaluations, also denoted scores, are from Stockfish,\footnote{\fullcite{stockfish}} a free and strong chess engine. The scores take the role of the response $Y$ and correspond to a winning probability from white's point of view. Positive scores are good for white and negative scores indicate an advantage for black. We ignore all highly unbalanced positions, which we define as positions with an absolute score above $5$. We also remove all positions with a mate score (one side can force checkmate). Furthermore, we only consider positions after $10$ half-moves to avoid oversampling the beginning of the most common openings, including the start position which is in every game. Finally, we only consider positions with white to move. This leads to a final data set of roughly $64$ million positions, including duplicates.
A chess position is encoded as a set of $12$ binary matrices $\ten{X}_{\mathrm{piece}}$ of dimensions $8\times 8$. Every binary matrix encodes the positioning of a particular piece by containing a $1$ if the piece is present at the corresponding board position. The $12$ pieces derive from the $6$ types of pieces, namely pawns (\pawn), knights (\knight), bishops (\bishop), rooks (\rook), queens (\queen) and kings (\king) of two colors, black and white. See \cref{fig:fen2tensor} for a visualization.
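For illustration, a single piece plane is just a binary $8\times 8$ matrix; in the accompanying code the full $8\times 8\times 12$ encoding is produced by \texttt{Rchess::fen2int()} from a FEN string. The sketch below fills two squares by hand.

## One binary 8 x 8 plane of the encoding, here for the white knights.
## The full 8 x 8 x 12 array is produced by Rchess::fen2int() in the code.
X_white_knight <- matrix(0L, 8, 8, dimnames = list(rank = as.character(8:1), file = letters[1:8]))
X_white_knight["1", "g"] <- 1L  # white knight on g1
X_white_knight["3", "c"] <- 1L  # white knight on c3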
\begin{figure}[hp!]
\centering
@ -1070,11 +1075,64 @@ A chess position is encoded as a 3D binary tensor of dimension $8\times 8\times
\caption{\label{fig:fen2tensor}The chess start position and its 3D binary tensor representation, empty entries are $0$.}
\end{figure}
We assume that $\ten{X}_{\mathrm{piece}}\mid Y = y$ follows an Ising GMLM model as in \cref{sec:ising_estimation} with the different conditional piece predictors being independent. The independence assumption is made for the sake of simplicity, even though it clearly does not hold in the true underlying distribution. See \cref{sec:chess-discussion} for a short discussion of possible improvements. By this simplifying assumption we get a mixture model with the log-likelihood
\begin{displaymath}
l_n(\mat{\theta}) = \frac{1}{12}\sum_{\mathrm{piece}}l_n(\mat{\theta}_{\mathrm{piece}})
\end{displaymath}
where $l_n(\mat{\theta}_{\mathrm{piece}})$ is the Ising GMLM log-likelihood as in \cref{sec:ising_estimation} for $\ten{X}_{\mathrm{piece}}\mid Y = y$. For every component the same relation to the scores $y$ is modeled via a $2\times 2$ dimensional matrix valued function $\ten{F}_y$ consisting of the monomials $1, y, y^2$, specifically $(\ten{F}_y)_{i j} = y^{i + j - 2}$.
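In R, this matrix valued function is a one-liner (sketch matching the definition above).

## (F_y)_{ij} = y^(i + j - 2): a 2 x 2 matrix holding the monomials 1, y, y^2.
F_y <- function(y) outer(1:2, 1:2, function(i, j) y^(i + j - 2))
F_y(0.5)
#      [,1] [,2]
# [1,] 1.00 0.50
# [2,] 0.50 0.25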
Given the raw scale of the data, millions of observations, it is computationally infeasible to compute the gradients on the entire data set, and simply using a computationally manageable subset is not an option due to the high dimension of the binary data, which is $12$ times an $8\times 8$ board per observation, for a total dimension of $768$. The main issue is that a manageable subset, say one million observations, still leads to a degenerate data set. In our simplified mixture model, the pawns are a particular issue as there are many millions of different combinations of the $8$ pawns per color on the $6\times 8$ sub-grid where pawns can be positioned. This alone rules out a reasonably sized subset for estimation. The solution is to switch from classic gradient based optimization to a stochastic version, meaning that every gradient update uses a new random subset of the entire data set. Therefore, we draw independent random samples from the data consisting of $64$ million positions. The independence of samples derives from the independence of games, as every sample is drawn from a different game.
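Schematically, the optimization loop draws a fresh random batch for every gradient update. The sketch below uses the repository's sampling helpers \texttt{Rchess::data.gen()} and \texttt{Rchess::fen2int()}; the names \texttt{n\_iter}, \texttt{theta} and \texttt{update\_params()} are hypothetical stand-ins for the actual GMLM gradient step inside \texttt{gmlm\_chess()}.

## Stochastic optimization sketch: every update sees a new random batch.
## `update_params()` is a hypothetical stand-in for the GMLM gradient step.
for (iter in seq_len(n_iter)) {
    fens  <- Rchess::data.gen(data_set, 1024, -5, 5, quiet = TRUE)  # fresh random positions
    X     <- Rchess::fen2int(fens)                                  # 8 x 8 x 12 x 1024 binary array
    y     <- attr(fens, "scores")                                   # Stockfish scores
    theta <- update_params(theta, X, y)                             # hypothetical gradient update
}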
\paragraph{Validation:}
Given the non-linear nature of the reduction, due to the quadratic matrix valued function $\ten{F}_y$ of the score $y$, we use a \emph{generalized additive model}\footnote{using the function \texttt{gam()} from the \texttt{R} package \texttt{mgcv}.} (GAM) to predict position scores from reduced positions. The reduced positions are $48$ dimensional continuous values, obtained by combining the $12$ mixture components from the $2\times 2$ matrix valued reductions per piece. The per piece reduction is
\begin{displaymath}
\ten{R}(\ten{X}_{\mathrm{piece}}) = \mat{\beta}_{1,\mathrm{piece}}(\ten{X}_{\mathrm{piece}} - \E\ten{X}_{\mathrm{piece}})\t{\mat{\beta}_{2, \mathrm{piece}}}
\end{displaymath}
which gives the complete $48$ dimensional vectorized reduction by stacking the piece wise reductions
\begin{displaymath}
\vec{\ten{R}(\ten{X})}
= (\vec{\ten{R}(\ten{X}_{\text{white pawn}})}, \ldots, \vec{\ten{R}(\ten{X}_{\text{black king}})})
= \t{\mat{B}}\vec(\ten{X} - \E\ten{X}).
\end{displaymath}
The second line encodes all the piece-wise reductions in a block diagonal full reduction matrix $\mat{B}$ of dimension $768\times 48$, which is applied to the vectorized 3D tensor $\ten{X}$ combining all the piece components $\ten{X}_{\mathrm{piece}}$ into a single tensor of dimension $8\times 8\times 12$. This is a reduction to $6.25\%$ of the original dimension. The $R^2$ statistic of the GAM fitted on $10^5$ new reduced samples is $R^2_{gam}\approx 42\%$. A linear model on the reduced data achieves $R^2_{lm}\approx 25\%$, which clearly shows the non-linear relation. On the other hand, the static evaluation of the \emph{Schach H\"ornchen}\todo{ref/mention/?!?} engine, given the full position (\emph{not} reduced), achieves an $R^2_{hce}\approx 51\%$. The $42\%$ compares reasonably well to the $51\%$ of the engine's static evaluation, which gets the original position and uses chess specific expert knowledge. Features of the static evaluation which are expected to be learned by the GMLM mixture model are \emph{material} (piece values) and \emph{piece square tables} (PSQT, preferred piece type positions). In addition, the static evaluation includes chess specific features like \emph{king safety}, \emph{pawn structure} or \emph{rooks on open files}. This lets us conclude that the reduction captures most of the relevant features possible, given the oversimplified modeling we performed.
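A sketch of how the block diagonal reduction matrix can be assembled from the estimated factors (assuming, as in the analysis script, that \texttt{betas[[piece]]} holds the pair $(\mat{\beta}_1, \mat{\beta}_2)$ of one mixture component):

## Assemble the 768 x 48 block diagonal matrix B from the per piece Kronecker
## factors B_piece = beta_2 %x% beta_1 (assumes betas[[piece]] = list(beta_1, beta_2)).
B_blocks <- Map(function(piece) kronecker(betas[[piece]][[2]], betas[[piece]][[1]]), 1:12)
B_full   <- as.matrix(Matrix::bdiag(B_blocks))  # dimension 768 x 48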
\paragraph{Interpretation:} For a compact interpretation of the estimated reduction we construct PSQTs. To do so we use the linear model from the validation section. Then, we rewrite the combined linear reduction and linear model in terms of PSQTs. Let $\mat{B}$ be the $768\times 48$ full vectorized linear reduction. This is the block diagonal matrix with the $64\times 4$ dimensional per piece reductions $\mat{B}_{\mathrm{piece}} = \mat{\beta}^{\mathrm{piece}}_2\otimes\mat{\beta}^{\mathrm{piece}}_1$. Then, the linear model with coefficients $\mat{b}$ and intercept $a$ on the reduced data is given by
\begin{equation}\label{eq:chess-lm}
y = a + \t{\mat{b}}\t{\mat{B}}\vec(\ten{X} - \E\ten{X}) + \epsilon
\end{equation}
with an unknown mean zero error term $\epsilon$ and treating the binary tensor $\ten{X}$ as continuous. Decomposing the linear model coefficients into blocks of $4$ gives per piece coefficients $\mat{b}_{\mathrm{piece}}$ which combine with the diagonal blocks $\mat{B}_{\mathrm{piece}}$ of $\mat{B}$ only. Rewriting \eqref{eq:chess-lm} gives
\begin{align*}
y
&= a + \sum_{\mathrm{piece}}\t{(\mat{B}_{\mathrm{piece}}\mat{b}_{\mathrm{piece}})}\vec(\ten{X}_{\mathrm{piece}} - \E\ten{X}_{\mathrm{piece}}) + \epsilon \\
&= \tilde{a} + \sum_{\mathrm{piece}}\langle
\mat{B}_{\mathrm{piece}}\mat{b}_{\mathrm{piece}},
\vec(\ten{X}_{\mathrm{piece}})
\rangle + \epsilon
\end{align*}
with a new intercept term $\tilde{a}$, which is of no interest to us. Finally, we enforce a color symmetry, using a known mechanism from chess engines. Specifically, mirroring the position changes the sign of the score $y$. Here, mirroring reverses the rank (row) order; this is the image of the board in a mirror placed behind it. Let, for every piece, $\mat{C}_{\mathrm{piece}}$ be an $8\times 8$ matrix with elements $(\mat{C}_{\mathrm{piece}})_{i j} = (\mat{B}_{\mathrm{piece}}\mat{b}_{\mathrm{piece}})_{i + 8 (j - 1)}$, and denote by $\mat{M}(\mat{A})$ the matrix mirror operation which reverses the row order of a matrix. Using this new notation to enforce the symmetry leads to the approximate linear relation
\begin{align*}
y &= \tilde{a} + \sum_{\mathrm{piece}}\langle
\mat{C}_{\mathrm{piece}},
\ten{X}_{\mathrm{piece}}
\rangle + \epsilon \\
&\approx \tilde{a} + \sum_{\text{piece type}}\frac{1}{2}\langle
\mat{C}_{\text{white piece}} - \mat{M}(\mat{C}_{\text{black piece}}),
\ten{X}_{\text{white piece}} - \mat{M}(\ten{X}_{\text{black piece}})
\rangle + \epsilon
\end{align*}
If, for every piece type ($6$ types, \emph{not} distinguishing between color), $\mat{C}_{\text{white piece}} = -\mat{M}(\mat{C}_{\text{black piece}})$ holds, then we have equality. In our case this is approximately valid as the estimates $\hat{\mat{C}}_{\mathrm{piece}}$ fulfill this property up to a small error. The $6$ matrices $(\mat{C}_{\text{white piece}} - \mat{M}(\mat{C}_{\text{black piece}})) / 2$ are called \emph{piece square tables} (PSQT) and are visualized in \cref{fig:psqt}. The interpretation of these tables is straightforward. A high positive value (blue) means that it is usually good to have a piece of the corresponding type on that square, while a high negative value (red) means the opposite. It needs to be considered that the PSQTs are for quiet positions only, meaning all pieces are safe in the sense that there are no legal capturing moves nor is the king in check.
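The mirror operation and the symmetrized tables are easy to express in R (a sketch; \texttt{C\_white} and \texttt{C\_black} stand for the estimated $8\times 8$ coefficient matrices of one piece type):

## M() reverses the rank (row) order; the PSQT of a piece type averages the
## white coefficients with the mirrored, negated black coefficients.
M <- function(A) A[nrow(A):1, , drop = FALSE]
psqt_piece <- (C_white - M(C_black)) / 2  # C_white, C_black assumed to be 8 x 8 matrices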
\begin{figure}[hp!]
\centering
\includegraphics[width = \textwidth]{plots/psqt.pdf}
\caption{\label{fig:psqt}Extracted PSQTs (piece square tables) from the chess example GMLM reduction.}
\end{figure}
The first visual effect in \cref{fig:psqt} is the dark blue PSQT of the Queen, followed by the somewhat less dark Rook PSQT. This indicates that the Queen, followed by the Rook, is the most valuable piece (after the King, but a piece value for the King makes no sense). The next two are the Knight and Bishop, which have a higher value than the Pawns, ignoring the $6$th and $7$th ranks as pawns there are about to promote. This is the classic piece value order known in chess.
Next, going one by one through the PSQTs, a few words about the preferred positions for every piece type. The pawn positions are specifically good on the $6$th and especially on the $7$th rank as this threatens a promotion to a Queen (or Knight, Bishop, Rook). The Knight PSQT is a bit surprising; the most likely explanation for the knight being good in enemy territory is that it got there by capturing an enemy piece for free, a common occurrence in low rated games, which make up a big chunk of the training data that ranges over all levels. The Bishops seem to have no specific preferred placement, only a slightly higher overall value than pawns, excluding pawns that are about to promote. Continuing with the Rooks, we see that the Rook is a good attacking piece, indicated by safe rook infiltration. The Queen is powerful almost everywhere; only the outer back rank squares (lower left and right) tend to reduce her value, as the Queen being there is a sign of being pushed around by enemy pieces, leading to many squares being controlled by the enemy and hindering one's own movement. Finally, for the King, given that the goal of the game is to checkmate the King, a safe position for the King is very valuable. This is seen by the back rank (rank $1$) providing the only non-penalized squares. Furthermore, the safest squares are the castling target squares ($g1$ and $c1$) as well as the $b1$ square. Shifting the King over to $b1$ is quite common to protect the $a2$ pawn, providing a fully protected pawn shield in front of the King.
Due to the rules of chess, given any encoded legal position $\ten{X}$, there are some elements which are never set. These are the elements corresponding to pawns on the 1st and 8th rank. This implies that the matrix $\mat{T}_2$ in \eqref{eq:t-stat} under model \eqref{eq:quadratic-exp-fam} can \emph{not} be the identity as, for simplicity, assumed in \cref{sec:ising_estimation}. The adaptation in this case is simple. We only need to enforce the corresponding diagonal elements in $\mat{\Omega}$ to be zero, which is a linear operation. The only difference in the estimation procedure is to use $\ten{G}_2$ in \cref{thm:grad} for gradient computations. This can be extended to incorporate additional constraints into the model derived from the game of chess. First, all interactions of pieces with pawns on the 1st or 8th rank are impossible. Next, there is always exactly one king per color on the board and the kings can not be on neighboring squares. Finally, only one piece can occupy a square. All those conditions can be written as linear constraints on $\mat{\Omega}$ by setting the corresponding entries in $\mat{\Omega}$ to zero.
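As an example, the pawn condition alone can be imposed by zeroing the affected rows and columns of $\mat{\Omega}$. The sketch below assumes \texttt{Omega} is the $768\times 768$ interaction matrix; the analysis script builds the complete index set including the king and single occupancy constraints.

## Sketch: zero all Omega entries involving pawns on rank 1 or 8. Coordinates
## follow the vectorized 8 x 8 x 12 layout with piece order "PNBRQKpnbrqk"
## (white pawn = plane 1, black pawn = plane 7).
rank18    <- as.vector(`[<-`(matrix(0L, 8, 8), c(1, 8), , 1L)) == 1L
pawn_mask <- rep(seq_len(12) %in% c(1, 7), each = 64) & rank18  # rank18 recycles per plane
Omega[pawn_mask, ] <- 0
Omega[, pawn_mask] <- 0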
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\appendix

@ -1,6 +1,6 @@
sample.size rep time.gmlm dist.subspace.gmlm dist.projection.gmlm time.pca dist.subspace.pca dist.projection.pca time.hopca dist.subspace.hopca dist.projection.hopca time.tsir dist.subspace.tsir dist.projection.tsir time.mgcca dist.subspace.mgcca dist.projection.mgcca
100 50.5 0.03385 0.2433541701 0.2433541701 0.00226 0.863261257 0.863261257 0.00132 0.961234054 0.961234054 0.01148 0.27002415 0.27002415 0.02002 0.726356935 0.726356935
200 50.5 0.05152 0.1649899379 0.1649899379 0.00217 0.852767026 0.852767026 0.00266 0.960479878 0.960479878 0.01373 0.1884941138 0.1884941138 0.01991 0.713602535 0.713602535
300 50.5 0.06495 0.1336658703 0.1336658703 0.00486 0.843714531 0.843714531 0.01064 0.958630831 0.958630831 0.01328 0.1526304709 0.1526304709 0.02 0.71557788 0.71557788
500 50.5 0.13549 0.1032317816 0.1032317816 0.00938 0.846591187 0.846591187 0.01447 0.959566069 0.959566069 0.01722 0.1208618464 0.1208618464 0.05966 0.713613799 0.713613799
750 50.5 0.19323 0.0920445927 0.0920445927 0.00751 0.843049644 0.843049644 0.02065 0.95998194 0.95998194 0.02132 0.1040318623 0.1040318623 0.06884 0.708925464 0.708925464

@ -1,6 +1,6 @@
sample.size rep time.gmlm dist.subspace.gmlm time.pca dist.subspace.pca time.hopca dist.subspace.hopca time.tsir dist.subspace.tsir time.mgcca dist.subspace.mgcca
100 50.5 0.53037 0.225507724 0.00225 0.268624529 0.0012 0.268631852 0.00927 0.975552785 0.01707 0.268634801
200 50.5 0.92916 0.1462304967 0.00229 0.268618192 0.00247 0.268627434 0.01055 0.969086651 0.0149 0.268616289
300 50.5 1.28251 0.118119137 0.00404 0.268672296 0.01078 0.268679546 0.01247 0.937344184 0.0181 0.268678444
500 50.5 2.50673 0.0925056109 0.00548 0.268620612 0.01687 0.268627137 0.01547 0.920788974 0.04591 0.268609548
750 50.5 3.78846 0.0734930824 0.00654 0.268629405 0.02286 0.26863585 0.0202 0.895542158 0.05844 0.268624504

@ -1,6 +1,6 @@
sample.size rep time.gmlm dist.subspace.gmlm dist.projection.gmlm time.pca dist.subspace.pca dist.projection.pca time.hopca dist.subspace.hopca dist.projection.hopca time.tsir dist.subspace.tsir dist.projection.tsir time.mgcca dist.subspace.mgcca dist.projection.mgcca
100 50.5 0.02532 0.736796092 1 0.00209 0.900231255 0.999317475 0.00057 0.844797856 1 0.01269 0.768475572 1 0.03429 0.801702356 0.996195093
200 50.5 0.02118 0.723026339 1 0.00202 0.897808634 0.999511768 0.00076 0.821791902 1 0.01143 0.729274976 1 0.03156 0.754417729 0.993998159
300 50.5 0.02073 0.719268149 1 0.00253 0.896381124 0.999790203 0.00086 0.810580445 1 0.0108 0.723298879 1 0.03314 0.730024383 0.994609905
500 50.5 0.06797 0.714404844 1 0.00556 0.894836313 0.999794819 0.00472 0.806682723 1 0.01003 0.719358531 1 0.08743 0.70385003 0.990669984
750 50.5 0.08089 0.712524515 1 0.00652 0.894548046 0.999845024 0.00646 0.800351207 1 0.011 0.716469876 1 0.10204 0.687227594 0.991626558

@ -1,12 +1,22 @@
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
#' Human Crafted Evaluation
HCE <- function(positions) {
.Call(`_Rchess_HCE`, positions)
}
#' Specialized version of `read_cyclic.cpp` tailored to work in conjunction with
#' `gmlm_chess()` as data generator to provide random draws from a FEN data set
#' with scores filtered to be in the range `score_min` to `score_max`.
#'
data.gen <- function(file, sample_size, score_min = -5.0, score_max = +5.0, quiet = FALSE, min_ply_count = 10L) {
.Call(`_Rchess_data_gen`, file, sample_size, score_min, score_max, quiet, min_ply_count)
}
#' Evaluate positions using the given piece square tables (PSQT)
eval.psqt <- function(positions, psqt) {
.Call(`_Rchess_eval_psqt`, positions, psqt)
}
#' Convert a legal FEN string to a 3D binary (integer with 0-1 entries) array #' Convert a legal FEN string to a 3D binary (integer with 0-1 entries) array

@ -11,9 +11,19 @@ Rcpp::Rostream<true>& Rcpp::Rcout = Rcpp::Rcpp_cout_get();
Rcpp::Rostream<false>& Rcpp::Rcerr = Rcpp::Rcpp_cerr_get();
#endif
// HCE
Rcpp::NumericVector HCE(const std::vector<Board>& positions);
RcppExport SEXP _Rchess_HCE(SEXP positionsSEXP) {
BEGIN_RCPP
Rcpp::RObject rcpp_result_gen;
Rcpp::traits::input_parameter< const std::vector<Board>& >::type positions(positionsSEXP);
rcpp_result_gen = Rcpp::wrap(HCE(positions));
return rcpp_result_gen;
END_RCPP
}
// data_gen
Rcpp::CharacterVector data_gen(const std::string& file, const int sample_size, const float score_min, const float score_max, const bool quiet, const int min_ply_count);
RcppExport SEXP _Rchess_data_gen(SEXP fileSEXP, SEXP sample_sizeSEXP, SEXP score_minSEXP, SEXP score_maxSEXP, SEXP quietSEXP, SEXP min_ply_countSEXP) {
BEGIN_RCPP
Rcpp::RObject rcpp_result_gen;
Rcpp::RNGScope rcpp_rngScope_gen;
@ -22,7 +32,19 @@ BEGIN_RCPP
Rcpp::traits::input_parameter< const float >::type score_min(score_minSEXP);
Rcpp::traits::input_parameter< const float >::type score_max(score_maxSEXP);
Rcpp::traits::input_parameter< const bool >::type quiet(quietSEXP);
Rcpp::traits::input_parameter< const int >::type min_ply_count(min_ply_countSEXP);
rcpp_result_gen = Rcpp::wrap(data_gen(file, sample_size, score_min, score_max, quiet, min_ply_count));
return rcpp_result_gen;
END_RCPP
}
// eval_psqt
Rcpp::NumericVector eval_psqt(const std::vector<Board>& positions, const std::vector<Rcpp::NumericMatrix>& psqt);
RcppExport SEXP _Rchess_eval_psqt(SEXP positionsSEXP, SEXP psqtSEXP) {
BEGIN_RCPP
Rcpp::RObject rcpp_result_gen;
Rcpp::traits::input_parameter< const std::vector<Board>& >::type positions(positionsSEXP);
Rcpp::traits::input_parameter< const std::vector<Rcpp::NumericMatrix>& >::type psqt(psqtSEXP);
rcpp_result_gen = Rcpp::wrap(eval_psqt(positions, psqt));
return rcpp_result_gen;
END_RCPP
}
@ -173,7 +195,9 @@ END_RCPP
}
static const R_CallMethodDef CallEntries[] = {
{"_Rchess_HCE", (DL_FUNC) &_Rchess_HCE, 1},
{"_Rchess_data_gen", (DL_FUNC) &_Rchess_data_gen, 6},
{"_Rchess_eval_psqt", (DL_FUNC) &_Rchess_eval_psqt, 2},
{"_Rchess_fen2int", (DL_FUNC) &_Rchess_fen2int, 1},
{"_Rchess_read_cyclic", (DL_FUNC) &_Rchess_read_cyclic, 5},
{"_Rchess_sample_move", (DL_FUNC) &_Rchess_sample_move, 1},

@ -19,7 +19,8 @@ Rcpp::CharacterVector data_gen(
const int sample_size,
const float score_min = -5.0,
const float score_max = +5.0,
const bool quiet = false,
const int min_ply_count = 10
) {
// Check params
if (sample_size < 1) {
@ -101,18 +102,12 @@ Rcpp::CharacterVector data_gen(
Rcpp::stop("Retry count exceeded after illegal FEN '%s'", fen);
}
// Reject / Filter samples
if (((int)pos.plyCount() < min_ply_count)        // Filter early positions
    || (pos.sideToMove() == piece::black)        // Filter white to move positions
    || (score < score_min || score_max <= score) // filter scores out of slice
    || (quiet && pos.isQuiet()))                 // filter quiet positions (iff requested)
{
    reject_count++;
    continue;
}

@ -1,10 +1,13 @@
options(keep.source = TRUE, keep.source.pkgs = TRUE)
library(tensorPredictors)
library(Rchess)
library(mgcv)   # for `gam()` (Generalized Additive Model)
source("./gmlm_chess.R")
################################################################################
### Fitting the GMLM mixture model ###
################################################################################
# Data set file name of chess positions with Stockfish [https://stockfishchess.org]
# evaluation scores (downloaded and processed by `./preprocessing.sh` from the
# lichess data base [https://database.lichess.org/])
@ -16,215 +19,157 @@ data_gen <- function(batch_size, score_min, score_max) {
Rchess::fen2int(Rchess::data.gen(data_set, batch_size, score_min, score_max, quiet = TRUE))
}
fun_y <- function(y) {
F <- t(outer(y, c(0, 1, 1, 2, 1, 2, 2, 3), `^`))
dim(F) <- c(2, 2, 2, length(y))
F
}
# Invoke specialized GMLM optimization routine for chess data
fit.gmlm <- gmlm_chess(data_gen)
################################################################################
###                 Reduction Interpretation and Validation                 ###
################################################################################
# load last save point (includes reduction as `betas`)
save_point <- sort(list.files(
".",
pattern = "save_point_[0-9]*\\.Rdata",
full.names = TRUE
), decreasing = TRUE)[[1]]
load(save_point)
#### STOP HERE!
### Construct PSQT (Piece SQuare Tables) from reduction `betas`
sample_size <- 100000
# Sample a new position data set for fitting a linear model to combine different
# reduction directions into a per piece PSQT matrix
fens <- Rchess::data.gen(data_set, sample_size, -20, 20, quiet = TRUE)
# extract stockfish (non-static) position evaluation
y <- attr(fens, "scores")
# Convert position into "One-Hot Encoded" / "Bit Board" tensor
X <- Rchess::fen2int(fens)
# Compute reduction
reducedX <- Reduce(rbind, Map(function(piece) {
# "condition" on piece, that is to extract the current mixture component
X <- X[, , piece, ]
# reduce mixture component
mlm(X - as.vector(rowMeans(X, dims = 2)), betas[[piece]], transposed = TRUE)
}, 1:12))
# Convert memory layout to contain vectorized observations in rows
reducedX <- t(`dim<-`(reducedX, c(48, sample_size)))
# set names for coefficient extraction from linear fit
colnames(reducedX) <- as.vector(outer(
unlist(strsplit("PNBRQKpnbrqk", "")), c(1, "yl", "yu", "y.2"), paste, sep = "."
))
# Estimate PSQT linear combination weights from reduced sample (exclude dead
# draw positions, that is "score = 0". These are approx 5% of all positions)
fit <- lm(y ~ ., data = data.frame(y = y, reducedX), subset = y != 0.0)
summary(fit)
# Translate reduction with weighting estimate into PSQTs
psqt <- Map(function(piece) {
# reduction column names corresponding to the current white piece (upper case)
piece <- toupper(piece)
col_names <- paste(piece, c(1, "yl", "yu", "y.2"), sep = ".")
# Whites PSQT
psqt_white <- do.call(kronecker, rev(betas[[piece]])) %*% coef(fit)[col_names]
dim(psqt_white) <- c(8, 8)
# the same for black
piece <- tolower(piece)
col_names <- paste(piece, c(1, "yl", "yu", "y.2"), sep = ".")
psqt_black <- do.call(kronecker, rev(betas[[piece]])) %*% coef(fit)[col_names]
dim(psqt_black) <- c(8, 8)
# Combine into shared PSQT from whites point of view
psqt_white - psqt_black[8:1, ]
}, c("P", "N", "B", "R", "Q", "K"))
# finish by enforcing the pawn constraint (irrelevant for validation, the
# corresponding values in an encoded position are always zero)
psqt[["P"]][c(1, 8), ] <- 0
### Validation by GAM fitted on reduced data
formula <- as.formula(paste("y ~ ", paste("s(", colnames(reducedX), ")", collapse = "+")))
fit.gam <- mgcv::gam(formula, data = data.frame(y = y, reducedX), subset = y != 0.0)
summary(fit.gam)
# compare estimates with mean as baseline and static human crafted evaluation (HCE)
rmse.base <- sqrt(mean((mean(y) - y)^2))
y.hce <- Rchess::HCE(fens)
rmse.hce <- sqrt(mean((y.hce - y)^2))
y.hat <- predict(fit.gam, newdata = data.frame(reducedX))
rmse.hat <- sqrt(mean((y.hat - y)^2))
# Also extract R^2 (eval by hand or get from models)
(r.sq.lm <- summary(fit)$r.squared)
(r.sq.gam <- summary(fit.gam)$r.sq)
(r.sq.hce <- 1 - (rmse.hce / rmse.base)^2)
################################################################################
### Generate LaTeX PSQT plot ###
################################################################################
if (FALSE) {

sink("psqt.tex")

cat("% Authomatically generated by `dataAnalysis/chess.R`
\\documentclass{standalone}

\\usepackage[LSB, T1]{fontenc}
\\usepackage{chessboard}
\\usepackage{skak}
\\usepackage{tikz}
\\usepackage{amsmath}
\\usepackage{xcolor}

\\setboardfontencoding{LSB}

\\setchessboard{linewidth = 0.1em, showmover = false, smallboard}

")

cat(paste0("\\definecolor{col", 1:128, "}{HTML}{",
    mapply(`[`, strsplit(hcl.colors(128, "Blue-Red 3", rev = TRUE), "#"), 2),
    "}"
))

cat("
\\begin{document}
\\begin{tikzpicture}

\\coordinate (pawn) at (0, 0);
\\coordinate (knight) at (5, 0);
\\coordinate (bishop) at (10, 0);
\\coordinate (rook) at (0, -5.2);
\\coordinate (queen) at (5, -5.2);
\\coordinate (king) at (10, -5.2);

")

load("/home/loki/Work/tensorPredictors/dataAnalysis/chess/gmlm_chess_save_point_000000.Rdata")
load("/home/loki/Work/tensorPredictors/dataAnalysis/chess/gmlm_chess_save_point_000274.Rdata")
load("/home/loki/Work/tensorPredictors/dataAnalysis/chess/gmlm_chess_save_point_000532.Rdata")
# Load latest save point
load(sort(list.files("~/Work/tensorPredictors/dataAnalysis/chess/", pattern = "save_point*"), decreasing = TRUE)[[1]])

# build intervals from score break points
score_breaks <- c(-5.0, -3.0, -2.0, -1.0, -0.5, -0.2, 0.2, 0.5, 1.0, 2.0, 3.0, 5.0)
score_min <- head(score_breaks, -1)
score_max <- tail(score_breaks, -1)
score_means <- (score_min + score_max) / 2

# build Omega constraint, that is the set of impossible combinations
# (including self interactions) due to the rules of chess
Omega_const <- local({
    # One piece per square
    diag_offset <- abs(.row(c(768, 768)) - .col(c(768, 768)))
    Omega_const <- !diag(768) & ((diag_offset %% 64L) == 0L)
    # One King per color
    Omega_const <- Omega_const | kronecker(diag(1:12 %in% c(6, 12)), !diag(64), `&`)
    # Enemy kings can _not_ be on neightbouring squares
    king_const <- mapply(function(i, j) {
        `[<-`((abs(.row(c(8, 8)) - i) <= 1L) & (abs(.col(c(8, 8)) - j) <= 1L), i, j, FALSE)
    }, .row(c(8, 8)), .col(c(8, 8)))
    dim(Omega_const) <- c(64, 12, 64, 12)
    Omega_const[, 6, , 12] <- Omega_const[, 6, , 12] | king_const
    Omega_const[, 12, , 6] <- Omega_const[, 12, , 6] | king_const
    dim(Omega_const) <- c(768, 768)
    # no pawns on rank 1 or rank 8
    pawn_const <- tcrossprod(as.vector(`[<-`(matrix(0L, 8, 8), c(1, 8), , 1L)), rep(1L, 64))
    pawn_const <- kronecker(`[<-`(matrix(0, 12, 12), c(1, 7), , 1), pawn_const)
    which(Omega_const | (pawn_const | t(pawn_const)))
})

const <- `[<-`(array(0L, c(8, 8, 12, 8, 8, 12)), Omega_const, 1L)
dimnames(const) <- rep(list(
    8:1, letters[1:8], unlist(strsplit("PNBRQKpnbrqk", ""))
), 2L)

diag_offset <- abs(.row(c(768, 768)) - .col(c(768, 768)))
dim(diag_offset) <- dim(const)
dimnames(diag_offset) <- dimnames(const)

(function(r1, f1, p1, r2, f2, p2) {
    print.table(const[r1, f1, p1, r2, f2, p2], zero.print = ".")
    print.table(const[r2, f2, p2, r1, f1, p1], zero.print = ".")
})("4", "e", "p", "1", , )

(diag_offset["4", "e", "K", , , "k"] %% 64L) * (abs(.row(c(8, 8)) - .col(c(8, 8))) == 1L)

B <- Reduce(kronecker, rev(betas))
dim(B) <- c(8, 8, 12, 8)
dimnames(B) <- list(
    8:1,
    letters[1:8],
    unlist(strsplit("PNBRQKpnbrqk", "")),
    paste0("y^", c(0, 1, 1, 2, 1, 2, 2, 3))
)

old.par <- par(mfrow = c(3, 4))
rmB <- rowMeans(B, dims = 3)
for (piece in dimnames(B)[[3]]) {
    matrixImage(rmB[, , piece])
}
par(old.par)

print.as.board <- function(mat) {
print.table(
matrix(as.integer(
mat
), 8, 8, dimnames = dimnames(const)[1:2]),
zero.print = "."
)
}
print.as.board({
rows <- .row(c(8, 8))
cols <- .col(c(8, 8))
diags <- rows - cols
(abs(diags) == 1L | abs(diags) == 2L) & rows
})
print.as.board({
(abs(.row(c(8, 8)) - 3) == 1L) & (abs(.col(c(8, 8)) - 3) == 1L)
})
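# `neighbours()` used below is not defined in this script; a plausible definition,
# mirroring the anonymous function used for `king_const` above (all squares within
# king-move distance of (i, j), excluding (i, j) itself), would be:
neighbours <- function(i, j) {
    `[<-`((abs(.row(c(8, 8)) - i) <= 1L) & (abs(.col(c(8, 8)) - j) <= 1L), i, j, FALSE)
}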
king_const <- mapply(neighbours, .row(c(8, 8)), .col(c(8, 8)))
dim(king_const) <- c(8, 8, 8, 8)
dimnames(king_const) <- dimnames(const)[c(1, 2, 4, 5)]
print.as.board(neighbours(4, 7))
y <- score_means[5]
# Conditional Ising model parameters
Omega <- `[<-`(Reduce(kronecker, rev(Omegas)), Omega_const, 0)
params <- `diag<-`(Omega, as.vector(mlm(`dim<-`(fun_y(y), dimF), betas)))
# Conditional second moment of the Ising model (its diagonal is the conditional mean)
mu_y <- ising_m2(params)
range(Omega)
matrixImage(Omega)
matrixImage(mu_y)
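# Aside (illustration only): `ising_m2()` approximates the second moment
# E[vec(X) vec(X)'] of an Ising model whose natural parameters are stored with
# pairwise interactions off-diagonal and linear terms on the diagonal (exactly
# how `params` is assembled above). For a tiny dimension the same quantity can
# be computed exactly by enumerating all configurations:
local({
    params_toy <- matrix(c( 0.2, -0.5,  0.0,
                           -0.5,  0.1,  0.3,
                            0.0,  0.3, -0.2), 3, 3)             # made-up symmetric parameters
    states <- as.matrix(expand.grid(rep(list(0:1), 3)))         # all 2^3 binary configurations
    log_w <- apply(states, 1, function(x) sum(x * (params_toy %*% x)))
    w <- exp(log_w - max(log_w)); w <- w / sum(w)               # exact probabilities
    crossprod(states * sqrt(w))                                 # exact E[X X']
})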
X <- `dim<-`(Reduce(c, Map(data_gen, 512, score_min, score_max)), c(8, 8, 12, 512 * length(score_means)))
y <- rep(score_means, each = 512)
mean_X <- rowMeans(X, dims = 3)
X_reduced <- mlm(X - as.vector(mean_X), betas, transposed = TRUE)
summary(lm(y ~ mat(X_reduced, 4)))
plot(lm(y ~ mat(X_reduced, 4)))
fens <- Rchess::data.gen(data_set, 10000, quiet = TRUE)
y <- attr(fens, "scores")
X <- Rchess::fen2int(fens)
mean_X <- rowMeans(X, dims = 3)
X_reduced <- mat(mlm(X - as.vector(mean_X), betas, transposed = TRUE), 4)
colnames(X_reduced) <- paste0("y^", c(0, 1, 1, 2, 1, 2, 2, 3))
fit <- lm(y ~ X_reduced)
summary(fit)
vcov(fit)
# resample
fens <- Rchess::data.gen(data_set, 10000, quiet = TRUE)
y <- attr(fens, "scores")
X <- Rchess::fen2int(fens)
mean_X <- rowMeans(X, dims = 3)
X_reduced <- mat(mlm(X - as.vector(mean_X), betas, transposed = TRUE), 4)
colnames(X_reduced) <- paste0("y^", c(0, 1, 1, 2, 1, 2, 2, 3))
plot(predict(fit, newdata = as.data.frame(X_reduced)), y)
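# Aside: `fit` was estimated on the previous draw, so the fresh sample gives an
# honest hold-out evaluation (mirroring the `predict()` call in the plot above):
y.oos <- predict(fit, newdata = as.data.frame(X_reduced))
sqrt(mean((y.oos - y)^2))                                       # hold-out RMSE
1 - sum((y - y.oos)^2) / sum((y - mean(y))^2)                   # hold-out R^2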
# save_points <- sort(list.files(pattern = "save_point*"))
# load(save_points[length(save_points)])
# loss <- drop(mapply(function(file) {
# load(file)
# last_loss
# }, save_points))
# plot(loss, type = "b")
setwd("~/Work/tensorPredictors/dataAnalysis/chess/")
save_points <- sort(list.files(".", pattern = "save_point*"))
c(head(save_points), "...", tail(save_points))
loss <- sapply(save_points, function(save_point) {
load(save_point)
last_loss
}, USE.NAMES = FALSE)
names(loss) <- seq_along(loss)
loss <- loss[is.finite(loss)]
c(head(loss), "...", tail(loss))
R2 <- sapply(save_points, function(save_point) {
load(save_point)
X_reduced <- mlm(X - as.vector(mean_X), betas, transposed = TRUE)
fit <- lm(y ~ mat(X_reduced, 4))
summary(fit)$r.squared
}, USE.NAMES = FALSE)
plot(as.numeric(names(loss)), loss, type = "l", col = "red", lwd = 2, log = "y")
abline(v = 1745, lty = 2)
plot(R2, type = "l", col = "red", lwd = 2, log = "y")
abline(v = 1740, lty = 2)
abline(h = R2[1740], lty = 2)
summary(fit)
vcov(fit)
}
local({ local({
y <- rnorm(100) + 2 + (x <- rnorm(100)) zlim <- c(-1, 1) * max(abs(unlist(psqt, use.names = FALSE)))
summary(lm(y ~ x)) breaks <- seq(zlim[1], zlim[2], len = 129)
pieces <- c("pawn", "knight", "bishop", "rook", "queen", "king")
for (i in seq_along(psqt)) {
cat(paste0("\\node (", pieces[i], ") at (", pieces[i], ") {\\chessboard[", paste0(
"color=col", as.integer(cut(psqt[[i]], breaks)),
",colorbackfield=", outer(8:1, letters[1:8], function(r, f) paste0(f, r)),
collapse=","
), "]};\n"))
}
}) })
cat("
\\node[anchor = north, yshift = -0.4em] at (pawn.north) {Pawn};
\\node[anchor = north, yshift = -0.4em] at (knight.north) {Knight};
\\node[anchor = north, yshift = -0.4em] at (bishop.north) {Bishop};
\\node[anchor = north, yshift = -0.4em] at (rook.north) {Rook};
\\node[anchor = north, yshift = -0.4em] at (queen.north) {Queen};
\\node[anchor = north, yshift = -0.4em] at (king.north) {King};
\\end{tikzpicture}
\\end{document}
")
sink()
}

View File

@@ -28,14 +28,14 @@
#' #'
gmlm_chess <- function( gmlm_chess <- function(
data_gen, data_gen,
fun_y, fun_y = function(y) { `dim<-`(t(outer(y, c(0, 1, 1, 2), `^`)), c(2, 2, length(y))) },
score_breaks = c(-5.0, -3.0, -2.0, -1.0, -0.5, -0.2, 0.2, 0.5, 1.0, 2.0, 3.0, 5.0), score_breaks = c(-5.0, -3.0, -2.0, -1.0, -0.5, -0.2, 0.2, 0.5, 1.0, 2.0, 3.0, 5.0),
nr_threads = 8L, nr_threads = 8L,
mcmc_samples = 10000L, mcmc_samples = 10000L,
slice_size = 512L, slice_size = 512L,
max_iter = 10000L, max_iter = 10000L,
patience = 25L, patience = 10L,
step_size = 1e-3, step_size = 1e-2,
eps = sqrt(.Machine$double.eps), eps = sqrt(.Machine$double.eps),
save_point = "save_point_%s.Rdata" save_point = "save_point_%s.Rdata"
) { ) {
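# Aside (illustration, not part of the original function): the new `fun_y` default
# maps each score y to a 2 x 2 slab
#     F(y) = [ 1   y  ]
#            [ y  y^2 ],
# stacked along a third axis, one slab per element of `y`:
local({
    fun_y <- function(y) `dim<-`(t(outer(y, c(0, 1, 1, 2), `^`)), c(2, 2, length(y)))
    fun_y(c(-0.35, 0.75))   # 2 x 2 x 2 array with slabs [1, y; y, y^2]
})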
@@ -45,27 +45,13 @@ gmlm_chess <- function(
score_max <- tail(score_breaks, -1) score_max <- tail(score_breaks, -1)
score_means <- (score_min + score_max) / 2 score_means <- (score_min + score_max) / 2
# build Omega constraint, that is the set of impossible combinations # Piece index lookup "table" by piece symbol
# (including self interactions) due to the rules of chess pieces <- `names<-`(1:12, unlist(strsplit("PNBRQKpnbrqk", "")))
Omega_const <- local({
# One piece per square # Build constraints for every mixture component, that is, for every piece
diag_offset <- abs(.row(c(768, 768)) - .col(c(768, 768))) pawn_const <- which(as.logical(tcrossprod(.row(c(8, 8)) %in% c(1, 8))))
Omega_const <- !diag(768) & ((diag_offset %% 64L) == 0L) # King and Queen constraints (for queens it is just an approximation)
# One King per color KQ_const <- which(!diag(64))
Omega_const <- Omega_const | kronecker(diag(1:12 %in% c(6, 12)), !diag(64), `&`)
# Enemy kings can _not_ be on neighbouring squares
king_const <- mapply(function(i, j) {
`[<-`((abs(.row(c(8, 8)) - i) <= 1L) & (abs(.col(c(8, 8)) - j) <= 1L), i, j, FALSE)
}, .row(c(8, 8)), .col(c(8, 8)))
dim(Omega_const) <- c(64, 12, 64, 12)
Omega_const[, 6, , 12] <- Omega_const[, 6, , 12] | king_const
Omega_const[, 12, , 6] <- Omega_const[, 12, , 6] | king_const
dim(Omega_const) <- c(768, 768)
# no pawns on rank 1 or rank 8
pawn_const <- tcrossprod(as.vector(`[<-`(matrix(0L, 8, 8), c(1, 8), , 1L)), rep(1L, 64))
pawn_const <- kronecker(`[<-`(matrix(0, 12, 12), c(1, 7), , 1), pawn_const)
which(Omega_const | (pawn_const | t(pawn_const)))
})
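# Aside (illustration): the new per-piece constraints are linear index sets into
# a 64 x 64 interaction matrix. `pawn_const` marks pairs where both squares lie
# on rank 1 or rank 8 (pawns never stand there), `KQ_const` marks every
# off-diagonal pair (at most one king per colour, so no within-component
# interactions; for queens only approximate because of promotions).
local({
    pawn_sq <- .row(c(8, 8)) %in% c(1, 8)       # the 16 squares on ranks 1 and 8
    c(pawn_pairs = sum(tcrossprod(pawn_sq)),    # 16 * 16 = 256 constrained entries
      KQ_pairs   = sum(!diag(64)))              # 64 * 63 = 4032 constrained entries
})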
# Check if there is a save point (load from save) # Check if there is a save point (load from save)
load_point <- if (is.character(save_point)) { load_point <- if (is.character(save_point)) {
@@ -88,33 +74,39 @@ gmlm_chess <- function(
F <- fun_y(rep(score_means, each = slice_size)) F <- fun_y(rep(score_means, each = slice_size))
# set object dimensions (`dimX` is constant, `dimF` depends on `fun_y` arg) # set object dimensions (`dimX` is constant, `dimF` depends on `fun_y` arg)
dimX <- c(8L, 8L, 12L) dimX <- c(8L, 8L) # for every mixture component
dimF <- dim(F)[1:3] dimF <- dim(F)[1:2] # also per mixture component
# Initial values for `betas` are the tensor normal GMLM estimates # Initialize `betas` for every mixture component
betas <- gmlm_tensor_normal(X, F)$betas betas <- Map(function(piece) {
gmlm_tensor_normal(X[, , piece, ], F)$betas
}, pieces)
# and initial values for `Omegas`, based on the same first "big" sample # and initial values for `Omegas`, based on the same first "big" sample
Omegas <- Map(function(mode) { Omegas <- Map(function(piece) {
X <- X[, , piece, ]
Map(function(mode) {
n <- prod(dim(X)[-mode]) n <- prod(dim(X)[-mode])
prob2 <- mcrossprod(X, mode = mode) / n prob2 <- mcrossprod(X, mode = mode) / n
prob2[prob2 == 0] <- 1 / n prob2[prob2 == 0] <- 1 / n
prob2[prob2 == 1] <- (n - 1) / n prob2[prob2 == 1] <- (n - 1) / n
prob1 <- diag(prob2) prob1 <- diag(prob2)
`prob1^2` <- outer(prob1, prob1) `prob1^2` <- outer(prob1, prob1)
`diag<-`(log(((1 - `prob1^2`) / `prob1^2`) * prob2 / (1 - prob2)), 0) `diag<-`(log(((1 - `prob1^2`) / `prob1^2`) * prob2 / (1 - prob2)), 0)
}, 1:3) }, 1:2)
}, pieces)
Omegas[[pieces["P"]]][[1]][c(1, 8), ] <- 0
Omegas[[pieces["p"]]][[1]][c(1, 8), ] <- 0
# Initial sample `(X, F)` no longer needed, remove # Initial sample `(X, F)` no longer needed, remove them
rm(X, F) rm(X, F)
# Initialize gradients and aggregated mean squared gradients # Initialize gradients and aggregated mean squared gradients
grad2_betas <- Map(array, 0, Map(dim, betas)) grad2_betas <- Map(function(params) Map(array, 0, Map(dim, params)), betas)
grad2_Omegas <- Map(array, 0, Map(dim, Omegas)) grad2_Omegas <- Map(function(params) Map(array, 0, Map(dim, params)), Omegas)
# initialize optimization tracker for break condition # initialize optimization tracker for break condition
last_loss <- Inf last_loss <- best_loss <- Inf
non_improving <- 0L non_improving <- 0L
iter <- 0L iter <- 0L
} }
@@ -129,81 +121,113 @@ gmlm_chess <- function(
dimX, dimF, dimX, dimF,
betas, Omegas, betas, Omegas,
grad2_betas, grad2_Omegas, grad2_betas, grad2_Omegas,
last_loss, non_improving, iter, last_loss, best_loss, non_improving, iter,
file = sprintf(save_point, sprintf("%06d", iter - 1L)))) file = sprintf(save_point, sprintf("%06d", iter - 1L))))
} }
# start timing for this iteration (this is precise enough) # start timing for this iteration (this is precise enough)
start_time <- proc.time()[["elapsed"]] start_time <- proc.time()[["elapsed"]]
# full Omega (with constraint elements set to zero) needed for the conditional # Full Omega(s) for every piece mixture component
# parameters of the Ising model to compute (approx) the second moment Omega <- Map(function(Omegas) {
Omega <- `[<-`(Reduce(kronecker, rev(Omegas)), Omega_const, 0) kronecker(Omegas[[2]], Omegas[[1]])
}, Omegas)
Omega[[pieces["P"]]][pawn_const] <- 0
Omega[[pieces["p"]]][pawn_const] <- 0
Omega[[pieces["K"]]][KQ_const] <- 0
Omega[[pieces["k"]]][KQ_const] <- 0
Omega[[pieces["Q"]]][KQ_const] <- 0
Omega[[pieces["q"]]][KQ_const] <- 0
# Gradient and negative log-likelihood approximation # Gradient and negative log-likelihood approximation
loss <- 0 # neg. log-likelihood loss <- 0 # neg. log-likelihood
grad_betas <- Map(matrix, 0, dimX, dimF) # grads for betas grad_betas <- Map(function(piece) Map(matrix, 0, dimX, dimF), pieces) # grads for betas
R2 <- array(0, dim = c(dimX, dimX)) # residuals R2 <- Map(function(piece) array(0, dim = c(dimX, dimX)), pieces) # residuals
# for every score slice # for every score slice
for (i in seq_along(score_means)) { for (slice in seq_along(score_means)) {
# function of `y` being the score slice mean (only 3D, same for all obs.) # function of `y` being the score slice mean (only 3D, same for all obs.)
F <- `dim<-`(fun_y(score_means[i]), dimF) F <- `dim<-`(fun_y(score_means[slice]), dimF)
# compute parameters of (slice) conditional Ising model # compute parameters of (slice) conditional Ising model
params <- `diag<-`(Omega, as.vector(mlm(F, betas))) params <- Map(function(betas, Omega) {
`diag<-`(Omega, as.vector(mlm(F, betas)))
}, betas, Omega)
# second moment of `X | Y = score_means[i]` # second moment of `X_{,,piece} | Y = score_means[slice]` for every piece
m2 <- ising_m2(params, use_MC = TRUE, nr_threads = nr_threads, nr_samples = mcmc_samples) m2 <- Map(function(param) {
ising_m2(param, use_MC = TRUE, nr_threads = nr_threads, nr_samples = mcmc_samples)
}, params)
# draw random sample from current slice `vec(X) | Y in (score_min, score_max]` # Draw a new sample
# with columns being the vectorized observations `vec(X)`. X <- data_gen(slice_size, score_min[slice], score_max[slice])
matX <- `dim<-`(data_gen(slice_size, score_min[i], score_max[i]), c(prod(dimX), slice_size))
# accumulate (approx) neg. log-likelihood # Split into matricized mixture parts
loss <- loss - (sum(matX * (params %*% matX)) + slice_size * attr(m2, "log_prob_0")) matX <- Map(function(piece) {
`dim<-`(X[, , piece, ], c(64, slice_size))
}, pieces)
# accumulated loss over all piece mixtures
loss <- loss - Reduce(`+`, Map(function(matX, param, m2) {
sum(matX * (param %*% matX)) + slice_size * attr(m2, "log_prob_0")
}, matX, params, m2))
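# Note: with p(x) proportional to exp(x' Theta x) the all-zero configuration has
# p(0) = 1 / Z, so (judging by its name) `attr(m2, "log_prob_0")` supplies -log Z
# and the accumulated term is minus the approximate log-likelihood of the slice sample.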
# Slice residuals (second order `resid2` and actual residuals `resid1`) # Slice residuals (second order `resid2` and actual residuals `resid1`)
resid2 <- tcrossprod(matX) - slice_size * m2 resid2 <- Map(function(matX, m2) {
resid1 <- `dim<-`(diag(resid2), dimX) tcrossprod(matX) - slice_size * m2
}, matX, m2)
# accumulate residuals # accumulate residuals
R2 <- R2 + as.vector(resid2) R2 <- Map(function(R2, resid2) { R2 + as.vector(resid2) }, R2, resid2)
# and the beta gradients # and the beta gradients
grad_betas <- Map(`+`, grad_betas, Map(function(mode) { grad_betas <- Map(function(grad_betas, resid2, betas) {
mcrossprod(resid1, mlm(slice_size * F, betas[-mode], (1:3)[-mode]), mode) resid1 <- `dim<-`(diag(resid2), dimX)
}, 1:3)) Map(`+`, grad_betas, Map(function(mode) {
mcrossprod(resid1, mlm(slice_size * F, betas[-mode], (1:2)[-mode]), mode)
}, 1:2))
}, grad_betas, resid2, betas)
} }
# finally, finish gradient computation with gradients for `Omegas` # finally, finish gradient computation with gradients for `Omegas`
grad_Omegas <- Map(function(mode) { grad_Omegas <- Map(function(R2, Omegas) {
grad <- mlm(kronperm(R2), Map(as.vector, Omegas[-mode]), (1:3)[-mode], transposed = TRUE) Map(function(mode) {
grad <- mlm(kronperm(R2), Map(as.vector, Omegas[-mode]), (1:2)[-mode], transposed = TRUE)
`dim<-`(grad, dim(Omegas[[mode]])) `dim<-`(grad, dim(Omegas[[mode]]))
}, 1:3) }, 1:2)
}, R2, Omegas)
# Update tracker for break condition # Update tracker for break condition
non_improving <- max(0L, non_improving - 1L + 2L * (last_loss < loss)) non_improving <- if (best_loss < loss) non_improving + 1L else 0L
last_loss <- loss last_loss <- loss
best_loss <- min(best_loss, loss)
# check break condition # check break condition
if (non_improving > patience) { break } if (non_improving > patience) { break }
# accumulate root mean squared gradients # accumulate root mean squared gradients
grad2_betas <- Map(function(g2, g) 0.9 * g2 + 0.1 * (g * g), grad2_betas, grad_betas) grad2_betas <- Map(function(grad2_betas, grad_betas) {
grad2_Omegas <- Map(function(g2, g) 0.9 * g2 + 0.1 * (g * g), grad2_Omegas, grad_Omegas) Map(function(g2, g) 0.9 * g2 + 0.1 * (g * g), grad2_betas, grad_betas)
}, grad2_betas, grad_betas)
grad2_Omegas <- Map(function(grad2_Omegas, grad_Omegas) {
Map(function(g2, g) 0.9 * g2 + 0.1 * (g * g), grad2_Omegas, grad_Omegas)
}, grad2_Omegas, grad_Omegas)
# Update Parameters # Update Parameters
betas <- Map(function(beta, grad, m2) { betas <- Map(function(betas, grad_betas, grad2_betas) {
beta + (step_size / (sqrt(m2) + eps)) * grad Map(function(beta, grad, M2) {
beta + (step_size / (sqrt(M2) + eps)) * grad
}, betas, grad_betas, grad2_betas) }, betas, grad_betas, grad2_betas)
Omegas <- Map(function(Omega, grad, m2) { }, betas, grad_betas, grad2_betas)
Omega + (step_size / (sqrt(m2) + eps)) * grad Omegas <- Map(function(Omegas, grad_Omegas, grad2_Omegas) {
Map(function(Omega, grad, M2) {
Omega + (step_size / (sqrt(M2) + eps)) * grad
}, Omegas, grad_Omegas, grad2_Omegas)
}, Omegas, grad_Omegas, grad2_Omegas) }, Omegas, grad_Omegas, grad2_Omegas)
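# Aside (illustration): the update above is an RMSprop-style step; each entry
# moves along its gradient, scaled by the root of an exponentially weighted
# average of past squared gradients (the `grad2_*` accumulators). On a toy scalar:
local({
    theta <- 0; g2 <- 0
    for (g in c(0.5, 0.4, -0.1)) {                              # made-up gradients
        g2 <- 0.9 * g2 + 0.1 * g^2                              # aggregate squared gradients
        theta <- theta + (1e-2 / (sqrt(g2) + sqrt(.Machine$double.eps))) * g
    }
    theta
})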
# Log progress # Log progress
cat(sprintf("iter: %4d, time for iter: %d [s], loss: %f\n", cat(sprintf("iter: %4d, time for iter: %d [s], loss: %f (best: %f, non-improving: %d)\n",
iter, round(proc.time()[["elapsed"]] - start_time), loss)) iter, round(proc.time()[["elapsed"]] - start_time), loss, best_loss, non_improving))
} }
# Save a final (terminal) save point # Save a final (terminal) save point
@@ -212,7 +236,7 @@ gmlm_chess <- function(
dimX, dimF, dimX, dimF,
betas, Omegas, betas, Omegas,
grad2_betas, grad2_Omegas, grad2_betas, grad2_Omegas,
last_loss, non_improving, iter, last_loss, best_loss, non_improving, iter,
file = sprintf(save_point, "final"))) file = sprintf(save_point, "final")))
} }