add: Added ensemble CVE (ECVE) capability.
This commit is contained in:
parent
4696620363
commit
54b7c9de2d
CVE/R/CVE.R | 11
@@ -199,6 +199,8 @@ cve <- function(formula, data, method = "simple", max.dim = 10L, ...) {
#' below.
#' \item "weighted" variation with addaptive weighting of slices.
#' }
#' @param func_list a list of functions applied to \code{Y} to form the ensamble
#' CVE for central sub-space estimation.
#' @param k Dimension of lower dimensional projection, if \code{k} is given
#' only the specified dimension \code{B} matrix is estimated.
#' @param min.dim lower bounds for \code{k}, (ignored if \code{k} is supplied).

@@ -251,6 +253,7 @@ cve <- function(formula, data, method = "simple", max.dim = 10L, ...) {
#' coef(cve.obj.simple2, k = 1)
#' @export
cve.call <- function(X, Y, method = "simple",
func_list = list(function (x) x),
nObs = sqrt(nrow(X)), h = NULL,
min.dim = 1L, max.dim = 10L, k = NULL,
momentum = 0.0, tau = 1.0, tol = 1e-3,
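The new `func_list` argument is what makes the estimator an ensemble: every function in the list is applied to the response and the resulting CVE losses are combined. A minimal call sketch under assumed toy data; the particular transformation functions below are illustrative and not taken from the commit (only the default `list(function(x) x)` is):

    # Sketch of an ensemble call (toy data; the transforms are assumptions).
    set.seed(21)
    X <- matrix(rnorm(100 * 5), 100, 5)
    Y <- X[, 1] + 0.25 * rnorm(100)
    ens <- list(function(y) y, function(y) y^2, function(y) abs(y))
    dr <- cve.call(X, Y, method = "simple", func_list = ens, k = 1)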
|
@@ -375,8 +378,11 @@ cve.call <- function(X, Y, method = "simple",
}
}

# Evaluate each function given `Y` and build a `n x |func_list|` matrix.
Fy <- vapply(func_list, do.call, Y, list(Y))

# Convert numerical values to "double".
storage.mode(X) <- storage.mode(Y) <- "double"
storage.mode(X) <- storage.mode(Fy) <- "double"

if (is.function(logger)) {
loggerEnv <- environment(logger)
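The `vapply` call above is the whole ensemble plumbing on the R side: each element of `func_list` is applied to `Y` and the results become the columns of `Fy`. A standalone toy illustration of what it produces (data and transforms are made up):

    Y <- rnorm(10)
    func_list <- list(function(y) y, function(y) y^2)  # illustrative ensemble
    Fy <- vapply(func_list, do.call, Y, list(Y))       # template value is Y itself
    dim(Fy)                 # 10 x 2, one column per function
    all.equal(Fy[, 1], Y)   # first column is the identity transform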
@@ -396,7 +402,7 @@ cve.call <- function(X, Y, method = "simple",
}

dr.k <- .Call('cve_export', PACKAGE = 'CVE',
X, Y, k, h,
X, Fy, k, h,
method_nr,
V.init,
momentum, tau, tol,

@@ -415,6 +421,7 @@ cve.call <- function(X, Y, method = "simple",
# augment result information
dr$X <- X
dr$Y <- Y
dr$Fy <- Fy
dr$method <- method
dr$call <- call
class(dr) <- "cve"
@@ -4,10 +4,26 @@
\alias{cve.call}
\title{Conditional Variance Estimator (CVE).}
\usage{
cve.call(X, Y, method = "simple", nObs = sqrt(nrow(X)), h = NULL,
min.dim = 1L, max.dim = 10L, k = NULL, momentum = 0, tau = 1,
tol = 0.001, slack = 0, gamma = 0.5, V.init = NULL,
max.iter = 50L, attempts = 10L, logger = NULL)
cve.call(
X,
Y,
method = "simple",
func_list = list(function(x) x),
nObs = sqrt(nrow(X)),
h = NULL,
min.dim = 1L,
max.dim = 10L,
k = NULL,
momentum = 0,
tau = 1,
tol = 0.001,
slack = 0,
gamma = 0.5,
V.init = NULL,
max.iter = 50L,
attempts = 10L,
logger = NULL
)
}
\arguments{
\item{X}{Design predictor matrix.}

@@ -21,6 +37,9 @@ cve.call(X, Y, method = "simple", nObs = sqrt(nrow(X)), h = NULL,
\item "weighted" variation with addaptive weighting of slices.
}}

\item{func_list}{a list of functions applied to `Y` to form the ensamble
CVE for central sub-space estimation.}

\item{nObs}{parameter for choosing bandwidth \code{h} using
\code{\link{estimate.bandwidth}} (ignored if \code{h} is supplied).}
@@ -20,8 +20,8 @@ the \eqn{n\times k}{n x k} dimensional matrix \eqn{X B} where \eqn{B}
is the cve-estimate for dimension \eqn{k}.
}
\description{
Returns \eqn{B'X}. That is the dimensional design matrix \eqn{X} on the
columnspace of the cve-estimate for given dimension \eqn{k}.
Returns \eqn{B'X}. That is, it computes the projection of the \eqn{n x p}
design matrix \eqn{X} on the column space of \eqn{B} of dimension \eqn{k}.
}
\examples{
# create B for simulation (k = 1)
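The reworded description above boils down to a single matrix product. A tiny sketch, assuming `cve.obj` is a fitted `cve` object and `k = 1` (the accessor names follow the examples elsewhere in this commit):

    B <- coef(cve.obj, k = 1)    # p x k basis of the estimated subspace
    X.proj <- cve.obj$X %*% B    # n x k projected design matrix, i.e. X B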
@@ -49,6 +49,5 @@ y <- x \%*\% B + 0.25 * rnorm(100)
# calculate cve with method 'simple' for k = 1
set.seed(21)
cve.obj.simple <- cve(y ~ x, k = k)
print(cve.obj.simple$res$'1'$h)
print(estimate.bandwidth(x, k = k))
}
@@ -16,9 +16,9 @@
\description{
Boxplots of the output \code{L} from \code{\link{cve}} over \code{k} from
\code{min.dim} to \code{max.dim}. For given \code{k}, \code{L} corresponds
to \eqn{L_n(V, X_i)} where \eqn{V} is a stiefel manifold element as
minimizer of
\eqn{L_n(V)}, for further details see Fertl, L. and Bura, E. (2019).
to \eqn{L_n(V, X_i)} where \eqn{V} is the minimizer of \eqn{L_n(V)} where
\eqn{V} is an element of a Stiefel manifold (see
Fertl, L. and Bura, E. (2019)).
}
\examples{
# create B for simulation
@@ -17,11 +17,11 @@
\item{...}{further arguments passed to \code{\link{mars}}.}
}
\value{
prediced response at \code{newdata}.
prediced respone(s) for \code{newdata}.
}
\description{
Predict response using projected data \eqn{B'C} by fitting
\eqn{g(B'C) + \epsilon} using \code{\link{mars}}.
Predict response using projected data. The forward model \eqn{g(B' X)} is
estimated with \code{\link{mars}} in the \code{\pkg{mda}} package.
}
\examples{
# create B for simulation
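The updated description names the forward model: the response is regressed on the projected predictors with `mars` from the `mda` package. A rough sketch of what such a fit amounts to; the explicit refit and object names below are assumptions for illustration, not the package internals:

    library(mda)
    # Assumed setup: `cve.obj` is a fitted cve object, `newdata` a matrix of new predictors.
    B <- coef(cve.obj, k = 1)                   # estimated p x k basis
    g.fit <- mars(cve.obj$X %*% B, cve.obj$Y)   # fit g on the projected training data
    y.hat <- predict(g.fit, newdata %*% B)      # predicted response(s) for newdata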
@@ -2,7 +2,7 @@
% Please edit documentation in R/predict_dim.R
\name{predict_dim}
\alias{predict_dim}
\title{Estimate Dimension of Reduction Space.}
\title{Estimate Dimension of the Sufficient Reduction.}
\usage{
predict_dim(object, ..., method = "CV")
}

@@ -12,29 +12,30 @@ predict_dim(object, ..., method = "CV")

\item{...}{ignored.}

\item{method}{This parameter specify which method will be used in dimension
estimation. It provides three methods \code{'CV'} (default), \code{'elbow'},
and \code{'wilcoxon'} to estimate the dimension of the SDR.}
\item{method}{This parameter specifies which method is used in dimension
estimation. It provides three options: \code{'CV'} (default),
\code{'elbow'} and \code{'wilcoxon'}.}
}
\value{
list with
A \code{list} with
\describe{
\item{}{cretirion of method for \code{k = min.dim, ..., max.dim}.}
\item{k}{estimated dimension as argmin over \eqn{k} of criterion.}
\item{}{criterion for method and \code{k = min.dim, ..., max.dim}.}
\item{k}{estimated dimension is the minimizer of the criterion.}
}
}
\description{
This function estimates the dimension of the mean dimension reduction space,
i.e. number of columns of \eqn{B} matrix. The default method \code{'CV'}
performs l.o.o cross-validation using \code{mars}. Given
\code{k = min.dim, ..., max.dim} a cross-validation via \code{mars} is
This function estimates the dimension, i.e. the rank of \eqn{B}. The default
method \code{'CV'} performs leave-one-out (LOO) cross-validation using
\code{mars} as follows for \code{k = min.dim, ..., max.dim} a
cross-validation via \code{mars} is
performed on the dataset \eqn{(Y_i, B_k' X_i)_{i = 1, ..., n}} where
\eqn{B_k} is the \eqn{p \times k}{p x k} dimensional CVE estimate. The
estimated SDR dimension is the \eqn{k} where the
cross-validation mean squared error is minimal. The method \code{'elbow'}
estimates the dimension via \eqn{k = argmin_k L_n(V_{p - k})} where
\eqn{V_{p - k}} is space that is orthogonal to the columns-space of the CVE estimate of \eqn{B_k}. Method \code{'wilcoxon'} is similar to \code{'elbow'}
but finds the minimum using the wilcoxon-test.
\eqn{V_{p - k}} is the space that is orthogonal to the column space of the
CVE estimate of \eqn{B_k}. Method \code{'wilcoxon'} finds the minimum using
the Wilcoxon test.
}
\examples{
# create B for simulation
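The 'CV' criterion described above is a leave-one-out mean squared error of a `mars` fit on the projected data, computed for every candidate dimension. A rough, self-contained sketch of that criterion as an explicit loop (not the package's implementation; `X`, `Y`, `dr`, `min.dim` and `max.dim` are assumed inputs):

    library(mda)
    loo.mse <- sapply(min.dim:max.dim, function(k) {
      XB <- X %*% coef(dr, k = k)                    # project onto the k-dim estimate
      mean(sapply(seq_len(nrow(X)), function(i) {
        fit <- mars(XB[-i, , drop = FALSE], Y[-i])   # fit without observation i
        (Y[i] - predict(fit, XB[i, , drop = FALSE]))^2
      }))
    })
    k.hat <- (min.dim:max.dim)[which.min(loo.mse)]   # dimension with minimal CV MSE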
@@ -2,8 +2,7 @@
% Please edit documentation in R/util.R
\name{rStiefel}
\alias{rStiefel}
\title{Draws a sample from the invariant measure on the Stiefel manifold
\eqn{S(p, q)}.}
\title{Random sample from Stiefel manifold.}
\usage{
rStiefel(p, q)
}

@@ -13,12 +12,12 @@ rStiefel(p, q)
\item{q}{col dimension}
}
\value{
\eqn{p \times q}{p x q} semi-orthogonal matrix.
A \eqn{p \times q}{p x q} semi-orthogonal matrix.
}
\description{
Draws a sample from the invariant measure on the Stiefel manifold
Draws a random sample from the invariant measure on the Stiefel manifold
\eqn{S(p, q)}.
}
\examples{
V <- rStiefel(6, 4)
V <- rStiefel(6, 4)
}
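For reference, a draw from the invariant (Haar) measure on the Stiefel manifold S(p, q) can be generated by orthogonalizing a Gaussian matrix via QR; the sketch below uses that standard construction and is an assumption, not the package's actual `rStiefel` source:

    # QR of an i.i.d. standard normal p x q matrix, with the usual sign fix.
    rStiefel.sketch <- function(p, q) {
      QR <- qr(matrix(rnorm(p * q), nrow = p, ncol = q))
      Q <- qr.Q(QR)
      Q %*% diag(sign(diag(qr.R(QR))), nrow = q, ncol = q)
    }
    V <- rStiefel.sketch(6, 4)
    crossprod(V)   # approximately the 4 x 4 identity: V is semi-orthogonal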
@@ -2,7 +2,7 @@
% Please edit documentation in R/summary.R
\name{summary.cve}
\alias{summary.cve}
\title{Prints a summary of a \code{cve} result.}
\title{Prints summary statistics of the \eqn{L} \code{cve} component.}
\usage{
\method{summary}{cve}(object, ...)
}

@@ -13,8 +13,7 @@
\item{...}{ignored.}
}
\description{
Prints a summary statistics of output \code{L} from \code{cve} for
\code{k = min.dim, ..., max.dim}.
Prints a summary statistics of the \code{L} component of a \code{cve} object #' for \code{k = min.dim, ..., max.dim}.
}
\examples{
# create B for simulation
@@ -32,11 +32,11 @@ void callLogger(SEXP logger, SEXP env,
SEXP r_iter = PROTECT(ScalarInteger(iter + 1));

/* Create R representations of L, V and G */
SEXP r_L = PROTECT(allocVector(REALSXP, L->nrow));
SEXP r_L = PROTECT(allocMatrix(REALSXP, L->nrow, L->ncol));
SEXP r_V = PROTECT(allocMatrix(REALSXP, V->nrow, V->ncol));
SEXP r_G = PROTECT(allocMatrix(REALSXP, G->nrow, G->ncol));
/* Copy data to R objects */
memcpy(REAL(r_L), L->elem, L->nrow * sizeof(double));
memcpy(REAL(r_L), L->elem, L->nrow * L->ncol * sizeof(double));
memcpy(REAL(r_V), V->elem, V->nrow * V->ncol * sizeof(double));
memcpy(REAL(r_G), G->elem, G->nrow * G->ncol * sizeof(double));
@@ -2,7 +2,7 @@

#include "cve.h"

void cve(const mat *X, const mat *Y, const double h,
void cve(const mat *X, const mat *Fy, const double h,
const unsigned int method,
const double momentum,
const double tau_init, const double tol_init,

@@ -10,7 +10,7 @@ void cve(const mat *X, const mat *Y, const double h,
const int maxIter, const int attempts,
mat *V, mat *L,
SEXP logger, SEXP loggerEnv) {

// TODO: param and dim. validation.
int n = X->nrow, p = X->ncol, q = V->ncol;
int attempt = 0, iter;

@@ -24,13 +24,14 @@ void cve(const mat *X, const mat *Y, const double h,

/* Create further intermediate or internal variables. */
mat *lvecD_e = (void*)0;
mat *Ysquared = (void*)0;
mat *Fy_sq = (void*)0;
mat *XV = (void*)0;
mat *lvecD = (void*)0; // TODO: combine. aka in-place lvecToSym
mat *D = (void*)0; // TODO: combine. aka in-place lvecToSym
mat *lvecK = (void*)0; // TODO: combine. aka in-place lvecToSym
mat *K = (void*)0; // TODO: combine. aka in-place lvecToSym
mat *colSumsK = (void*)0;
mat *rowSumsL = (void*)0;
mat *W = (void*)0;
mat *y1 = (void*)0;
mat *y2 = (void*)0;

@@ -51,7 +52,7 @@ void cve(const mat *X, const mat *Y, const double h,
double *workMem = (double*)R_alloc(workLen, sizeof(double));

lvecD_e = rowDiffSquareSums(X, lvecD_e);
Ysquared = hadamard(1.0, Y, Y, 0.0, Ysquared);
Fy_sq = hadamard(1.0, Fy, Fy, 0.0, Fy_sq);

do {
/* (Re)set learning rate. */
@@ -77,22 +78,26 @@ void cve(const mat *X, const mat *Y, const double h,
/* Normalize K columns to obtain weight matrix W */
W = colApply(K, '/', colSumsK, W);
/* first and second order weighted responces */
y1 = matrixprod(1.0, W, Y, 0.0, y1);
y2 = matrixprod(1.0, W, Ysquared, 0.0, y2);
y1 = matrixprod(1.0, W, Fy, 0.0, y1);
y2 = matrixprod(1.0, W, Fy_sq, 0.0, y2);
/* Compute losses */
L = hadamard(-1.0, y1, y1, 1.0, copy(y2, L));
/* Compute initial loss */
if (method == weighted) {
colSumsK = elemApply(colSumsK, '-', 1.0, colSumsK);
sumK = sum(colSumsK);
loss_last = dot(L, '*', colSumsK) / sumK;
if (L->ncol == 1) {
loss_last = dot(L, '*', colSumsK) / sumK;
} else {
loss_last = dot(rowSums(L, rowSumsL), '*', colSumsK) / sumK;
}
c = agility / sumK;
/* Calculate the scaling matrix S */
S = laplace(adjacence(L, Y, y1, D, K, gauss, S), workMem);
S = laplace(adjacence(L, Fy, y1, D, K, gauss, S), workMem);
} else { /* simple */
loss_last = mean(L);
/* Calculate the scaling matrix S */
S = laplace(adjacence(L, Y, y1, D, W, gauss, S), workMem);
S = laplace(adjacence(L, Fy, y1, D, W, gauss, S), workMem);
}
/* Gradient */
tmp1 = matrixprod(1.0, S, X, 0.0, tmp1);
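With an ensemble, `y1`, `y2` and `L` above become n x m matrices (one column per function in `func_list`), and for the weighted variant the per-observation losses are first summed over the ensemble columns before weighting. An R reference sketch of the quantities this block computes, under assumed inputs (`W`: row-normalized n x n kernel weights, `Fy`: n x m ensemble responses, `csK`: the adjusted column sums of K):

    y1 <- W %*% Fy      # first weighted moments,  n x m
    y2 <- W %*% Fy^2    # second weighted moments, n x m
    L  <- y2 - y1^2     # conditional variance estimates, n x m

    loss.simple   <- mean(L)                            # method == "simple"
    loss.weighted <- sum(rowSums(L) * csK) / sum(csK)   # method == "weighted", m > 1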
@@ -132,15 +137,19 @@ void cve(const mat *X, const mat *Y, const double h,
/* Normalize K columns to obtain weight matrix W */
W = colApply(K, '/', colSumsK, W);
/* first and second order weighted responces */
y1 = matrixprod(1.0, W, Y, 0.0, y1);
y2 = matrixprod(1.0, W, Ysquared, 0.0, y2);
y1 = matrixprod(1.0, W, Fy, 0.0, y1);
y2 = matrixprod(1.0, W, Fy_sq, 0.0, y2);
/* Compute losses */
L = hadamard(-1.0, y1, y1, 1.0, copy(y2, L));
/* Compute loss */
if (method == weighted) {
colSumsK = elemApply(colSumsK, '-', 1.0, colSumsK);
sumK = sum(colSumsK);
loss = dot(L, '*', colSumsK) / sumK;
if (L->ncol == 1) {
loss = dot(L, '*', colSumsK) / sumK;
} else {
loss = dot(rowSums(L, rowSumsL), '*', colSumsK) / sumK;
}
} else { /* simple */
loss = mean(L);
}

@@ -158,11 +167,8 @@ void cve(const mat *X, const mat *Y, const double h,
/* Compute error, use workMem. */
err = dist(V, V_tau);

// Rprintf("%2d - iter: %2d, loss: %1.3f, err: %1.3f, tau: %1.3f, norm(G) = %1.3f\n",
// attempt, iter, loss, err, tau, sqrt(squareSum(G)));

/* Shift next step to current step and store loss to last. */
V = copy(V_tau, V);
V = copy(V_tau, V);
loss_last = loss;

if (logger) {

@@ -177,11 +183,11 @@ void cve(const mat *X, const mat *Y, const double h,

if (method == weighted) {
/* Calculate the scaling matrix S */
S = laplace(adjacence(L, Y, y1, D, K, gauss, S), workMem);
S = laplace(adjacence(L, Fy, y1, D, K, gauss, S), workMem);
c = agility / sumK; // n removed previousely
} else { /* simple */
/* Calculate the scaling matrix S */
S = laplace(adjacence(L, Y, y1, D, W, gauss, S), workMem);
S = laplace(adjacence(L, Fy, y1, D, W, gauss, S), workMem);
}

/* Gradient */

@@ -194,8 +200,6 @@ void cve(const mat *X, const mat *Y, const double h,
A = skew(tau, G, V, 0.0, A);
}

// Rprintf("\n");

/* Check if current attempt improved previous ones */
if (attempt == 0 || loss < loss_best) {
loss_best = loss;
@@ -106,57 +106,69 @@ mat* applyKernel(const mat* A, const double h, kernel kernel, mat* B) {
* n +--------------------+
*/
// TODO: fix: cache misses in Y?!
mat* adjacence(const mat *vec_L, const mat *vec_Y, const mat *vec_y1,
mat* adjacence(const mat *mat_L, const mat *mat_Fy, const mat *mat_y1,
const mat *mat_D, const mat *mat_W, kernel kernel,
mat *mat_S) {
int i, j, k, n = vec_L->nrow;
int i, j, k, l, n = mat_L->nrow, m = mat_L->ncol;
int block_size, block_batch_size;
int max_size = 64 < n ? 64 : n; // Block Size set to 64

double Y_j, tmp0, tmp1, tmp2, tmp3;
double *Y = vec_Y->elem;
double *L = vec_L->elem;
double *y1 = vec_y1->elem;
double Y_j, t0, t1, t2, t3; // internal temp. values.
double *Y, *L, *y1;
double *D, *W, *S;

// TODO: Check dims.

if (!mat_S) {
mat_S = matrix(n, n);
mat_S = zero(n, n);
} else {
memset(mat_S->elem, 0, n * n * sizeof(double));
}

for (i = 0; i < n; i += block_size) {
/* Set blocks (left upper corner) */
S = mat_S->elem + i;
D = mat_D->elem + i;
W = mat_W->elem + i;
/* determine block size */
block_size = n - i;
if (block_size > max_size) {
block_size = max_size;
}
block_batch_size = 4 * (block_size / 4);
/* for each column in the block */
for (j = 0; j < n; ++j, S += n, D += n, W += n) {
Y_j = Y[j];
/* iterate over block rows */
for (k = 0; k < block_batch_size; k += 4) {
tmp0 = Y_j - y1[k];
tmp1 = Y_j - y1[k + 1];
tmp2 = Y_j - y1[k + 2];
tmp3 = Y_j - y1[k + 3];
S[k] = (L[k] - (tmp0 * tmp0)) * D[k] * W[k];
S[k + 1] = (L[k + 1] - (tmp1 * tmp1)) * D[k + 1] * W[k + 1];
S[k + 2] = (L[k + 2] - (tmp2 * tmp2)) * D[k + 2] * W[k + 2];
S[k + 3] = (L[k + 3] - (tmp3 * tmp3)) * D[k + 3] * W[k + 3];
for (l = 0; l < m; ++l) {
Y = mat_Fy->elem + l * n;
L = mat_L->elem + l * n;
y1 = mat_y1->elem + l * n;
for (i = 0; i < n; i += block_size) {
/* Set blocks (left upper corner) */
S = mat_S->elem + i;
D = mat_D->elem + i;
W = mat_W->elem + i;
/* determine block size */
block_size = n - i;
if (block_size > max_size) {
block_size = max_size;
}
for (; k < block_size; ++k) {
tmp0 = Y_j - y1[k];
S[k] = (L[k] - (tmp0 * tmp0)) * D[k] * W[k];
block_batch_size = 4 * (block_size / 4);
/* for each column in the block */
for (j = 0; j < n; ++j, S += n, D += n, W += n) {
Y_j = Y[j];
/* iterate over block rows */
for (k = 0; k < block_batch_size; k += 4) {
t0 = Y_j - y1[k];
t1 = Y_j - y1[k + 1];
t2 = Y_j - y1[k + 2];
t3 = Y_j - y1[k + 3];
S[k] += (L[k] - (t0 * t0)) * D[k] * W[k];
S[k + 1] += (L[k + 1] - (t1 * t1)) * D[k + 1] * W[k + 1];
S[k + 2] += (L[k + 2] - (t2 * t2)) * D[k + 2] * W[k + 2];
S[k + 3] += (L[k + 3] - (t3 * t3)) * D[k + 3] * W[k + 3];
}
for (; k < block_size; ++k) {
t0 = Y_j - y1[k];
S[k] += (L[k] - (t0 * t0)) * D[k] * W[k];
}
}
L += block_size;
y1 += block_size;
}
}

if (m > 1) {
S = mat_S->elem;
for (i = 0; i < n * n; ++i) {
S[i] /= m;
}
L += block_size;
y1 += block_size;
}

return mat_S;
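Stripped of the cache blocking, the reworked `adjacence` above accumulates one n x n contribution per ensemble column and finally averages over the m columns. An R reference sketch of that computation under assumed inputs (`Fy`, `L`, `y1` are n x m; `D`, `W` are n x n); this is an interpretation of the loop, not the package code:

    adjacence.sketch <- function(Fy, L, y1, D, W) {
      n <- nrow(Fy); m <- ncol(Fy)
      S <- matrix(0, n, n)
      for (l in seq_len(m)) {
        # entry (k, j): (L[k, l] - (Fy[j, l] - y1[k, l])^2) * D[k, j] * W[k, j]
        S <- S + (L[, l] - outer(y1[, l], Fy[, l], function(a, b) (b - a)^2)) * D * W
      }
      S / m   # average over the ensemble, as the C code does when m > 1
    }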
@@ -25,7 +25,7 @@ static mat* asMat(SEXP S) {
return M;
}

SEXP cve_export(SEXP X, SEXP Y, SEXP k, SEXP h,
SEXP cve_export(SEXP X, SEXP Fy, SEXP k, SEXP h,
SEXP method,
SEXP V, // initial
SEXP momentum, SEXP tau, SEXP tol,

@@ -47,7 +47,7 @@ SEXP cve_export(SEXP X, SEXP Y, SEXP k, SEXP h,

/* Create output list. */
SEXP Vout = PROTECT(allocMatrix(REALSXP, p, q));
SEXP Lout = PROTECT(allocVector(REALSXP, n));
SEXP Lout = PROTECT(allocMatrix(REALSXP, n, ncols(Fy)));

/* Check `attempts`, if not positive use passed values of `V` as
* optimization start value without further attempts.

@@ -58,7 +58,7 @@ SEXP cve_export(SEXP X, SEXP Y, SEXP k, SEXP h,
}

/* Call CVE */
cve(asMat(X), asMat(Y), asReal(h),
cve(asMat(X), asMat(Fy), asReal(h),
asInteger(method),
asReal(momentum), asReal(tau), asReal(tol),
asReal(slack), asReal(gamma),
@@ -4,7 +4,7 @@
#include <R_ext/Rdynload.h>

/* .Call calls */
extern SEXP cve_export(SEXP X, SEXP Y, SEXP k, SEXP h,
extern SEXP cve_export(SEXP X, SEXP Fy, SEXP k, SEXP h,
SEXP method,
SEXP V, // initial
SEXP momentum, SEXP tau, SEXP tol,
@@ -55,9 +55,9 @@ mat* copy(mat *src, mat *dest) {

/**
* Sum of all elements.
*
*
* @param A matrix.
*
*
* @returns the sum of elements of `A`.
*/
double sum(const mat* A) {