update: CVE paper,
add: predict_dim doc, altered method check to avoid "may be used uninitialized" warning
parent 8761407cad
commit b71898a5bc
@@ -28,7 +28,7 @@ predict_dim_cv <- function(object) {
         k = as.integer(names(which.min(MSE)))
     ))
 }
-# TODO: write doc
+
 predict_dim_elbow <- function(object) {
     # extract original data from object (cve result)
     X <- object$X
@@ -122,24 +122,33 @@ predict_dim_wilcoxon <- function(object, p.value = 0.05) {
     ))
 }
 
-#' \code{"TODO: @Lukas"}
+#' Estimate Dimension of Reduction Space.
+#'
+#' This function estimates the dimension of the mean dimension reduction space,
+#' i.e. the number of columns of the matrix \eqn{B}. The default method \code{'CV'}
+#' performs cross-validation using \code{mars}. Given
+#' \code{k = min.dim, ..., max.dim}, a cross-validation via \code{mars} is
+#' performed on the dataset \eqn{(Y_i, B_k' X_i)_{i = 1, ..., n}} where
+#' \eqn{B_k} is the \eqn{p \times k}{p x k} dimensional CVE estimate for dimension
+#' \eqn{k}. The estimated SDR dimension is the \eqn{k} for which the
+#' cross-validation mean squared error is lowest. The method \code{'elbow'}
+#' estimates the dimension via \eqn{k = argmin_k L_n(V_{p - k})} where
+#' \eqn{V_{p - k}} is the CVE estimate of the orthogonal column space of
+#' \eqn{B_k}. Method \code{'wilcoxon'} is similar to \code{'elbow'} but finds
+#' the minimum using the Wilcoxon test.
 #'
 #' @param object instance of class \code{cve} (result of \code{\link{cve}},
 #'    \code{\link{cve.call}}).
-#' @param method one of \code{"CV"}, \code{"elbow"} or \code{"wilcoxon"}.
+#' @param method This parameter specifies which method is used for dimension
+#'    estimation. It provides three methods, \code{'CV'} (default), \code{'elbow'},
+#'    and \code{'wilcoxon'}, to estimate the dimension of the SDR.
 #' @param ... ignored.
 #'
-#' @return list with \code{"k"} the predicted dimension and method dependent
-#' informatoin.
-#'
-#' @section Method cv:
-#' TODO: \code{"TODO: @Lukas"}.
-#'
-#' @section Method elbow:
-#' TODO: \code{"TODO: @Lukas"}.
-#'
-#' @section Method wilcoxon:
-#' TODO: \code{"TODO: @Lukas"}.
+#' @return list with
+#' \describe{
+#'    \item{}{criterion of the method for \code{k = min.dim, ..., max.dim}.}
+#'    \item{k}{estimated dimension as argmin over \eqn{k} of the criterion.}
+#' }
 #'
 #' @examples
 #' # create B for simulation
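
For orientation, a hedged usage sketch of the interface documented above. It assumes the package exports cve() and predict_dim() as described; the package name, the formula interface, and the max.dim argument are assumptions taken from the surrounding documentation, and the simulated data is made up.

library(CVE)  # package name assumed from the repository

# toy single-index model: Y depends on X only through B' X (true k = 1)
set.seed(21)
n <- 100; p <- 5
B <- rep(1, p) / sqrt(p)
X <- matrix(rnorm(n * p), n, p)
Y <- drop((X %*% B)^2) + 0.1 * rnorm(n)

fit <- cve(Y ~ X, max.dim = 3)           # CVE estimates for k = 1, ..., 3 (call details assumed)
predict_dim(fit, method = "CV")          # default: cross-validation via mars
predict_dim(fit, method = "elbow")       # k = argmin_k L_n(V_{p - k})
predict_dim(fit, method = "wilcoxon")    # elbow-like, via the Wilcoxon test
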
Binary file not shown.
@@ -2,7 +2,7 @@
 % Please edit documentation in R/predict_dim.R
 \name{predict_dim}
 \alias{predict_dim}
-\title{\code{"TODO: @Lukas"}}
+\title{Estimate Dimension of Reduction Space.}
 \usage{
 predict_dim(object, ..., method = "CV")
 }
@@ -12,30 +12,31 @@ predict_dim(object, ..., method = "CV")
 
 \item{...}{ignored.}
 
-\item{method}{one of \code{"CV"}, \code{"elbow"} or \code{"wilcoxon"}.}
+\item{method}{This parameter specifies which method is used for dimension
+estimation. It provides three methods, \code{'CV'} (default), \code{'elbow'},
+and \code{'wilcoxon'}, to estimate the dimension of the SDR.}
 }
 \value{
-list with \code{"k"} the predicted dimension and method dependent
-informatoin.
+list with
+\describe{
+\item{}{criterion of the method for \code{k = min.dim, ..., max.dim}.}
+\item{k}{estimated dimension as argmin over \eqn{k} of the criterion.}
+}
 }
 \description{
-\code{"TODO: @Lukas"}
+This function estimates the dimension of the mean dimension reduction space,
+i.e. the number of columns of the matrix \eqn{B}. The default method \code{'CV'}
+performs cross-validation using \code{mars}. Given
+\code{k = min.dim, ..., max.dim}, a cross-validation via \code{mars} is
+performed on the dataset \eqn{(Y_i, B_k' X_i)_{i = 1, ..., n}} where
+\eqn{B_k} is the \eqn{p \times k}{p x k} dimensional CVE estimate for dimension
+\eqn{k}. The estimated SDR dimension is the \eqn{k} for which the
+cross-validation mean squared error is lowest. The method \code{'elbow'}
+estimates the dimension via \eqn{k = argmin_k L_n(V_{p - k})} where
+\eqn{V_{p - k}} is the CVE estimate of the orthogonal column space of
+\eqn{B_k}. Method \code{'wilcoxon'} is similar to \code{'elbow'} but finds
+the minimum using the Wilcoxon test.
 }
-\section{Method cv}{
-
-TODO: \code{"TODO: @Lukas"}.
-}
-
-\section{Method elbow}{
-
-TODO: \code{"TODO: @Lukas"}.
-}
-
-\section{Method wilcoxon}{
-
-TODO: \code{"TODO: @Lukas"}.
-}
-
 \examples{
 # create B for simulation
 B <- rep(1, 5) / sqrt(5)
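
The 'CV' criterion described above can be pictured as follows. This is an illustrative sketch under assumptions (mda::mars for the regression step, a made-up helper name cv_mse_for_k, and a hypothetical list B_list of p x k CVE estimates), not the package's predict_dim_cv implementation.

library(mda)  # provides mars()

# Cross-validation MSE of mars() fitted on the reduced predictors B_k' X.
cv_mse_for_k <- function(X, Y, B_k, folds = 10) {
    Z <- X %*% B_k                              # reduced predictors B_k' X_i
    fold_id <- sample(rep_len(seq_len(folds), nrow(Z)))
    mse <- numeric(folds)
    for (f in seq_len(folds)) {
        test <- fold_id == f
        fit  <- mars(Z[!test, , drop = FALSE], Y[!test])
        pred <- predict(fit, Z[test, , drop = FALSE])
        mse[f] <- mean((Y[test] - pred)^2)
    }
    mean(mse)
}

# MSE <- sapply(B_list, function(B_k) cv_mse_for_k(X, Y, B_k))
# names(MSE) <- as.character(seq_along(B_list))
# k <- as.integer(names(which.min(MSE)))        # mirrors the line in predict_dim_cv above
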
@@ -82,19 +82,17 @@ void cve(const mat *X, const mat *Y, const double h,
 /* Compute losses */
 L = hadamard(-1.0, y1, y1, 1.0, copy(y2, L));
 /* Compute initial loss */
-if (method == simple) {
-    loss_last = mean(L);
-    /* Calculate the scaling matrix S */
-    S = laplace(adjacence(L, Y, y1, D, W, gauss, S), workMem);
-} else if (method == weighted) {
+if (method == weighted) {
     colSumsK = elemApply(colSumsK, '-', 1.0, colSumsK);
     sumK = sum(colSumsK);
     loss_last = dot(L, '*', colSumsK) / sumK;
     c = agility / sumK;
     /* Calculate the scaling matrix S */
     S = laplace(adjacence(L, Y, y1, D, K, gauss, S), workMem);
-} else {
-    // TODO: error handling!
+} else { /* simple */
+    loss_last = mean(L);
+    /* Calculate the scaling matrix S */
+    S = laplace(adjacence(L, Y, y1, D, W, gauss, S), workMem);
 }
 /* Gradient */
 tmp1 = matrixprod(1.0, S, X, 0.0, tmp1);
@@ -139,14 +137,12 @@ void cve(const mat *X, const mat *Y, const double h,
 /* Compute losses */
 L = hadamard(-1.0, y1, y1, 1.0, copy(y2, L));
 /* Compute loss */
-if (method == simple) {
-    loss = mean(L);
-} else if (method == weighted) {
+if (method == weighted) {
     colSumsK = elemApply(colSumsK, '-', 1.0, colSumsK);
     sumK = sum(colSumsK);
     loss = dot(L, '*', colSumsK) / sumK;
-} else {
-    // TODO: error handling!
+} else { /* simple */
+    loss = mean(L);
 }
 
 /* Check if step is appropriate, iff not reduce learning rate. */
@@ -179,15 +175,13 @@ void cve(const mat *X, const mat *Y, const double h,
     break;
 }
 
-if (method == simple) {
-    /* Calculate the scaling matrix S */
-    S = laplace(adjacence(L, Y, y1, D, W, gauss, S), workMem);
-} else if (method == weighted) {
+if (method == weighted) {
     /* Calculate the scaling matrix S */
     S = laplace(adjacence(L, Y, y1, D, K, gauss, S), workMem);
     c = agility / sumK; // n removed previousely
-} else {
-    // TODO: error handling!
+} else { /* simple */
+    /* Calculate the scaling matrix S */
+    S = laplace(adjacence(L, Y, y1, D, W, gauss, S), workMem);
 }
 
 /* Gradient */
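
The hunks above reorder the method check in cve() so that the weighted case is tested first and the simple case becomes the final else branch; per the commit message this avoids a "may be used uninitialized" warning. The likely mechanism (an assumption, the commit does not spell it out) is that the old final else branch was an unhandled TODO, so the compiler could not prove that loss and S were assigned on every path. A minimal, self-contained C sketch of the pattern; loss_for and its arguments are made-up names, not package code.

/* Illustrative sketch: with an if / else-if chain whose final `else` assigns
 * nothing, the compiler cannot prove `loss` is initialized on every path.
 * Folding the `simple` case into the final `else` removes the warning. */
#include <stdio.h>

enum method { simple, weighted };

static double loss_for(enum method m, double mean_loss, double weighted_loss)
{
    double loss;                  /* deliberately no initializer */
    if (m == weighted) {
        loss = weighted_loss;     /* weighted variant */
    } else {                      /* simple: every remaining path assigns */
        loss = mean_loss;
    }
    return loss;                  /* no "may be used uninitialized" here */
}

int main(void)
{
    printf("%g\n", loss_for(simple, 0.5, 0.25));
    return 0;
}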