
add: directions, predict,

add: momentum, weighted, slack, ...
fix: estimate.bandwidth, typos, ...
This commit is contained in:
Daniel Kapla 2019-11-20 19:03:21 +01:00
parent 0670bb976e
commit 063c4d638b
36 changed files with 1967 additions and 819 deletions

View File

@ -2,10 +2,10 @@ Package: CVE
Type: Package Type: Package
Title: Conditional Variance Estimator for Sufficient Dimension Reduction Title: Conditional Variance Estimator for Sufficient Dimension Reduction
Version: 0.2 Version: 0.2
Date: 2019-10-24 Date: 2019-11-13
Author: Loki Author: Daniel Kapla <daniel@kapla.at>, Lukas Fertl <lukas.fertl@chello.at>
Maintainer: Loki <loki@no.mail> Maintainer: Daniel Kapla <daniel@kapla.at>
Description: Implementation of the Conditional Variance Estimation (CVE) method. This package version is writen in pure R. Description: Implementation of the Conditional Variance Estimation (CVE) method.
License: GPL-3 License: GPL-3
Encoding: UTF-8 Encoding: UTF-8
RoxygenNote: 6.1.1 RoxygenNote: 6.1.1

View File

@ -1,17 +1,23 @@
# Generated by roxygen2: do not edit by hand # Generated by roxygen2: do not edit by hand
S3method(basis,cve)
S3method(directions,cve)
S3method(plot,cve) S3method(plot,cve)
S3method(predict,cve)
S3method(predict.dim,cve)
S3method(summary,cve) S3method(summary,cve)
export(basis)
export(cve) export(cve)
export(cve.call) export(cve.call)
export(cve.grid.search)
export(dataset) export(dataset)
export(directions)
export(elem.pairs) export(elem.pairs)
export(estimate.bandwidth) export(estimate.bandwidth)
export(null) export(null)
export(projTangentStiefl) export(predict.dim)
export(rStiefl) export(projTangentStiefel)
export(retractStiefl) export(rStiefel)
export(retractStiefel)
export(skew) export(skew)
export(sym) export(sym)
import(stats) import(stats)

View File

@ -48,6 +48,17 @@
#' \item "weighted" variation with addaptive weighting of slices. #' \item "weighted" variation with addaptive weighting of slices.
#' } #' }
#' @param ... Parameters passed on to \code{cve.call}. #' @param ... Parameters passed on to \code{cve.call}.
#'
#' @return dr is an S3 object of class \code{cve} with named properties:
#' \itemize{
#' \item X: Original training data,
#' \item Y: Response of original training data,
#' \item method: Name of used method,
#' \item call: The method call
#' }
#' as well as indexed entries \code{dr[[k]]} storing the k-dimensional SDR
#' projection matrices.
#'
#' @examples #' @examples
#' library(CVE) #' library(CVE)
#' #'
@ -61,7 +72,7 @@
#' #'
#' # Call the CVE method. #' # Call the CVE method.
#' dr <- cve(Y ~ X) #' dr <- cve(Y ~ X)
#' round(dr[[2]]$B, 1) #' (B <- basis(dr, 2))
#' #'
#' @seealso For a detailed description of \code{formula} see #' @seealso For a detailed description of \code{formula} see
#' [\code{\link{formula}}]. #' [\code{\link{formula}}].
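A minimal usage sketch of the accessor interface documented above (random toy data; `basis` and `directions` are the new exports in this commit's NAMESPACE):
library(CVE)
X <- matrix(rnorm(100 * 6), 100, 6)
Y <- X[, 1]^2 + X[, 2]^2 + rnorm(100, 0, 0.3)
dr <- cve(Y ~ X, max.dim = 4)
B <- basis(dr, 2)           # 6 x 2 SDR basis for k = 2
proj <- directions(dr, 2)   # 100 x 2 projected training data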
@ -91,24 +102,60 @@ cve <- function(formula, data, method = "simple", max.dim = 10L, ...) {
#' \code{\link{estimate.bandwidth}} (ignored if \code{h} is supplied). #' \code{\link{estimate.bandwidth}} (ignored if \code{h} is supplied).
#' @param X data matrix with samples in its rows. #' @param X data matrix with samples in its rows.
#' @param Y Responses (1 dimensional). #' @param Y Responses (1 dimensional).
#' @param k Dimension of lower dimensional projection, if \code{k} is given only the specified dimension \code{B} matrix is estimated. #' @param k Dimension of lower dimensional projection, if \code{k} is given
#' only the specified dimension \code{B} matrix is estimated.
#' @param min.dim lower bounds for \code{k}, (ignored if \code{k} is supplied). #' @param min.dim lower bounds for \code{k}, (ignored if \code{k} is supplied).
#' @param max.dim upper bounds for \code{k}, (ignored if \code{k} is supplied). #' @param max.dim upper bounds for \code{k}, (ignored if \code{k} is supplied).
#' @param tau Initial step-size. #' @param tau Initial step-size.
#' @param tol Tolerance for break condition. #' @param tol Tolerance for break condition.
#' @param epochs maximum number of optimization steps. #' @param epochs maximum number of optimization steps.
#' @param attempts number of arbitrary different starting points. #' @param attempts number of arbitrary different starting points.
#' @param logger a logger function (only for advanced user, significantly slows down the computation). #' @param logger a logger function (only for advanced user, significantly slows
#' down the computation).
#'
#' @return dr is a list which contains:
#' \itemize{
#' \item dir: dir[[d]] is the central space of dimension d,
#'       d = 1, 2, ..., p, i.e. the reduced directions for the different dimensions
#' \item y: the value of the response
#' \item idx: the index of variables which survive after screening
#' \item max.dim: the largest dimension of the CS or CMS calculated in the mave function
#' \item ky: parameter used for DIM for selection
#' \item x: the original training data
#' }
#'
#' @rdname cve #' @rdname cve
#' @export #' @export
cve.call <- function(X, Y, method = "simple", cve.call <- function(X, Y, method = "simple",
nObs = sqrt(nrow(X)), h = NULL, nObs = sqrt(nrow(X)), h = NULL,
min.dim = 1L, max.dim = 10L, k = NULL, min.dim = 1L, max.dim = 10L, k = NULL,
tau = 1.0, tol = 1e-3, momentum = 0.0, tau = 1.0, tol = 1e-3,
slack = 0.0, gamma = 0.5,
V.init = NULL,
epochs = 50L, attempts = 10L, epochs = 50L, attempts = 10L,
logger = NULL) { logger = NULL) {
# get method bitmask
methods <- list(
"simple" = 0L,
"weighted" = 1L
)
method <- tolower(method)
if (!(method %in% names(methods))) {
stop('Got unknown method.')
}
method_bitmask <- methods[[method]]
# parameter checking # parameter checking
if (!is.numeric(momentum) || length(momentum) > 1L) {
stop("Momentum must be a number.")
}
if (!is.double(momentum)) {
momentum <- as.double(momentum)
}
if (momentum < 0.0 || momentum >= 1.0) {
stop("Momentum must be in [0, 1).")
}
if (!(is.matrix(X) && is.numeric(X))) { if (!(is.matrix(X) && is.numeric(X))) {
stop("Parameter 'X' should be a numeric matrices.") stop("Parameter 'X' should be a numeric matrices.")
} }
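A hedged sketch of calling cve.call directly with the tuning arguments added here (momentum, slack, gamma); the values are illustrative, not recommendations:
X <- matrix(rnorm(100 * 6), 100, 6)
Y <- X[, 1]^2 + rnorm(100, 0, 0.3)
dr <- cve.call(X, Y, method = "weighted", k = 2,
               momentum = 0.8, slack = 0.05, gamma = 0.5)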
@ -125,12 +172,23 @@ cve.call <- function(X, Y, method = "simple",
stop("'X' is one dimensional, no need for dimension reduction.") stop("'X' is one dimensional, no need for dimension reduction.")
} }
if (missing(k) || is.null(k)) { if (!is.null(V.init)) {
if (!is.matrix(V.init)) {
stop("'V.init' must be a matrix.")
}
if (!isTRUE(all.equal(crossprod(V.init), diag(1, ncol(V.init))))) {
stop("'V.init' must be semi-orthogonal.")
}
if (ncol(X) != nrow(V.init) || ncol(X) <= ncol(V.init)) {
stop("Dimension missmatch of 'V.init' and 'X'")
}
min.dim <- max.dim <- ncol(X) - ncol(V.init)
attempts <- 0L
} else if (missing(k) || is.null(k)) {
min.dim <- as.integer(min.dim) min.dim <- as.integer(min.dim)
max.dim <- as.integer(min(max.dim, ncol(X) - 1L)) max.dim <- as.integer(min(max.dim, ncol(X) - 1L))
} else { } else {
min.dim <- as.integer(k) min.dim <- max.dim <- as.integer(k)
max.dim <- as.integer(k)
} }
if (min.dim > max.dim) { if (min.dim > max.dim) {
stop("'min.dim' bigger 'max.dim'.") stop("'min.dim' bigger 'max.dim'.")
@ -161,6 +219,16 @@ cve.call <- function(X, Y, method = "simple",
} else { } else {
tol <- as.double(tol) tol <- as.double(tol)
} }
if (!is.numeric(slack) || length(slack) > 1L || slack < 0.0) {
stop("Break condition slack 'slack' must be not negative number.")
} else {
slack <- as.double(slack)
}
if (!is.numeric(gamma) || length(gamma) > 1L || gamma <= 0.0 || gamma >= 1.0) {
stop("Stepsize reduction 'gamma' must be between 0 and 1.")
} else {
gamma <- as.double(gamma)
}
if (!is.numeric(epochs) || length(epochs) > 1L) { if (!is.numeric(epochs) || length(epochs) > 1L) {
stop("Parameter 'epochs' must be positive integer.") stop("Parameter 'epochs' must be positive integer.")
@ -170,6 +238,8 @@ cve.call <- function(X, Y, method = "simple",
if (epochs < 1L) { if (epochs < 1L) {
stop("Parameter 'epochs' must be at least 1L.") stop("Parameter 'epochs' must be at least 1L.")
} }
if (is.null(V.init)) {
if (!is.numeric(attempts) || length(attempts) > 1L) { if (!is.numeric(attempts) || length(attempts) > 1L) {
stop("Parameter 'attempts' must be positive integer.") stop("Parameter 'attempts' must be positive integer.")
} else if (!is.integer(attempts)) { } else if (!is.integer(attempts)) {
@ -178,6 +248,7 @@ cve.call <- function(X, Y, method = "simple",
if (attempts < 1L) { if (attempts < 1L) {
stop("Parameter 'attempts' must be at least 1L.") stop("Parameter 'attempts' must be at least 1L.")
} }
}
if (is.function(logger)) { if (is.function(logger)) {
loggerEnv <- environment(logger) loggerEnv <- environment(logger)
@ -189,40 +260,28 @@ cve.call <- function(X, Y, method = "simple",
method <- tolower(method) method <- tolower(method)
call <- match.call() call <- match.call()
dr <- list() dr <- list()
dr$res <- list()
for (k in min.dim:max.dim) { for (k in min.dim:max.dim) {
if (estimate) { if (estimate) {
h <- estimate.bandwidth(X, k, nObs) h <- estimate.bandwidth(X, k, nObs)
} }
if (method == 'simple') { dr.k <- .Call('cve', PACKAGE = 'CVE',
dr.k <- .Call('cve_simple', PACKAGE = 'CVE',
X, Y, k, h, X, Y, k, h,
tau, tol, method_bitmask,
V.init,
momentum, tau, tol,
slack, gamma,
epochs, attempts, epochs, attempts,
logger, loggerEnv) logger, loggerEnv)
# dr.k <- cve_simple(X, Y, k, nObs = nObs, ...)
# } else if (method == 'linesearch') {
# dr.k <- cve_linesearch(X, Y, k, nObs = nObs, ...)
# } else if (method == 'rcg') {
# dr.k <- cve_rcg(X, Y, k, nObs = nObs, ...)
# } else if (method == 'momentum') {
# dr.k <- cve_momentum(X, Y, k, nObs = nObs, ...)
# } else if (method == 'rmsprob') {
# dr.k <- cve_rmsprob(X, Y, k, nObs = nObs, ...)
# } else if (method == 'sgdrmsprob') {
# dr.k <- cve_sgdrmsprob(X, Y, k, nObs = nObs, ...)
# } else if (method == 'sgd') {
# dr.k <- cve_sgd(X, Y, k, nObs = nObs, ...)
} else {
stop('Got unknown method.')
}
dr.k$B <- null(dr.k$V) dr.k$B <- null(dr.k$V)
dr.k$loss <- mean(dr.k$L) dr.k$loss <- mean(dr.k$L)
dr.k$h <- h dr.k$h <- h
dr.k$k <- k dr.k$k <- k
class(dr.k) <- "cve.k" class(dr.k) <- "cve.k"
dr[[k]] <- dr.k dr$res[[as.character(k)]] <- dr.k
} }
# augment result information # augment result information
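The per-dimension fits are now keyed by the character value of k under dr$res; a short sketch of inspecting one of them (field names as assigned in the loop above):
dr.2 <- dr$res[["2"]]
dr.2$B      # SDR basis (orthogonal complement of the estimated V)
dr.2$L      # per-sample losses
dr.2$loss   # mean sample loss
dr.2$h      # bandwidth used for this k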
@ -236,22 +295,23 @@ cve.call <- function(X, Y, method = "simple",
#' Loss distribution elbow plot. #' Loss distribution elbow plot.
#' #'
#' Boxplots of the sample losses for each SDR dimension \code{k} from \code{min.dim} to \code{max.dim}.
#'
#' @param x Object of class \code{"cve"} (result of [\code{\link{cve}}]). #' @param x Object of class \code{"cve"} (result of [\code{\link{cve}}]).
#' @param ... Pass through parameters to [\code{\link{plot}}] and #' @param ... Pass through parameters to [\code{\link{plot}}] and
#' [\code{\link{lines}}] #' [\code{\link{lines}}]
#' #'
#' @seealso see \code{\link{par}} for graphical parameters to pass through #' @seealso see \code{\link{par}} for graphical parameters to pass through
#' as well as \code{\link{plot}}, the standard plot utility. #' as well as \code{\link{plot}}, the standard plot utility.
#' @importFrom graphics plot lines points
#' @method plot cve #' @method plot cve
#' Boxplots of the loss from \code{min.dim} to \code{max.dim} \code{k} values. #' @importFrom graphics plot lines points
#' @export #' @export
plot.cve <- function(x, ...) { plot.cve <- function(x, ...) {
L <- c() L <- c()
k <- c() k <- c()
for (dr.k in x) { for (dr.k in x$res) {
if (class(dr.k) == 'cve.k') { if (class(dr.k) == 'cve.k') {
k <- c(k, paste0(dr.k$k)) k <- c(k, as.character(dr.k$k))
L <- c(L, dr.k$L) L <- c(L, dr.k$L)
} }
} }
@ -260,11 +320,10 @@ plot.cve <- function(x, ...) {
xlab = "SDR dimension", xlab = "SDR dimension",
ylab = "Sample loss distribution", ylab = "Sample loss distribution",
names = k) names = k)
# lines(apply(L, 2, mean)) # TODO: ?
} }
#' Prints a summary of a \code{cve} result. #' Prints a summary of a \code{cve} result.
#' @param object Instance of 'cve' as return of \code{cve}. #' @param object Instance of 'cve' as returned by \code{cve}.
#' @method summary cve #' @method summary cve
#' @export #' @export
summary.cve <- function(object, ...) { summary.cve <- function(object, ...) {
@ -272,11 +331,151 @@ summary.cve <- function(object, ...) {
'\n', '\n',
'Dataset size: ', nrow(object$X), '\n', 'Dataset size: ', nrow(object$X), '\n',
'Data Dimension: ', ncol(object$X), '\n', 'Data Dimension: ', ncol(object$X), '\n',
'SDR Dimension: ', object$k, '\n', # 'SDR Dimension: ', object$k, '\n',
'loss: ', object$loss, '\n', # 'loss: ', object$loss, '\n',
'\n', '\n',
'Called via:\n', 'Called via:\n',
' ', ' ',
sep='') sep='')
print(object$call) print(object$call)
L <- c()
k <- c()
for (dr.k in object$res) {
if (class(dr.k) == 'cve.k') {
k <- c(k, as.character(dr.k$k))
L <- c(L, dr.k$L)
}
}
L <- matrix(L, ncol = length(k))
S <- apply(L, 2, summary)
colnames(S) <- k
cat('\n')
print(S)
}
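Both functions are typically used together when choosing k; a usage sketch (assuming a fitted dr as above):
plot(dr)      # elbow plot: loss boxplots over the computed dimensions
summary(dr)   # call, data dimensions and per-k loss summaries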
#' @export
directions <- function(dr, k) {
UseMethod("directions")
}
#' Computes projected training data \code{X} for given dimension `k`.
#'
#' @param dr Instance of 'cve' as returned by \code{cve}.
#' @param k SDR dimension to use for projection.
#'
#' @method directions cve
#' @aliases directions directions.cve
#' @export
directions.cve <- function(dr, k) {
if (!(k %in% names(dr$res))) {
stop("SDR directions for requested dimension `k` not computed.")
}
return(dr$X %*% dr$res[[as.character(k)]]$B)
}
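A usage sketch (assuming a fitted dr): plotting the response against the first projected direction.
proj <- directions(dr, 2)
plot(proj[, 1], dr$Y, xlab = "1st SDR direction", ylab = "Y")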
#' @export
basis <- function(dr, k) {
UseMethod("basis")
}
#' Gets estimated SDR basis.
#'
#' @param dr Instance of 'cve' as returned by \code{cve}.
#' @param k SDR dimension of the requested basis; if not given, a list of all
#' computed bases is returned.
#'
#' @return List of basis matrices, or the SDR basis for the supplied dimension `k`.
#'
#' @method basis cve
#' @aliases basis basis.cve
#' @export
basis.cve <- function(dr, k) {
if (missing(k)) {
Bs <- list()
for (k in names(dr$res)) {
Bs[[k]] <- dr$res[[k]]$B
}
return(Bs)
} else if (k %in% names(dr$res)) {
return(dr$res[[as.character(k)]]$B)
} else {
stop("Requested dimenion `k` not computed.")
}
}
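Usage sketch of the two access modes described above:
basis(dr)      # named list of all computed bases ("1", "2", ...)
basis(dr, 2)   # single p x 2 basis matrix for k = 2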
#' Predict method for CVE Fits.
#'
#' Predict responses using reduced data with \code{\link{mars}}.
#'
#' @param object instance of class \code{cve} (result of \code{cve},
#' \code{cve.call}).
#' @param X.new Matrix of the new data to be predicted.
#' @param dim dimension of SDR space to be used for data projection.
#' @param ... further arguments passed to \code{\link{mars}}.
#'
#' @return predicted response for the data \code{X.new}.
#'
#' @seealso \code{\link{cve}}, \code{\link{cve.call}} or \pkg{\link{mars}}.
#'
#' @examples
#' TODO:
#'
#' @aliases predict.cve
#' @rdname predict.cve
#'
#' @method predict cve
#' @export
predict.cve <- function(object, X.new, dim = NULL, ...) {
library(mda)
if (!is.matrix(X.new)) {
X.new <- matrix(X.new, nrow = 1L)
}
B <- object$res[[as.character(dim)]]$B
model <- mars(object$X %*% B, object$Y)
predict(model, X.new %*% B)
}
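A hedged usage sketch (requires the mda package to be installed; dim must be one of the computed dimensions):
X.new <- matrix(rnorm(5 * ncol(dr$X)), 5, ncol(dr$X))
predict(dr, X.new, dim = 2)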
#' @export
predict.dim <- function(dr) {
UseMethod("predict.dim")
}
#' @method predict.dim cve
#' @export
predict.dim.cve <- function(dr) {
library(mda)
# Get centered training data and dimensions
X <- scale(dr$X, center = TRUE, scale = FALSE)
n <- nrow(dr$X) # number of training data samples
Sigma <- (1 / n) * crossprod(X, X)
eig <- eigen(Sigma)
Sigma_root <- eig$vectors %*% tcrossprod(diag(sqrt(eig$values)), eig$vectors)
X <- X %*% solve(Sigma_root)
pred <- matrix(0, n, length(dr$res))
colnames(pred) <- names(dr$res)
for (dr.k in dr$res) {
# get "name" of current dimension
k <- as.character(dr.k$k)
# Project dataset with current SDR basis
X.proj <- X %*% dr.k$B
for (i in 1:n) {
model <- mars(X.proj[-i, ], dr$Y[-i])
pred[i, k] <- predict(model, X.proj[i, , drop = F])
}
}
MSE <- colMeans((pred - dr$Y)^2)
return(list(
MSE = MSE,
k = as.integer(names(which.min(MSE)))
))
} }
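Usage sketch of the leave-one-out dimension selection above (again requires mda):
pd <- predict.dim(dr)
pd$MSE   # leave-one-out MARS MSE per computed dimension
pd$k     # dimension with the smallest MSE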

View File

@ -2,8 +2,8 @@
#' #'
#' Estimates a bandwidth \code{h} according #' Estimates a bandwidth \code{h} according
#' \deqn{% #' \deqn{%
#' h = \chi_{k}^{-1}\left(\frac{nObs - 1}{n-1}\right)\frac{2 tr(\Sigma)}{p}}{% #' h = (2 * tr(\Sigma) / p) * (1.2 * n^{-1 / (4 + k)})^2}{%
#' h = qchisq( (nObs - 1)/(n - 1), k ) * (2 tr(\Sigma) / p)} #' h = (2 * tr(Sigma) / p) * (1.2 * n^(-1 / (4 + k)))^2}
#' with \eqn{n} the sample size, \eqn{p} its dimension #' with \eqn{n} the sample size, \eqn{p} its dimension
#' (\code{n <- nrow(X); p <- ncol(X)}) and the covariance-matrix \eqn{\Sigma} #' (\code{n <- nrow(X); p <- ncol(X)}) and the covariance-matrix \eqn{\Sigma}
#' which is \code{(n-1)/n} times the sample covariance estimate. #' which is \code{(n-1)/n} times the sample covariance estimate.
@ -12,15 +12,15 @@
#' @param k Dimension of lower dimensional projection. #' @param k Dimension of lower dimensional projection.
#' @param nObs number of points in a slice, see \eqn{nObs} in CVE paper. #' @param nObs number of points in a slice, see \eqn{nObs} in CVE paper.
#' #'
#' @seealso [\code{\link{qchisq}}] #' @return Estimated bandwidth \code{h}.
#'
#' @export #' @export
estimate.bandwidth <- function(X, k, nObs) { estimate.bandwidth <- function(X, k, nObs) {
n <- nrow(X) n <- nrow(X)
p <- ncol(X) p <- ncol(X)
X_centered <- scale(X, center = TRUE, scale = FALSE) X_centered <- scale(X, center = TRUE, scale = FALSE)
Sigma <- (1 / n) * t(X_centered) %*% X_centered Sigma <- crossprod(X_centered, X_centered) / n
quantil <- qchisq((nObs - 1) / (n - 1), k) return((2 * sum(diag(Sigma)) / p) * (1.2 * n^(-1 / (4 + k)))^2)
return(2 * quantil * sum(diag(Sigma)) / p)
} }
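A quick numerical check of the new rule (note that nObs is kept in the signature but no longer enters the formula):
X <- matrix(rnorm(200 * 6), 200, 6)
estimate.bandwidth(X, k = 2, nObs = sqrt(200))
# equals (2 / 6) * sum(diag(cov(X))) * (199 / 200) * (1.2 * 200^(-1 / 6))^2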

View File

@ -1,43 +0,0 @@
#' Performs a grid search for parameters over a parameter grid.
#' @examples
#' args <- list(
#' h = c(0.05, 0.1, 0.2),
#' method = c("simple", "sgd"),
#' tau = c(0.5, 0.1, 0.01)
#' )
#' cve.grid.search(args)
#' @export
cve.grid.search <- function(X, Y, k, args) {
args$stringsAsFactors = FALSE
args$KEEP.OUT.ATTRS = FALSE
grid <- do.call(expand.grid, args)
grid.length <- length(grid[[1]])
print(grid)
for (i in 1:grid.length) {
arguments <- as.list(grid[i, ])
# Set required arguments
arguments$X <- X
arguments$Y <- Y
arguments$k <- k
# print(arguments)
dr <- do.call(cve.call, arguments)
print(dr$loss)
}
}
# ds <- dataset()
# X <- ds$X
# Y <- ds$Y
# (k <- ncol(ds$B))
# args <- list(
# h = c(0.05, 0.1, 0.2),
# method = c("simple", "sgd"),
# tau = c(0.5, 0.1, 0.01),
# attempts = c(1L)
# )
# cve.grid.search(X, Y, k, args)

View File

@ -6,17 +6,17 @@
#' @examples #' @examples
#' V <- rStiefel(6, 4) #' V <- rStiefel(6, 4)
#' @export #' @export
rStiefl <- function(p, q) { rStiefel <- function(p, q) {
return(qr.Q(qr(matrix(rnorm(p * q, 0, 1), p, q)))) return(qr.Q(qr(matrix(rnorm(p * q, 0, 1), p, q))))
} }
#' Retraction to the manifold. #' Retraction to the manifold.
#' #'
#' @param A matrix. #' @param A matrix.
#' @return `(p, q)` semi-orthogonal matrix, aka element of the Stiefl manifold. #' @return `(p, q)` semi-orthogonal matrix, aka element of the Stiefel manifold.
#' @keywords internal #' @keywords internal
#' @export #' @export
retractStiefl <- function(A) { retractStiefel <- function(A) {
return(qr.Q(qr(A))) return(qr.Q(qr(A)))
} }
@ -40,14 +40,14 @@ sym <- function(A) {
0.5 * (A + t(A)) 0.5 * (A + t(A))
} }
#' Orthogonal Projection onto the tangent space of the stiefl manifold. #' Orthogonal Projection onto the tangent space of the stiefel manifold.
#' #'
#' @param V Point on the stiefl manifold. #' @param V Point on the stiefel manifold.
#' @param G matrix to be projected onto the tangent space at `V`. #' @param G matrix to be projected onto the tangent space at `V`.
#' @return `(p, q)` matrix as element of the tangent space at `V`. #' @return `(p, q)` matrix as element of the tangent space at `V`.
#' @keywords internal #' @keywords internal
#' @export #' @export
projTangentStiefl <- function(V, G) { projTangentStiefel <- function(V, G) {
Q <- diag(1, nrow(V)) - V %*% t(V) Q <- diag(1, nrow(V)) - V %*% t(V)
return(Q %*% G + V %*% skew(t(V) %*% G)) return(Q %*% G + V %*% skew(t(V) %*% G))
} }
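A quick sanity check of the renamed utilities (sketch): a random Stiefel point is semi-orthogonal, and the retraction maps a perturbed point back onto the manifold.
V <- rStiefel(6, 2)
all.equal(crossprod(V), diag(2))                 # TRUE: V is semi-orthogonal
W <- retractStiefel(V + 0.1 * matrix(rnorm(12), 6, 2))
all.equal(crossprod(W), diag(2))                 # TRUE: back on the manifold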

CVE_C/man/basis.cve.Rd Normal file
View File

@ -0,0 +1,21 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CVE.R
\name{basis.cve}
\alias{basis.cve}
\alias{basis}
\title{Gets estimated SDR basis.}
\usage{
\method{basis}{cve}(dr, k)
}
\arguments{
\item{dr}{Instance of 'cve' as returned by \code{cve}.}
\item{k}{SDR dimension of the requested basis; if not given, a list of all
computed bases is returned.}
}
\value{
List of basis matrices, or the SDR basis for the supplied dimension `k`.
}
\description{
Gets estimated SDR basis.
}

View File

@ -8,7 +8,8 @@
cve(formula, data, method = "simple", max.dim = 10L, ...) cve(formula, data, method = "simple", max.dim = 10L, ...)
cve.call(X, Y, method = "simple", nObs = sqrt(nrow(X)), h = NULL, cve.call(X, Y, method = "simple", nObs = sqrt(nrow(X)), h = NULL,
min.dim = 1L, max.dim = 10L, k = NULL, tau = 1, tol = 0.001, min.dim = 1L, max.dim = 10L, k = NULL, momentum = 0, tau = 1,
tol = 0.001, slack = 0, gamma = 0.5, V.init = NULL,
epochs = 50L, attempts = 10L, logger = NULL) epochs = 50L, attempts = 10L, logger = NULL)
} }
\arguments{ \arguments{
@ -31,15 +32,15 @@ supplied.}
\item{X}{data matrix with samples in its rows.} \item{X}{data matrix with samples in its rows.}
\item{Y}{Responces (1 dimensional).} \item{Y}{Responses (1 dimensional).}
\item{nObs}{parameter for choosing bandwidth \code{h} using \item{nObs}{parameter for choosing bandwidth \code{h} using
\code{\link{estimate.bandwidth}} (ignored if \code{h} is supplied).} \code{\link{estimate.bandwidth}} (ignored if \code{h} is supplied).}
\item{min.dim}{lower bounds for \code{k}, (ignored if \code{k} is supplied).} \item{min.dim}{lower bounds for \code{k}, (ignored if \code{k} is supplied).}
\item{k}{Dimension of lower dimensional projection, if given only the \item{k}{Dimension of lower dimensional projection, if \code{k} is given
specified dimension is estimated.} only the specified dimension \code{B} matrix is estimated.}
\item{tau}{Initial step-size.} \item{tau}{Initial step-size.}
@ -49,7 +50,30 @@ specified dimension is estimated.}
\item{attempts}{number of arbitrary different starting points.} \item{attempts}{number of arbitrary different starting points.}
\item{logger}{a logger function (only for addvanced user).} \item{logger}{a logger function (only for advanced user, significantly slows
down the computation).}
}
\value{
dr is an S3 object of class \code{cve} with named properties:
\itemize{
\item X: Original training data,
\item Y: Response of original training data,
\item method: Name of used method,
\item call: The method call
}
as well as indexed entries \code{dr[[k]]} storing the k-dimensional SDR
projection matrices.
dr is a list which contains:
\itemize{
\item dir: dir[[d]] is the central space of dimension d,
      d = 1, 2, ..., p, i.e. the reduced directions for the different dimensions
\item y: the value of the response
\item idx: the index of variables which survive after screening
\item max.dim: the largest dimension of the CS or CMS calculated in the mave function
\item ky: parameter used for DIM for selection
\item x: the original training data
}
} }
\description{ \description{
TODO: reuse of package description and details!!!! TODO: reuse of package description and details!!!!
@ -67,10 +91,10 @@ Y <- Y[, 1L]^2 + Y[, 2L]^2 + rnorm(n, 0, 0.3)
# Call the CVE method. # Call the CVE method.
dr <- cve(Y ~ X) dr <- cve(Y ~ X)
round(dr[[2]]$B, 1) (B <- basis(dr, 2))
} }
\seealso{ \seealso{
For a detailed description of the formula parameter see For a detailed description of \code{formula} see
[\code{\link{formula}}]. [\code{\link{formula}}].
} }

View File

@ -1,19 +0,0 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gridSearch.R
\name{cve.grid.search}
\alias{cve.grid.search}
\title{Performs a grid search for parameters over a parameter grid.}
\usage{
cve.grid.search(X, Y, k, args)
}
\description{
Performs a grid search for parameters over a parameter grid.
}
\examples{
args <- list(
h = c(0.05, 0.1, 0.2),
method = c("simple", "sgd"),
tau = c(0.5, 0.1, 0.01)
)
cve.grid.search(args)
}

View File

@ -0,0 +1,17 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CVE.R
\name{directions.cve}
\alias{directions.cve}
\alias{directions}
\title{Computes projected training data \code{X} for given dimension `k`.}
\usage{
\method{directions}{cve}(dr, k)
}
\arguments{
\item{dr}{Instance of 'cve' as returned by \code{cve}.}
\item{k}{SDR dimension to use for projection.}
}
\description{
Computes projected training data \code{X} for given dimension `k`.
}

View File

@ -11,17 +11,17 @@ estimate.bandwidth(X, k, nObs)
\item{k}{Dimension of lower dimensional projection.} \item{k}{Dimension of lower dimensional projection.}
\item{nObs}{Expected number of points in a slice, see paper.} \item{nObs}{number of points in a slice, see \eqn{nObs} in CVE paper.}
}
\value{
Estimated bandwidth \code{h}.
} }
\description{ \description{
Estimates a propper bandwidth \code{h} according Estimates a bandwidth \code{h} according
\deqn{% \deqn{%
h = \chi_{k}^{-1}\left(\frac{nObs - 1}{n-1}\right)\frac{2 tr(\Sigma)}{p}}{% h = (2 * tr(\Sigma) / p) * (1.2 * n^{-1 / (4 + k)})^2}{%
h = qchisq( (nObs - 1)/(n - 1), k ) * (2 tr(\Sigma) / p)} h = (2 * tr(Sigma) / p) * (1.2 * n^(-1 / (4 + k)))^2}
with \eqn{n} the number of sample and \eqn{p} its dimension with \eqn{n} the sample size, \eqn{p} its dimension
(\code{n <- nrow(X); p <- ncol(X)}) and the covariance-matrix \eqn{\Sigma} (\code{n <- nrow(X); p <- ncol(X)}) and the covariance-matrix \eqn{\Sigma}
which is given by the standard maximum likelihood estimate. which is \code{(n-1)/n} times the sample covariance estimate.
}
\seealso{
[\code{\link{qchisq}}]
} }

View File

@ -13,7 +13,7 @@
[\code{\link{lines}}]} [\code{\link{lines}}]}
} }
\description{ \description{
Loss distribution elbow plot. Boxplots of the sample losses for each SDR dimension \code{k} from \code{min.dim} to \code{max.dim}.
} }
\seealso{ \seealso{
see \code{\link{par}} for graphical parameters to pass through see \code{\link{par}} for graphical parameters to pass through

CVE_C/man/predict.cve.Rd Normal file
View File

@ -0,0 +1,31 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CVE.R
\name{predict.cve}
\alias{predict.cve}
\title{Predict method for CVE Fits.}
\usage{
\method{predict}{cve}(object, X.new, dim = NULL, ...)
}
\arguments{
\item{object}{instance of class \code{cve} (result of \code{cve},
\code{cve.call}).}
\item{X.new}{Matrix of the new data to be predicted.}
\item{dim}{dimension of SDR space to be used for data projection.}
\item{...}{further arguments passed to \code{\link{mars}}.}
}
\value{
predicted response for the data \code{X.new}.
}
\description{
Predict responses using reduced data with \code{\link{mars}}.
}
\examples{
TODO:
}
\seealso{
\code{\link{cve}}, \code{\link{cve.call}} or \pkg{\link{mars}}.
}

View File

@ -1,13 +1,13 @@
% Generated by roxygen2: do not edit by hand % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/util.R % Please edit documentation in R/util.R
\name{projTangentStiefl} \name{projTangentStiefel}
\alias{projTangentStiefl} \alias{projTangentStiefel}
\title{Orthogonal Projection onto the tangent space of the stiefl manifold.} \title{Orthogonal Projection onto the tangent space of the stiefel manifold.}
\usage{ \usage{
projTangentStiefl(V, G) projTangentStiefel(V, G)
} }
\arguments{ \arguments{
\item{V}{Point on the stiefl manifold.} \item{V}{Point on the stiefel manifold.}
\item{G}{matrix to be projected onto the tangent space at `V`.} \item{G}{matrix to be projected onto the tangent space at `V`.}
} }
@ -15,6 +15,6 @@ projTangentStiefl(V, G)
`(p, q)` matrix as element of the tangent space at `V`. `(p, q)` matrix as element of the tangent space at `V`.
} }
\description{ \description{
Orthogonal Projection onto the tangent space of the stiefl manifold. Orthogonal Projection onto the tangent space of the stiefel manifold.
} }
\keyword{internal} \keyword{internal}

CVE_C/man/rStiefel.Rd Normal file
View File

@ -0,0 +1,22 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/util.R
\name{rStiefel}
\alias{rStiefel}
\title{Draws a sample from the invariant measure on the Stiefel manifold \eqn{S(p, q)}.}
\usage{
rStiefel(p, q)
}
\arguments{
\item{p}{row dimension}
\item{q}{col dimension}
}
\value{
\code{p} times \code{q} semi-orthogonal matrix.
}
\description{
Draws a sample from the invariant measure on the Stiefel manifold \eqn{S(p, q)}.
}
\examples{
V <- rStiefel(6, 4)
}

View File

@ -1,22 +0,0 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/util.R
\name{rStiefl}
\alias{rStiefl}
\title{Samples uniform from the Stiefl Manifold.}
\usage{
rStiefl(p, q)
}
\arguments{
\item{p}{row dim.}
\item{q}{col dim.}
}
\value{
`(p, q)` semi-orthogonal matrix
}
\description{
Samples uniform from the Stiefl Manifold.
}
\examples{
V <- rStiefel(6, 4)
}

View File

@ -1,16 +1,16 @@
% Generated by roxygen2: do not edit by hand % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/util.R % Please edit documentation in R/util.R
\name{retractStiefl} \name{retractStiefel}
\alias{retractStiefl} \alias{retractStiefel}
\title{Retraction to the manifold.} \title{Retraction to the manifold.}
\usage{ \usage{
retractStiefl(A) retractStiefel(A)
} }
\arguments{ \arguments{
\item{A}{matrix.} \item{A}{matrix.}
} }
\value{ \value{
`(p, q)` semi-orthogonal matrix, aka element of the Stiefl manifold. `(p, q)` semi-orthogonal matrix, aka element of the Stiefel manifold.
} }
\description{ \description{
Retraction to the manifold. Retraction to the manifold.

View File

@ -7,7 +7,7 @@
\method{summary}{cve}(object, ...) \method{summary}{cve}(object, ...)
} }
\arguments{ \arguments{
\item{object}{Instance of 'cve' as return of \code{cve}.} \item{object}{Instance of 'cve' as returned by \code{cve}.}
} }
\description{ \description{
Prints a summary of a \code{cve} result. Prints a summary of a \code{cve} result.

View File

@ -5,17 +5,22 @@ static inline double gaussKernel(const double x, const double scale) {
return exp(scale * x * x); return exp(scale * x * x);
} }
void cve_simple_sub(const int n, const int p, const int q, void cve_sub(const int n, const int p, const int q,
const double *X, const double *Y, const double h, const double *X, const double *Y, const double h,
const unsigned int method,
const double momentum,
const double tau_init, const double tol_init, const double tau_init, const double tol_init,
const double slack, const double gamma,
const int epochs, const int attempts, const int epochs, const int attempts,
double *V, double *L, double *V, double *L,
SEXP logger, SEXP loggerEnv) { SEXP logger, SEXP loggerEnv) {
int attempt, epoch, i, nn = (n * (n - 1)) / 2; int attempt = 0, epoch, i, nn = (n * (n - 1)) / 2;
double loss, loss_last, loss_best, err, tau; double loss, loss_last, loss_best, err, tau;
double tol = tol_init * sqrt((double)(2 * q)); double tol = tol_init * sqrt((double)(2 * q));
double gKscale = -0.5 / h; double gKscale = -0.5 / h;
double agility = -2.0 * (1.0 - momentum) / (h * h);
double c;
/* Create further intermediate or internal variables. */ /* Create further intermediate or internal variables. */
double *Q = (double*)R_alloc(p * p, sizeof(double)); double *Q = (double*)R_alloc(p * p, sizeof(double));
@ -23,8 +28,8 @@ void cve_simple_sub(const int n, const int p, const int q,
double *L_best = (double*)R_alloc(n, sizeof(double)); double *L_best = (double*)R_alloc(n, sizeof(double));
double *V_tau = (double*)R_alloc(p * q, sizeof(double)); double *V_tau = (double*)R_alloc(p * q, sizeof(double));
double *X_diff = (double*)R_alloc(nn * p, sizeof(double)); double *X_diff = (double*)R_alloc(nn * p, sizeof(double));
double *X_proj = (double*)R_alloc(nn * p, sizeof(double)); // TODO: needed? double *X_proj = (double*)R_alloc(nn * p, sizeof(double));
double *y1 = (double*)R_alloc(n , sizeof(double)); // TODO: needed? double *y1 = (double*)R_alloc(n, sizeof(double));
double *vecD = (double*)R_alloc(nn, sizeof(double)); double *vecD = (double*)R_alloc(nn, sizeof(double));
double *vecK = (double*)R_alloc(nn, sizeof(double)); double *vecK = (double*)R_alloc(nn, sizeof(double));
double *vecS = (double*)R_alloc(nn, sizeof(double)); double *vecS = (double*)R_alloc(nn, sizeof(double));
@ -32,6 +37,12 @@ void cve_simple_sub(const int n, const int p, const int q,
double *G = (double*)R_alloc(p * q, sizeof(double)); double *G = (double*)R_alloc(p * q, sizeof(double));
double *A = (double*)R_alloc(p * p, sizeof(double)); double *A = (double*)R_alloc(p * p, sizeof(double));
double *V_init = (void*)0;
if (attempts < 1) {
V_init = (double*)R_alloc(p * q, sizeof(double));
memcpy(V_init, V, p * q * sizeof(double));
}
/* Determine size of working memory used by subroutines. */ /* Determine size of working memory used by subroutines. */
const int workLen = getWorkLen(n, p, q); const int workLen = getWorkLen(n, p, q);
double *workMem = (double*)R_alloc(workLen, sizeof(double)); double *workMem = (double*)R_alloc(workLen, sizeof(double));
@ -39,14 +50,20 @@ void cve_simple_sub(const int n, const int p, const int q,
/* Compute X_diff, this is static for the entire algorithm. */ /* Compute X_diff, this is static for the entire algorithm. */
rowDiffs(X, n, p, X_diff); rowDiffs(X, n, p, X_diff);
for (attempt = 0; attempt < attempts; ++attempt) { do {
/* (Re)set learning rate. */ /* (Re)set learning rate. */
tau = tau_init; tau = tau_init;
/* Sample start value from stiefl manifold. */ /* Check if start value for `V` was supplied. */
rStiefl(p, q, V, workMem, workLen); if (V_init == (void*)0) {
/* Sample start value from stiefel manifold. */
rStiefel(p, q, V, workMem, workLen);
} else {
/* (Re)Set start value of `V` to `V_init`. */
memcpy(V, V_init, p * q * sizeof(double));
}
/* Create projection matrix for initial `V`. */ /* Create projection matrix `Q <- I - V V^T` for initial `V`. */
nullProj(V, p, q, Q); nullProj(V, p, q, Q);
/* Compute Distance vector. */ /* Compute Distance vector. */
@ -65,7 +82,7 @@ void cve_simple_sub(const int n, const int p, const int q,
/* Compute loss given the kernel vector and its column sums. /* Compute loss given the kernel vector and its column sums.
* Additionally the first momentum `y1` is computed and stored in * Additionally the first momentum `y1` is computed and stored in
* the working memory (only intermediate result, needed for `vecS`). */ * the working memory (only intermediate result, needed for `vecS`). */
loss_last = cost(n, Y, vecK, colSums, y1, L); loss_last = cost(method, n, Y, vecK, colSums, y1, L);
if (logger) { if (logger) {
callLogger(logger, loggerEnv, callLogger(logger, loggerEnv,
@ -74,16 +91,27 @@ void cve_simple_sub(const int n, const int p, const int q,
} }
/* Calc the scaling vector used for final computation of grad. */ /* Calc the scaling vector used for final computation of grad. */
scaling(n, Y, y1, L, vecD, vecK, colSums, vecS); scaling(method, n, Y, y1, L, vecD, vecK, colSums, vecS);
/* Compute the Euclidean gradient `G`. */ /* Compute the Euclidean gradient `G`. */
rowSweep(X_diff, nn, p, "*", vecS, X_proj); rowSweep(X_diff, nn, p, "*", vecS, X_proj);
crossprod(X_diff, nn, p, X_proj, nn, p, workMem); crossprod(X_diff, nn, p, X_proj, nn, p, workMem);
matrixprod(workMem, p, p, V, p, q, G); matrixprod(workMem, p, p, V, p, q, G);
scale(-2. / (((double)n) * h * h), G, p * q); // in-place if (method == CVE_METHOD_WEIGHTED) {
/* Compute the sum of all kernel-applied distances by summing the
* colSums of the kernel matrix. */
c = -(double)n; // to scale with sum(K) - n
for (i = 0; i < n; ++i) {
c += colSums[i];
}
// TODO: check for division by zero, but should not happen!!!
} else {
c = n; // TODO: move (init) up cause always the same ^^ ...
}
scale(agility / c, G, p * q); // in-place
/* Compute Skew-Symmetric matrix `A` used in Cayley transform. /* Compute Skew-Symmetric matrix `A` used in Cayley transform.
+ `A <- tau * (G V^T - V G^T) + 0 * A`*/ * `A <- tau * (G V^T - V G^T) + 0 * A`*/
skew(p, q, tau, G, V, 0.0, A); skew(p, q, tau, G, V, 0.0, A);
for (epoch = 0; epoch < epochs; ++epoch) { for (epoch = 0; epoch < epochs; ++epoch) {
@ -108,13 +136,13 @@ void cve_simple_sub(const int n, const int p, const int q,
/* Compute loss given the kernel vector and its column sums. /* Compute loss given the kernel vector and its column sums.
* Additionally the first momentum `y1` is computed and stored in * Additionally the first momentum `y1` is computed and stored in
* the working memory (only intermediate result, needed for `vecS`). */ * the working memory (only intermediate result, needed for vecS).*/
loss = cost(n, Y, vecK, colSums, y1, L); loss = cost(method, n, Y, vecK, colSums, y1, L);
/* Check if step is appropriate, iff not reduce learning rate. */ /* Check if step is appropriate, iff not reduce learning rate. */
if ((loss - loss_last) > 0.0) { if ((loss - loss_last) > loss_last * slack) {
tau *= 0.5; tau *= gamma;
scale(0.5, A, p * p); scale(gamma, A, p * p);
continue; continue;
} }
@ -139,16 +167,34 @@ void cve_simple_sub(const int n, const int p, const int q,
/* Continue computing the gradient. */ /* Continue computing the gradient. */
/* Calc the scaling vector used for final computation of grad. */ /* Calc the scaling vector used for final computation of grad. */
scaling(n, Y, y1, L, vecD, vecK, colSums, vecS); scaling(method, n, Y, y1, L, vecD, vecK, colSums, vecS);
/* Compute the Euclidean gradient `G`. */ /* Compute the Euclidean gradient `G`. */
rowSweep(X_diff, nn, p, "*", vecS, X_proj); rowSweep(X_diff, nn, p, "*", vecS, X_proj);
crossprod(X_diff, nn, p, X_proj, nn, p, workMem); crossprod(X_diff, nn, p, X_proj, nn, p, workMem);
matrixprod(workMem, p, p, V, p, q, G); // /* Update without momentum */
scale(-2. / (((double)n) * h * h), G, p * q); // in-place // matrixprod(workMem, p, p, V, p, q, G);
// scale(-2. / (((double)n) * h * h), G, p * q); // in-place
/* G <- momentum * G + agility * workMem V */
if (method == CVE_METHOD_WEIGHTED) {
/* Compute the sum of all kernel-applied distances by summing the
* colSums of the kernel matrix. */
c = -(double)n; // to scale with sum(K) - n
for (i = 0; i < n; ++i) {
c += colSums[i];
}
c = agility / c;
// TODO: check for division by zero, but should not happen!!!
} else {
c = agility / n;
}
F77_NAME(dgemm)("N", "N", &p, &q, &p,
&c, workMem, &p, V, &p,
&momentum, G, &p);
/* Compute Skew-Symmetric matrix `A` used in Cayley transform. /* Compute Skew-Symmetric matrix `A` used in Cayley transform.
+ `A <- tau * (G V^T - V G^T) + 0 * A`*/ * `A <- tau * (G V^T - V G^T) + 0 * A`*/
skew(p, q, tau, G, V, 0.0, A); skew(p, q, tau, G, V, 0.0, A);
} }
@ -158,7 +204,7 @@ void cve_simple_sub(const int n, const int p, const int q,
memcpy(V_best, V, p * q * sizeof(double)); memcpy(V_best, V, p * q * sizeof(double));
memcpy(L_best, L, n * sizeof(double)); memcpy(L_best, L, n * sizeof(double));
} }
} } while (++attempt < attempts);
memcpy(V, V_best, p * q * sizeof(double)); memcpy(V, V_best, p * q * sizeof(double));
memcpy(L, L_best, n * sizeof(double)); memcpy(L, L_best, n * sizeof(double));
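The step control above accepts an update iff the new loss does not exceed loss_last * (1 + slack); otherwise the step size tau and the pending update are shrunk by gamma. A hedged R sketch of that rule in isolation (helper name is illustrative):
step_control <- function(loss, loss_last, tau, gamma, slack) {
  if ((loss - loss_last) > loss_last * slack) {
    list(accept = FALSE, tau = gamma * tau)   # reject: shrink step and retry
  } else {
    list(accept = TRUE, tau = tau)            # accept the new position
  }
}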

View File

@ -13,10 +13,25 @@
#define CVE_MEM_CHUNK_SIZE 2032 #define CVE_MEM_CHUNK_SIZE 2032
#define CVE_MEM_CHUNK_SMALL 1016 #define CVE_MEM_CHUNK_SMALL 1016
void cve_simple_sub(const int n, const int p, const int q, /* Bit masks for method types */
#define CVE_METHOD_WEIGHTED 1
// typedef struct {
// unsigned int nrow;
// unsigned int ncol;
// unsigned int memsize;
// double *data;
// } mat;
// mat* Matrix(const unsigned int nrow, const unsigned int ncol);
void cve_sub(const int n, const int p, const int q,
const double *X, const double *Y, const double h, const double *X, const double *Y, const double h,
const unsigned int method,
const double momentum,
const double tau_init, const double tol_init, const double tau_init, const double tol_init,
const int epochs, const int attempts, const double slack, const double gamma,
const int epochs, int attempts,
double *V, double *L, double *V, double *L,
SEXP logger, SEXP loggerEnv); SEXP logger, SEXP loggerEnv);
@ -28,19 +43,21 @@ void callLogger(SEXP logger, SEXP env,
/* CVE sub-routines */ /* CVE sub-routines */
int getWorkLen(const int n, const int p, const int q); int getWorkLen(const int n, const int p, const int q);
double cost(const int n, double cost(const unsigned int method,
const int n,
const double *Y, const double *Y,
const double *vecK, const double *vecK,
const double *colSums, const double *colSums,
double *y1, double *L); double *y1, double *L);
void scaling(const int n, void scaling(const unsigned int method,
const int n,
const double *Y, const double *y1, const double *L, const double *Y, const double *y1, const double *L,
const double *vecD, const double *vecK, const double *vecD, const double *vecK,
const double *colSums, const double *colSums,
double *vecS); double *vecS);
/* rStiefl */ /* rStiefel */
void rStiefl(const int p, const int q, double *V, void rStiefel(const int p, const int q, double *V,
double *workMem, int workLen); double *workMem, int workLen);
/* MATRIX */ /* MATRIX */

View File

@ -16,13 +16,14 @@ int getWorkLen(const int n, const int p, const int q) {
} }
} }
double cost(const int n, double cost(const unsigned int method,
const int n,
const double *Y, const double *Y,
const double *vecK, const double *vecK,
const double *colSums, const double *colSums,
double *y1, double *L) { double *y1, double *L) {
int i, j, k; int i, j, k;
double tmp; double tmp, sum;
for (i = 0; i < n; ++i) { for (i = 0; i < n; ++i) {
y1[i] = Y[i]; y1[i] = Y[i];
@ -44,13 +45,23 @@ double cost(const int n,
} }
tmp = 0.0; tmp = 0.0;
if (method == CVE_METHOD_WEIGHTED) {
sum = 0.0;
for (i = 0; i < n; ++i) {
tmp += (colSums[i] - 1.0) * (L[i] -= y1[i] * y1[i]);
sum += colSums[i];
}
return tmp / (sum - (double)n); // TODO: check for division by zero!
} else {
for (i = 0; i < n; ++i) { for (i = 0; i < n; ++i) {
tmp += (L[i] -= y1[i] * y1[i]); tmp += (L[i] -= y1[i] * y1[i]);
} }
return tmp / (double)n; return tmp / (double)n;
} }
}
void scaling(const int n, void scaling(const unsigned int method,
const int n,
const double *Y, const double *y1, const double *L, const double *Y, const double *y1, const double *L,
const double *vecD, const double *vecK, const double *vecD, const double *vecK,
const double *colSums, const double *colSums,
@ -58,6 +69,16 @@ void scaling(const int n,
int i, j, k, nn = (n * (n - 1)) / 2; int i, j, k, nn = (n * (n - 1)) / 2;
double tmp; double tmp;
if (method == CVE_METHOD_WEIGHTED) {
for (k = j = 0; j < n; ++j) {
for (i = j + 1; i < n; ++i, ++k) {
tmp = Y[j] - y1[i];
vecS[k] = (L[i] - (tmp * tmp));
tmp = Y[i] - y1[j];
vecS[k] += (L[j] - (tmp * tmp));
}
}
} else {
for (k = j = 0; j < n; ++j) { for (k = j = 0; j < n; ++j) {
for (i = j + 1; i < n; ++i, ++k) { for (i = j + 1; i < n; ++i, ++k) {
tmp = Y[j] - y1[i]; tmp = Y[j] - y1[i];
@ -66,6 +87,7 @@ void scaling(const int n,
vecS[k] += (L[j] - (tmp * tmp)) / colSums[j]; vecS[k] += (L[j] - (tmp * tmp)) / colSums[j];
} }
} }
}
for (k = 0; k < nn; ++k) { for (k = 0; k < nn; ++k) {
vecS[k] *= vecK[k] * vecD[k]; vecS[k] *= vecK[k] * vecD[k];
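For reference, a hedged R sketch of the two loss aggregations implemented in cost above, with L the vector of per-sample conditional variances and w the kernel column sums (names are illustrative):
loss_simple   <- function(L, w) mean(L)
loss_weighted <- function(L, w) sum((w - 1) * L) / (sum(w) - length(L))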

View File

@ -1,6 +1,6 @@
#include "cve.h" #include "cve.h"
// SEXP rStiefl_c(SEXP pin, SEXP qin) { // SEXP rStiefel_c(SEXP pin, SEXP qin) {
// int p = asInteger(pin); // int p = asInteger(pin);
// int q = asInteger(qin); // int q = asInteger(qin);
@ -9,19 +9,22 @@
// int workLen = 2 * (p + 1) * q; // int workLen = 2 * (p + 1) * q;
// double *workMem = (double*)R_alloc(workLen, sizeof(double)); // double *workMem = (double*)R_alloc(workLen, sizeof(double));
// rStiefl(p, q, REAL(Vout), workMem, workLen); // rStiefel(p, q, REAL(Vout), workMem, workLen);
// UNPROTECT(1); // UNPROTECT(1);
// return Vout; // return Vout;
// } // }
SEXP cve_simple(SEXP X, SEXP Y, SEXP k, SEXP h, SEXP cve(SEXP X, SEXP Y, SEXP k, SEXP h,
SEXP tau, SEXP tol, SEXP method,
SEXP V, // initial
SEXP momentum, SEXP tau, SEXP tol,
SEXP slack, SEXP gamma,
SEXP epochs, SEXP attempts, SEXP epochs, SEXP attempts,
SEXP logger, SEXP loggerEnv) { SEXP logger, SEXP loggerEnv) {
/* Handle logger parameter, set to NULL pointer if not a function. */ /* Handle logger parameter, set to NULL pointer if not a function. */
if (!(isFunction(logger) && isEnvironment(loggerEnv))) { if (!(isFunction(logger) && isEnvironment(loggerEnv))) {
logger = (SEXP)0; logger = (void*)0;
} }
/* Get dimensions. */ /* Get dimensions. */
@ -30,20 +33,31 @@ SEXP cve_simple(SEXP X, SEXP Y, SEXP k, SEXP h,
int q = p - asInteger(k); int q = p - asInteger(k);
/* Convert types if needed. */ /* Convert types if needed. */
// TODO: // TODO: implement! (or leave in calling R code?)
/* Create output list. */ /* Create output list. */
SEXP Vout = PROTECT(allocMatrix(REALSXP, p, q)); SEXP Vout = PROTECT(allocMatrix(REALSXP, p, q));
SEXP Lout = PROTECT(allocVector(REALSXP, n)); SEXP Lout = PROTECT(allocVector(REALSXP, n));
/* Check `attempts`, if not positive use passed values of `V` as
* optimization start value without further attempts.
* Therefore, copy from `V` to `Vout`. */
if (asInteger(attempts) < 1L) {
// TODO: Check for
memcpy(REAL(Vout), REAL(V), p * q * sizeof(double));
}
/* Call CVE simple subroutine. */ /* Call CVE simple subroutine. */
cve_simple_sub(n, p, q, cve_sub(n, p, q,
REAL(X), REAL(Y), asReal(h), REAL(X), REAL(Y), asReal(h),
asReal(tau), asReal(tol), asInteger(method),
asReal(momentum), asReal(tau), asReal(tol),
asReal(slack), asReal(gamma),
asInteger(epochs), asInteger(attempts), asInteger(epochs), asInteger(attempts),
REAL(Vout), REAL(Lout), REAL(Vout), REAL(Lout),
logger, loggerEnv); logger, loggerEnv);
/* Build output list object with names "V", "L" */
SEXP out = PROTECT(allocVector(VECSXP, 2)); SEXP out = PROTECT(allocVector(VECSXP, 2));
SET_VECTOR_ELT(out, 0, Vout); SET_VECTOR_ELT(out, 0, Vout);
SET_VECTOR_ELT(out, 1, Lout); SET_VECTOR_ELT(out, 1, Lout);

View File

@ -3,25 +3,22 @@
#include <stdlib.h> // for NULL #include <stdlib.h> // for NULL
#include <R_ext/Rdynload.h> #include <R_ext/Rdynload.h>
/* FIXME:
Check these declarations against the C/Fortran source code.
*/
/* .Call calls */ /* .Call calls */
extern SEXP cve_simple(SEXP X, SEXP Y, SEXP k, extern SEXP cve(SEXP X, SEXP Y, SEXP k, SEXP h,
SEXP h, SEXP method,
SEXP tau, SEXP tol, SEXP V, // initial
SEXP momentum, SEXP tau, SEXP tol,
SEXP slack, SEXP gamma,
SEXP epochs, SEXP attempts, SEXP epochs, SEXP attempts,
SEXP logger, SEXP loggerEnv); SEXP logger, SEXP loggerEnv);
static const R_CallMethodDef CallEntries[] = { static const R_CallMethodDef CallEntries[] = {
{"cve_simple", (DL_FUNC) &cve_simple, 10}, {"cve", (DL_FUNC) &cve, 15},
{NULL, NULL, 0} {NULL, NULL, 0}
}; };
/* Restrict C entrypoints to registered routines. */ /* Restrict C entrypoints to registered routines. */
void R_initCVE(DllInfo *dll) void R_initCVE(DllInfo *dll) {
{
R_registerRoutines(dll, NULL, CallEntries, NULL, NULL); R_registerRoutines(dll, NULL, CallEntries, NULL, NULL);
R_useDynamicSymbols(dll, FALSE); R_useDynamicSymbols(dll, FALSE);
} }

View File

@ -1,5 +1,15 @@
#include "cve.h" #include "cve.h"
// mat* Matrix(const unsigned int nrow, const unsigned int ncol) {
// mat* newMat = (mat*)R_alloc(1, sizeof(mat));
// newMat->nrow = nrow;
// newMat->ncol = ncol;
// newMat->memsize = nrow * ncol;
// newMat->data = (double*)R_alloc(nrow * ncol, sizeof(double));
// return newMat;
// }
double norm(const double *A, const int nrow, const int ncol, double norm(const double *A, const int nrow, const int ncol,
const char *type) { const char *type) {
int i, nelem = nrow * ncol; int i, nelem = nrow * ncol;
@ -86,7 +96,7 @@ void scale(const double s, double *A, const int nelem) {
} }
} }
// A dence skwe-symmetric rank 2 update. // A dense skew-symmetric rank 2 update.
// Perform the update // Perform the update
// C := alpha (A * B^T - B * A^T) + beta C // C := alpha (A * B^T - B * A^T) + beta C
void skew(const int nrow, const int ncol, void skew(const int nrow, const int ncol,

View File

@ -49,7 +49,15 @@
// return Qout; // return Qout;
// } // }
void rStiefl(const int p, const int q, double *V, /**
* Draws a sample from invariant measure on the Stiefel manifold \eqn{S(p, q)}.
*
* @param p row dimension
* @param q col dimension
* @return \code{p} times \code{q} semi-orthogonal matrix.
* `V <- qr.Q(qr(matrix(rnorm(p * q, 0, 1), p, q)))`
*/
void rStiefel(const int p, const int q, double *V,
double *workMem, int workLen) { double *workMem, int workLen) {
int i, j, info; int i, j, info;
int pq = p * q; int pq = p * q;

View File

@ -1,6 +1,29 @@
#include "cve.h" #include "cve.h"
/* C[, j] = A[, j] * v for each j = 1 to ncol */ #define ROW_SWEEP_ALG(op) \
/* Iterate `(block_size_i, ncol)` submatrix blocks. */ \
for (i = 0; i < nrow; i += block_size_i) { \
/* Set `A` and `C` to block beginning. */ \
A = A_block; \
C = C_block; \
/* Get current block's row size. */ \
block_size_i = nrow - i; \
if (block_size_i > block_size) { \
block_size_i = block_size; \
} \
/* Perform element wise operation for block. */ \
for (; A < A_end; A += nrow, C += nrow) { \
for (j = 0; j < block_size_i; ++j) { \
C[j] = (A[j]) op (v[j]); \
} \
} \
/* Step one block forth. */ \
A_block += block_size_i; \
C_block += block_size_i; \
v += block_size_i; \
}
/* C[, j] = A[, j] op v for each j = 1 to ncol with op as one of +, -, *, / */
void rowSweep(const double *A, const int nrow, const int ncol, void rowSweep(const double *A, const int nrow, const int ncol,
const char* op, const char* op,
const double *v, // vector of length nrow const double *v, // vector of length nrow
@ -17,92 +40,12 @@ void rowSweep(const double *A, const int nrow, const int ncol,
} }
if (*op == '+') { if (*op == '+') {
// Iterate `(block_size_i, ncol)` submatrix blocks. ROW_SWEEP_ALG(+)
for (i = 0; i < nrow; i += block_size_i) {
// Set `A` and `C` to block beginning.
A = A_block;
C = C_block;
// Get current block's row size.
block_size_i = nrow - i;
if (block_size_i > block_size) {
block_size_i = block_size;
}
// Perform element wise operation for block.
for (; A < A_end; A += nrow, C += nrow) {
for (j = 0; j < block_size_i; ++j) {
C[j] = A[j] + v[j]; // FUN = '+'
}
}
// Step one block forth.
A_block += block_size_i;
C_block += block_size_i;
v += block_size_i;
}
} else if (*op == '-') { } else if (*op == '-') {
// Iterate `(block_size_i, ncol)` submatrix blocks. ROW_SWEEP_ALG(-)
for (i = 0; i < nrow; i += block_size_i) {
// Set `A` and `C` to block beginning.
A = A_block;
C = C_block;
// Get current block's row size.
block_size_i = nrow - i;
if (block_size_i > block_size) {
block_size_i = block_size;
}
// Perform element wise operation for block.
for (; A < A_end; A += nrow, C += nrow) {
for (j = 0; j < block_size_i; ++j) {
C[j] = A[j] - v[j]; // FUN = '-'
}
}
// Step one block forth.
A_block += block_size_i;
C_block += block_size_i;
v += block_size_i;
}
} else if (*op == '*') { } else if (*op == '*') {
// Iterate `(block_size_i, ncol)` submatrix blocks. ROW_SWEEP_ALG(*)
for (i = 0; i < nrow; i += block_size_i) {
// Set `A` and `C` to block beginning.
A = A_block;
C = C_block;
// Get current block's row size.
block_size_i = nrow - i;
if (block_size_i > block_size) {
block_size_i = block_size;
}
// Perform element wise operation for block.
for (; A < A_end; A += nrow, C += nrow) {
for (j = 0; j < block_size_i; ++j) {
C[j] = A[j] * v[j]; // FUN = '*'
}
}
// Step one block forth.
A_block += block_size_i;
C_block += block_size_i;
v += block_size_i;
}
} else if (*op == '/') { } else if (*op == '/') {
// Iterate `(block_size_i, ncol)` submatrix blocks. ROW_SWEEP_ALG(/)
for (i = 0; i < nrow; i += block_size_i) {
// Set `A` and `C` to block beginning.
A = A_block;
C = C_block;
// Get current block's row size.
block_size_i = nrow - i;
if (block_size_i > block_size) {
block_size_i = block_size;
}
// Perform element wise operation for block.
for (; A < A_end; A += nrow, C += nrow) {
for (j = 0; j < block_size_i; ++j) {
C[j] = A[j] / v[j]; // FUN = '/'
}
}
// Step one block forth.
A_block += block_size_i;
C_block += block_size_i;
v += block_size_i;
}
} }
} }
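For reference, the blocked routine above matches R's sweep over rows; a small equivalence sketch used only for illustration:
A <- matrix(rnorm(12), 4, 3)
v <- rnorm(4)
all.equal(sweep(A, 1, v, "*"), A * v)   # same row-wise product the C routine computes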

View File

@ -0,0 +1,189 @@
library("mda") #library for mars
local_linear<-function(x,h,dat,beta){
Y<-dat[,1]
X<-dat[,-1]
N<-length(Y)
X<-X%*%beta
x<-x%*%beta#beta%*%x
D_mat<-cbind(rep(1,N),X)
if (is.vector(X)){
dim<-1
d<-abs(X-rep(x,N))
}
else{
dim<-length(X[1,])
d<-sqrt(apply(X-t(matrix(rep(x,N),dim,N)),1,norm2))
}
K<-diag(dnorm(d/h)/dnorm(0))
pred<-c(1,x)%*%solve(t(D_mat)%*%K%*%D_mat)%*%t(D_mat)%*%K%*%Y
return(pred)
}
##### performs estimation of small dimension by CV with local linear forward regression
est_dim_CV<-function(Blist,dat,h_loclin=NULL,dim.max,median_use=F,method_mars=F){
#standardize regressors by symmetric root of inverse covariance mat
Sig<-est_varmat(dat[,-1])
eig_dec<-eigen(Sig)
Sroot_inv<-eig_dec$vectors%*%((diag(eig_dec$values^(-1/2))))%*%t(eig_dec$vectors)
dat[,-1]<-as.matrix(dat[,-1])%*%Sroot_inv
N<-length(dat[,1])
dim<-length(dat[1,-1])
MSE<-mat.or.vec(N,dim.max)
for(u in 1:dim.max){
beta<-Blist[[u]]
if(is.null(h_loclin)){
#h_loclin<-(1/N)^(1/(3+2*u))
h_loclin<-1.2*N^(-1/(4+u))#(1/N)^(1/(3+2*j))
}
for(i in 1:N){
x<-dat[i,-1]
if(method_mars==F){MSE[i,u]<-(dat[i,1]-local_linear(x,h_loclin,as.matrix(dat[-i,]),beta))^2} #predict with local linear
if(method_mars==T){
dat_fit<-dat[-i,-1]%*%beta
X_new<-dat[i,-1]%*%beta
mars_mod<-mars(dat_fit,dat[-i,1]) #fit mars model
MSE[i,u]<-(dat[i,1]-predict(mars_mod,X_new))^2 #predict with mars model
}#predict with mars
}
}
if(median_use){MSE_ave<-apply(MSE,2,median)}
else{MSE_ave<-colMeans(MSE)}
#apply(MSE,2,median)
est_dim<-which.min(MSE_ave)
ret<-list(est_dim = est_dim, MSE_ave = MSE_ave, MSE = MSE)
return(ret)
}
###########
test_for_dim<-function(Lmat,dim.max=NULL,alpha=0.1,method='greater'){
#Lmat... matrix with dimension N times dim.max with columns corresponding to aov_dat for dim 1,2,3,....,max.dim
if(is.null(dim.max)){dim.max<-length(Lmat[1,])}
pval<-mat.or.vec(dim.max-1,1)
est_dim<-dim.max
# for (j in 1:(dim.max-1)){
j<-1
while(est_dim==dim.max&j<dim.max){
if (method=='greater'){
pval[(j)]<-t.test(Lmat[,(j)],Lmat[,(j+1)],alternative = 'greater',paired=T)$p.value#mod[[1]][,5][1]
if(pval[j]<alpha){est_dim<-j}
}
if (method=='lower'){
pval[(j)]<-t.test(Lmat[,(j)],Lmat[,(j+1)],alternative = 'less',paired=F)$p.value#mod[[1]][,5][1]
if(pval[j]>alpha){est_dim<-j}
}
j<-j+1
}
ret<-list(est_dim,pval)
names(ret)<-c('estdim','pval')
return(ret)
}
####
##
test_for_dim_elbow<-function(Lmat){
dim.max<-length(Lmat[1,])
ave<-colMeans(Lmat)
boxplot(Lmat,xlab='k')
lines(seq(1,dim.max),ave,col='red')
# tmp<-cbind(ave,seq(1,dim.max))
# colnames(tmp)<-c('response','k')
# diff<-lm(response~k,data=as.data.frame(tmp))$coefficients[2]
#
# est_dim<-dim.max
# i<-1
# while(i<dim.max&est_dim==dim.max){
# if(ave[i+1]-ave[i]>diff){est_dim<-i}
# i<-i+1
# }
return(which.min(ave))
}
######
#Small simulation example for truedim =1
set.seed(21)
dim<-6
truedim<-1
N<-50
b<-c(1,0,0,0,0,0)
m<-20
est_dim<-mat.or.vec(m,7)
dim.max<-4
for(i in 1:m){
dat<-creat_sample(b,N,fsquare,0.5)
Blist<-list()
Lmat<-mat.or.vec(N,dim.max)
for(u in 1:dim.max){ #calculate B for different possible truedim's
m1<-stiefl_opt(dat,k=(dim-u)) #original bandwidth selection rule used that controls number of points in a slice!!!!!!, also choose_h_2
Blist[[u]]<-fill_base(m1$est_base)[,1:u]
Lmat[,u]<-m1$aov_dat
}
#estimate truedim with different methods
est_dim[i,1]<-est_dim_CV(Blist,dat,dim.max = dim.max,median_use = T)$est_dim
est_dim[i,2]<-est_dim_CV(Blist,dat,dim.max = dim.max,median_use = F)$est_dim
est_dim[i,3]<-est_dim_CV(Blist,dat,dim.max = dim.max,median_use = F,method_mars = T)$est_dim
est_dim[i,4]<-test_for_dim(Lmat)$estdim
est_dim[i,5]<-test_for_dim(Lmat,method = 'lower')$estdim
est_dim[i,6]<-test_for_dim_elbow(cbind((dat[,1]-mean(dat[,1]))^2,Lmat))-1
mod_t<-mave(Y~.,data=as.data.frame(dat),method = 'meanMAVE')
est_dim[i,7]<-which.min(mave.dim(mod_t)$cv)
print(i)
}
length(which(est_dim[,1]==truedim))/m #fraction of runs where dimension is estimated correctly with method 1 (CV with median)
#0.5
length(which(est_dim[,2]==truedim))/m #fraction of runs where dimension is estimated correctly with method 1 (CV with mean)
#0.9
length(which(est_dim[,3]==truedim))/m #fraction of runs where dimension is estimated correctly with method 1 (CV with mars and mean)
#0.8
length(which(est_dim[,4]==truedim))/m #fraction of runs where dimension is estimated correctly with method 1 (t.test method='greater')
#0.0
length(which(est_dim[,5]==truedim))/m #fraction of runs where dimension is estimated correctly with method 1 (t.test method='lower')
#0.05
length(which(est_dim[,6]==truedim))/m #fraction of runs where dimension is estimated correctly with method (elbow)
#1
length(which(est_dim[,7]==truedim))/m #fraction of where dimension is estimated correctly with mave
#0.95
##########
#Small simulation example for truedim =2
set.seed(21)
dim<-6
truedim<-2
N<-100
b<-cbind(c(1,rep(0,dim-1)),c(0,1,rep(0,dim-2)))
m<-20
est_dim<-mat.or.vec(m,7)
dim.max<-4
for(i in 1:m){
dat<-creat_sample(b,N,function(x){return(x[1]*x[2])},0.5)
Blist<-list()
Lmat<-mat.or.vec(N,dim.max)
for(u in 1:dim.max){
m1<-stiefl_opt(dat,k=(dim-u)) #original bandwidth selection rule used, see also choose_h_2
Blist[[u]]<-fill_base(m1$est_base)[,1:u]
Lmat[,u]<-m1$aov_dat
}
est_dim[i,1]<-est_dim_CV(Blist,dat,dim.max = dim.max,median_use = T)$est_dim
est_dim[i,2]<-est_dim_CV(Blist,dat,dim.max = dim.max,median_use = F)$est_dim
est_dim[i,3]<-est_dim_CV(Blist,dat,dim.max = dim.max,median_use = F,method_mars = T)$est_dim
est_dim[i,4]<-test_for_dim(Lmat)$estdim
est_dim[i,5]<-test_for_dim(Lmat,method = 'lower')$estdim
est_dim[i,6]<-test_for_dim_elbow(cbind((dat[,1]-mean(dat[,1]))^2,Lmat))-1
mod_t<-mave(Y~.,data=as.data.frame(dat),method = 'meanMAVE')
est_dim[i,7]<-which.min(mave.dim(mod_t)$cv)
print(i)
}
length(which(est_dim[,1]==truedim))/m #fraction of runs where the dimension is estimated correctly with method 1 (CV with median)
length(which(est_dim[,2]==truedim))/m #fraction of runs where the dimension is estimated correctly with method 2 (CV with mean)
length(which(est_dim[,3]==truedim))/m #fraction of runs where the dimension is estimated correctly with method 3 (CV with mars and mean)
length(which(est_dim[,4]==truedim))/m #fraction of runs where the dimension is estimated correctly with method 4 (t.test method='greater')
length(which(est_dim[,5]==truedim))/m #fraction of runs where the dimension is estimated correctly with method 5 (t.test method='lower')
length(which(est_dim[,6]==truedim))/m #fraction of runs where the dimension is estimated correctly with method 6 (elbow)

View File

@ -0,0 +1,236 @@
LV_weight_partial<-function(V,Xl,dtemp,h,q,Y,grad=T){
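#Weighted CVE loss: returns the slice-weighted mean of the conditional variances sig
#(weights wn = kernel column sums without the self weight) and, if grad=T, its
#gradient with respect to V.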
N<-length(Y)
if(is.vector(V)){k<-1}
else{k<-length(V[1,])}
Xlv<-Xl%*%V
d<-dtemp-((Xlv^2)%*%rep(1,k))
w<-exp(-0.5*(d/h)^2)
w<-matrix(w,N,q)
wn<-apply(w,2,sum)-rep(1,q)#new
w<-apply(w,2,column_normalize)
mY<-t(w)%*%Y
sig<-t(w)%*%(Y^2)-(mY)^2
W<-(kronecker(t(wn),rep(1,N)))##new
if(grad==T){
grad<-mat.or.vec(ncol(Xl),k) #p x k gradient matrix (ncol(Xl) is the ambient dimension)
tmp1<-(kronecker(sig,rep(1,N))-(as.vector(kronecker(rep(1,q),Y))-kronecker(mY,rep(1,N)))^2)
if(k==1){
grad_d<- -2*Xl*as.vector(Xlv)
grad<-(1/h^2)*(1/sum(wn))*t(grad_d*as.vector(d)*as.vector(w)*as.vector(W))%*%tmp1 #new
# wn_grad<-(-1/h^2)*t(grad_d*as.vector(d)*as.vector(w))%*%kronecker(diag(rep(1,q)),rep(1,N))
# grad<- wn_grad%*%(sig-rep(var1[2],q))/(sum(wn))+grad
}
else{
for (j in 1:(k)){
grad_d<- -2*Xl*as.vector(Xlv[,j])
grad[,j]<- (1/h^2)*(1/sum(wn))*t(grad_d*as.vector(d)*as.vector(w)*as.vector(W))%*%tmp1#new
# wn_grad<-(-1/h^2)*t(grad_d*as.vector(d)*as.vector(w))%*%kronecker(diag(rep(1,q)),rep(1,N))
# grad[,j]<- wn_grad%*%(sig-rep(var1[2],q))/(sum(wn))+grad
}
}
ret<-list(t(wn)%*%sig/sum(wn),sig,grad)#new
names(ret)<-c('var','sig','grad')
}
else{
ret<-list(t(wn)%*%sig/sum(wn),sig)#new
names(ret)<-c('var','sig')
}
return(ret)
}
################
stiefl_weight_partial_opt<-function(dat,h=NULL,k,k0=30,p=1,maxit=50,nObs=sqrt(length(dat[,1])),lambda_0=1,tol=10^(-3),sclack_para=0){
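#Weighted CVE optimization on the Stiefel manifold: k0 random starting values, each
#refined by gradient descent with step halving whenever the relative loss increase
#exceeds sclack_para; the best of the k0 runs is returned.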
Y<-dat[,1]
X<-dat[,-1]
N<-length(Y)
dim<-length(X[1,])
if(p<1){
S<-est_varmat(X)
tmp1<-q_ind(X,S,p)
q<-tmp1$q
ind<-tmp1$ind
}
else{
q<-N
ind<-1:N
}
Xl<-(kronecker(rep(1,q),X)-kronecker(X[ind,],rep(1,N)))
dtemp<-apply(Xl,1,norm2)
if(is.null(h)){
S<-est_varmat(X)
tr<-var_tr(S)
h<-choose_h_2(dim,k,N,nObs,tr)
}
best<-exp(10000)
Vend<-mat.or.vec(dim,k)
sig<-mat.or.vec(q,1)
for(u in 1:k0){
Vnew<-Vold<-stiefl_startval(dim,k)
#print(Vold)
#print(LV(Vold,Xl,dtemp,h,q,Y)$var)
Lnew<-Lold<-exp(10000)
lambda<-lambda_0
err<-10
count<-0
count2<-0
while(err>tol&count<maxit){
#print(Vold)
tmp2<-LV_weight_partial(Vold,Xl,dtemp,h,q,Y)
G<-tmp2$grad
Lold<-tmp2$var
W<-G%*%t(Vold)-Vold%*%t(G)
stepsize<-lambda#/(2*sqrt(count+1))
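#Cayley transform step: W is skew-symmetric, so (I + stepsize*W)^(-1) (I - stepsize*W)
#is orthogonal and Vnew stays on the Stiefel manifold.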
Vnew<-solve(diag(1,dim)+stepsize*W)%*%(diag(1,dim)-stepsize*W)%*%Vold
# print(Vnew)
tmp3<-LV_weight_partial(Vnew,Xl,dtemp,h,q,Y,grad=F)
Lnew<-tmp3$var
err<-sqrt(sum((Vold%*%t(Vold)-Vnew%*%t(Vnew))^2))/sqrt(2*k)#sqrt(sum(tmp3$grad^2))/(dim*k)#
#print(err)
if(((Lnew-Lold)/Lold) > sclack_para){#/(count+1)^(0.5)
lambda=lambda/2
err<-10
count2<-count2+1
count<-count-1
Vnew<-Vold #!!!!!
}
Vold<-Vnew
count<-count+1
#print(count)
}
if(best>Lnew){
best<-Lnew
Vend<-Vnew
sig<-tmp3$sig
}
}
ret<-list(Vend,best,sig,count,h,count2)
names(ret)<-c('est_base','var','aov_dat','count','h','count2')
return(ret)
}
#################MAVE, OPG, rMAVE, rOPG from Bing Li book
opg=function(x,y,d){
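#Outer Product of Gradients: local linear (weighted least squares) fits give per-point
#gradient estimates b_i; the top d eigenvectors of sum_i b_i b_i' span the estimated
#reduction subspace.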
p=dim(x)[2];
n=dim(x)[1]
c0=2.34;
p0=max(p,3);
rn=n^(-1/(2*(p0+6)));
h=c0*n^(-(1/(p0+6)))
sig=diag(var(x));
x=apply(x,2,standvec)
kmat=kern(x,h);
bmat=numeric()
for(i in 1:dim(x)[1]){
wi=kmat[,i];
xi=cbind(1,t(t(x)-x[i,]))
bmat=cbind(bmat,wls(xi,y,wi)$b)}
beta=eigen(bmat%*%t(bmat))$vectors[,1:d]
return(diag(sig^(-1/2))%*%beta)
}
######################
wls=function(x,y,w){
n=dim(x)[1];
p=dim(x)[2]-1
out=c(solve(t(x*w)%*%x/n)%*%apply(x*y*w,2,mean))
return(list(a=out[1],b=out[2:(p+1)]))
}
#################
kern=function(x,h){
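#Gaussian kernel matrix: k1 - 2*k2 + k3 is the matrix of squared pairwise distances
#||x_i - x_j||^2.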
x=as.matrix(x);
n=dim(x)[1]
k2=x%*%t(x);
k1=t(matrix(diag(k2),n,n));
k3=t(k1);
k=k1-2*k2+k3
return(exp(-(1/(2*h^2))*(k1-2*k2+k3)))
}
###############
standvec=function(x) return((x-mean(x))/sd(x))
##############
mave2=function(x,y,h,d,nit){
sig=diag(var(x));
n=dim(x)[1];
p=dim(x)[2]
x=apply(x,2,standvec);
beta=opg(x,y,d);#beta=opg(x,y,h,d);
kermat=kern(x,h)
for(iit in 1:nit){
b=numeric();
a=numeric();
for(i in 1:n){
wi=kermat[,i]/(apply(kermat,2,mean)[i])
ui=cbind(1,t(t(x)-x[i,])%*%beta)
out=wls(ui,y,wi);
a=c(a,out$a);b=cbind(b,out$b)}
out=0;out1=0;
for(i in 1:n){
xi=kronecker(t(t(x)-x[i,]),t(b[,i]))
yi=y-a[i];
wi=kermat[,i]/apply(kermat,2,mean)[i]
out=out+apply(xi*yi*wi,2,mean)
out1=out1+t(xi*wi)%*%xi/n}
beta=t(matrix(solve(out1)%*%out,d,p))
}
return(diag(sig^(-1/2))%*%beta)
}
######################
rmave=function(x,y,d,nit){
sig=diag(var(x));
n=dim(x)[1];
p=dim(x)[2]
x=apply(x,2,standvec)
c0=2.34;
p0=max(p,3);
h=c0*n^(-(1/(p0+6)));
rn=n^(-1/(2*(p0+6)))
beta=opg(x,y,d)
for(iit in 1:nit){
kermat=kern(x%*%beta,h);
mkermat=apply(kermat,2,mean)
b=numeric();a=numeric()
for(i in 1:n){
wi=kermat[,i]/mkermat[i];
ui=cbind(1,t(t(x)-x[i,])%*%beta)
out=wls(ui,y,wi);
a=c(a,out$a);b=cbind(b,out$b)
}
out=0;
out1=0
for(i in 1:n) {
xi=kronecker(t(t(x)-x[i,]),t(b[,i]));
yi=y-a[i]
wi=kermat[,i]/mkermat[i]
out=out+apply(xi*yi*wi,2,mean)
out1=out1+t(xi*wi)%*%xi/n}
beta=t(matrix(solve(out1)%*%out,d,p))
h=max(rn*h,c0*n^((-1/(d+4))))
}
return(diag(sig^(-1/2))%*%beta)
}
#########################
ropg=function(x,y,d,nit){
sig=diag(var(x));
x=apply(x,2,standvec);
p=dim(x)[2];
n=dim(x)[1]
c0=2.34;
p0=max(p,3);
rn=n^(-1/(2*(p0+6)));
h=c0*n^(-(1/(p0+6)))
beta=diag(p)
for(iit in 1:nit){
kmat=kern(x%*%beta,h);
bmat=numeric()
for(i in 1:dim(x)[1]){
wi=kmat[,i];
xi=cbind(1,t(t(x)-x[i,]))
bmat=cbind(bmat,wls(xi,y,wi)$b)
}
beta=eigen(bmat%*%t(bmat))$vectors[,1:d]
h=max(rn*h,c0*n^((-1/(d+4))))
}
beta.final=diag(sig^(-1/2))%*%beta
return(beta.final)
}

View File

@ -0,0 +1,315 @@
LV_weight_partial<-function(V,Xl,dtemp,h,q,Y,grad=T){
N<-length(Y)
if(is.vector(V)){k<-1}
else{k<-length(V[1,])}
Xlv<-Xl%*%V
d<-dtemp-((Xlv^2)%*%rep(1,k))
w<-exp(-0.5*(d/h)^2)
w<-matrix(w,N,q)
wn<-apply(w,2,sum)-rep(1,q)#new
w<-apply(w,2,column_normalize)
mY<-t(w)%*%Y
sig<-t(w)%*%(Y^2)-(mY)^2
W<-(kronecker(t(wn),rep(1,N)))##new
if(grad==T){
grad<-mat.or.vec(ncol(Xl),k) #p x k gradient matrix (ncol(Xl) is the ambient dimension)
tmp1<-(kronecker(sig,rep(1,N))-(as.vector(kronecker(rep(1,q),Y))-kronecker(mY,rep(1,N)))^2)
if(k==1){
grad_d<- -2*Xl*as.vector(Xlv)
grad<-(1/h^2)*(1/sum(wn))*t(grad_d*as.vector(d)*as.vector(w)*as.vector(W))%*%tmp1 #new
# wn_grad<-(-1/h^2)*t(grad_d*as.vector(d)*as.vector(w))%*%kronecker(diag(rep(1,q)),rep(1,N))
# grad<- wn_grad%*%(sig-rep(var1[2],q))/(sum(wn))+grad
}
else{
for (j in 1:(k)){
grad_d<- -2*Xl*as.vector(Xlv[,j])
grad[,j]<- (1/h^2)*(1/sum(wn))*t(grad_d*as.vector(d)*as.vector(w)*as.vector(W))%*%tmp1#new
# wn_grad<-(-1/h^2)*t(grad_d*as.vector(d)*as.vector(w))%*%kronecker(diag(rep(1,q)),rep(1,N))
# grad[,j]<- wn_grad%*%(sig-rep(var1[2],q))/(sum(wn))+grad
}
}
ret<-list(t(wn)%*%sig/sum(wn),sig,grad)#new
names(ret)<-c('var','sig','grad')
}
else{
ret<-list(t(wn)%*%sig/sum(wn),sig)#new
names(ret)<-c('var','sig')
}
return(ret)
}
################
stiefl_weight_partial_opt<-function(dat,h=NULL,k,k0=30,p=1,maxit=50,nObs=sqrt(length(dat[,1])),lambda_0=1,tol=10^(-3),sclack_para=0){
Y<-dat[,1]
X<-dat[,-1]
N<-length(Y)
dim<-length(X[1,])
if(p<1){
S<-est_varmat(X)
tmp1<-q_ind(X,S,p)
q<-tmp1$q
ind<-tmp1$ind
}
else{
q<-N
ind<-1:N
}
Xl<-(kronecker(rep(1,q),X)-kronecker(X[ind,],rep(1,N)))
dtemp<-apply(Xl,1,norm2)
if(is.null(h)){
S<-est_varmat(X)
tr<-var_tr(S)
h<-choose_h_2(dim,k,N,nObs,tr)
}
best<-exp(10000)
Vend<-mat.or.vec(dim,k)
sig<-mat.or.vec(q,1)
for(u in 1:k0){
Vnew<-Vold<-stiefl_startval(dim,k)
#print(Vold)
#print(LV(Vold,Xl,dtemp,h,q,Y)$var)
Lnew<-Lold<-exp(10000)
lambda<-lambda_0
err<-10
count<-0
count2<-0
while(err>tol&count<maxit){
#print(Vold)
tmp2<-LV_weight_partial(Vold,Xl,dtemp,h,q,Y)
G<-tmp2$grad
Lold<-tmp2$var
W<-G%*%t(Vold)-Vold%*%t(G)
stepsize<-lambda#/(2*sqrt(count+1))
Vnew<-solve(diag(1,dim)+stepsize*W)%*%(diag(1,dim)-stepsize*W)%*%Vold
# print(Vnew)
tmp3<-LV_weight_partial(Vnew,Xl,dtemp,h,q,Y,grad=F)
Lnew<-tmp3$var
err<-sqrt(sum((Vold%*%t(Vold)-Vnew%*%t(Vnew))^2))/sqrt(2*k)#sqrt(sum(tmp3$grad^2))/(dim*k)#
#print(err)
if(((Lnew-Lold)/Lold) > sclack_para){#/(count+1)^(0.5)
lambda=lambda/2
err<-10
count2<-count2+1
count<-count-1
Vnew<-Vold #!!!!!
}
Vold<-Vnew
count<-count+1
#print(count)
}
if(best>Lnew){
best<-Lnew
Vend<-Vnew
sig<-tmp3$sig
}
}
ret<-list(Vend,best,sig,count,h,count2)
names(ret)<-c('est_base','var','aov_dat','count','h','count2')
return(ret)
}
#################MAVE, OPG, rMAVE, rOPG from Bing Li book
opg=function(x,y,d){
p=dim(x)[2];
n=dim(x)[1]
c0=2.34;
p0=max(p,3);
rn=n^(-1/(2*(p0+6)));
h=c0*n^(-(1/(p0+6)))
sig=diag(var(x));
x=apply(x,2,standvec)
kmat=kern(x,h);
bmat=numeric()
for(i in 1:dim(x)[1]){
wi=kmat[,i];
xi=cbind(1,t(t(x)-x[i,]))
bmat=cbind(bmat,wls(xi,y,wi)$b)}
beta=eigen(bmat%*%t(bmat))$vectors[,1:d]
return(diag(sig^(-1/2))%*%beta)
}
#######################
stiefl_opt_momentum<-function(dat,h=NULL,k,k0=30,p=1,maxit=50,nObs=sqrt(length(dat[,1])),lambda_0=1,tol=10^(-3),sclack_para=0,momentum_para=0.8){
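#CVE optimization with momentum: the k0 random starting values are only used to pick
#the best initial V; a single descent follows in which the gradient G is an
#exponentially weighted average of past gradients (weight momentum_para on the new one).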
Y<-dat[,1]
X<-dat[,-1]
N<-length(Y)
dim<-length(X[1,])
if(p<1){
S<-est_varmat(X)
tmp1<-q_ind(X,S,p)
q<-tmp1$q
ind<-tmp1$ind
}
else{
q<-N
ind<-1:N
}
Xl<-(kronecker(rep(1,q),X)-kronecker(X[ind,],rep(1,N)))
dtemp<-apply(Xl,1,norm2)
if(is.null(h)){
S<-est_varmat(X)
tr<-var_tr(S)
h<-choose_h_2(dim,k,N,nObs,tr)
}
best<-exp(10000)
Vend<-mat.or.vec(dim,k)
sig<-mat.or.vec(q,1)
for(u in 1:k0){
Vold<-stiefl_startval(dim,k)
#print(Vold)
#print(LV(Vold,Xl,dtemp,h,q,Y)$var)
Lnew<-Lold<-exp(10000)
lambda<-lambda_0
err<-10
count<-0
count2<-0
Lnew<-LV(Vold,Xl,dtemp,h,q,Y)$var
#print(Lnew)
if(best>Lnew){
best<-Lnew
Vend<-Vold
#sig<-tmp3$sig
}
}
Vnew<-Vold<-Vend
G<-matrix(rep(0,dim*k),dim,k)
while(err>tol&count<maxit){
#print(Vold)
tmp2<-LV(Vold,Xl,dtemp,h,q,Y)
#G<-tmp2$grad
G<-(1-momentum_para)*G + momentum_para*tmp2$grad
Lold<-tmp2$var
W<-G%*%t(Vold)-Vold%*%t(G)
stepsize<-lambda#/(2*sqrt(count+1))
Vnew<-solve(diag(1,dim)+stepsize*W)%*%(diag(1,dim)-stepsize*W)%*%Vold
# print(Vnew)
tmp3<-LV(Vnew,Xl,dtemp,h,q,Y,grad=F)
Lnew<-tmp3$var
err<-sqrt(sum((Vold%*%t(Vold)-Vnew%*%t(Vnew))^2))/sqrt(2*k)#sqrt(sum(tmp3$grad^2))/(dim*k)#
#print(err)
if(((Lnew-Lold)/Lold) > sclack_para){#/(count+1)^(0.5)
lambda=lambda/2
err<-10
count2<-count2+1
count<-count-1
Vnew<-Vold #!!!!!
Lnew<-Lold
}
Vold<-Vnew
count<-count+1
#print(count)
}
ret<-list(Vnew,Lnew,count,h,count2)
names(ret)<-c('est_base','var','count','h','count2')
return(ret)
}
######################
wls=function(x,y,w){
n=dim(x)[1];
p=dim(x)[2]-1
out=c(solve(t(x*w)%*%x/n)%*%apply(x*y*w,2,mean))
return(list(a=out[1],b=out[2:(p+1)]))
}
#################
kern=function(x,h){
x=as.matrix(x);
n=dim(x)[1]
k2=x%*%t(x);
k1=t(matrix(diag(k2),n,n));
k3=t(k1);
k=k1-2*k2+k3
return(exp(-(1/(2*h^2))*(k1-2*k2+k3)))
}
###############
standvec=function(x) return((x-mean(x))/sd(x))
##############
mave2=function(x,y,h,d,nit){
sig=diag(var(x));
n=dim(x)[1];
p=dim(x)[2]
x=apply(x,2,standvec);
beta=opg(x,y,d);#beta=opg(x,y,h,d);
kermat=kern(x,h)
for(iit in 1:nit){
b=numeric();
a=numeric();
for(i in 1:n){
wi=kermat[,i]/(apply(kermat,2,mean)[i])
ui=cbind(1,t(t(x)-x[i,])%*%beta)
out=wls(ui,y,wi);
a=c(a,out$a);b=cbind(b,out$b)}
out=0;out1=0;
for(i in 1:n){
xi=kronecker(t(t(x)-x[i,]),t(b[,i]))
yi=y-a[i];
wi=kermat[,i]/apply(kermat,2,mean)[i]
out=out+apply(xi*yi*wi,2,mean)
out1=out1+t(xi*wi)%*%xi/n}
beta=t(matrix(solve(out1)%*%out,d,p))
}
return(diag(sig^(-1/2))%*%beta)
}
######################
rmave=function(x,y,d,nit){
sig=diag(var(x));
n=dim(x)[1];
p=dim(x)[2]
x=apply(x,2,standvec)
c0=2.34;
p0=max(p,3);
h=c0*n^(-(1/(p0+6)));
rn=n^(-1/(2*(p0+6)))
beta=opg(x,y,d)
for(iit in 1:nit){
kermat=kern(x%*%beta,h);
mkermat=apply(kermat,2,mean)
b=numeric();a=numeric()
for(i in 1:n){
wi=kermat[,i]/mkermat[i];
ui=cbind(1,t(t(x)-x[i,])%*%beta)
out=wls(ui,y,wi);
a=c(a,out$a);b=cbind(b,out$b)
}
out=0;
out1=0
for(i in 1:n) {
xi=kronecker(t(t(x)-x[i,]),t(b[,i]));
yi=y-a[i]
wi=kermat[,i]/mkermat[i]
out=out+apply(xi*yi*wi,2,mean)
out1=out1+t(xi*wi)%*%xi/n}
beta=t(matrix(solve(out1)%*%out,d,p))
h=max(rn*h,c0*n^((-1/(d+4))))
}
return(diag(sig^(-1/2))%*%beta)
}
#########################
ropg=function(x,y,d,nit){
sig=diag(var(x));
x=apply(x,2,standvec);
p=dim(x)[2];
n=dim(x)[1]
c0=2.34;
p0=max(p,3);
rn=n^(-1/(2*(p0+6)));
h=c0*n^(-(1/(p0+6)))
beta=diag(p)
for(iit in 1:nit){
kmat=kern(x%*%beta,h);
bmat=numeric()
for(i in 1:dim(x)[1]){
wi=kmat[,i];
xi=cbind(1,t(t(x)-x[i,]))
bmat=cbind(bmat,wls(xi,y,wi)$b)
}
beta=eigen(bmat%*%t(bmat))$vectors[,1:d]
h=max(rn*h,c0*n^((-1/(d+4))))
}
beta.final=diag(sig^(-1/2))%*%beta
return(beta.final)
}

412
README.md
View File

@ -1,10 +1,42 @@
# TODOs
Doc:
- [x] Stiefel (instead of Stiefl)
- [x] Return value description (`@returs`)
- [x] DESCRIPTION
- [x] Maintainer
- [x] Author
- [x] Volume
- [x] Description (from Paper) and Ref.
- [x] Ref paper in doc
- [ ] Data set descriptions and augmentations.
- [x] Demonstration of the `Logger` function usage (Demo file or so, ...)
# Overview Methods to be implemented:
- **CVE/**: Contains actual `R` package. - [x] simple
- **CVE_legacy/**: Contains original (first) `R` implementatin of the CVE method. - [x] weighted
The `*.R` and `*.cpp` files in the root directory are _development_ and _test_ files. - [x] momentum
- [x] weighted with momentum
## TODO: README.md Performance:
- [x] Pure C implementation.
- [NOT Feasible] Stochastic Version
- [NOT Feasible] Gradient Approximations (using Algebraic Software for alternative Loss function formulations and gradient optimizations)
- [NOT Sufficient] Alternative Kernels for reducing samples
- [ ] (To Be further investigated) "Kronecker" optimization
Features (functions):
- [x] Initial `V.init` parameter (only ONE try, ignore number of `attempts` parameter)
- [x] `basis.cve` list of estimated `B`s (with `k` supplied, only `B`)
- [x] `directions.cve` Projected `X` given `k`
- [ ] `predict.cve` using `mars` for predicting responses given new data.
- [ ] `predict.dim.cve` Cross-validation or `aov` (in stats package) or "elbow" estimation
- [x] `plot.elbow`
- [x] `summary`
Changes:
- [-] New `estimate.bandwidth` implementation (see the sketch after this list):
  `h = 2 * (tr(\Sigma) / p) * (6/5 * n^(-1 / (4 + k)))^2` with
  `\Sigma = 1/n * (X - mean)'(X - mean)`
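A minimal sketch of this formula, assuming `X` is the untransformed `n x p` predictor matrix and `k` the target dimension; the name `bandwidth.sketch` is only illustrative and the actual `estimate.bandwidth()` implementation may differ:
```R
# Illustrative sketch of the bandwidth formula above, not the package implementation.
bandwidth.sketch <- function(X, k) {
    n <- nrow(X)
    p <- ncol(X)
    X.c <- scale(X, center = TRUE, scale = FALSE)  # X - colMeans(X)
    Sigma <- crossprod(X.c) / n                    # 1/n * (X - mean)'(X - mean)
    2 * (sum(diag(Sigma)) / p) * (6 / 5 * n^(-1 / (4 + k)))^2
}
```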
# Package Structure # Package Structure
@ -31,3 +63,373 @@ the demo file. You can add pauses by adding:
**Note**: Demos are not automatically tested by `R CMD check`. This means that they **Note**: Demos are not automatically tested by `R CMD check`. This means that they
can easily break without your knowledge. can easily break without your knowledge.
# General Notes for Source Code analysis
## Search in multiple files.
Using the Linux `grep` program with the parameters `-rnw` and specifying an include-files filter like in the following example.
```bash
grep --include=*\.{c,h,R} -rnw '.' -e "sweep"
```
searches in all `C` source and header files as well as `R` source files for the term _sweep_.
## Recursive directory comparison with colored structure (more or less).
```bash
diff -r CVE_R/ CVE_C/ | grep -E "^([<>]|[^<>].*)"
```
## Parsing `bash` script parameters.
```bash
usage="$0 [-v|--verbose] [-n|--dry-run] [(-s|--stack-size) <size>] [-h|--help] [-- [p1, [p2, ...]]]"
verbose=false
help=false
dry_run=false
stack_size=0
while [ $# -gt 0 ]; do
case "$1" in
-v | --verbose ) verbose=true; shift ;;
-n | --dry-run ) dry_run=true; shift ;;
-s | --stack-size ) stack_size="$2"; shift; shift ;;
-h | --help ) echo $usage; exit ;; # On help print usage and exit.
-- ) shift; break ;; # Break param "parsing".
* ) echo $usage >&2; exit 1 ;; # Print usage and exit with failure.
esac
done
echo verbose=$verbose
echo dry_run=$dry_run
echo stack_size=$stack_size
```
# Development
## Build and install.
To build the package the `devtools` package is used. This also provides `roxygen2` which is used for documentation and automatic creation of the `NAMESPACE` file.
```R
setwd("./CVE_R") # Set path to the package root.
library(devtools) # Load required `devtools` package.
document() # Create `.Rd` files and write `NAMESPACE`.
```
Next the package needs to be built; for a pure `R` package (i.e. without `C/C++`, `Fortran`, ... code) the following is sufficient.
```bash
R CMD build CVE_R
R CMD INSTALL CVE_0.1.tar.gz
```
Then we are ready for using the package.
```R
library(CVE)
help(package = "CVE")
```
## Build and install from within `R`.
An alternative approach is the following.
```R
setwd('./CVE_R')
getwd()
library(devtools)
document()
# No vignettes to build but "inst/doc/" is required!
(path <- build(vignettes = FALSE))
install.packages(path, repos = NULL, type = "source")
```
**Note: I only recommend this approach during development.**
# Analysing
## Logging (a `cve` run).
To log the `loss`, the (estimated) `error`, the true error (error of the current estimated `B` against the true `B`) or even the step size, one can use the `logger` parameter. A `logger` is a function that receives the current `environment` of the CVE optimization method (__do not alter this environment, only read from it__). This can be used to create logs like in the following example.
```R
library(CVE)
# Setup histories.
(epochs <- 50)
(attempts <- 10)
loss.history <- matrix(NA, epochs + 1, attempts)
error.history <- matrix(NA, epochs + 1, attempts)
tau.history <- matrix(NA, epochs + 1, attempts)
true.error.history <- matrix(NA, epochs + 1, attempts)
# Create a dataset
ds <- dataset("M1")
X <- ds$X
Y <- ds$Y
B <- ds$B # the true `B`
(k <- ncol(ds$B))
# True projection matrix.
P <- B %*% solve(t(B) %*% B) %*% t(B)
# Define the logger for the `cve()` method.
logger <- function(env) {
# Note the `<<-` assignment!
loss.history[env$epoch + 1, env$attempt] <<- env$loss
error.history[env$epoch + 1, env$attempt] <<- env$error
tau.history[env$epoch + 1, env$attempt] <<- env$tau
# Compute true error by comparing to the true `B`
B.est <- null(env$V) # Function provided by CVE
P.est <- B.est %*% solve(t(B.est) %*% B.est) %*% t(B.est)
true.error <- norm(P - P.est, 'F') / sqrt(2 * k)
true.error.history[env$epoch + 1, env$attempt] <<- true.error
}
# Perform SDR
dr <- cve(Y ~ X, k = k, logger = logger, epochs = epochs, attempts = attempts)
# Plot histories
par(mfrow = c(2, 2))
matplot(loss.history, type = 'l', log = 'y', xlab = 'iter',
main = 'loss', ylab = expression(L(V[iter])))
matplot(error.history, type = 'l', log = 'y', xlab = 'iter',
main = 'error', ylab = 'error')
matplot(tau.history, type = 'l', log = 'y', xlab = 'iter',
main = 'tau', ylab = 'tau')
matplot(true.error.history, type = 'l', log = 'y', xlab = 'iter',
main = 'true error', ylab = 'true error')
```
## Reading log files.
The runtime tests (and upcoming further tests) create log files saved in `tmp/`. These log files are `CSV` files (actually `TSV`) with a header storing the test results. Depending on the test the files may contain different data. As an example we use the runtime test logs which store in each line the `dataset`, the used `method` as well as the `error` (actual error of the estimated `B` against the real `B`) and the `time`. For reading and analysing the data see the following example.
```R
# Load log as `data.frame`
log <- read.csv('tmp/test0.log', sep = '\t')
# Create an error boxplot grouped by dataset.
boxplot(error ~ dataset, log)
# Overview
for (ds.name in paste0('M', seq(5))) {
ds <- subset(log, dataset == ds.name, select = c('method', 'dataset', 'time', 'error'))
print(summary(ds))
}
```
## Environments and variable lookup.
In the following are a few simple examples of how `R` searches for variables.
In addition we manipulate function closures to alter the search path in variable lookup and outer-scope variable manipulation.
```R
droids <- "These aren't the droids you're looking for."
search <- function() {
print(droids)
}
trooper.seeks <- function() {
droids <- c("R2-D2", "C-3PO")
search()
}
jedi.seeks <- function() {
droids <- c("R2-D2", "C-3PO")
environment(search) <- environment()
search()
}
trooper.seeks()
# [1] "These aren't the droids you're looking for."
jedi.seeks()
# [1] "R2-D2", "C-3PO"
```
The next example illustrates how to write (without local copies) to variables outside the function's local environment.
```R
counting <- function() {
count <<- count + 1 # Note the `<<-` assignment.
}
(function() {
environment(counting) <- environment()
count <- 0
for (i in 1:10) {
counting()
}
return(count)
})()
(function () {
closure <- new.env()
environment(counting) <- closure
assign("count", 0, envir = closure)
for (i in 1:10) {
counting()
}
return(closure$count)
})()
```
Another example for the usage of `do.call` where the evaluation of parameters is illustrated (example taken (and altered) from `?do.call`).
```R
## examples of where objects will be found.
A <- "A.Global"
f <- function(x) print(paste("f.new", x))
env <- new.env()
assign("A", "A.new", envir = env)
assign("f", f, envir = env)
f <- function(x) print(paste("f.Global", x))
f(A) # f.Global A.Global
do.call("f", list(A)) # f.Global A.Global
do.call("f", list(A), envir = env) # f.new A.Global
do.call(f, list(A), envir = env) # f.Global A.Global
do.call("f", list(quote(A)), envir = env) # f.new A.new
do.call(f, list(quote(A)), envir = env) # f.Global A.new
do.call("f", list(as.name("A")), envir = env) # f.new A.new
do.call("f", list(as.name("A")), envir = env) # f.new A.new
```
# Performance benchmarks
In this section alternative implementations of simple algorithms are compared for their performance.
### Computing the trace of a matrix multiplication.
```R
library(microbenchmark)
A <- matrix(runif(120), 12, 10)
# Check correctnes and benckmark performance.
stopifnot(
all.equal(
sum(diag(t(A) %*% A)), sum(diag(crossprod(A, A)))
),
all.equal(
sum(diag(t(A) %*% A)), sum(A * A)
)
)
microbenchmark(
MM = sum(diag(t(A) %*% A)),
cross = sum(diag(crossprod(A, A))),
elem = sum(A * A)
)
# Unit: nanoseconds
# expr min lq mean median uq max neval
# MM 4232 4570.0 5138.81 4737 4956.0 40308 100
# cross 2523 2774.5 2974.93 2946 3114.5 5078 100
# elem 582 762.5 973.02 834 964.0 12945 100
```
```R
n <- 200
M <- matrix(runif(n^2), n, n)
dnorm2 <- function(x) exp(-0.5 * x^2) / sqrt(2 * pi)
stopifnot(
all.equal(dnorm(M), dnorm2(M))
)
microbenchmark(
dnorm = dnorm(M),
dnorm2 = dnorm2(M),
exp = exp(-0.5 * M^2) # without scaling -> irrelevant for usage
)
# Unit: microseconds
# expr min lq mean median uq max neval
# dnorm 841.503 843.811 920.7828 855.7505 912.4720 2405.587 100
# dnorm2 543.510 580.319 629.5321 597.8540 607.3795 2603.763 100
# exp 502.083 535.943 577.2884 548.3745 561.3280 2113.220 100
```
### Using `crossprod()`
```R
p <- 12
q <- 10
V <- matrix(runif(p * q), p, q)
stopifnot(
all.equal(V %*% t(V), tcrossprod(V)),
all.equal(V %*% t(V), tcrossprod(V, V))
)
microbenchmark(
V %*% t(V),
tcrossprod(V),
tcrossprod(V, V)
)
# Unit: microseconds
# expr min lq mean median uq max neval
# V %*% t(V) 2.293 2.6335 2.94673 2.7375 2.9060 19.592 100
# tcrossprod(V) 1.148 1.2475 1.86173 1.3440 1.4650 30.688 100
# tcrossprod(V, V) 1.003 1.1575 1.28451 1.2400 1.3685 2.742 100
```
### Recycling vs. Sweep
```R
(n <- 200)
(p <- 12)
(q <- 10)
X_diff <- matrix(runif(n * (n - 1) / 2 * p), n * (n - 1) / 2, p)
V <- matrix(rnorm(p * q), p, q)
vecS <- runif(n * (n - 1) / 2)
stopifnot(
all.equal((X_diff %*% V) * rep(vecS, q),
sweep(X_diff %*% V, 1, vecS, `*`)),
all.equal((X_diff %*% V) * rep(vecS, q),
(X_diff %*% V) * vecS)
)
microbenchmark(
rep = (X_diff %*% V) * rep(vecS, q),
sweep = sweep(X_diff %*% V, 1, vecS, `*`, check.margin = FALSE),
recycle = (X_diff %*% V) * vecS
)
# Unit: microseconds
# expr min lq mean median uq max neval
# rep 851.723 988.3655 1575.639 1203.6385 1440.578 18999.23 100
# sweep 1313.177 1522.4010 2355.269 1879.2605 2065.399 18783.24 100
# recycle 719.001 786.1265 1157.285 881.8825 1163.202 19091.79 100
```
### Scaled `crossprod` with matmul order.
```R
(n <- 200)
(p <- 12)
(q <- 10)
X_diff <- matrix(runif(n * (n - 1) / 2 * p), n * (n - 1) / 2, p)
V <- matrix(rnorm(p * q), p, q)
vecS <- runif(n * (n - 1) / 2)
ref <- crossprod(X_diff, X_diff * vecS) %*% V
stopifnot(
all.equal(ref, crossprod(X_diff, (X_diff %*% V) * vecS)),
all.equal(ref, crossprod(X_diff, (X_diff %*% V) * vecS))
)
microbenchmark(
inner = crossprod(X_diff, X_diff * vecS) %*% V,
outer = crossprod(X_diff, (X_diff %*% V) * vecS)
)
# Unit: microseconds
# expr min lq mean median uq max neval
# inner 789.065 867.939 1683.812 987.9375 1290.055 16800.265 100
# outer 1141.479 1216.929 1404.702 1317.7315 1582.800 2531.766 100
```
### Fast dist matrix computation (aka. row sum of squares).
```R
library(microbenchmark)
library(CVE)
(n <- 200)
(N <- n * (n - 1) / 2)
(p <- 12)
M <- matrix(runif(N * p), N, p)
stopifnot(
all.equal(rowSums(M^2), rowSums.c(M^2)),
all.equal(rowSums(M^2), rowSquareSums.c(M))
)
microbenchmark(
sums = rowSums(M^2),
sums.c = rowSums.c(M^2),
sqSums.c = rowSquareSums.c(M)
)
# Unit: microseconds
# expr min lq mean median uq max neval
# sums 666.311 1051.036 1612.3100 1139.0065 1547.657 13940.97 100
# sums.c 342.647 672.453 1009.9109 740.6255 1224.715 13765.90 100
# sqSums.c 115.325 142.128 175.6242 153.4645 169.678 759.87 100
```
## Using `Rprof()` for performance.
The standard method for profiling where an algorithm is spending its time is with `Rprof()`.
```R
path <- '../tmp/R.prof' # path to profiling file
Rprof(path)
cve.res <- cve.call(X, Y, k = k)
Rprof(NULL)
(prof <- summaryRprof(path)) # Summarise results
```
**Note: consider running `gc()` before measuring**, i.e. cleaning up by explicitly calling the garbage collector.

View File

@ -205,6 +205,21 @@ crossprod.c <- function(A, B) {
.Call('R_crossprod', PACKAGE = 'benchmark', A, B) .Call('R_crossprod', PACKAGE = 'benchmark', A, B)
} }
kronecker.c <- function(A, B, op = '*') {
stopifnot(
is.matrix(A), is.numeric(A),
is.matrix(B), is.numeric(B),
is.character(op), op %in% c('*', '+', '/', '-')
)
if (!is.double(A)) {
A <- matrix(as.double(A), nrow = nrow(A))
}
if (!is.double(B)) {
B <- matrix(as.double(B), nrow = nrow(B))
}
.Call('R_kronecker', PACKAGE = 'benchmark', A, B, op)
}
skewSymRank2k.c <- function(A, B, alpha = 1, beta = 0) { skewSymRank2k.c <- function(A, B, alpha = 1, beta = 0) {
stopifnot( stopifnot(
is.matrix(A), is.numeric(A), is.matrix(A), is.numeric(A),
@ -269,6 +284,22 @@ microbenchmark(
crossprod.c = crossprod.c(A, B) crossprod.c = crossprod.c(A, B)
) )
n <- 100L
m <- 12L
p <- 11L
q <- 10L
A <- matrix(runif(n * m), n, m)
B <- matrix(runif(p * q), p, q)
stopifnot(all.equal(
kronecker(A, B),
kronecker.c(A, B)
))
microbenchmark(
kronecker = kronecker(A, B),
kronecker.c = kronecker.c(A, B)
)
n <- 12 n <- 12
k <- 11 k <- 11
A <- matrix(runif(n * k), n, k) A <- matrix(runif(n * k), n, k)
@ -307,6 +338,50 @@ microbenchmark(
nullProj.c = nullProj.c(V) nullProj.c = nullProj.c(V)
) )
# ## Kronecker optimizations ----------------------------------------------------
# library(microbenchmark)
# dist.1 <- function(X_diff, Q) {
# rowSums((X_diff %*% Q)^2)
# }
# dist.2 <- function(X, Q) {
# ones <- rep(1, nrow(X))
# proj <- X %*% Q
# rowSums((kronecker(proj, ones) - kronecker(ones, proj))^2)
# }
# n <- 400L
# p <- 12L
# k <- 2L
# q <- p - k
# X <- matrix(rnorm(n * p), n, p)
# Q <- diag(1, p) - tcrossprod(rnorm(p))
# ones <- rep(1, n)
# X_diff <- kronecker(X, ones) - kronecker(ones, X)
# stopifnot(all.equal(dist.1(X_diff, Q), dist.2(X, Q)))
# microbenchmark(
# dist.1(X_diff, Q),
# dist.2(X, Q),
# times = 10L
# )
# # if (!persistent) {
# # pair.index <- elem.pairs(seq(n))
# # i <- pair.index[, 1] # `i` indices of `(i, j)` pairs
# # j <- pair.index[, 2] # `j` indices of `(i, j)` pairs
# # lower <- ((i - 1) * n) + j
# # upper <- ((j - 1) * n) + i
# # X_diff <- X[i, , drop = F] - X[j, , drop = F]
# # }
# # # Projection matrix onto `span(V)`
# # Q <- diag(1, p) - tcrossprod(V, V)
# # # Vectorized distance matrix `D`.
# # vecD <- rowSums((X_diff %*% Q)^2)
# ## WIP for gradient. ---------------------------------------------------------- # ## WIP for gradient. ----------------------------------------------------------
@ -374,7 +449,7 @@ grad <- function(X, Y, V, h, persistent = TRUE) {
G <- (-2 / (n * h^2)) * G G <- (-2 / (n * h^2)) * G
return(G) return(G)
} }
rStiefl <- function(p, q) { rStiefel <- function(p, q) {
return(qr.Q(qr(matrix(rnorm(p * q, 0, 1), p, q)))) return(qr.Q(qr(matrix(rnorm(p * q, 0, 1), p, q))))
} }
@ -384,7 +459,7 @@ q <- 10
X <- matrix(runif(n * p), n, p) X <- matrix(runif(n * p), n, p)
Y <- runif(n) Y <- runif(n)
V <- rStiefl(p, q) V <- rStiefel(p, q)
h <- 0.1 h <- 0.1
pair.index <- elem.pairs(seq(n)) pair.index <- elem.pairs(seq(n))

View File

@ -204,6 +204,29 @@ void rowSumsSymVec(const double *Avec, const int nrow,
} }
} }
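/* Blocked row sweep C[i, j] = A[i, j] op v[i]: the row dimension is processed
 * in cache-friendly blocks; expanded below once per supported operator. */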
#define ROW_SWEEP_ALG(op) \
/* Iterate `(block_size_i, ncol)` submatrix blocks. */ \
for (i = 0; i < nrow; i += block_size_i) { \
/* Set `A` and `C` to block beginning. */ \
A = A_block; \
C = C_block; \
/* Get current block's row size. */ \
block_size_i = nrow - i; \
if (block_size_i > block_size) { \
block_size_i = block_size; \
} \
/* Perform element wise operation for block. */ \
for (; A < A_end; A += nrow, C += nrow) { \
for (j = 0; j < block_size_i; ++j) { \
C[j] = (A[j]) op (v[j]); \
} \
} \
/* Step one block forth. */ \
A_block += block_size_i; \
C_block += block_size_i; \
v += block_size_i; \
}
/* C[, j] = A[, j] * v for each j = 1 to ncol */ /* C[, j] = A[, j] * v for each j = 1 to ncol */
void rowSweep(const double *A, const int nrow, const int ncol, void rowSweep(const double *A, const int nrow, const int ncol,
const char* op, const char* op,
@ -221,93 +244,13 @@ void rowSweep(const double *A, const int nrow, const int ncol,
} }
if (*op == '+') { if (*op == '+') {
// Iterate `(block_size_i, ncol)` submatrix blocks. ROW_SWEEP_ALG(+)
for (i = 0; i < nrow; i += block_size_i) {
// Set `A` and `C` to block beginning.
A = A_block;
C = C_block;
// Get current block's row size.
block_size_i = nrow - i;
if (block_size_i > block_size) {
block_size_i = block_size;
}
// Perform element wise operation for block.
for (; A < A_end; A += nrow, C += nrow) {
for (j = 0; j < block_size_i; ++j) {
C[j] = A[j] + v[j]; // FUN = '+'
}
}
// Step one block forth.
A_block += block_size_i;
C_block += block_size_i;
v += block_size_i;
}
} else if (*op == '-') { } else if (*op == '-') {
// Iterate `(block_size_i, ncol)` submatrix blocks. ROW_SWEEP_ALG(-)
for (i = 0; i < nrow; i += block_size_i) {
// Set `A` and `C` to block beginning.
A = A_block;
C = C_block;
// Get current block's row size.
block_size_i = nrow - i;
if (block_size_i > block_size) {
block_size_i = block_size;
}
// Perform element wise operation for block.
for (; A < A_end; A += nrow, C += nrow) {
for (j = 0; j < block_size_i; ++j) {
C[j] = A[j] - v[j]; // FUN = '-'
}
}
// Step one block forth.
A_block += block_size_i;
C_block += block_size_i;
v += block_size_i;
}
} else if (*op == '*') { } else if (*op == '*') {
// Iterate `(block_size_i, ncol)` submatrix blocks. ROW_SWEEP_ALG(*)
for (i = 0; i < nrow; i += block_size_i) {
// Set `A` and `C` to block beginning.
A = A_block;
C = C_block;
// Get current block's row size.
block_size_i = nrow - i;
if (block_size_i > block_size) {
block_size_i = block_size;
}
// Perform element wise operation for block.
for (; A < A_end; A += nrow, C += nrow) {
for (j = 0; j < block_size_i; ++j) {
C[j] = A[j] * v[j]; // FUN = '*'
}
}
// Step one block forth.
A_block += block_size_i;
C_block += block_size_i;
v += block_size_i;
}
} else if (*op == '/') { } else if (*op == '/') {
// Iterate `(block_size_i, ncol)` submatrix blocks. ROW_SWEEP_ALG(/)
for (i = 0; i < nrow; i += block_size_i) {
// Set `A` and `C` to block beginning.
A = A_block;
C = C_block;
// Get current block's row size.
block_size_i = nrow - i;
if (block_size_i > block_size) {
block_size_i = block_size;
}
// Perform element wise operation for block.
for (; A < A_end; A += nrow, C += nrow) {
for (j = 0; j < block_size_i; ++j) {
C[j] = A[j] / v[j]; // FUN = '/'
}
}
// Step one block forth.
A_block += block_size_i;
C_block += block_size_i;
v += block_size_i;
}
} else { } else {
error("Got unknown 'op' (opperation) argument."); error("Got unknown 'op' (opperation) argument.");
} }
@ -364,12 +307,45 @@ void crossprod(const double *A, const int nrowA, const int ncolA,
&zero, C, &ncolA); &zero, C, &ncolA);
} }
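/* Generalized Kronecker product C = A (x) B where each entry is
 * A[i, j] op B[k, l]; expanded below once per supported operator. */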
#define KRONECKER_ALG(op) \
for (j = 0; j < ncolA; ++j) { \
for (l = 0; l < ncolB; ++l) { \
colB = B + (l * nrowB); \
for (i = 0; i < nrowA; ++i) { \
for (k = 0; k < nrowB; ++k) { \
*(C++) = (A[i]) op (colB[k]); \
} \
} \
} \
A += nrowA; \
}
void kronecker(const double * restrict A, const int nrowA, const int ncolA,
const double * restrict B, const int nrowB, const int ncolB,
const char* op,
double * restrict C) {
int i, j, k, l;
const double *colB;
if (*op == '+') {
KRONECKER_ALG(+)
} else if (*op == '-') {
KRONECKER_ALG(-)
} else if (*op == '*') {
KRONECKER_ALG(*)
} else if (*op == '/') {
KRONECKER_ALG(/)
} else {
error("Got unknown 'op' (opperation) argument.");
}
}
void nullProj(const double *B, const int nrowB, const int ncolB, void nullProj(const double *B, const int nrowB, const int ncolB,
double *Q) { double *Q) {
const double minusOne = -1.0; const double minusOne = -1.0;
const double one = 1.0; const double one = 1.0;
// Initialize as identity matrix. // Initialize Q as identity matrix.
memset(Q, 0, sizeof(double) * nrowB * nrowB); memset(Q, 0, sizeof(double) * nrowB * nrowB);
double *Q_diag, *Q_end = Q + nrowB * nrowB; double *Q_diag, *Q_end = Q + nrowB * nrowB;
for (Q_diag = Q; Q_diag < Q_end; Q_diag += nrowB + 1) { for (Q_diag = Q; Q_diag < Q_end; Q_diag += nrowB + 1) {
@ -377,7 +353,7 @@ void nullProj(const double *B, const int nrowB, const int ncolB,
} }
// DGEMM with parameterization: // DGEMM with parameterization:
// C <- (-1.0 * B %*% t(B)) + C // Q <- (-1.0 * B %*% t(B)) + Q
F77_NAME(dgemm)("N", "T", &nrowB, &nrowB, &ncolB, F77_NAME(dgemm)("N", "T", &nrowB, &nrowB, &ncolB,
&minusOne, B, &nrowB, B, &nrowB, &minusOne, B, &nrowB, B, &nrowB,
&one, Q, &nrowB); &one, Q, &nrowB);

View File

@ -137,6 +137,24 @@ SEXP R_crossprod(SEXP A, SEXP B) {
return C; return C;
} }
void kronecker(const double *A, const int nrowA, const int ncolA,
const double *B, const int nrowB, const int ncolB,
const char *op,
double *C);
SEXP R_kronecker(SEXP A, SEXP B, SEXP op) {
SEXP C = PROTECT(allocMatrix(REALSXP,
nrows(A) * nrows(B),
ncols(A) * ncols(B)));
kronecker(REAL(A), nrows(A), ncols(A),
REAL(B), nrows(B), ncols(B),
CHAR(STRING_ELT(op, 0)),
REAL(C));
UNPROTECT(1);
return C;
}
void skewSymRank2k(const int n, const int k, void skewSymRank2k(const int n, const int k,
double alpha, const double *A, const double *B, double alpha, const double *A, const double *B,
double beta, double beta,

368
notes.md
View File

@ -1,368 +0,0 @@

View File

@ -9,12 +9,19 @@ tell.user <- function(name, start.time, i, length) {
i, "/", length, i, "/", length,
" - elapsed:", format(Sys.time() - start.time), "\033[K") " - elapsed:", format(Sys.time() - start.time), "\033[K")
} }
#' Computes "distance" of spanned subspaces.
#' @param B1 Semi-orthonormal basis matrix
#' @param B2 Semi-orthonormal basis matrix
#' @return Frobenius norm of subspace projection matrix diff.
subspace.dist <- function(B1, B2){ subspace.dist <- function(B1, B2){
P1 <- B1 %*% solve(t(B1) %*% B1) %*% t(B1) P1 <- tcrossprod(B1, B1)
P2 <- B2 %*% solve(t(B2) %*% B2) %*% t(B2) P2 <- tcrossprod(B2, B2)
return(norm(P1 - P2, type = 'F')) return(norm(P1 - P2, type = 'F'))
} }
# Set random seed
set.seed(437)
# Number of simulations # Number of simulations
SIM.NR <- 50 SIM.NR <- 50
# maximal number of iterations in curvilinear search algorithm # maximal number of iterations in curvilinear search algorithm
@ -70,10 +77,12 @@ for (sim in 1:SIM.NR) {
for (method in methods) { for (method in methods) {
if (tolower(method) == "legacy") { if (tolower(method) == "legacy") {
dr.time <- system.time( dr.time <- system.time(
dr <- stiefl_opt(data, dr <- stiefel_opt(data,
k = dim - truedim, k = dim - truedim,
k0 = ATTEMPTS, k0 = ATTEMPTS,
h = estimate.bandwidth(X, k = truedim, nObs = sqrt(nrow(X))), h = estimate.bandwidth(X,
k = truedim,
nObs = sqrt(nrow(X))),
maxit = MAXIT maxit = MAXIT
) )
) )
@ -86,7 +95,7 @@ for (sim in 1:SIM.NR) {
attempts = ATTEMPTS attempts = ATTEMPTS
) )
) )
dr <- dr[[truedim]] dr$B <- basis(dr, truedim)
} }
key <- paste0(name, '-', method) key <- paste0(name, '-', method)
@ -104,22 +113,19 @@ for (sim in 1:SIM.NR) {
} }
} }
cat("\n\n## Time [sec] Means:\n") cat("\n\n## Time [sec] Summary:\n")
print(colMeans(time)) print(summary(time))
cat("\n## Error Means:\n") cat("\n## Error Summary:\n")
print(colMeans(error)) print(summary(error))
at <- seq(ncol(error)) + rep(seq(ncol(error) / length(methods)) - 1, each = length(methods))
boxplot(error, boxplot(error,
main = paste0("Error (Nr of simulations ", SIM.NR, ")"), main = paste0("Error (Nr of simulations ", SIM.NR, ")"),
ylab = "Error", ylab = "Error",
las = 2, las = 2
at = at
) )
boxplot(time, boxplot(time,
main = paste0("Time (Nr of simulations ", SIM.NR, ")"), main = paste0("Time (Nr of simulations ", SIM.NR, ")"),
ylab = "Time [sec]", ylab = "Time [sec]",
las = 2, las = 2
at = at
) )

8
test.R
View File

@ -1,10 +1,15 @@
args <- commandArgs(TRUE) args <- commandArgs(TRUE)
if (length(args) > 0) { if (length(args) > 0L) {
method <- args[1] method <- args[1]
} else { } else {
method <- "simple" method <- "simple"
} }
if (length(args) > 1L) {
momentum <- as.double(args[2])
} else {
momentum <- 0.0
}
epochs <- 50L epochs <- 50L
attempts <- 25L attempts <- 25L
@ -56,6 +61,7 @@ for (name in paste0("M", seq(5))) {
true.error.history <- matrix(NA, epochs + 1, attempts) true.error.history <- matrix(NA, epochs + 1, attempts)
dr <- cve(Y ~ X, k = k, method = method, dr <- cve(Y ~ X, k = k, method = method,
momentum = momentum,
epochs = epochs, attempts = attempts, epochs = epochs, attempts = attempts,
logger = logger) logger = logger)