2
0
Fork 0

rewrote (C): to use new Gradient formula,

rewrote (C): subroutine interface to use matrix struct
This commit is contained in:
Daniel Kapla 2019-12-05 17:35:29 +01:00
parent 5638821b85
commit 4b68c245a6
31 changed files with 1915 additions and 858 deletions

View File

@ -4,7 +4,6 @@ S3method(coef,cve)
S3method(directions,cve) S3method(directions,cve)
S3method(plot,cve) S3method(plot,cve)
S3method(predict,cve) S3method(predict,cve)
S3method(predict.dim,cve)
S3method(summary,cve) S3method(summary,cve)
export(cve) export(cve)
export(cve.call) export(cve.call)
@ -13,7 +12,7 @@ export(directions)
export(elem.pairs) export(elem.pairs)
export(estimate.bandwidth) export(estimate.bandwidth)
export(null) export(null)
export(predict.dim) export(predict_dim)
export(projTangentStiefel) export(projTangentStiefel)
export(rStiefel) export(rStiefel)
export(retractStiefel) export(retractStiefel)

View File

@ -45,6 +45,7 @@
#' below. #' below.
#' \item "weighted" variation with addaptive weighting of slices. #' \item "weighted" variation with addaptive weighting of slices.
#' } #' }
#' @param max.dim upper bounds for \code{k}, (ignored if \code{k} is supplied).
#' @param ... Parameters passed on to \code{cve.call}. #' @param ... Parameters passed on to \code{cve.call}.
#' #'
#' @return an S3 object of class \code{cve} with components: #' @return an S3 object of class \code{cve} with components:
@ -58,19 +59,54 @@
#' } #' }
#' #'
#' @examples #' @examples
#' # create dataset #' # set dimensions for simulation model
#' x <- matrix(rnorm(400), 100, 4) #' p <- 8
#' y <- x[, 1] + x[, 2] + as.matrix(rnorm(100)) #' k <- 2
#' #' # create B for simulation
#' # Call CVE. #' b1 <- rep(1 / sqrt(p), p)
#' dr <- cve(y ~ x) #' b2 <- (-1)^seq(1, p) / sqrt(p)
#' # Call weighted CVE. #' B <- cbind(b1, b2)
#' dr.weighted <- cve(y ~ x, method = "weighted") #' # samplsize
#' #' n <- 200
#' # Training data responces of reduced data. #' set.seed(21)
#' y.est <- directions(dr, 1) #' # creat predictor data x ~ N(0, I_p)
#' # Extract SDR subspace basis of dimension 1. #' x <- matrix(rnorm(n * p), n, p)
#' B <- coef(dr.momentum, 1) #' # simulate response variable
#' # y = f(B'x) + err
#' # with f(x1, x2) = x1^2 + 2 * x2 and err ~ N(0, 0.25^2)
#' y <- (x %*% b1)^2 + 2 * (x %*% b2) + 0.25 * rnorm(100)
#' # calculate cve with method 'simple' for k unknown in 1, ..., 4
#' cve.obj.s <- cve(y ~ x, max.dim = 4) # default method 'simple'
#' # calculate cve with method 'weighed' for k = 2
#' cve.obj.w <- cve(y ~ x, k = 2, method = 'weighted')
#' # estimate dimension from cve.obj.s
#' khat <- predict_dim(cve.obj.s)$k
#' # get cve-estimate for B with dimensions (p, k = khat)
#' B2 <- coef(cve.obj.s, k = khat)
#' # get projected X data (same as cve.obj.s$X %*% B2)
#' proj.X <- directions(cve.obj.s, k = khat)
#' # plot y against projected data
#' plot(proj.X[, 1], y)
#' plot(proj.X[, 2], y)
#' # creat 10 new x points and y according to model
#' x.new <- matrix(rnorm(10 * p), 10, p)
#' y.new <- (x.new %*% b1)^2 + 2 * (x.new %*% b2) + 0.25 * rnorm(10)
#' # predict y.new
#' yhat <- predict(cve.obj.s, x.new, khat)
#' plot(y.new, yhat)
#' # projection matrix on span(B)
#' # same as B %*% t(B) since B is semi-orthogonal
#' PB <- B %*% solve(t(B) %*% B) %*% t(B)
#' # cve estimates for B with simple and weighted method
#' B.s <- coef(cve.obj.s, k = 2)
#' B.w <- coef(cve.obj.w, k = 2)
#' # same as B.s %*% t(B.s) since B.s is semi-orthogonal (same vor B.w)
#' PB.s <- B.s %*% solve(t(B.s) %*% B.s) %*% t(B.s)
#' PB.w <- B.w %*% solve(t(B.w) %*% B.w) %*% t(B.w)
#' # compare estimation accuracy of simple and weighted cve estimate by
#' # Frobenius norm of difference of projections.
#' norm(PB - PB.s, type = 'F')
#' norm(PB - PB.w, type = 'F')
#' #'
#' @seealso For a detailed description of \code{formula} see #' @seealso For a detailed description of \code{formula} see
#' \code{\link{formula}}. #' \code{\link{formula}}.
@ -107,6 +143,12 @@ cve <- function(formula, data, method = "simple", max.dim = 10L, ...) {
#' \code{\link{estimate.bandwidth}} (ignored if \code{h} is supplied). #' \code{\link{estimate.bandwidth}} (ignored if \code{h} is supplied).
#' @param X data matrix with samples in its rows. #' @param X data matrix with samples in its rows.
#' @param Y Responses (1 dimensional). #' @param Y Responses (1 dimensional).
#' @param method specifies the CVE method variation as one of
#' \itemize{
#' \item "simple" exact implementation as described in the paper listed
#' below.
#' \item "weighted" variation with addaptive weighting of slices.
#' }
#' @param k Dimension of lower dimensional projection, if \code{k} is given #' @param k Dimension of lower dimensional projection, if \code{k} is given
#' only the specified dimension \code{B} matrix is estimated. #' only the specified dimension \code{B} matrix is estimated.
#' @param min.dim lower bounds for \code{k}, (ignored if \code{k} is supplied). #' @param min.dim lower bounds for \code{k}, (ignored if \code{k} is supplied).
@ -124,28 +166,35 @@ cve <- function(formula, data, method = "simple", max.dim = 10L, ...) {
#' @param slack Positive scaling to allow small increases of the loss while #' @param slack Positive scaling to allow small increases of the loss while
#' optimizing. #' optimizing.
#' @param gamma step-size reduction multiple. #' @param gamma step-size reduction multiple.
#' @param V.init Semi-orthogonal matrix of dimensions `(ncol(X), ncol(X) - k)` #' as optimization starting value. (If supplied, \code{attempts} is #' @param V.init Semi-orthogonal matrix of dimensions `(ncol(X), ncol(X) - k)
#' as optimization starting value. (If supplied, \code{attempts} is
#' set to 1 and \code{k} to match dimension) #' set to 1 and \code{k} to match dimension)
#' #'
#' @inherit cve return #' @inherit cve return
#' #'
#' @examples #' @examples
#' # Create a dataset (n samples): #' # create B for simulation (k = 1)
#' n <- 100 #' B <- rep(1, 5) / sqrt(5)
#' X <- matrix(rnorm(4 * n), n)
#' Y <- matrix(X[, 1] + cos(X[, 2]) + rnorm(n, 0, .1), n)
#' #'
#' # Create logger function: #' set.seed(21)
#' logger <- function(attempt, iter, data) { #' # creat predictor data X ~ N(0, I_p)
#' if (iter == 0) { #' X <- matrix(rnorm(500), 100, 5)
#' cat("Starting attempt nr:", attempt, "\n") #' # simulate response variable
#' } #' # Y = f(B'X) + err
#' cat(" iter:", iter, "loss:", data$loss, "\n") #' # with f(x1) = x1 and err ~ N(0, 0.25^2)
#' } #' Y <- X %*% B + 0.25 * rnorm(100)
#' #'
#' Call 'cve' with logger: #' # calculate cve with method 'simple' for k = 1
#' cve(Y ~ X, logger = logger) #' set.seed(21)
#' cve.obj.simple1 <- cve(Y ~ X, k = 1)
#' #'
#' # same as
#' set.seed(21)
#' cve.obj.simple2 <- cve.call(X, Y, k = 1)
#'
#' # extract estimated B's.
#' coef(cve.obj.simple1, k = 1)
#' coef(cve.obj.simple2, k = 1)
#' @export #' @export
cve.call <- function(X, Y, method = "simple", cve.call <- function(X, Y, method = "simple",
nObs = sqrt(nrow(X)), h = NULL, nObs = sqrt(nrow(X)), h = NULL,
@ -178,7 +227,7 @@ cve.call <- function(X, Y, method = "simple",
} }
if (!(is.matrix(X) && is.numeric(X))) { if (!(is.matrix(X) && is.numeric(X))) {
stop("Parameter 'X' should be a numeric matrices.") stop("Parameter 'X' should be a numeric matrix.")
} }
if (!is.numeric(Y)) { if (!is.numeric(Y)) {
stop("Parameter 'Y' must be numeric.") stop("Parameter 'Y' must be numeric.")
@ -288,7 +337,7 @@ cve.call <- function(X, Y, method = "simple",
h <- estimate.bandwidth(X, k, nObs) h <- estimate.bandwidth(X, k, nObs)
} }
dr.k <- .Call('cve', PACKAGE = 'CVE', dr.k <- .Call('cve_export', PACKAGE = 'CVE',
X, Y, k, h, X, Y, k, h,
method_nr, method_nr,
V.init, V.init,

View File

@ -9,10 +9,35 @@
#' @return dir the matrix of CS or CMS of given dimension #' @return dir the matrix of CS or CMS of given dimension
#' #'
#' @examples #' @examples
#' x <- matrix(rnorm(400),100,4) #' # set dimensions for simulation model
#' y <- x[, 1] + x[, 2] + as.matrix(rnorm(100)) #' p <- 8 # sample dimension
#' dr <- cve(y ~ x, k = 2) # Only for sub-space dim. 2 #' k <- 2 # real dimension of SDR subspace
#' B2 <- coef(dr, 2) #' n <- 200 # samplesize
#' # create B for simulation
#' b1 <- rep(1 / sqrt(p), p)
#' b2 <- (-1)^seq(1, p) / sqrt(p)
#' B <- cbind(b1, b2)
#'
#' set.seed(21)
#' # creat predictor data x ~ N(0, I_p)
#' x <- matrix(rnorm(n * p), n, p)
#' # simulate response variable
#' # y = f(B'x) + err
#' # with f(x1, x2) = x1^2 + 2 * x2 and err ~ N(0, 0.25^2)
#' y <- (x %*% b1)^2 + 2 * (x %*% b2) + 0.25 * rnorm(100)
#' # calculate cve for k = 1, ..., 5
#' cve.obj <- cve(y ~ x, max.dim = 5)
#' # get cve-estimate for B with dimensions (p, k = 2)
#' B2 <- coef(cve.obj, k = 2)
#'
#' # Projection matrix on span(B)
#' # equivalent to `B %*% t(B)` since B is semi-orthonormal
#' PB <- B %*% solve(t(B) %*% B) %*% t(B)
#' # Projection matrix on span(B2)
#' # equivalent to `B2 %*% t(B2)` since B2 is semi-orthonormal
#' PB2 <- B2 %*% solve(t(B2) %*% B2) %*% t(B2)
#' # compare estimation accuracy by Frobenius norm of difference of projections
#' norm(PB - PB2, type = 'F')
#' #'
#' @method coef cve #' @method coef cve
#' @aliases coef.cve #' @aliases coef.cve

View File

@ -8,6 +8,24 @@ directions <- function(dr, k) {
#' @param dr Instance of 'cve' as returned by \code{cve}. #' @param dr Instance of 'cve' as returned by \code{cve}.
#' @param k SDR dimension to use for projection. #' @param k SDR dimension to use for projection.
#' #'
#' @examples
#' # create B for simulation (k = 1)
#' B <- rep(1, 5) / sqrt(5)
#' set.seed(21)
#' # creat predictor data x ~ N(0, I_p)
#' x <- matrix(rnorm(500), 100, 5)
#' # simulate response variable
#' # y = f(B'x) + err
#' # with f(x1) = x1 and err ~ N(0, 0.25^2)
#' y <- x %*% B + 0.25 * rnorm(100)
#' # calculate cve with method 'simple' for k = 1
#' set.seed(21)
#' cve.obj.simple <- cve(y ~ x, k = 1, method = 'simple')
#' # get projected data for k = 1
#' x.proj <- directions(cve.obj.simple, k = 1)
#' # plot y against projected data
#' plot(x.proj, y)
#'
#' @method directions cve #' @method directions cve
#' @aliases directions directions.cve #' @aliases directions directions.cve
#' @export #' @export

View File

@ -14,6 +14,25 @@
#' #'
#' @return Estimated bandwidth \code{h}. #' @return Estimated bandwidth \code{h}.
#' #'
#' @examples
#' # set dimensions for simulation model
#' p <- 5; k <- 1
#' # create B for simulation
#' B <- rep(1, p) / sqrt(p)
#' # samplsize
#' n <- 100
#' set.seed(21)
#' #creat predictor data x ~ N(0, I_p)
#' x <- matrix(rnorm(n * p), n, p)
#' # simulate response variable
#' # y = f(B'x) + err
#' # with f(x1) = x1 and err ~ N(0, 0.25^2)
#' y <- x %*% B + 0.25 * rnorm(100)
#' # calculate cve with method 'simple' for k = 1
#' set.seed(21)
#' cve.obj.simple <- cve(y ~ x, k = k)
#' print(cve.obj.simple$res$'1'$h)
#' print(estimate.bandwidth(x, k = k))
#' @export #' @export
estimate.bandwidth <- function(X, k, nObs) { estimate.bandwidth <- function(X, k, nObs) {
n <- nrow(X) n <- nrow(X)

View File

@ -5,6 +5,31 @@
#' @param x Object of class \code{"cve"} (result of [\code{\link{cve}}]). #' @param x Object of class \code{"cve"} (result of [\code{\link{cve}}]).
#' @param ... Pass through parameters to [\code{\link{plot}}] and #' @param ... Pass through parameters to [\code{\link{plot}}] and
#' [\code{\link{lines}}] #' [\code{\link{lines}}]
#' @examples
#' # create B for simulation
#' B <- cbind(rep(1, 6), (-1)^seq(6)) / sqrt(6)
#'
#' set.seed(21)
#' # creat predictor data x ~ N(0, I_p)
#' X <- matrix(rnorm(600), 100)
#'
#' # simulate response variable
#' # y = f(B'x) + err
#' # with f(x1, x2) = x1^2 + 2 x2 and err ~ N(0, 0.25^2)
#' Y <- (X %*% B[, 1])^2 + 2 * X %*% B[, 2] + rnorm(100, 0, .1)
#'
#' # Create bandwidth estimation function
#' estimate.bandwidth <- function(X, k, nObs) {
#' n <- nrow(X)
#' p <- ncol(X)
#' X_c <- scale(X, center = TRUE, scale = FALSE)
#' 2 * qchisq((nObs - 1) / (n - 1), k) * sum(X_c^2) / (n * p)
#' }
#' # calculate cve with method 'simple' for k = min.dim,...,max.dim
#' cve.obj.simple <- cve(Y ~ X, h = estimate.bandwidth, nObs = sqrt(nrow(X)))
#'
#' # elbow plot
#' plot(cve.obj.simple)
#' #'
#' @seealso see \code{\link{par}} for graphical parameters to pass through #' @seealso see \code{\link{par}} for graphical parameters to pass through
#' as well as \code{\link{plot}}, the standard plot utility. #' as well as \code{\link{plot}}, the standard plot utility.

View File

@ -10,6 +10,32 @@
#' #'
#' @return prediced response of data \code{newdata}. #' @return prediced response of data \code{newdata}.
#' #'
#' @examples
#' # create B for simulation
#' B <- rep(1, 5) / sqrt(5)
#'
#' set.seed(21)
#' # creat predictor data x ~ N(0, I_p)
#' x <- matrix(rnorm(500), 100)
#'
#' # simulate response variable
#' # y = f(B'x) + err
#' # with f(x1) = x1 and err ~ N(0, 0.25^2)
#' y <- x %*% B + 0.25 * rnorm(100)
#'
#' x.train <- x[1:80, ]
#' x.test <- x[81:100, ]
#' y.train <- y[1:80, ]
#' y.test <- y[81:100, ]
#'
#' # calculate cve with method 'simple' for k = 1
#' cve.obj.simple <- cve(y.train ~ x.train, k = 1)
#'
#' # predict y.test from x.test
#' yhat <- predict(cve.obj.simple, x.test, 1)
#'
#' # plot prediction against y.test
#' plot(yhat, y.test)
#' @seealso \code{\link{cve}}, \code{\link{cve.call}} or \pkg{\link{mars}}. #' @seealso \code{\link{cve}}, \code{\link{cve.call}} or \pkg{\link{mars}}.
#' #'
#' @rdname predict.cve #' @rdname predict.cve

View File

@ -10,15 +10,26 @@
#' \item k: predicted dimensions. #' \item k: predicted dimensions.
#' } #' }
#' #'
#' @examples
#' # create B for simulation
#' B <- rep(1, 5) / sqrt(5)
#'
#' set.seed(21)
#' # creat predictor data x ~ N(0, I_p)
#' x <- matrix(rnorm(500), 100)
#'
#' # simulate response variable
#' # y = f(B'x) + err
#' # with f(x1) = x1 and err ~ N(0, 0.25^2)
#' y <- x %*% B + 0.25 * rnorm(100)
#'
#' # Calculate cve for unknown k between min.dim and max.dim.
#' cve.obj.simple <- cve(y ~ x)
#'
#' predict_dim(cve.obj.simple)
#'
#' @export #' @export
predict.dim <- function(object, ...) { predict_dim <- function(object, ...) {
UseMethod("predict.dim")
}
#' @aliases predict.dim
#' @method predict.dim cve
#' @export
predict.dim.cve <- function(object, ...) {
# Get centered training data and dimensions # Get centered training data and dimensions
X <- scale(object$X, center = TRUE, scale = FALSE) X <- scale(object$X, center = TRUE, scale = FALSE)
n <- nrow(object$X) # umber of training data samples n <- nrow(object$X) # umber of training data samples

View File

@ -1,6 +1,25 @@
#' Prints a summary of a \code{cve} result. #' Prints a summary of a \code{cve} result.
#' @param object Instance of 'cve' as returned by \code{cve}. #' @param object Instance of 'cve' as returned by \code{cve}.
#' @param ... ignored. #' @param ... ignored.
#'
#' @examples
#' # create B for simulation
#' B <- rep(1, 5) / sqrt(5)
#'
#' set.seed(21)
#' #creat predictor data x ~ N(0, I_p)
#' x <- matrix(rnorm(500), 100)
#'
#' # simulate response variable
#' # y = f(B'x) + err
#' # with f(x1) = x1 and err ~ N(0, 0.25^2)
#' y <- x %*% B + 0.25 * rnorm(100)
#'
#' # calculate cve for unknown k between min.dim and max.dim.
#' cve.obj.simple <- cve(y ~ x)
#'
#' summary(cve.obj.simple)
#'
#' @method summary cve #' @method summary cve
#' @export #' @export
summary.cve <- function(object, ...) { summary.cve <- function(object, ...) {

View File

@ -21,9 +21,34 @@ dir the matrix of CS or CMS of given dimension
Returns the SDR basis matrix for SDR dimension(s). Returns the SDR basis matrix for SDR dimension(s).
} }
\examples{ \examples{
x <- matrix(rnorm(400),100,4) # set dimensions for simulation model
y <- x[, 1] + x[, 2] + as.matrix(rnorm(100)) p <- 8 # sample dimension
dr <- cve(y ~ x, k = 2) # Only for sub-space dim. 2 k <- 2 # real dimension of SDR subspace
B2 <- coef(dr, 2) n <- 200 # samplesize
# create B for simulation
b1 <- rep(1 / sqrt(p), p)
b2 <- (-1)^seq(1, p) / sqrt(p)
B <- cbind(b1, b2)
set.seed(21)
# creat predictor data x ~ N(0, I_p)
x <- matrix(rnorm(n * p), n, p)
# simulate response variable
# y = f(B'x) + err
# with f(x1, x2) = x1^2 + 2 * x2 and err ~ N(0, 0.25^2)
y <- (x \%*\% b1)^2 + 2 * (x \%*\% b2) + 0.25 * rnorm(100)
# calculate cve for k = 1, ..., 5
cve.obj <- cve(y ~ x, max.dim = 5)
# get cve-estimate for B with dimensions (p, k = 2)
B2 <- coef(cve.obj, k = 2)
# Projection matrix on span(B)
# equivalent to `B \%*\% t(B)` since B is semi-orthonormal
PB <- B \%*\% solve(t(B) \%*\% B) \%*\% t(B)
# Projection matrix on span(B2)
# equivalent to `B2 \%*\% t(B2)` since B2 is semi-orthonormal
PB2 <- B2 \%*\% solve(t(B2) \%*\% B2) \%*\% t(B2)
# compare estimation accuracy by Frobenius norm of difference of projections
norm(PB - PB2, type = 'F')
} }

View File

@ -20,6 +20,8 @@ supplied.}
\item "weighted" variation with addaptive weighting of slices. \item "weighted" variation with addaptive weighting of slices.
}} }}
\item{max.dim}{upper bounds for \code{k}, (ignored if \code{k} is supplied).}
\item{...}{Parameters passed on to \code{cve.call}.} \item{...}{Parameters passed on to \code{cve.call}.}
} }
\value{ \value{
@ -58,19 +60,54 @@ a real \eqn{p \times k}{p x k} of rank \eqn{k <= p}{k \leq p}.
Without loss of generality \eqn{B} is assumed to be orthonormal. Without loss of generality \eqn{B} is assumed to be orthonormal.
} }
\examples{ \examples{
# create dataset # set dimensions for simulation model
x <- matrix(rnorm(400), 100, 4) p <- 8
y <- x[, 1] + x[, 2] + as.matrix(rnorm(100)) k <- 2
# create B for simulation
# Call CVE. b1 <- rep(1 / sqrt(p), p)
dr <- cve(y ~ x) b2 <- (-1)^seq(1, p) / sqrt(p)
# Call weighted CVE. B <- cbind(b1, b2)
dr.weighted <- cve(y ~ x, method = "weighted") # samplsize
n <- 200
# Training data responces of reduced data. set.seed(21)
y.est <- directions(dr, 1) # creat predictor data x ~ N(0, I_p)
# Extract SDR subspace basis of dimension 1. x <- matrix(rnorm(n * p), n, p)
B <- coef(dr.momentum, 1) # simulate response variable
# y = f(B'x) + err
# with f(x1, x2) = x1^2 + 2 * x2 and err ~ N(0, 0.25^2)
y <- (x \%*\% b1)^2 + 2 * (x \%*\% b2) + 0.25 * rnorm(100)
# calculate cve with method 'simple' for k unknown in 1, ..., 4
cve.obj.s <- cve(y ~ x, max.dim = 4) # default method 'simple'
# calculate cve with method 'weighed' for k = 2
cve.obj.w <- cve(y ~ x, k = 2, method = 'weighted')
# estimate dimension from cve.obj.s
khat <- predict_dim(cve.obj.s)$k
# get cve-estimate for B with dimensions (p, k = khat)
B2 <- coef(cve.obj.s, k = khat)
# get projected X data (same as cve.obj.s$X \%*\% B2)
proj.X <- directions(cve.obj.s, k = khat)
# plot y against projected data
plot(proj.X[, 1], y)
plot(proj.X[, 2], y)
# creat 10 new x points and y according to model
x.new <- matrix(rnorm(10 * p), 10, p)
y.new <- (x.new \%*\% b1)^2 + 2 * (x.new \%*\% b2) + 0.25 * rnorm(10)
# predict y.new
yhat <- predict(cve.obj.s, x.new, khat)
plot(y.new, yhat)
# projection matrix on span(B)
# same as B \%*\% t(B) since B is semi-orthogonal
PB <- B \%*\% solve(t(B) \%*\% B) \%*\% t(B)
# cve estimates for B with simple and weighted method
B.s <- coef(cve.obj.s, k = 2)
B.w <- coef(cve.obj.w, k = 2)
# same as B.s \%*\% t(B.s) since B.s is semi-orthogonal (same vor B.w)
PB.s <- B.s \%*\% solve(t(B.s) \%*\% B.s) \%*\% t(B.s)
PB.w <- B.w \%*\% solve(t(B.w) \%*\% B.w) \%*\% t(B.w)
# compare estimation accuracy of simple and weighted cve estimate by
# Frobenius norm of difference of projections.
norm(PB - PB.s, type = 'F')
norm(PB - PB.w, type = 'F')
} }
\references{ \references{

View File

@ -14,6 +14,13 @@ cve.call(X, Y, method = "simple", nObs = sqrt(nrow(X)), h = NULL,
\item{Y}{Responses (1 dimensional).} \item{Y}{Responses (1 dimensional).}
\item{method}{specifies the CVE method variation as one of
\itemize{
\item "simple" exact implementation as described in the paper listed
below.
\item "weighted" variation with addaptive weighting of slices.
}}
\item{nObs}{parameter for choosing bandwidth \code{h} using \item{nObs}{parameter for choosing bandwidth \code{h} using
\code{\link{estimate.bandwidth}} (ignored if \code{h} is supplied).} \code{\link{estimate.bandwidth}} (ignored if \code{h} is supplied).}
@ -39,7 +46,8 @@ optimizing.}
\item{gamma}{step-size reduction multiple.} \item{gamma}{step-size reduction multiple.}
\item{V.init}{Semi-orthogonal matrix of dimensions `(ncol(X), ncol(X) - k)` #' as optimization starting value. (If supplied, \code{attempts} is \item{V.init}{Semi-orthogonal matrix of dimensions `(ncol(X), ncol(X) - k)
as optimization starting value. (If supplied, \code{attempts} is
set to 1 and \code{k} to match dimension)} set to 1 and \code{k} to match dimension)}
\item{max.iter}{maximum number of optimization steps.} \item{max.iter}{maximum number of optimization steps.}
@ -84,3 +92,27 @@ and \eqn{B = (b_1, ..., b_k)} is
a real \eqn{p \times k}{p x k} of rank \eqn{k <= p}{k \leq p}. a real \eqn{p \times k}{p x k} of rank \eqn{k <= p}{k \leq p}.
Without loss of generality \eqn{B} is assumed to be orthonormal. Without loss of generality \eqn{B} is assumed to be orthonormal.
} }
\examples{
# create B for simulation (k = 1)
B <- rep(1, 5) / sqrt(5)
set.seed(21)
# creat predictor data X ~ N(0, I_p)
X <- matrix(rnorm(500), 100, 5)
# simulate response variable
# Y = f(B'X) + err
# with f(x1) = x1 and err ~ N(0, 0.25^2)
Y <- X \%*\% B + 0.25 * rnorm(100)
# calculate cve with method 'simple' for k = 1
set.seed(21)
cve.obj.simple1 <- cve(Y ~ X, k = 1)
# same as
set.seed(21)
cve.obj.simple2 <- cve.call(X, Y, k = 1)
# extract estimated B's.
coef(cve.obj.simple1, k = 1)
coef(cve.obj.simple2, k = 1)
}

View File

@ -15,3 +15,22 @@
\description{ \description{
Computes projected training data \code{X} for given dimension `k`. Computes projected training data \code{X} for given dimension `k`.
} }
\examples{
# create B for simulation (k = 1)
B <- rep(1, 5) / sqrt(5)
set.seed(21)
# creat predictor data x ~ N(0, I_p)
x <- matrix(rnorm(500), 100, 5)
# simulate response variable
# y = f(B'x) + err
# with f(x1) = x1 and err ~ N(0, 0.25^2)
y <- x \%*\% B + 0.25 * rnorm(100)
# calculate cve with method 'simple' for k = 1
set.seed(21)
cve.obj.simple <- cve(y ~ x, k = 1, method = 'simple')
# get projected data for k = 1
x.proj <- directions(cve.obj.simple, k = 1)
# plot y against projected data
plot(x.proj, y)
}

View File

@ -25,3 +25,23 @@ with \eqn{n} the sample size, \eqn{p} its dimension
(\code{n <- nrow(X); p <- ncol(X)}) and the covariance-matrix \eqn{\Sigma} (\code{n <- nrow(X); p <- ncol(X)}) and the covariance-matrix \eqn{\Sigma}
which is \code{(n-1)/n} times the sample covariance estimate. which is \code{(n-1)/n} times the sample covariance estimate.
} }
\examples{
# set dimensions for simulation model
p <- 5; k <- 1
# create B for simulation
B <- rep(1, p) / sqrt(p)
# samplsize
n <- 100
set.seed(21)
#creat predictor data x ~ N(0, I_p)
x <- matrix(rnorm(n * p), n, p)
# simulate response variable
# y = f(B'x) + err
# with f(x1) = x1 and err ~ N(0, 0.25^2)
y <- x \%*\% B + 0.25 * rnorm(100)
# calculate cve with method 'simple' for k = 1
set.seed(21)
cve.obj.simple <- cve(y ~ x, k = k)
print(cve.obj.simple$res$'1'$h)
print(estimate.bandwidth(x, k = k))
}

View File

@ -14,6 +14,33 @@
} }
\description{ \description{
Boxplots of the loss from \code{min.dim} to \code{max.dim} \code{k} values. Boxplots of the loss from \code{min.dim} to \code{max.dim} \code{k} values.
}
\examples{
# create B for simulation
B <- cbind(rep(1, 6), (-1)^seq(6)) / sqrt(6)
set.seed(21)
# creat predictor data x ~ N(0, I_p)
X <- matrix(rnorm(600), 100)
# simulate response variable
# y = f(B'x) + err
# with f(x1, x2) = x1^2 + 2 x2 and err ~ N(0, 0.25^2)
Y <- (X \%*\% B[, 1])^2 + 2 * X \%*\% B[, 2] + rnorm(100, 0, .1)
# Create bandwidth estimation function
estimate.bandwidth <- function(X, k, nObs) {
n <- nrow(X)
p <- ncol(X)
X_c <- scale(X, center = TRUE, scale = FALSE)
2 * qchisq((nObs - 1) / (n - 1), k) * sum(X_c^2) / (n * p)
}
# calculate cve with method 'simple' for k = min.dim,...,max.dim
cve.obj.simple <- cve(Y ~ X, h = estimate.bandwidth, nObs = sqrt(nrow(X)))
# elbow plot
plot(cve.obj.simple)
} }
\seealso{ \seealso{
see \code{\link{par}} for graphical parameters to pass through see \code{\link{par}} for graphical parameters to pass through

View File

@ -22,6 +22,33 @@ prediced response of data \code{newdata}.
\description{ \description{
Predict responces using reduced data with \code{\link{mars}}. Predict responces using reduced data with \code{\link{mars}}.
} }
\examples{
# create B for simulation
B <- rep(1, 5) / sqrt(5)
set.seed(21)
# creat predictor data x ~ N(0, I_p)
x <- matrix(rnorm(500), 100)
# simulate response variable
# y = f(B'x) + err
# with f(x1) = x1 and err ~ N(0, 0.25^2)
y <- x \%*\% B + 0.25 * rnorm(100)
x.train <- x[1:80, ]
x.test <- x[81:100, ]
y.train <- y[1:80, ]
y.test <- y[81:100, ]
# calculate cve with method 'simple' for k = 1
cve.obj.simple <- cve(y.train ~ x.train, k = 1)
# predict y.test from x.test
yhat <- predict(cve.obj.simple, x.test, 1)
# plot prediction against y.test
plot(yhat, y.test)
}
\seealso{ \seealso{
\code{\link{cve}}, \code{\link{cve.call}} or \pkg{\link{mars}}. \code{\link{cve}}, \code{\link{cve.call}} or \pkg{\link{mars}}.
} }

View File

@ -1,24 +0,0 @@
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/predict_dim.R
\name{predict.dim}
\alias{predict.dim}
\title{Predicts SDR dimension using \code{\link[mda]{mars}} via a Cross-Validation.}
\usage{
predict.dim(object, ...)
}
\arguments{
\item{object}{instance of class \code{cve} (result of \code{cve},
\code{cve.call}).}
\item{...}{ignored.}
}
\value{
list with
\itemize{
\item MSE: Mean Square Error,
\item k: predicted dimensions.
}
}
\description{
Predicts SDR dimension using \code{\link[mda]{mars}} via a Cross-Validation.
}

View File

@ -14,3 +14,22 @@
\description{ \description{
Prints a summary of a \code{cve} result. Prints a summary of a \code{cve} result.
} }
\examples{
# create B for simulation
B <- rep(1, 5) / sqrt(5)
set.seed(21)
#creat predictor data x ~ N(0, I_p)
x <- matrix(rnorm(500), 100)
# simulate response variable
# y = f(B'x) + err
# with f(x1) = x1 and err ~ N(0, 0.25^2)
y <- x \%*\% B + 0.25 * rnorm(100)
# calculate cve for unknown k between min.dim and max.dim.
cve.obj.simple <- cve(y ~ x)
summary(cve.obj.simple)
}

View File

@ -13,22 +13,18 @@
* *
* @param logger Pointer to a SEXP R object representing an R function. * @param logger Pointer to a SEXP R object representing an R function.
* @param env Pointer to a SEXP R object representing an R environment. * @param env Pointer to a SEXP R object representing an R environment.
* @param attempt counter of attempts.
* @param iter optimization iteration counter.
* @param L Pointer to a SEXP R object representing an R environment. * @param L Pointer to a SEXP R object representing an R environment.
* @param V Pointer memory area of size `nrowV * ncolV` storing `V`. * @param V Pointer memory area of size `nrowV * ncolV` storing `V`.
* @param nrowV Nr. of rows of `V`.
* @param ncolV Nr. of columns of `V`.
* @param G Pointer memory area of size `nrowG * ncolG` storing `G`. * @param G Pointer memory area of size `nrowG * ncolG` storing `G`.
* @param nrowG Nr. of rows of `G`.
* @param ncolG Nr. of columns of `G`.
* @param loss Current loss L(V). * @param loss Current loss L(V).
* @param err Errof for break condition (0.0 befor first iteration). * @param err Errof for break condition (0.0 befor first iteration).
* @param tau Current step-size. * @param tau Current step-size.
*/ */
void callLogger(SEXP logger, SEXP env, void callLogger(SEXP logger, SEXP env,
const int attempt, const int iter, const int attempt, const int iter,
const double* L, const int lenL, const mat* L, const mat* V, const mat* G,
const double* V, const int nrowV, const int ncolV,
const double* G, const int nrowG, const int ncolG,
const double loss, const double err, const double tau) { const double loss, const double err, const double tau) {
/* Create R objects to be passed to R logger function. */ /* Create R objects to be passed to R logger function. */
// Attempt is converted from 0-indexed to 1-indexed as R index. // Attempt is converted from 0-indexed to 1-indexed as R index.
@ -36,13 +32,13 @@ void callLogger(SEXP logger, SEXP env,
SEXP r_iter = PROTECT(ScalarInteger(iter + 1)); SEXP r_iter = PROTECT(ScalarInteger(iter + 1));
/* Create R representations of L, V and G */ /* Create R representations of L, V and G */
SEXP r_L = PROTECT(allocVector(REALSXP, lenL)); SEXP r_L = PROTECT(allocVector(REALSXP, L->nrow));
SEXP r_V = PROTECT(allocMatrix(REALSXP, nrowV, ncolV)); SEXP r_V = PROTECT(allocMatrix(REALSXP, V->nrow, V->ncol));
SEXP r_G = PROTECT(allocMatrix(REALSXP, nrowG, ncolG)); SEXP r_G = PROTECT(allocMatrix(REALSXP, G->nrow, G->ncol));
/* Copy data to R objects */ /* Copy data to R objects */
memcpy(REAL(r_L), L, lenL * sizeof(double)); memcpy(REAL(r_L), L->elem, L->nrow * sizeof(double));
memcpy(REAL(r_V), V, nrowV * ncolV * sizeof(double)); memcpy(REAL(r_V), V->elem, V->nrow * V->ncol * sizeof(double));
memcpy(REAL(r_G), G, nrowG * ncolG * sizeof(double)); memcpy(REAL(r_G), G->elem, G->nrow * G->ncol * sizeof(double));
/* Build data list passed to logger */ /* Build data list passed to logger */
SEXP data = PROTECT(allocVector(VECSXP, 6)); SEXP data = PROTECT(allocVector(VECSXP, 6));

View File

@ -2,121 +2,113 @@
#include "cve.h" #include "cve.h"
// TODO: clarify void cve(const mat *X, const mat *Y, const double h,
static inline double gaussKernel(const double x, const double scale) {
return exp(scale * x * x);
}
void cve_sub(const int n, const int p, const int q,
const double *X, const double *Y, const double h,
const unsigned int method, const unsigned int method,
const double momentum, const double momentum,
const double tau_init, const double tol_init, const double tau_init, const double tol_init,
const double slack, const double gamma, const double slack, const double gamma,
const int maxIter, const int attempts, const int maxIter, const int attempts,
double *V, double *L, mat *V, mat *L,
SEXP logger, SEXP loggerEnv) { SEXP logger, SEXP loggerEnv) {
int attempt = 0, iter, i, nn = (n * (n - 1)) / 2; // TODO: param and dim. validation.
int n = X->nrow, p = X->ncol, q = V->ncol;
int attempt = 0, iter;
double loss, loss_last, loss_best, err, tau; double loss, loss_last, loss_best, err, tau;
double tol = tol_init * sqrt((double)(2 * q)); double tol = tol_init * sqrt((double)(2 * q));
double gKscale = -0.5 / (h * h);
double agility = -2.0 * (1.0 - momentum) / (h * h); double agility = -2.0 * (1.0 - momentum) / (h * h);
double c = agility / (double)n; double c = agility / (double)n;
// TODO: check parameters! dim, ...
/* Create further intermediate or internal variables. */ /* Create further intermediate or internal variables. */
double *Q = (double*)R_alloc(p * p, sizeof(double)); mat *lvecD_e = (void*)0;
double *V_best = (double*)R_alloc(p * q, sizeof(double)); mat *Ysquared = (void*)0;
double *L_best = (double*)R_alloc(n, sizeof(double)); mat *XV = (void*)0;
double *V_tau = (double*)R_alloc(p * q, sizeof(double)); mat *lvecD = (void*)0; // TODO: combine. aka in-place lvecToSym
double *X_diff = (double*)R_alloc(nn * p, sizeof(double)); mat *D = (void*)0; // TODO: combine. aka in-place lvecToSym
double *X_proj = (double*)R_alloc(nn * p, sizeof(double)); mat *lvecK = (void*)0; // TODO: combine. aka in-place lvecToSym
double *y1 = (double*)R_alloc(n, sizeof(double)); mat *K = (void*)0; // TODO: combine. aka in-place lvecToSym
double *vecD = (double*)R_alloc(nn, sizeof(double)); mat *colSumsK = (void*)0;
double *vecK = (double*)R_alloc(nn, sizeof(double)); mat *W = (void*)0;
double *vecS = (double*)R_alloc(nn, sizeof(double)); mat *y1 = (void*)0;
double *colSums = (double*)R_alloc(n, sizeof(double)); mat *y2 = (void*)0;
double *G = (double*)R_alloc(p * q, sizeof(double)); mat *S = (void*)0;
double *A = (double*)R_alloc(p * p, sizeof(double)); mat *tmp1 = (void*)0;
mat *tmp2 = (void*)0;
mat *G = (void*)0;
mat *A = (void*)0;
mat *V_tau = (void*)0;
mat *V_best = (void*)0;
mat *L_best = (void*)0;
double *V_init = (void*)0; /* Allocate appropiate amount of working memory. */
if (attempts < 1) { int workLen = 2 * (p + 1) * p;
V_init = (double*)R_alloc(p * q, sizeof(double)); if (workLen < n) {
memcpy(V_init, V, p * q * sizeof(double)); workLen = n;
} }
/* Determine size of working memory used by subroutines. */
const int workLen = getWorkLen(n, p, q);
double *workMem = (double*)R_alloc(workLen, sizeof(double)); double *workMem = (double*)R_alloc(workLen, sizeof(double));
/* Compute X_diff, this is static for the entire algorithm. */ lvecD_e = rowDiffSquareSums(X, lvecD_e);
rowDiffs(X, n, p, X_diff); Ysquared = hadamard(1.0, Y, Y, 0.0, Ysquared);
do { do {
/* (Re)set learning rate. */ /* (Re)set learning rate. */
tau = tau_init; tau = tau_init;
/* Check if start value for `V` was supplied. */ /* Check if start value for `V` was supplied. */
if (V_init == (void*)0) { if (attempts > 0) {
/* Sample start value from stiefel manifold. */ /* Sample start value from stiefel manifold. */
rStiefel(p, q, V, workMem, workLen); V = rStiefel(p, q, V, workMem);
} else {
/* (Re)Set start value of `V` to `V_init`. */
memcpy(V, V_init, p * q * sizeof(double));
} }
/* Create projection matrix `Q <- I - V V^T` for initial `V`. */ /* Embed X_i's in V space */
nullProj(V, p, q, Q); XV = matrixprod(1.0, X, V, 0.0, XV);
/* Compute embedded distances */
/* Compute Distance vector. */ lvecD = lincomb(1.0, lvecD_e, -1.0, rowDiffSquareSums(XV, lvecD));
matrixprod(X, n, p, Q, p, p, X_proj); // here X_proj is only `(n, p)`.
rowDiffSquareSums(X_proj, n, p, vecD);
/* Apply kernel to distances. */ /* Apply kernel to distances. */
for (i = 0; i < nn; ++i) { lvecK = applyKernel(lvecD, h, gauss, lvecK);
vecK[i] = gaussKernel(vecD[i], gKscale); /* Transform lower vectors lvecD, lvecK into sym. matrices. */
D = lvecToSym(lvecD, 0.0, D);
K = lvecToSym(lvecK, 1.0, K);
/* Compute column sums of kernel matrix K */
colSumsK = colSums(K, colSumsK);
/* Normalize K columns to obtain weight matrix W */
W = colApply(K, '/', colSumsK, W);
/* first and second order weighted responces */
y1 = matrixprod(1.0, W, Y, 0.0, y1);
y2 = matrixprod(1.0, W, Ysquared, 0.0, y2);
/* Compute losses */
L = hadamard(-1.0, y1, y1, 1.0, copy(y2, L));
/* Compute initial loss */
if (method == simple) {
loss_last = mean(L);
/* Calculate the scaling matrix S */
S = laplace(adjacence(L, Y, y1, D, W, gauss, S), workMem);
} else if (method == weighted) {
colSumsK = elemApply(colSumsK, '-', 1.0, colSumsK);
loss_last = dot(L, '/', colSumsK);
c = agility / sum(colSumsK);
/* Calculate the scaling matrix S */
S = laplace(adjacence(L, Y, y1, D, K, gauss, S), workMem);
} else {
// TODO: error handling!
} }
/* Gradient */
/* Compute col(row) sums of kernal vector (sym. packed lower tri tmp1 = matrixprod(1.0, S, X, 0.0, tmp1);
* matrix.), because `K == K^T` the rowSums are equal to colSums. */ tmp2 = crossprod(1.0, X, tmp1, 0.0, tmp2);
rowSumsSymVec(vecK, n, 1.0, colSums); G = matrixprod(c, tmp2, V, 0.0, G);
/* Compute loss given the kernel vector and its column sums.
* Additionally the first momentum `y1` is computed and stored in
* the working memory (only intermidiate result, needed for `vecS`). */
loss_last = cost(method, n, Y, vecK, colSums, y1, L);
/* Calc the scaling vector used for final computation of grad. */
scaling(method, n, Y, y1, L, vecD, vecK, colSums, vecS);
/* Compute the eucledian gradient `G`. */
rowSweep(X_diff, nn, p, "*", vecS, X_proj);
crossprod(X_diff, nn, p, X_proj, nn, p, workMem);
matrixprod(workMem, p, p, V, p, q, G);
if (method == CVE_METHOD_WEIGHTED) {
/* Compute summ of all kernel applied distances by summing the
* colSums of the kernel matrix. */
// c = -(double)n; // to scale with sum(K) - n
// for (i = 0; i < n; ++i) {
// c += colSums[i];
// }
// TODO: check for division by zero, but should not happen!!!
c = agility / (sum(colSums, n) - (double)n);
}
scale(c, G, p * q); // in-place
if (logger) { if (logger) {
callLogger(logger, loggerEnv, callLogger(logger, loggerEnv,
attempt, /* iter <- 0L */ -1, attempt, /* iter <- 0L */ -1,
L, n, L, V, G,
V, p, q,
G, p, q,
loss_last, /* err <- NA */ -1.0, tau); loss_last, /* err <- NA */ -1.0, tau);
} }
/* Compute Skew-Symmetric matrix `A` used in Cayley transform. /* Compute Skew-Symmetric matrix `A` used in Cayley transform.
* `A <- tau * (G V^T - V G^T) + 0 * A`*/ * `A <- tau * (G V^T - V G^T) + 0 * A`*/
skew(p, q, tau, G, V, 0.0, A); A = skew(tau, G, V, 0.0, A);
for (iter = 0; iter < maxIter; ++iter) { for (iter = 0; iter < maxIter; ++iter) {
/* Before Starting next iteration check if the Uer has requested an /* Before Starting next iteration check if the Uer has requested an
@ -126,50 +118,57 @@ void cve_sub(const int n, const int p, const int q,
R_CheckUserInterrupt(); R_CheckUserInterrupt();
/* Move `V` along the gradient direction. */ /* Move `V` along the gradient direction. */
cayleyTransform(p, q, A, V, V_tau, workMem); V_tau = cayleyTransform(A, V, V_tau, workMem);
/* Create projection matrix for `V_tau`. */ // Rprintf("Start attempt(%2d), iter (%2d): err: %f, loss: %f, tau: %f\n",
nullProj(V_tau, p, q, Q); // attempt, iter, dist(V, V_tau), loss_last, tau);
/* Compute Distance vector. */
matrixprod(X, n, p, Q, p, p, X_proj); // here X_proj only `(n, p)`.
rowDiffSquareSums(X_proj, n, p, vecD);
/* Embed X_i's in V space */
XV = matrixprod(1.0, X, V_tau, 0.0, XV);
/* Compute embedded distances */
lvecD = lincomb(1.0, lvecD_e, -1.0, rowDiffSquareSums(XV, lvecD));
/* Apply kernel to distances. */ /* Apply kernel to distances. */
for (i = 0; i < nn; ++i) { lvecK = applyKernel(lvecD, h, gauss, lvecK);
vecK[i] = gaussKernel(vecD[i], gKscale); /* Transform lower vectors lvecD, lvecK into sym. matrices. */
D = lvecToSym(lvecD, 0.0, D);
K = lvecToSym(lvecK, 1.0, K);
/* Compute column sums of kernel matrix K */
colSumsK = colSums(K, colSumsK);
/* Normalize K columns to obtain weight matrix W */
W = colApply(K, '/', colSumsK, W);
/* first and second order weighted responces */
y1 = matrixprod(1.0, W, Y, 0.0, y1);
y2 = matrixprod(1.0, W, Ysquared, 0.0, y2);
/* Compute losses */
L = hadamard(-1.0, y1, y1, 1.0, copy(y2, L));
/* Compute loss */
if (method == simple) {
loss = mean(L);
} else if (method == weighted) {
colSumsK = elemApply(colSumsK, '-', 1.0, colSumsK);
loss = dot(L, '/', colSumsK);
} else {
// TODO: error handling!
} }
/* Compute col(row) sums of kernal vector (sym. packed lower tri
* matrix.), because `K == K^T` the rowSums are equal to colSums. */
rowSumsSymVec(vecK, n, 1.0, colSums);
/* Compute loss given the kernel vector and its column sums.
* Additionally the first momentum `y1` is computed and stored in
* the working memory (only intermidiate result, needed for vecS).*/
loss = cost(method, n, Y, vecK, colSums, y1, L);
/* Check if step is appropriate, iff not reduce learning rate. */ /* Check if step is appropriate, iff not reduce learning rate. */
if ((loss - loss_last) > loss_last * slack) { if ((loss - loss_last) > loss_last * slack) {
tau *= gamma; tau *= gamma;
scale(gamma, A, p * p); A = elemApply(A, '*', gamma, A); // scale A by gamma
continue; continue;
} }
/* Compute error, use workMem (keep first `n`, they store `y1`). */ /* Compute error, use workMem. */
skew(p, q, 1.0, V, V_tau, 0.0, workMem); err = dist(V, V_tau);
err = norm(workMem, p, p, "F");
/* Shift next step to current step and store loss to last. */ /* Shift next step to current step and store loss to last. */
memcpy(V, V_tau, p * q * sizeof(double)); V = copy(V_tau, V);
loss_last = loss; loss_last = loss;
if (logger) { if (logger) {
callLogger(logger, loggerEnv, callLogger(logger, loggerEnv,
attempt, iter, attempt, iter,
L, n, L, V, G,
V, p, q,
G, p, q,
loss, err, tau); loss, err, tau);
} }
@ -178,41 +177,35 @@ void cve_sub(const int n, const int p, const int q,
break; break;
} }
/* Continue computing the gradient. */ if (method == simple) {
/* Calc the scaling vector used for final computation of grad. */ /* Calculate the scaling matrix S */
scaling(method, n, Y, y1, L, vecD, vecK, colSums, vecS); S = laplace(adjacence(L, Y, y1, D, W, gauss, S), workMem);
} else if (method == weighted) {
/* Compute the eucledian gradient `G`. */ /* Calculate the scaling matrix S */
rowSweep(X_diff, nn, p, "*", vecS, X_proj); S = laplace(adjacence(L, Y, y1, D, K, gauss, S), workMem);
crossprod(X_diff, nn, p, X_proj, nn, p, workMem); c = agility / sum(colSumsK);
// /* Update without momentum */ } else {
// matrixprod(workMem, p, p, V, p, q, G); // TODO: error handling!
// scale(-2. / (((double)n) * h * h), G, p * q); // in-place
/* G <- momentum * G + agility * workMem V */
if (method == CVE_METHOD_WEIGHTED) {
/* Compute summ of all kernel applied distances by summing the
* colSums of the kernel matrix. */
// TODO: check for division by zero, but should not happen!!!
c = agility / (sum(colSums, n) - (double)n);
} }
F77_NAME(dgemm)("N", "N", &p, &q, &p,
&c, workMem, &p, V, &p, /* Gradient */
&momentum, G, &p); tmp1 = matrixprod(1.0, S, X, 0.0, tmp1);
tmp2 = crossprod(1.0, X, tmp1, 0.0, tmp2);
G = matrixprod(c, tmp2, V, momentum, G);
/* Compute Skew-Symmetric matrix `A` used in Cayley transform. /* Compute Skew-Symmetric matrix `A` used in Cayley transform.
* `A <- tau * (G V^T - V G^T) + 0 * A`*/ * `A <- tau * (G V^T - V G^T) + 0 * A`*/
skew(p, q, tau, G, V, 0.0, A); A = skew(tau, G, V, 0.0, A);
} }
/* Check if current attempt improved previous ones */ /* Check if current attempt improved previous ones */
if (attempt == 0 || loss < loss_best) { if (attempt == 0 || loss < loss_best) {
loss_best = loss; loss_best = loss;
memcpy(V_best, V, p * q * sizeof(double)); V_best = copy(V, V_best);
memcpy(L_best, L, n * sizeof(double)); L_best = copy(L, L_best);
} }
} while (++attempt < attempts); } while (++attempt < attempts);
memcpy(V, V_best, p * q * sizeof(double)); V = copy(V_best, V);
memcpy(L, L_best, n * sizeof(double)); L = copy(L_best, L);
} }

View File

@ -12,107 +12,90 @@
#define CVE_MEM_CHUNK_SIZE 2032 #define CVE_MEM_CHUNK_SIZE 2032
#define CVE_MEM_CHUNK_SMALL 1016 #define CVE_MEM_CHUNK_SMALL 1016
#define BLOCK_SIZE 8
/* Bis masks for method types */ /**
#define CVE_METHOD_WEIGHTED 1 * @struct Matrix of dimensions `nrow x ncol`.
*/
typedef struct matrix {
int nrow; /**< Number of rows */
int ncol; /**< Number of columns */
void *origin; /**< Reference to origin, see `asMat()`. */
double *elem; /**< Column-major array of matrix elements. */
} mat;
// typedef struct { typedef enum {
// unsigned int nrow; simple,
// unsigned int ncol; weighted
// unsigned int memsize; } method;
// double *data;
// } mat;
// mat* Matrix(const unsigned int nrow, const unsigned int ncol); typedef enum {
gauss
} kernel;
void cve_sub(const int n, const int p, const int q,
const double *X, const double *Y, const double h, void cve(const mat *X, const mat *Y, const double h,
const unsigned int method, const unsigned int method,
const double momentum, const double momentum,
const double tau_init, const double tol_init, const double tau_init, const double tol_init,
const double slack, const double gamma, const double slack, const double gamma,
const int maxIter, int attempts, const int maxIter, const int attempts,
double *V, double *L, mat *V, mat *L,
SEXP logger, SEXP loggerEnv); SEXP logger, SEXP loggerEnv);
void callLogger(SEXP logger, SEXP env, void callLogger(SEXP logger, SEXP env,
const int attempt, const int epoch, const int attempt, const int iter,
const double* L, const int lenL, const mat* L, const mat* V, const mat* G,
const double* V, const int nrowV, const int ncolV,
const double* G, const int nrowG, const int ncolG,
const double loss, const double err, const double tau); const double loss, const double err, const double tau);
/* CVE sub-routines */ /******************************************************************************/
int getWorkLen(const int n, const int p, const int q); /** rStiefel.c **/
double cost(const unsigned int method, /******************************************************************************/
const int n, /* Random element from Stiefel manifold. */
const double *Y, mat* rStiefel(const int p, const int q, mat *V, double *workMem);
const double *vecK,
const double *colSums,
double *y1, double *L);
void scaling(const unsigned int method,
const int n,
const double *Y, const double *y1, const double *L,
const double *vecD, const double *vecK,
const double *colSums,
double *vecS);
/* rStiefel */ /******************************************************************************/
void rStiefel(const int p, const int q, double *V, /** matrix.c **/
double *workMem, int workLen); /******************************************************************************/
/* Create and Copy matrices */
mat* matrix(const int nrow, const int ncol);
mat* zero(const int nrow, const int ncol);
mat* copy(mat *src, mat *dest);
/* Matrix to scalar */
double sum(const mat *A);
double mean(const mat *A);
double squareSum(const mat* A);
double dot(const mat *A, const char op, const mat *B);
double dist(const mat *A, const mat *B);
/* Matrix to vector (`ncol == 1` matrices, aka row vectors) */
mat* rowSums(const mat *A, mat *sums);
mat* colSums(const mat *A, mat *sums);
mat* rowDiffSquareSums(const mat *X, mat *lvecD);
/* Matrix and scalar to Matrix */
mat* elemApply(mat *A, const char op, const double scalar, mat *B);
/* Matrix and vector to Matrix */
mat* colApply(mat *A, const char op, mat *B, mat *C);
/* Matrix and Matrix to Matrix */
mat* lincomb(const double alpha, const mat *A, const double beta, mat *B);
mat* matrixprod(const double alpha, const mat *A, const mat *B,
double beta, mat* C);
mat* crossprod(const double alpha, const mat *A, const mat *B,
double beta, mat* C);
mat* hadamard(const double alpha, const mat* A, const mat *B,
double beta, mat* C);
mat* skew(const double alpha, mat* A, mat *B, double beta, mat* C);
/* Matrix Transformations */
mat* cayleyTransform(mat *A, mat *B, mat *C, double *workMem);
mat* laplace(mat *A, double *workMem);
mat* lvecToSym(const mat* A, const double diag, mat* B);
/* MATRIX */ /******************************************************************************/
double sum(const double *A, const int nelem); /** cve_subroutines.c **/
/******************************************************************************/
double norm(const double *A, const int nrow, const int ncol, /* CVE specific sub-routines */
const char *type); mat* adjacence(const mat *vec_L, const mat *vec_Y, const mat *vec_y1,
const mat *mat_D, const mat *mat_W, kernel kernel,
void matrixprod(const double *A, const int nrowA, const int ncolA, mat *mat_S);
const double *B, const int nrowB, const int ncolB, mat* applyKernel(const mat* A, const double h, kernel kernel, mat* B);
double *C);
void crossprod(const double *A, const int nrowA, const int ncolA,
const double *B, const int nrowB, const int ncolB,
double *C);
void skew(const int nrow, const int ncol,
double alpha, const double *A, const double *B,
double beta,
double *C);
void nullProj(const double *B, const int nrowB, const int ncolB,
double *Q);
void scale(const double s, double *A, const int nelem);
void cayleyTransform(const int p, const int q,
const double *A, const double *B,
double *X, double *workMem);
/* Row and column opperations. */
void rowSums(const double *A, const int nrow, const int ncol,
double *sum);
void colSums(const double *A, const int nrow, const int ncol,
double *sum);
void rowSquareSums(const double *A, const int nrow, const int ncol,
double *sum);
void rowSumsSymVec(const double *Avec, const int nrow,
const double diag,
double *sum);
void rowDiffs(const double* X, const int nrow, const int ncol,
double *diffs);
void rowDiffSquareSums(const double* X, const int nrow, const int ncol,
double *sum);
/* SWEEP */
void rowSweep(const double *A, const int nrow, const int ncol,
const char* op,
const double *v, // vector of length nrow
double *C);
#endif /* CVE_INCLUDE_GUARD_H_ */ #endif /* CVE_INCLUDE_GUARD_H_ */

View File

@ -1,95 +1,257 @@
#include "cve.h" #include "cve.h"
int getWorkLen(const int n, const int p, const int q) { /**
int mpq; /**< Max of p and q */ * Applies the requested kernel element-wise.
int nn = ((n - 1) * n) / 2; *
* @param A matrix to apply the kernel to.
* @param h bandwidth parameter.
* @param kernel the kernel to be used.
* @param B (in/out) matrix `A` with element-wise applied kernel.
*
* @returns ether the passed `B`, or a new created matrix if `B` was NULL.
*/
mat* applyKernel(const mat* A, const double h, kernel kernel, mat* B) {
int i, nn = A->nrow * A->ncol;
int nn4 = 4 * (nn / 4);
double scale;
const double * restrict a;
double * restrict b;
if (p > q) { if (!B) {
mpq = p; B = matrix(A->nrow, A->ncol);
} else { } else if (nn != B->nrow * B->ncol) {
mpq = q; // TODO: error handling!
} }
if (nn * p < (mpq + 1) * mpq) {
return 2 * (mpq + 1) * mpq; a = A->elem;
} else { b = B->elem;
return (nn + mpq) * mpq; switch (kernel) {
case gauss:
scale = -0.5 / (h * h);
for (i = 0; i < nn4; i += 4) {
b[i] = exp(scale * a[i] * a[i]);
b[i + 1] = exp(scale * a[i + 1] * a[i + 1]);
b[i + 2] = exp(scale * a[i + 2] * a[i + 2]);
b[i + 3] = exp(scale * a[i + 3] * a[i + 3]);
} }
for (; i < nn; ++i) {
b[i] = exp(scale * a[i] * a[i]);
}
break;
default:
// TODO: error handling!
break;
}
return B;
} }
double cost(const unsigned int method, /**
const int n, * Scaling matrix `S` defined as
const double *Y, * s_{i j} = (L_i - (Y_j - y1_i)^2) * d_{i j} * w_{i j}
const double *vecK, *
const double *colSums, * Mapping of vectors `L`, `y1` and `Y` combinations.
double *y1, double *L) { *
int i, j, k; * Y[j]
double tmp, sum; * ------ j ----->
* s[0] s[n] . . . s[jn] . . . s[(j-1)n]
* s[1] s[n+1] . . . s[jn+1] . . . s[(j-1)n+1]
* | . . . . . .
* | . . . . . .
* . . . . . .
* L[i], y1[i] i s[i] s[n+i] . . . s[jn+i] . . . s[(j-1)n+i]
* . . . . . .
* | . . . . . .
* v . . . . . .
* s[n-1] s[2n-1] . . . s[n-1] . . . s[nn-1]
*
* @param L per sample loss vector of (lenght `n`).
* @param Y responces (lenght `n`).
* @param y1 weighted responces (lenght `n`).
* @param D distance matrix (dim. `n x n`).
* @param W weight matrix (dim. `n x n`).
* @param kernel the kernel to be used.
*
* @returns passed matrix `S` if not NULL, then a new `n x n` matrix is created.
*
* @example Basically equivalent to the following R function.
* r_LS <- function(L, Y, y1, D, W) {
* # get dimension
* n <- length(L)
* # Indices
* i <- rep(1:n, n)
* j <- rep(1:n, each = n)
* # Compute S
* matrix(L[i] - (Y[j] - y1[i])^2, n) * W * D
* }
*
* @details mapping for indices used in blocked implementation.
*
* n ..... Dimensions of `S`, a `n x n` matrix.
* B ..... BLOCK_SIZE
* rB .... block reminder, aka rB = B % n.
*
* Y[j]
* 0 ----- j -----> n
* 0 +--------------------+
* | ' |
* ' | k B x n |
* ' | v |
* ' +--------------------+
* L[i], y1[i] i | ' |
* ' | k B x n |
* ' | v |
* v +--------------------+
* | k rB x n |
* n +--------------------+
*/
// TODO: fix: cache misses in Y?!
mat* adjacence(const mat *vec_L, const mat *vec_Y, const mat *vec_y1,
const mat *mat_D, const mat *mat_W, kernel kernel,
mat *mat_S) {
int i, j, k, n = vec_L->nrow;
int block_size, block_batch_size;
int max_size = 64 < n ? 64 : n; // Block Size set to 64
for (i = 0; i < n; ++i) { double Y_j, tmp0, tmp1, tmp2, tmp3;
y1[i] = Y[i]; double *Y = vec_Y->elem;
L[i] = Y[i] * Y[i]; double *L = vec_L->elem;
double *y1 = vec_y1->elem;
double *D, *W, *S;
// TODO: Check dims.
if (!mat_S) {
mat_S = matrix(n, n);
} }
for (k = j = 0; j < n; ++j) { for (i = 0; i < n; i += block_size) {
for (i = j + 1; i < n; ++i, ++k) { /* Set blocks (left upper corner) */
y1[i] += Y[j] * vecK[k]; S = mat_S->elem + i;
y1[j] += Y[i] * vecK[k]; D = mat_D->elem + i;
L[i] += Y[j] * Y[j] * vecK[k]; W = mat_W->elem + i;
L[j] += Y[i] * Y[i] * vecK[k]; /* determine block size */
block_size = n - i;
if (block_size > max_size) {
block_size = max_size;
} }
block_batch_size = 4 * (block_size / 4);
/* for each column in the block */
for (j = 0; j < n; ++j, S += n, D += n, W += n) {
Y_j = Y[j];
/* iterate over block rows */
for (k = 0; k < block_batch_size; k += 4) {
tmp0 = Y_j - y1[k];
tmp1 = Y_j - y1[k + 1];
tmp2 = Y_j - y1[k + 2];
tmp3 = Y_j - y1[k + 3];
S[k] = (L[k] - (tmp0 * tmp0)) * D[k] * W[k];
S[k + 1] = (L[k + 1] - (tmp1 * tmp1)) * D[k + 1] * W[k + 1];
S[k + 2] = (L[k + 2] - (tmp2 * tmp2)) * D[k + 2] * W[k + 2];
S[k + 3] = (L[k + 3] - (tmp3 * tmp3)) * D[k + 3] * W[k + 3];
}
for (; k < block_size; ++k) {
tmp0 = Y_j - y1[k];
S[k] = (L[k] - (tmp0 * tmp0)) * D[k] * W[k];
}
}
L += block_size;
y1 += block_size;
} }
for (i = 0; i < n; ++i) { return mat_S;
y1[i] /= colSums[i];
L[i] /= colSums[i];
}
tmp = 0.0;
if (method == CVE_METHOD_WEIGHTED) {
sum = 0.0;
for (i = 0; i < n; ++i) {
tmp += (colSums[i] - 1.0) * (L[i] -= y1[i] * y1[i]);
sum += colSums[i];
}
return tmp / (sum - (double)n); // TODO: check for division by zero!
} else {
for (i = 0; i < n; ++i) {
tmp += (L[i] -= y1[i] * y1[i]);
}
return tmp / (double)n;
}
} }
void scaling(const unsigned int method, // int getWorkLen(const int n, const int p, const int q) {
const int n, // int mpq; /**< Max of p and q */
const double *Y, const double *y1, const double *L, // int nn = ((n - 1) * n) / 2;
const double *vecD, const double *vecK,
const double *colSums,
double *vecS) {
int i, j, k, nn = (n * (n - 1)) / 2;
double tmp;
if (method == CVE_METHOD_WEIGHTED) { // if (p > q) {
for (k = j = 0; j < n; ++j) { // mpq = p;
for (i = j + 1; i < n; ++i, ++k) { // } else {
tmp = Y[j] - y1[i]; // mpq = q;
vecS[k] = (L[i] - (tmp * tmp)); // }
tmp = Y[i] - y1[j]; // if (nn * p < (mpq + 1) * mpq) {
vecS[k] += (L[j] - (tmp * tmp)); // return 2 * (mpq + 1) * mpq;
} // } else {
} // return (nn + mpq) * mpq;
} else { // }
for (k = j = 0; j < n; ++j) { // }
for (i = j + 1; i < n; ++i, ++k) {
tmp = Y[j] - y1[i];
vecS[k] = (L[i] - (tmp * tmp)) / colSums[i];
tmp = Y[i] - y1[j];
vecS[k] += (L[j] - (tmp * tmp)) / colSums[j];
}
}
}
for (k = 0; k < nn; ++k) { // double cost(const unsigned int method,
vecS[k] *= vecK[k] * vecD[k]; // const int n,
} // const double *Y,
} // const double *vecK,
// const double *colSums,
// double *y1, double *L) {
// int i, j, k;
// double tmp, sum;
// for (i = 0; i < n; ++i) {
// y1[i] = Y[i];
// L[i] = Y[i] * Y[i];
// }
// for (k = j = 0; j < n; ++j) {
// for (i = j + 1; i < n; ++i, ++k) {
// y1[i] += Y[j] * vecK[k];
// y1[j] += Y[i] * vecK[k];
// L[i] += Y[j] * Y[j] * vecK[k];
// L[j] += Y[i] * Y[i] * vecK[k];
// }
// }
// for (i = 0; i < n; ++i) {
// y1[i] /= colSums[i];
// L[i] /= colSums[i];
// }
// tmp = 0.0;
// if (method == CVE_METHOD_WEIGHTED) {
// sum = 0.0;
// for (i = 0; i < n; ++i) {
// tmp += (colSums[i] - 1.0) * (L[i] -= y1[i] * y1[i]);
// sum += colSums[i];
// }
// return tmp / (sum - (double)n); // TODO: check for division by zero!
// } else {
// for (i = 0; i < n; ++i) {
// tmp += (L[i] -= y1[i] * y1[i]);
// }
// return tmp / (double)n;
// }
// }
// void scaling(const unsigned int method,
// const int n,
// const double *Y, const double *y1, const double *L,
// const double *vecD, const double *vecK,
// const double *colSums,
// double *vecS) {
// int i, j, k, nn = (n * (n - 1)) / 2;
// double tmp;
// if (method == CVE_METHOD_WEIGHTED) {
// for (k = j = 0; j < n; ++j) {
// for (i = j + 1; i < n; ++i, ++k) {
// tmp = Y[j] - y1[i];
// vecS[k] = (L[i] - (tmp * tmp));
// tmp = Y[i] - y1[j];
// vecS[k] += (L[j] - (tmp * tmp));
// }
// }
// } else {
// for (k = j = 0; j < n; ++j) {
// for (i = j + 1; i < n; ++i, ++k) {
// tmp = Y[j] - y1[i];
// vecS[k] = (L[i] - (tmp * tmp)) / colSums[i];
// tmp = Y[i] - y1[j];
// vecS[k] += (L[j] - (tmp * tmp)) / colSums[j];
// }
// }
// }
// for (k = 0; k < nn; ++k) {
// vecS[k] *= vecK[k] * vecD[k];
// }
// }

View File

@ -1,21 +1,31 @@
#include "cve.h" #include "cve.h"
// SEXP rStiefel_c(SEXP pin, SEXP qin) { /**
// int p = asInteger(pin); * Converts a `SEXP` (S EXPression) into a matrix struct `mat`.
// int q = asInteger(qin); *
* @param S source struct to be converted.
*
* @details Reuses the memory area of the SEXP object, therefore manipulation
* of the returned matrix works in place of the SEXP object. In addition,
* a reference to the original SEXP is stored and will be retriefed from
* `asSEXP()` if the matrix was created through this function.
*/
static mat* asMat(SEXP S) {
// TODO: error checking and conversion
mat* M = (mat*)R_alloc(1, sizeof(mat));
if (isMatrix(S)) {
M->nrow = (int)nrows(S);
M->ncol = (int)ncols(S);
} else {
M->nrow = (int)length(S);
M->ncol = 1;
}
M->origin = S;
M->elem = REAL(S);
return M;
}
// SEXP Vout = PROTECT(allocMatrix(REALSXP, p, q)); SEXP cve_export(SEXP X, SEXP Y, SEXP k, SEXP h,
// int workLen = 2 * (p + 1) * q;
// double *workMem = (double*)R_alloc(workLen, sizeof(double));
// rStiefel(p, q, REAL(Vout), workMem, workLen);
// UNPROTECT(1);
// return Vout;
// }
SEXP cve(SEXP X, SEXP Y, SEXP k, SEXP h,
SEXP method, SEXP method,
SEXP V, // initial SEXP V, // initial
SEXP momentum, SEXP tau, SEXP tol, SEXP momentum, SEXP tau, SEXP tol,
@ -42,19 +52,18 @@ SEXP cve(SEXP X, SEXP Y, SEXP k, SEXP h,
/* Check `attempts`, if not positive use passed values of `V` as /* Check `attempts`, if not positive use passed values of `V` as
* optimization start value without further attempts. * optimization start value without further attempts.
* Therefor, copy from `V` to `Vout`. */ * Therefor, copy from `V` to `Vout`. */
if (asInteger(attempts) < 1L) { if (asInteger(attempts) < 1) {
// TODO: Check for // TODO: Check for
memcpy(REAL(Vout), REAL(V), p * q * sizeof(double)); memcpy(REAL(Vout), REAL(V), p * q * sizeof(double));
} }
/* Call CVE simple subroutine. */ /* Call CVE */
cve_sub(n, p, q, cve(asMat(X), asMat(Y), asReal(h),
REAL(X), REAL(Y), asReal(h),
asInteger(method), asInteger(method),
asReal(momentum), asReal(tau), asReal(tol), asReal(momentum), asReal(tau), asReal(tol),
asReal(slack), asReal(gamma), asReal(slack), asReal(gamma),
asInteger(maxIter), asInteger(attempts), asInteger(maxIter), asInteger(attempts),
REAL(Vout), REAL(Lout), asMat(Vout), asMat(Lout),
logger, loggerEnv); logger, loggerEnv);
/* Build output list object with names "V", "L" */ /* Build output list object with names "V", "L" */

View File

@ -4,7 +4,7 @@
#include <R_ext/Rdynload.h> #include <R_ext/Rdynload.h>
/* .Call calls */ /* .Call calls */
extern SEXP cve(SEXP X, SEXP Y, SEXP k, SEXP h, extern SEXP cve_export(SEXP X, SEXP Y, SEXP k, SEXP h,
SEXP method, SEXP method,
SEXP V, // initial SEXP V, // initial
SEXP momentum, SEXP tau, SEXP tol, SEXP momentum, SEXP tau, SEXP tol,
@ -13,7 +13,7 @@ extern SEXP cve(SEXP X, SEXP Y, SEXP k, SEXP h,
SEXP logger, SEXP loggerEnv); SEXP logger, SEXP loggerEnv);
static const R_CallMethodDef CallEntries[] = { static const R_CallMethodDef CallEntries[] = {
{"cve", (DL_FUNC) &cve, 15}, {"cve_export", (DL_FUNC) &cve_export, 15},
{NULL, NULL, 0} {NULL, NULL, 0}
}; };

File diff suppressed because it is too large Load Diff

View File

@ -1,66 +1,31 @@
#include "cve.h" #include "cve.h"
// /**
// * Performas a QR factorization and computes the Q factor.
// *
// * @param A matrix.
// * @returns The Q factor of the QR factorization `A = QR`.
// */
// SEXP qrQ(SEXP Ain) {
// int i, j, info;
// if (!isMatrix(Ain)) {
// error("Argument must be a matrix.");
// }
// int nrow = nrows(Ain);
// int ncol = ncols(Ain);
// double *A = (double*)R_alloc(nrow * ncol, sizeof(double));
// memcpy(A, REAL(Ain), nrow * ncol * sizeof(double));
// // double *A = REAL(Ain);
// // Scalar factors of elementary reflectors.
// double *tau = (double*)R_alloc(ncol, sizeof(double));
// // Create Working memory area.
// int lenWork = 3 * nrow;
// double *memWork = (double*)R_alloc(lenWork, sizeof(double));
// F77_NAME(dgeqrf)(&nrow, &ncol, A, &nrow, tau,
// memWork, &lenWork, &info);
// SEXP Qout = PROTECT(allocMatrix(REALSXP, nrow, ncol));
// double *Q = REAL(Qout);
// for (j = 0; j < ncol; ++j) {
// for (i = 0; i < nrow; ++i) {
// if (i == j) {
// Q[i + nrow * j] = 1.;
// } else {
// Q[i + nrow * j] = 0.;
// }
// }
// }
// F77_NAME(dormqr)("L", "N", &nrow, &ncol, &ncol, A, &nrow, tau, Q, &nrow,
// memWork, &lenWork, &info);
// UNPROTECT(1);
// return Qout;
// }
/** /**
* Draws a sample from invariant measure on the Stiefel manifold \eqn{S(p, q)}. * Draws a sample from invariant measure on the Stiefel manifold \eqn{S(p, q)}.
* *
* @param p row dimension * @param p row dimension
* @param q col dimension * @param q column dimension
* @return \code{p} times \code{q} semi-orthogonal matrix. * @param V (in/out) matrix of dimensions `p x q` or NULL.
* `V <- qr.Q(qr(matrix(rnorm(p * q, 0, 1), p, q)))` * @param workMem work space array of length greater-equal than `2pq + q`.
*
* @return Passed matrix `V` or new created if `V` is NULL.
*
* @example Performs the same operation as the following `R` code:
* V <- qr.Q(qr(matrix(rnorm(p * q, 0, 1), p, q)))
*
* @details ATTENTION: The length of workMem must be at least `2pq + q`.
*/ */
void rStiefel(const int p, const int q, double *V, mat* rStiefel(const int p, const int q, mat *V, double *workMem) {
double *workMem, int workLen) { int i, j, info, workLen = 2 * p * q + q;
int i, j, info;
int pq = p * q; int pq = p * q;
double *v;
if (!V) {
V = matrix(p, q);
} else if (V->nrow != p || V->ncol != q) {
// TODO: error handling!
}
v = V->elem;
GetRNGstate(); GetRNGstate();
for (i = 0; i < pq; ++i) { for (i = 0; i < pq; ++i) {
@ -76,14 +41,12 @@ void rStiefel(const int p, const int q, double *V,
for (j = 0; j < q; ++j) { for (j = 0; j < q; ++j) {
for (i = 0; i < p; ++i) { for (i = 0; i < p; ++i) {
if (i == j) { v[p * j + i] = i == j ? 1.0 : 0.0;
V[i + p * j] = 1.;
} else {
V[i + p * j] = 0.;
}
} }
} }
F77_NAME(dormqr)("L", "N", &p, &q, &q, workMem, &p, tau, V, &p, F77_NAME(dormqr)("L", "N", &p, &q, &q, workMem, &p, tau, V->elem, &p,
workMem + pq + q, &workLen, &info); workMem + pq + q, &workLen, &info);
return V;
} }

View File

@ -1,160 +0,0 @@
#include "cve.h"
/**
* Computes the row sums of a matrix `A`.
* @param A Pointer to col-major matrix elements, size is `nrow * ncol`.
* @param nrow Number of rows of `A`.
* @param ncol Number of columns of `A`.
* @param sum Pointer to output row sums of size `nrow`.
*/
void rowSums(const double *A, const int nrow, const int ncol,
double *sum) {
int i, j, block_size, block_size_i;
const double *A_block = A;
const double *A_end = A + nrow * ncol;
if (nrow > CVE_MEM_CHUNK_SIZE) {
block_size = CVE_MEM_CHUNK_SIZE;
} else {
block_size = nrow;
}
// Iterate `(block_size_i, ncol)` submatrix blocks.
for (i = 0; i < nrow; i += block_size_i) {
// Reset `A` to new block beginning.
A = A_block;
// Take block size of eveything left and reduce to max size.
block_size_i = nrow - i;
if (block_size_i > block_size) {
block_size_i = block_size;
}
// Compute first blocks column,
for (j = 0; j < block_size_i; ++j) {
sum[j] = A[j];
}
// and sum the following columns to the first one.
for (A += nrow; A < A_end; A += nrow) {
for (j = 0; j < block_size_i; ++j) {
sum[j] += A[j];
}
}
// Step one block forth.
A_block += block_size_i;
sum += block_size_i;
}
}
void colSums(const double *A, const int nrow, const int ncol,
double *colSums) {
int i, j;
int nrowb = 4 * (nrow / 4); // 4 * floor(nrow / 4)
double colSum;
for (j = 0; j < ncol; ++j) {
colSum = 0.0;
for (i = 0; i < nrowb; i += 4) {
colSum += A[i]
+ A[i + 1]
+ A[i + 2]
+ A[i + 3];
}
for (; i < nrow; ++i) {
colSum += A[i];
}
*(colSums++) = colSum;
A += nrow;
}
}
void rowSquareSums(const double *A,
                   const int nrow, const int ncol,
                   double *sum) {
    int i, j, block_size, block_size_i;
    const double *A_block = A;
    const double *A_end = A + nrow * ncol;

    if (ncol < 1) {
        // Empty matrix: every row's sum of squares is zero. Without this
        // guard the initialization loop below would read a first column
        // that does not exist (out-of-bounds read, garbage results).
        memset(sum, 0, nrow * sizeof(double));
        return;
    }

    // Process the rows in cache-friendly chunks.
    if (nrow > CVE_MEM_CHUNK_SIZE) {
        block_size = CVE_MEM_CHUNK_SIZE;
    } else {
        block_size = nrow;
    }
    // Iterate `(block_size_i, ncol)` submatrix blocks.
    for (i = 0; i < nrow; i += block_size_i) {
        // Reset `A` to the current block's beginning.
        A = A_block;
        // Take everything left, capped at the maximum block size.
        block_size_i = nrow - i;
        if (block_size_i > block_size) {
            block_size_i = block_size;
        }
        // Initialize with the squares of the block's first column,
        for (j = 0; j < block_size_i; ++j) {
            sum[j] = A[j] * A[j];
        }
        // then accumulate the squares of the remaining columns.
        for (A += nrow; A < A_end; A += nrow) {
            for (j = 0; j < block_size_i; ++j) {
                sum[j] += A[j] * A[j];
            }
        }
        // Advance to the next block.
        A_block += block_size_i;
        sum += block_size_i;
    }
}
/* Row sums of a symmetric `nrow x nrow` matrix given as its packed strict
 * lower triangle `Avec` (column-major order) plus a constant diagonal value
 * `diag`. Writes one sum per row to `sum`. */
void rowSumsSymVec(const double *Avec, const int nrow,
                   const double diag,
                   double *sum) {
    int row, col;
    // Seed every row sum with the common diagonal entry.
    if (diag == 0.0) {
        memset(sum, 0, nrow * sizeof(double));
    } else {
        for (row = 0; row < nrow; ++row) {
            sum[row] = diag;
        }
    }
    // Each packed element A[row, col] (row > col) appears in two row sums
    // because the matrix is symmetric: once for `col`, once for `row`.
    for (col = 0; col < nrow; ++col) {
        for (row = col + 1; row < nrow; ++row) {
            sum[col] += *Avec;
            sum[row] += *Avec;
            ++Avec;
        }
    }
}
/* All pairwise row differences of the col-major `nrow x ncol` matrix `X`.
 * For each column, writes X[r1, col] - X[r2, col] for every pair r1 < r2
 * (pairs in row-major pair order, columns concatenated) into `diffs`. */
void rowDiffs(const double* X, const int nrow, const int ncol,
              double *diffs) {
    int r1, r2, col;
    double *out = diffs;
    const double *Xcol = X;
    for (col = 0; col < ncol; ++col, Xcol += nrow) {
        for (r1 = 0; r1 < nrow; ++r1) {
            for (r2 = r1 + 1; r2 < nrow; ++r2) {
                *out++ = Xcol[r1] - Xcol[r2];
            }
        }
    }
}
/* Squared pairwise row distances of the col-major `nrow x ncol` matrix `X`.
 * For each row pair r1 < r2, writes sum over columns of
 * (X[r1, col] - X[r2, col])^2 into `sum` (length nrow * (nrow - 1) / 2). */
void rowDiffSquareSums(const double* X, const int nrow, const int ncol,
                       double *sum) {
    int r1, r2, col, pair;
    const double *Xcol;
    double delta;
    const int npairs = (nrow * (nrow - 1)) / 2;
    // Start from zero; every column adds its squared differences.
    memset(sum, 0, npairs * sizeof(double));
    for (col = 0; col < ncol; ++col) {
        Xcol = X + col * nrow;
        pair = 0;
        for (r1 = 0; r1 < nrow; ++r1) {
            for (r2 = r1 + 1; r2 < nrow; ++r2) {
                delta = Xcol[r1] - Xcol[r2];
                sum[pair++] += delta * delta;
            }
        }
    }
}

View File

@ -1,51 +0,0 @@
#include "cve.h"
#define ROW_SWEEP_ALG(op) \
/* Iterate `(block_size_i, ncol)` submatrix blocks. */ \
for (i = 0; i < nrow; i += block_size_i) { \
/* Set `A` and `C` to block beginning. */ \
A = A_block; \
C = C_block; \
/* Get current block's row size. */ \
block_size_i = nrow - i; \
if (block_size_i > block_size) { \
block_size_i = block_size; \
} \
/* Perform element wise operation for block. */ \
for (; A < A_end; A += nrow, C += nrow) { \
for (j = 0; j < block_size_i; ++j) { \
C[j] = (A[j]) op (v[j]); \
} \
} \
/* Step one block forth. */ \
A_block += block_size_i; \
C_block += block_size_i; \
v += block_size_i; \
}
/* C[, j] = A[, j] op v for each j = 1 to ncol with op as one of +, -, *, / */
void rowSweep(const double *A, const int nrow, const int ncol,
const char* op,
const double *v, // vector of length nrow
double *C) {
int i, j, block_size, block_size_i;
const double *A_block = A;
double *C_block = C;
const double *A_end = A + nrow * ncol;
if (nrow > CVE_MEM_CHUNK_SMALL) { // small because 3 vectors in cache
block_size = CVE_MEM_CHUNK_SMALL;
} else {
block_size = nrow;
}
if (*op == '+') {
ROW_SWEEP_ALG(+)
} else if (*op == '-') {
ROW_SWEEP_ALG(-)
} else if (*op == '*') {
ROW_SWEEP_ALG(*)
} else if (*op == '/') {
ROW_SWEEP_ALG(/)
}
}

View File

@ -27,7 +27,8 @@ Performance:
- [NOT Feasible] Stochastic Version - [NOT Feasible] Stochastic Version
- [NOT Feasible] Gradient Approximations (using Algebraic Software for alternative Loss function formulations and gradient optimizations) - [NOT Feasible] Gradient Approximations (using Algebraic Software for alternative Loss function formulations and gradient optimizations)
- [NOT Sufficient] Alternative Kernels for reducing samples - [NOT Sufficient] Alternative Kernels for reducing samples
- [ ] (To Be further investigated) "Kronecker" optimization - [x] (To Be further investigated) "Kronecker" optimization
- [ ] Implement "Kronecker" optimization
Features (functions): Features (functions):
- [x] Initial `V.init` parameter (only ONE try, ignore number of `attempts` parameter) - [x] Initial `V.init` parameter (only ONE try, ignore number of `attempts` parameter)
@ -48,6 +49,7 @@ Changes:
Errors: Errors:
- [x] `CVE_C` compare to `CVE_legacy`. - [x] `CVE_C` compare to `CVE_legacy`.
- [x] fix: `predict.dim` not found. - [x] fix: `predict.dim` not found.
- [ ] fix/check: error computation for break condition (in `cve.c`)
# Development # Development
## Build and install. ## Build and install.

View File

@ -23,11 +23,11 @@ subspace.dist <- function(B1, B2){
set.seed(437) set.seed(437)
# Number of simulations # Number of simulations
SIM.NR <- 50 SIM.NR <- 50L
# maximal number of iterations in curvilinear search algorithm # maximal number of iterations in curvilinear search algorithm
MAXIT <- 50 MAXIT <- 50L
# number of arbitrary starting values for curvilinear optimization # number of arbitrary starting values for curvilinear optimization
ATTEMPTS <- 10 ATTEMPTS <- 10L
# set names of datasets # set names of datasets
dataset.names <- c("M1", "M2", "M3", "M4", "M5") dataset.names <- c("M1", "M2", "M3", "M4", "M5")
# Set used CVE method # Set used CVE method

4
test.R
View File

@ -17,10 +17,10 @@ library(CVE)
path <- paste0('~/Projects/CVE/tmp/logger_', method, '_', momentum, '.C.pdf') path <- paste0('~/Projects/CVE/tmp/logger_', method, '_', momentum, '.C.pdf')
# Define logger for `cve()` method. # Define logger for `cve()` method.
logger <- function(iter, attempt, data) { logger <- function(attempt, iter, data) {
# Note the `<<-` assignment! # Note the `<<-` assignment!
loss.history[iter + 1, attempt] <<- data$loss loss.history[iter + 1, attempt] <<- data$loss
error.history[iter + 1, attempt] <<- if (data$err > 0) data$err else NA error.history[iter + 1, attempt] <<- data$err
tau.history[iter + 1, attempt] <<- data$tau tau.history[iter + 1, attempt] <<- data$tau
# Compute true error by comparing to the true `B` # Compute true error by comparing to the true `B`
B.est <- null(data$V) # Function provided by CVE B.est <- null(data$V) # Function provided by CVE