#' Implementation of the CVE method using an RMSprop-like optimization on the
#' Stiefel manifold.
#'
#' @references A Riemannian Conjugate Gradient Algorithm with Implicit Vector
#'      Transport for Optimization on the Stiefel Manifold
#' @keywords internal
#' @export
cve_rmsprob <- function(X, Y, k,
                        nObs = sqrt(nrow(X)),
                        h = NULL,
                        tau = 0.1,
                        tol = 1e-4,
                        rho = 0.1, # Momentum decay rate.
                        slack = 0,                  # Not used by this optimizer.
                        epochs = 50L,
                        attempts = 10L,
                        epsilon = 1e-7,
                        max.linesearch.iter = 20L,  # Not used by this optimizer.
                        logger = NULL
) {
    # Set the environment of `grad` to this environment so that `grad` can find
    # (and manipulate) the local variables defined here, e.g. the persistent
    # data below and the `loss` written back when `loss.out = TRUE`.
    environment(grad) <- environment()

    # Get dimensions.
    n <- nrow(X) # Number of samples.
    p <- ncol(X) # Data dimension.
    q <- p - k   # Complement dimension of the SDR space.

    # Save initial learning rate `tau`.
    tau.init <- tau
    # Adapt tolerance for the break condition.
    tol <- sqrt(2 * q) * tol

    # Estimate bandwidth if not given.
    if (missing(h) || !is.numeric(h)) {
        h <- estimate.bandwidth(X, k, nObs)
    }

    # Compute persistent data, accessed by `grad` through the shared
    # environment when called with `persistent = TRUE`.
    # Compute lookup indices for symmetry, lower/upper
    # triangular parts and vectorization.
    pair.index <- elem.pairs(seq(n))
    i <- pair.index[1, ] # `i` indices of `(i, j)` pairs
    j <- pair.index[2, ] # `j` indices of `(i, j)` pairs
    # Indices into the vectorized matrix, for the lower and upper triangular parts.
    lower <- ((i - 1) * n) + j
    upper <- ((j - 1) * n) + i

    # Create all pairwise differences of rows of `X`.
    X_diff <- X[i, , drop = FALSE] - X[j, , drop = FALSE]

    # Identity matrix.
    I_p <- diag(1, p)

    # Init tracking of the current best solution (over multiple attempts).
    V.best <- NULL
    loss.best <- Inf

    # Start loop for multiple attempts.
    for (attempt in 1:attempts) {
        # Sample a `(p, q)` dimensional matrix from the Stiefel manifold as
        # optimization start value.
        V <- rStiefl(p, q)

        # Call logger with initial values before starting optimization.
        if (is.function(logger)) {
            loss <- grad(X, Y, V, h, loss.only = TRUE, persistent = TRUE)
            epoch <- 0 # Set epoch count to 0 (only relevant for logging).
            error <- NA
            logger(environment())
        }

        # Second-moment accumulator of the RMSprop update.
        M <- matrix(0, p, q)

        ## Start optimization loop.
        for (epoch in 1:epochs) {
            # Compute gradient and loss at the current position; `grad` writes
            # the loss into the local `loss` as a side effect (`loss.out = TRUE`).
            loss <- Inf
            G <- grad(X, Y, V, h, loss.out = TRUE, persistent = TRUE)

            # Projected gradient.
            A <- projTangentStiefl(V, G)
            # Projected element-wise squared gradient.
            Asq <- projTangentStiefl(V, G * G)
            # Momentum update.
            M <- (1 - rho) * Asq + rho * projTangentStiefl(V, M)
            # Take an RMSprop step against the projected gradient `A`, then
            # retract the result back onto the Stiefel manifold.
            V.tau <- retractStiefl(V - tau.init * A / (sqrt(abs(M)) + epsilon))

            # Compute error as the distance between consecutive projections.
            error <- norm(V %*% t(V) - V.tau %*% t(V.tau), type = "F")

            # Perform step.
            V <- V.tau

            # Call logger after taking a step.
            if (is.function(logger)) {
                # Set `tau` to a step size estimate (only for logging).
                tau <- tau.init / mean(sqrt(abs(M)) + epsilon)
                logger(environment())
            }

            # Check break condition.
            # Note: the division by `sqrt(2 * q)` is included in `tol`.
            if (error < tol) {
                break
            }
        }

        # Check if the current attempt improved on previous ones.
        if (loss < loss.best) {
            loss.best <- loss
            V.best <- V
        }
    }

    return(list(
        loss = loss.best,
        V = V.best,
        B = null(V.best),
        h = h
    ))
}
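
# For context: the Stiefel-manifold helpers used above (`projTangentStiefl`,
# `retractStiefl`, `rStiefl`) are package internals defined elsewhere. A
# minimal sketch of the standard operations they plausibly correspond to,
# assuming the usual tangent-space projection and a QR-based retraction (an
# assumption, not necessarily the package's exact definitions):
#
#     projTangentStiefl <- function(V, G) {
#         # Project G onto the tangent space at V: G - V sym(V' G).
#         W <- crossprod(V, G)          # V' G, a q x q matrix
#         G - V %*% ((W + t(W)) / 2)
#     }
#     retractStiefl <- function(A) {
#         # Map an arbitrary p x q matrix back onto the Stiefel manifold
#         # via the Q factor of its QR decomposition.
#         qr.Q(qr(A))
#     }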
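
# A minimal usage sketch on synthetic data (hypothetical, for illustration
# only; the data, dimensions and parameter choices below are not from the
# package, and this internal function is normally reached through the
# package's user-facing interface):
#
#     n <- 100; p <- 5; k <- 1
#     X <- matrix(rnorm(n * p), n, p)
#     B <- c(1, rep(0, p - 1))                  # true SDR direction
#     Y <- (X %*% B)^2 + rnorm(n, sd = 0.25)
#     fit <- cve_rmsprob(X, Y, k, epochs = 20L, attempts = 3L)
#     str(fit)  # list with loss, V, B = null(V) and bandwidth h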