add: support for successive reduction dimensions d

Daniel Kapla 2021-09-10 19:05:28 +02:00
parent a673b50c7a
commit e985e99432
4 changed files with 144 additions and 55 deletions
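In a nutshell: a single OPG net is fitted, its leading eigenvector directions seed the largest Refinement net, and each Refinement net in turn seeds the next smaller one. A minimal usage sketch with made-up data `X`, `y`; the `input_shapes` format and the convention that the first `epochs` entry drives the OPG step (the refinement nets use `tail(epochs, 1)`, see below) are assumptions:

library(NNSDR)

# One refinement net per requested dimension, sorted decreasing: 4, 2, 1.
model <- nnsdr$new(input_shapes = list(x = 10L), d = c(4L, 2L, 1L))
# Fit the OPG net, then the chain of refinement nets (largest d first).
model$fit(X, y, epochs = c(100L, 200L), batch_size = 32L)
coef(model, type = 'OPG', d = 4L)          # first 4 OPG directions
coef(model, type = 'Refinement', d = 2L)   # refined estimate, d = 2 bottleneck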

NAMESPACE

@@ -1,6 +1,7 @@
# Generated by roxygen2: do not edit by hand
S3method(coef,nnsdr)
S3method(predict,nnsdr)
S3method(summary,nnsdr)
export(dataset)
export(dist.grassmann)


@@ -1,18 +1,12 @@
#' Extracts the OPG or refined reduction coefficients from an nnsdr class instance
#'
#' @param object nnsdr class instance
#' @param type specifies if the OPG or Refinement estimate is requested.
#' One of `Refinement` or `OPG`, default is `Refinement`.
#' @param ... ignored.
#' @param ... Additional parameters passed down to `object$coef`.
#'
#' @return Matrix
#'
#' @method coef nnsdr
#' @export
coef.nnsdr <- function(object, type, ...) {
if (missing(type)) {
object$coef()
} else {
object$coef(type)
}
coef.nnsdr <- function(object, ...) {
object$coef(...)
}
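Because all arguments are forwarded, the `type` and `d` parameters of the reference-class `coef` method (see the class diff below) are reachable through the S3 generic as well, e.g. `coef(model, type = 'OPG', d = 2L)` for the first two OPG directions.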


@@ -107,7 +107,7 @@ nnsdr <- setRefClass('nnsdr',
nn.ref = 'ANY',
history.opg = 'ANY',
history.ref = 'ANY',
B.opg = 'ANY',
OPG = 'ANY',
B.ref = 'ANY',
history = function() {
if (is.null(.self$history.opg))
@@ -141,29 +141,35 @@ nnsdr <- setRefClass('nnsdr',
methods = list(
initialize = function(input_shapes, d, output_shape = 1L, ...) {
# Set configuration.
.self$config <- c(list(
# Create config.
.config <- c(list(
input_shapes = input_shapes,
d = as.integer(d),
output_shape = output_shape
), list(...))
# Dimensions added later (allows multiple d's)
d <- sort(as.integer(d), decreasing = TRUE)
# Build OPG (Step 1) and Refinement (Step 2) Neural Networks
.self$nn.opg <- do.call(build.MLP, c(.self$config, list(
.self$nn.opg <- do.call(build.MLP, c(.config, list(
name = 'OPG', add_reduction = FALSE
)))
.self$nn.ref <- do.call(build.MLP, c(.self$config, list(
name = 'Refinement', add_reduction = TRUE
)))
.self$nn.ref <- Map(function(d) {
do.call(build.MLP, c(.config, list(
name = 'Refinement', add_reduction = TRUE, d = d
)))
}, d)
# Set config (including dimension(s)) after `build.MLP`
.self$config <- c(list(d = d), .config)
# Set initial history field values. If and only if the `history.*`
# fields are `NULL`, then the Nets are NOT trained.
.self$history.opg <- NULL
.self$history.ref <- NULL
# Set (not yet available) reduction estimates
.self$B.opg <- NULL
.self$B.ref <- NULL
# Set (not yet available) OPG directions; the OPG estimate for
# reduction dimension `d` is then `.self$OPG[, 1:d]`.
.self$OPG <- NULL
},
fit = function(inputs, output, epochs = 1L, batch_size = 32L,
@@ -196,14 +202,14 @@ nnsdr <- setRefClass('nnsdr',
out <- .self$nn.opg(inputs)
})
G <- as.matrix(tape$gradient(out, inputs[[1]]))
B <- eigen(var(G), symmetric = TRUE)$vectors
B <- B[, 1:.self$config$d, drop = FALSE]
.self$B.opg <- B
.self$OPG <- eigen(var(G), symmetric = TRUE)$vectors
# Check for need to initialize the Refinement Net.
# Check for need to initialize the Refinement Nets.
if (is.null(.self$history.ref)) {
# Get OPG estimate for max reduction dimension
B <- .self$OPG[, seq_len(.self$config$d[1]), drop = FALSE]
# Set Reduction layer
.self$nn.ref$get_layer('reduction')$set_weights(list(B))
.self$nn.ref[[1]]$get_layer('reduction')$set_weights(list(B))
# Check initializer (for 'random', keep the random initialization)
if (initializer == 'fromOPG') {
@@ -214,7 +220,7 @@ nnsdr <- setRefClass('nnsdr',
W[-(1:nrow(B)), , drop = FALSE]
)
b <- as.array(.self$nn.opg$get_layer('hidden1')$bias)
.self$nn.ref$get_layer('hidden1')$set_weights(list(W, b))
.self$nn.ref[[1]]$get_layer('hidden1')$set_weights(list(W, b))
# Get layer names with weights to be initialized from `nn.opg`
# These are the output layer and all hidden layers except the first
layer.names <- Filter(function(name) {
@@ -226,44 +232,108 @@ nnsdr <- setRefClass('nnsdr',
startsWith(name, 'hidden')
}
}, lapply(.self$nn.opg$layers, `[[`, 'name'))
# Copy `nn.opg` weights to `nn.ref`
# Copy `nn.opg` weights to first `nn.ref`
for (name in layer.names) {
.self$nn.ref$get_layer(name)$set_weights(lapply(
.self$nn.ref[[1]]$get_layer(name)$set_weights(lapply(
.self$nn.opg$get_layer(name)$weights, as.array
))
}
}
# Now train all but the smallest Refinement Net and move the
# weights on to the next smaller net.
for (i in seq_len(length(.self$nn.ref) - 1)) {
# Train current Net
hist <- .self$nn.ref[[i]]$fit(inputs, output, ...,
epochs = as.integer(tail(epochs, 1)),
batch_size = as.integer(tail(batch_size, 1)),
verbose = as.integer(verbose)
)
.self$history.ref <- rbind(
.self$history.ref,
cbind(d = .self$config$d[i], as.data.frame(hist$history))
)
# Compute the reduction estimate for the next smaller refinement net
with(tf$GradientTape() %as% tape, {
tape$watch(inputs[[1]])
out <- .self$nn.ref[[i]](inputs)
})
G <- as.matrix(tape$gradient(out, inputs[[1]]))
B <- eigen(var(G), symmetric = TRUE)$vectors
B <- B[, seq_len(.self$config$d[i + 1]), drop = FALSE]
.self$nn.ref[[i + 1]]$get_layer('reduction')$set_weights(list(B))
# Transfer weights from current to next smaller net
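# The current net's first hidden layer sees X %*% B.last as input, the
# next one sees X %*% B. With W.new = t(B) %*% B.last %*% W the next net
# computes X %*% B %*% t(B) %*% B.last %*% W, i.e. the same
# pre-activations up to the projection B %*% t(B) onto the span of B.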
W <- as.array(.self$nn.ref[[i]]$get_layer('hidden1')$kernel)
b <- as.array(.self$nn.ref[[i]]$get_layer('hidden1')$bias)
B.last <- as.array(.self$nn.ref[[i]]$get_layer('reduction')$kernel)
.self$nn.ref[[i + 1]]$get_layer('hidden1')$set_weights(list(
t(B) %*% B.last %*% W, b))
# Get layer names with weights to be transferred; these are the
# output layer and all hidden layers except the first
layer.names <- Filter(function(name) {
if (name == 'output') {
TRUE
} else if (name == 'hidden1') {
FALSE
} else {
startsWith(name, 'hidden')
}
}, lapply(.self$nn.ref[[i]]$layers, `[[`, 'name'))
# Copy current weights to the next smaller net
for (name in layer.names) {
.self$nn.ref[[i + 1]]$get_layer(name)$set_weights(lapply(
.self$nn.ref[[i]]$get_layer(name)$weights, as.array
))
}
}
} else if (verbose > 0) {
cat("Refinement Net already trained -> continue training.\n")
cat("Refinement Nets already trained -> continue training.\n")
}
# Fit (or continue fitting) the Refinement Net.
hist <- .self$nn.ref$fit(inputs, output, ...,
# Fit (or continue fitting) the (last, smallest) Refinement Net.
hist <- tail(.self$nn.ref, 1)[[1]]$fit(inputs, output, ...,
epochs = as.integer(tail(epochs, 1)),
batch_size = as.integer(tail(batch_size, 1)),
verbose = as.integer(verbose)
)
.self$history.ref <- rbind(
.self$history.ref,
as.data.frame(hist$history)
cbind(d = tail(.self$config$d, 1), as.data.frame(hist$history))
)
# Extract refined reduction estimate
.self$B.ref <- .self$nn.ref$get_layer('reduction')$get_weights()[[1]]
invisible(NULL)
},
predict = function(inputs) {
# Issue warning if the Refinement model (Step 2) used for prediction
# is not trained.
if (is.null(.self$history.ref))
warning('Refinement model not trained.')
predict = function(inputs, type = c('Refinement', 'OPG'),
d = min(.self$config$d)
) {
type <- match.arg(type)
# Convert inputs to tensors
if (is.list(inputs)) {
inputs <- Map(tf$cast, as.list(inputs), dtype = 'float32')
} else {
inputs <- list(tf$cast(inputs, dtype = 'float32'))
}
output <- .self$nn.ref(inputs)
if (type == 'Refinement') {
# Issue warning if the Refinement model (Step 2) used for
# prediction is not trained.
if (is.null(.self$history.ref))
warning('Refinement model not trained.')
# Find correct reduction model
index <- which(.self$config$d == d)
if (!length(index)) {
warning('There is no Refinement model of dim. ', d)
return(NULL)
}
# Predict
output <- .self$nn.ref[[index]](inputs)
} else {
# Issue warning if OPG model (Step 1) is not trained
if (is.null(.self$history.opg))
warning('OPG model not trained.')
# Predict
output <- .self$nn.opg(inputs)
}
if (is.list(output)) {
if (length(output) == 1L) {
@@ -282,29 +352,41 @@ nnsdr <- setRefClass('nnsdr',
inputs <- list(tf$cast(inputs, dtype = 'float32'))
}
eval.opg <- .self$nn.opg$evaluate(inputs, output,
return_dict = TRUE, verbose = 0L)
return_dict = TRUE, verbose = 0L)
if (is.null(.self$history.ref))
return(data.frame(eval.opg, row.names = "OPG"))
eval.ref <- .self$nn.ref$evaluate(inputs, output,
return_dict = TRUE, verbose = 0L)
eval.ref <- Reduce(rbind, Map(function(model, d) {
data.frame(d = d,
model$evaluate(inputs, output,
return_dict = TRUE, verbose = 0L))
}, .self$nn.ref, .self$config$d))
# Convert to data.frame
eval.opg <- data.frame(eval.opg, row.names = "OPG")
eval.ref <- data.frame(eval.ref, row.names = "Refinement")
row.names.ref <- if (nrow(eval.ref) == 1) "Refinement"
else paste0("Refinement-", seq_len(nrow(eval.ref)))
eval.ref <- data.frame(eval.ref, row.names = row.names.ref)
# Augment mutually exclusive columns (missing entries become NA)
eval.opg[setdiff(names(eval.ref), names(eval.opg))] <- NA
eval.ref[setdiff(names(eval.opg), names(eval.ref))] <- NA
# Combine/Bind
rbind(eval.opg, eval.ref)
},
coef = function(type = c('Refinement', 'OPG')) {
coef = function(type = c('Refinement', 'OPG'), d = min(.self$config$d)) {
type <- match.arg(type)
if (type == 'Refinement') {
.self$B.ref
# Extract refined reduction estimate from refinement model
# with bottleneck of dimension `d`.
index <- which(.self$config$d == d)
if (!length(index)) {
warning('There is no Refinement model of dim. ', d)
return(NULL)
}
.self$nn.ref[[index]]$get_layer('reduction')$get_weights()[[1]]
} else {
.self$B.opg
.self$OPG[, seq_len(d), drop = FALSE]
}
},
reset = function(reset = c('both', 'Refinement')) {
@@ -313,17 +395,20 @@ nnsdr <- setRefClass('nnsdr',
reinitialize_weights(.self$nn.opg)
reset_optimizer(.self$nn.opg$optimizer)
.self$history.opg <- NULL
.self$B.opg <- NULL
.self$OPG <- NULL
}
for (model in .self$nn.ref) {
reinitialize_weights(model)
reset_optimizer(model$optimizer)
}
reinitialize_weights(.self$nn.ref)
reset_optimizer(.self$nn.ref$optimizer)
.self$history.ref <- NULL
.self$B.ref <- NULL
},
summary = function() {
.self$nn.opg$summary()
cat('\n')
.self$nn.ref$summary()
for (model in .self$nn.ref) {
cat('\n')
model$summary()
}
}
)
)
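For reference, the OPG step keeps the complete eigenvector basis so that any requested dimension can be sliced out afterwards. The core computation as a standalone sketch (the gradient matrix `G` is made up here; in the class it comes from a `tf$GradientTape` pass over the fitted net):

# G: n x p matrix of per-observation gradients of the fitted model
G <- matrix(rnorm(100 * 5), nrow = 100, ncol = 5)
# Eigenvectors of the gradient covariance, ordered by decreasing eigenvalue
OPG <- eigen(var(G), symmetric = TRUE)$vectors
# OPG estimate for reduction dimension d: the first d columns
B <- OPG[, seq_len(2L), drop = FALSE]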

NNSDR/R/predict_nnsdr.R (new file)

@@ -0,0 +1,9 @@
#' Predict using the fitted neural networks
#'
#' @param object instance of class `nnsdr`
#' @param ... arguments passed to the `predict` method of class `nnsdr`
#'
#' @export
predict.nnsdr <- function(object, ...) {
object$predict(...)
}
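An illustrative round trip with the new wrapper (made-up `model` and `X`): `predict(model, X, type = 'Refinement', d = 2L)` dispatches to `model$predict(...)` and evaluates the refinement net whose bottleneck dimension is 2, while `predict(model, X, type = 'OPG')` uses the Step 1 net.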