#' runGradientDescent
#'
#' @keywords internal
#'
#' @description Runner for gradient descent (or stochastic gradient descent)
#' for the specified number of epochs, adapting the step size via the bold
#' driver heuristic
#'
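#' @param L left factor matrix
#' @param R right factor matrix
#' @param lambda regularization parameter forwarded to \code{loss} and
#'   \code{gdepoch}
#' @param epochs number of gradient descent epochs to run
#' @param gamma initial step size; adapted after every epoch by the bold
#'   driver heuristic
#' @param blockNr number of the block being processed (only used in log
#'   messages)
#' @param is,js index vectors forwarded to \code{gdepoch}
#' @param D data matrix forwarded to \code{loss} and \code{gdepoch}
#' @param r not used by this function
#'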
#' @return a list containing the two factor matrices \code{L} and \code{R}
#' produced by the gradient descent
runGradientDescent <- function(L, R, lambda, epochs, gamma = 0.01, blockNr,
                               is, js, D, r) {
  # flog.debug(paste("Calculating the gradient descent for block", blockNr))
  LR <- list(L = L, R = R)
  loss_result <- loss(LR$L, LR$R, lambda, D = D)
  curLoss <- loss_result$loss
  error_matrix <- loss_result$error_matrix
  for (epoch in seq_len(epochs)) {
    # flog.debug(paste("Calculating gradient descent epoch", epoch, "of",
    #   epochs, "for block", blockNr))
    LR <- gdepoch(LR$L, LR$R, lambda, gamma,
      is = is, js = js, D = D,
      error_matrix = error_matrix
    )
    ## bold driver step size update
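    ## (halve gamma if the loss increased, otherwise grow it by 5%)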
    oldLoss <- curLoss
    loss_result <- loss(LR$L, LR$R, lambda, D = D)
    curLoss <- loss_result$loss
    error_matrix <- loss_result$error_matrix
    if (is.infinite(curLoss)) {
      flog.error("The gradient descent diverged.")
      flog.error("This indicates that the step size is too big.")
      flog.error("Try lowering gamma to fix this.")
      stop("The gradient descent diverged")
    }
    if (oldLoss < curLoss) {
      gamma <- gamma / 2
    } else {
      gamma <- gamma * 1.05
    }
  }
  return(LR)
}
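
# The bold driver heuristic above can be illustrated in isolation. The sketch
# below is hypothetical and not part of the package: it runs plain gradient
# descent on a toy least-squares problem with the same step size rule, halving
# gamma after an epoch that increased the loss and growing it by 5% otherwise.
boldDriverDemo <- function(epochs = 50, gamma = 0.01) {
  set.seed(1)
  X <- matrix(rnorm(300), nrow = 100, ncol = 3)
  beta_true <- c(2, -1, 0.5)
  y <- X %*% beta_true + rnorm(100, sd = 0.1)
  beta <- matrix(0, nrow = 3, ncol = 1)
  curLoss <- sum((X %*% beta - y)^2)
  for (epoch in seq_len(epochs)) {
    # gradient of the squared-error loss
    grad <- 2 * t(X) %*% (X %*% beta - y)
    beta <- beta - gamma * grad
    oldLoss <- curLoss
    curLoss <- sum((X %*% beta - y)^2)
    # bold driver step size update, as in runGradientDescent()
    if (oldLoss < curLoss) {
      gamma <- gamma / 2
    } else {
      gamma <- gamma * 1.05
    }
  }
  drop(beta) # estimates should be close to beta_true
}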