diff --git a/R/constraints.R b/R/constraints.R index 8533ae6..400801e 100644 --- a/R/constraints.R +++ b/R/constraints.R @@ -23,74 +23,44 @@ #' @seealso [luz_model_sequential()] #' #' @examples -#' # Generate random data: -#' train_x = matrix(rnorm(200), ncol=2) -#' train_y = rnorm(100) -#' # ---- Example with a keras/tensorflow network ---- -#' # Build a small nn: -#' nn <- keras::keras_model_sequential() -#' nn <- keras::layer_dense(nn, units = 10, activation = "tanh", input_shape = 2) -#' nn <- keras::layer_dense(nn, units = 1, activation = "linear") -#' -#' # Add constraints -#' nn_constrained <- add_constraints(nn, constraint_type = "l1_norm") -#' -#' # Compile and train (2 epochs ) -#' keras::compile(nn_constrained, -#' loss = "mse", -#' optimizer = keras::optimizer_adam(), -#' metrics = "mse") -#' -#' history <- keras::fit(nn_constrained, -#' train_x, -#' train_y, -#' verbose = 0, -#' epochs = 2, -#' batch_size = 50, -#' validation_split = 0.2 -#' ) -#' -#' #' # ---- Example with a luz/torch network ---- -#' # Create a torch data loader to be able to train the NN: -#' # Divide in only train and validation -#' all_indices <- 1:nrow(train_x) -#' only_train_indices <- sample(all_indices, size = round(nrow(train_x)) * 0.8) -#' val_indices <- setdiff(all_indices, only_train_indices) -#' -#' # Create lists with x and y values to feed luz::as_dataloader() -#' only_train_x <- as.matrix(train_x[only_train_indices,]) -#' only_train_y <- as.matrix(train_y[only_train_indices]) -#' val_x <- as.matrix(train_x[val_indices,]) -#' val_y <- as.matrix(train_y[val_indices]) -#' -#' only_train_list <- list(x = only_train_x, y = only_train_y) -#' val_list <- list(x = val_x, y = val_y) -#' -#' torch_data <- list( -#' train = luz::as_dataloader(only_train_list, batch_size = 50, shuffle = TRUE), -#' valid = luz::as_dataloader(val_list, batch_size = 50) -#' ) -#' -#' # Build a small nn: -#' nn <- luz_model_sequential( -#' torch::nn_linear(2,10), -#' torch::nn_tanh(), -#' 
torch::nn_linear(10,1) -#' ) -#' -#' # Train the model adding the constraints inside the pipe -#' # (equivalent to the approach used in the previous example) -#' fitted <- nn |> -#' luz::setup( -#' loss = torch::nn_mse_loss(), -#' optimizer = torch::optim_adam, -#' metrics = list( -#' luz::luz_metric_mse() -#' ) -#' ) |> -#' luz::fit(torch_data$train, epochs = 2, valid_data = torch_data$valid) -#' -#' +#' \dontrun{ +#' if (requireNamespace("keras", quietly=TRUE)) { +#' # ---- Example with a keras/tensorflow network ---- +#' # Build a small nn: +#' nn <- keras::keras_model_sequential() +#' nn <- keras::layer_dense(nn, units = 10, activation = "tanh", input_shape = 2) +#' nn <- keras::layer_dense(nn, units = 1, activation = "linear") +#' +#' # Add constraints +#' nn_constrained <- add_constraints(nn, constraint_type = "l1_norm") +#' +#' # Check that class of the constrained nn is "nn2poly" +#' class(nn_constrained)[1] +#' } +#' } +#' +#' if (requireNamespace("luz", quietly=TRUE)) { +#' # ---- Example with a luz/torch network ---- +#' +#' # Build a small nn +#' nn <- luz_model_sequential( +#' torch::nn_linear(2,10), +#' torch::nn_tanh(), +#' torch::nn_linear(10,1) +#' ) +#' +#' # With luz/torch we need to setup the nn before adding the constraints +#' nn <- luz::setup(module = nn, +#' loss = torch::nn_mse_loss(), +#' optimizer = torch::optim_adam, +#' ) +#' +#' # Add constraints +#' nn <- add_constraints(nn) +#' +#' # Check that class of the constrained nn is "nn2poly" +#' class(nn)[1] +#' } #' #' @export add_constraints <- function(object, type = c("l1_norm", "l2_norm"), ...) 
{ diff --git a/R/helpers.R b/R/helpers.R index 18dcd6b..6d040f3 100644 --- a/R/helpers.R +++ b/R/helpers.R @@ -15,6 +15,7 @@ #' @seealso [add_constraints()] #' #' @examples +#' if (requireNamespace("luz", quietly=TRUE)) { #' # Create a NN using luz/torch as a sequential model #' # with 3 fully connected linear layers, #' # the first one with input = 5 variables, @@ -31,6 +32,10 @@ #' #' nn #' +#' # Check that the nn is of class nn_sequential +#' class(nn) +#' } +#' #' #' @export luz_model_sequential <- function(...) { diff --git a/man/add_constraints.Rd b/man/add_constraints.Rd index ee0944b..e165348 100644 --- a/man/add_constraints.Rd +++ b/man/add_constraints.Rd @@ -32,74 +32,42 @@ Models in \code{luz/torch} need to use the \code{\link{luz_model_sequential}} helper in order to have a sequential model in the appropriate form. } \examples{ -# Generate random data: -train_x = matrix(rnorm(200), ncol=2) -train_y = rnorm(100) -# ---- Example with a keras/tensorflow network ---- -# Build a small nn: -nn <- keras::keras_model_sequential() -nn <- keras::layer_dense(nn, units = 10, activation = "tanh", input_shape = 2) -nn <- keras::layer_dense(nn, units = 1, activation = "linear") +\dontrun{ + # ---- Example with a keras/tensorflow network ---- + # Build a small nn: + nn <- keras::keras_model_sequential() + nn <- keras::layer_dense(nn, units = 10, activation = "tanh", input_shape = 2) + nn <- keras::layer_dense(nn, units = 1, activation = "linear") -# Add constraints -nn_constrained <- add_constraints(nn, constraint_type = "l1_norm") + # Add constraints + nn_constrained <- add_constraints(nn, constraint_type = "l1_norm") -# Compile and train (2 epochs ) -keras::compile(nn_constrained, - loss = "mse", - optimizer = keras::optimizer_adam(), - metrics = "mse") - -history <- keras::fit(nn_constrained, - train_x, - train_y, - verbose = 0, - epochs = 2, - batch_size = 50, - validation_split = 0.2 -) - -#' # ---- Example with a luz/torch network ---- -# Create a torch data
loader to be able to train the NN: -# Divide in only train and validation -all_indices <- 1:nrow(train_x) -only_train_indices <- sample(all_indices, size = round(nrow(train_x)) * 0.8) -val_indices <- setdiff(all_indices, only_train_indices) - -# Create lists with x and y values to feed luz::as_dataloader() -only_train_x <- as.matrix(train_x[only_train_indices,]) -only_train_y <- as.matrix(train_y[only_train_indices]) -val_x <- as.matrix(train_x[val_indices,]) -val_y <- as.matrix(train_y[val_indices]) - -only_train_list <- list(x = only_train_x, y = only_train_y) -val_list <- list(x = val_x, y = val_y) + # Check that class of the constrained nn is "nn2poly" + class(nn_constrained)[1] +} -torch_data <- list( - train = luz::as_dataloader(only_train_list, batch_size = 50, shuffle = TRUE), - valid = luz::as_dataloader(val_list, batch_size = 50) -) +if (requireNamespace("luz", quietly=TRUE)) { + # ---- Example with a luz/torch network ---- -# Build a small nn: -nn <- luz_model_sequential( - torch::nn_linear(2,10), - torch::nn_tanh(), - torch::nn_linear(10,1) -) + # Build a small nn + nn <- luz_model_sequential( + torch::nn_linear(2,10), + torch::nn_tanh(), + torch::nn_linear(10,1) + ) -# Train the model adding the constraints inside the pipe -# (equivalent to the approach used in the previous example) -fitted <- nn |> - luz::setup( - loss = torch::nn_mse_loss(), - optimizer = torch::optim_adam, - metrics = list( - luz::luz_metric_mse() - ) - ) |> - luz::fit(torch_data$train, epochs = 2, valid_data = torch_data$valid) + # With luz/torch we need to setup the nn before adding the constraints + nn <- luz::setup(module = nn, + loss = torch::nn_mse_loss(), + optimizer = torch::optim_adam, + ) + # Add constraints + nn <- add_constraints(nn) + # Check that class of the constrained nn is "nn2poly" + class(nn)[1] +} } \seealso{ diff --git a/man/luz_model_sequential.Rd b/man/luz_model_sequential.Rd index 31ffaa6..3a4f244 100644 --- a/man/luz_model_sequential.Rd +++ 
b/man/luz_model_sequential.Rd @@ -23,6 +23,7 @@ Furthermore, this step is also needed to be able to impose the needed constraints when using the \code{luz/torch} framework. } \examples{ +if (requireNamespace("luz", quietly=TRUE)) { # Create a NN using luz/torch as a sequential model # with 3 fully connected linear layers, # the first one with input = 5 variables, @@ -39,6 +40,10 @@ nn <- luz_model_sequential( nn +# Check that the nn is of class nn_sequential +class(nn) +} + } \seealso{