From 5cf7ada8304631030ee94aa5bacae00ad9af637d Mon Sep 17 00:00:00 2001 From: Marco Zanotti Date: Fri, 12 Jan 2024 22:53:29 +0100 Subject: [PATCH] add mix methods; fix lm & cubist problem refitting + oosf keeping 'date' in recipe as time trend and removing index.num --- R/tsfor_lecture2_feateng.R | 15 +- dashboard/R/fit_model.R | 154 +++++++++++++++++---- dashboard/R/utils.R | 29 +++- dashboard/tsf_dashboard.Rmd | 267 ++++++++++++++++++++++++------------ 4 files changed, 342 insertions(+), 123 deletions(-) diff --git a/R/tsfor_lecture2_feateng.R b/R/tsfor_lecture2_feateng.R index 0c34e42..adceea5 100644 --- a/R/tsfor_lecture2_feateng.R +++ b/R/tsfor_lecture2_feateng.R @@ -455,16 +455,29 @@ wrkfl_fit_lm_2_lag |> pluck("fit") |> summary() +# LM Spline Workflow +wrkfl_fit_lm_3 <- workflow() |> + add_model(model_spec_lm) |> + add_recipe(rcp_spec) |> + fit(training(splits)) +wrkfl_fit_lm_3 +wrkfl_fit_lm_3 |> + extract_fit_parsnip() |> + pluck("fit") |> + summary() + # * Modeltime ------------------------------------------------------------- # Calibration calibration_tbl <- modeltime_table( wrkfl_fit_lm_1_spline, - wrkfl_fit_lm_2_lag + wrkfl_fit_lm_2_lag, + wrkfl_fit_lm_3 ) |> update_model_description(1, "LM - Spline Recipe") |> update_model_description(2, "LM - Lag Recipe") |> + update_model_description(3, "LM - Base Recipe") |> modeltime_calibrate(new_data = testing(splits)) calibration_tbl diff --git a/dashboard/R/fit_model.R b/dashboard/R/fit_model.R index 36d2016..fa34986 100644 --- a/dashboard/R/fit_model.R +++ b/dashboard/R/fit_model.R @@ -20,9 +20,17 @@ get_default <- function(parameter, return_value = TRUE) { "neighbors" = 5, # KNN "boundary" = "linear", "cost" = 1, "margin" = 0.1, # SVM "rf_mtry" = 5, "rf_trees" = 500, "rf_min_n" = 5, # Random Forest - "boost_mtry" = 5, "boost_trees" = 100, "boost_min_n" = 1, "boost_tree_depth" = 6, # Boosted Trees + "boost_method" = "XGBoost", # Boosted Trees + "boost_mtry" = 5, "boost_trees" = 100, "boost_min_n" = 1, "boost_tree_depth" = 6, "boost_learn_rate" = 0.3, "boost_loss_reduction" = 0, "boost_sample_size" = 1, - "committees" = 1, "cub_neighbors" = 0, "max_rules" = 20 # Cubist + "committees" = 1, "cub_neighbors" = 0, "max_rules" = 20, # Cubist + "ff_hidden_units" = 10, "ff_penalty" = 0, "ff_epochs" = 100, "ff_dropout" = 0.1, "ff_learn_rate" = 0.3, # Feed-Forward + "ffar_non_seasonal_ar" = 1, "ffar_seasonal_ar" = 0, # Feed-Forward AR + "ffar_hidden_units" = 10, "ffar_penalty" = 0, "ffar_epochs" = 100, "ffar_num_networks" = 20, + "arima_boost_mtry" = 5, "arima_boost_trees" = 100, "arima_boost_min_n" = 1, "arima_boost_tree_depth" = 6, # ARIMA-Boost + "arima_boost_learn_rate" = 0.3, "arima_boost_loss_reduction" = 0, "arima_boost_sample_size" = 1, + "prophet_boost_mtry" = 5, "prophet_boost_trees" = 100, "prophet_boost_min_n" = 1, "prophet_boost_tree_depth" = 6, # Prophet-Boost + "prophet_boost_learn_rate" = 0.3, "prophet_boost_loss_reduction" = 0, "prophet_boost_sample_size" = 1 ) if (return_value) { @@ -33,6 +41,41 @@ get_default <- function(parameter, return_value = TRUE) { } +#function to generate the recipe specification +generate_recipe_spec <- function(data, method) { + + method_type <- parse_method(method) + + if (method_type == "ts") { + + rcp_spec <- recipe(value ~ ., data = data) + + } else if (method_type == "ml" | method_type == "dl") { + + rcp_spec <- recipe(value ~ ., data = data) |> + step_timeseries_signature(date) |> + step_mutate(date = as.numeric(date)) |> + step_zv(all_predictors()) |> + 
step_rm(matches("(iso)|(xts)|(index.num)")) |> + step_dummy(all_nominal(), one_hot = TRUE) + + } else if (method_type == "mix") { + + rcp_spec <- recipe(value ~ ., data = data) |> + step_timeseries_signature(date) |> + step_normalize(date_index.num) |> + step_zv(all_predictors()) |> + step_rm(matches("(iso)|(xts)")) |> + step_dummy(all_nominal(), one_hot = TRUE) + + } else { + stop(paste("Unknown method type", method_type)) + } + + return(rcp_spec) + +} + # function to generate the model specification generate_model_spec <- function(method, params) { @@ -220,17 +263,33 @@ generate_model_spec <- function(method, params) { } else if (method == "Boosted Trees") { - model_spec <- boost_tree( - mode = "regression", - mtry = params$boost_mtry, - trees = params$boost_trees, - min_n = params$boost_min_n, - tree_depth = params$boost_tree_depth, - learn_rate = params$boost_learn_rate, - loss_reduction = params$boost_loss_reduction, - sample_size = params$boost_sample_size - ) |> - set_engine("xgboost") + if (params$boost_method == "XGBoost") { + model_spec <- boost_tree( + mode = "regression", + mtry = params$boost_mtry, + trees = params$boost_trees, + min_n = params$boost_min_n, + tree_depth = params$boost_tree_depth, + learn_rate = params$boost_learn_rate, + loss_reduction = params$boost_loss_reduction, + sample_size = params$boost_sample_size + ) |> + set_engine("xgboost") + } else if (params$boost_method == "LightGBM") { + model_spec <- boost_tree( + mode = "regression", + mtry = params$boost_mtry, + trees = params$boost_trees, + min_n = params$boost_min_n, + tree_depth = params$boost_tree_depth, + learn_rate = params$boost_learn_rate, + loss_reduction = params$boost_loss_reduction, + sample_size = params$boost_sample_size + ) |> + set_engine("lightgbm") + } else { + stop(paste("Unknown Boosting method", params$boost_method)) + } } else if (method == "Cubist") { @@ -241,6 +300,59 @@ generate_model_spec <- function(method, params) { ) |> set_engine("Cubist") + } else if (method == "Feed-Forward") { + + model_spec <- mlp( + mode = "regression", + hidden_units = params$ff_hidden_units, + penalty = params$ff_penalty, + epochs = params$ff_epochs, + dropout = params$ff_dropout, + learn_rate = params$ff_learn_rate + ) |> + set_engine("nnet") + + } else if (method == "Feed-Forward AR") { + + model_spec <- nnetar_reg( + mode = "regression", + non_seasonal_ar = params$ffar_non_seasonal_ar, + seasonal_ar = params$ffar_seasonal_ar, + hidden_units = params$ffar_hidden_units, + penalty = params$ffar_penalty, + epochs = params$ffar_epochs, + num_networks = params$ffar_num_networks + ) |> + set_engine("nnetar") + + } else if (method == "ARIMA-Boost") { + + model_spec <- arima_boost( + mode = "regression", + mtry = params$arima_boost_mtry, + trees = params$arima_boost_trees, + min_n = params$arima_boost_min_n, + tree_depth = params$arima_boost_tree_depth, + learn_rate = params$arima_boost_learn_rate, + loss_reduction = params$arima_boost_loss_reduction, + sample_size = params$arima_boost_sample_size + ) |> + set_engine("auto_arima_xgboost") + + } else if (method == "Prophet-Boost") { + + model_spec <- prophet_boost( + mode = "regression", + mtry = params$prophet_boost_mtry, + trees = params$prophet_boost_trees, + min_n = params$prophet_boost_min_n, + tree_depth = params$prophet_boost_tree_depth, + learn_rate = params$prophet_boost_learn_rate, + loss_reduction = params$prophet_boost_loss_reduction, + sample_size = params$prophet_boost_sample_size + ) |> + set_engine("prophet_xgboost") + } else { 
stop(paste("Unknown method", method)) } @@ -258,9 +370,7 @@ generate_model_spec <- function(method, params) { fit_model <- function(data, method, params, n_assess, assess_type, seed = 1992) { check_parameters(method, params) - set.seed(seed) - method_type <- parse_method(method) splits <- timetk::time_series_split( data, date_var = date, @@ -270,18 +380,8 @@ fit_model <- function(data, method, params, n_assess, assess_type, seed = 1992) ) train_tbl <- training(splits) |> select(-id, -frequency) - if (method_type == "ts") { - rcp_spec <- recipe(value ~ ., data = train_tbl) - } else if (method_type == "ml") { - rcp_spec <- recipe(value ~ ., data = train_tbl) |> - step_timeseries_signature(date) |> - step_normalize(date_index.num) |> - step_zv(all_predictors()) |> - step_rm(matches("(iso)|(xts)|(lbl)")) |> - step_rm(date) - } else { - stop(paste("Unknown method type", method_type)) - } + # recipe specification + rcp_spec <- generate_recipe_spec(train_tbl, method) # model specification model_spec <- generate_model_spec(method, params) diff --git a/dashboard/R/utils.R b/dashboard/R/utils.R index dde96e9..7325089 100644 --- a/dashboard/R/utils.R +++ b/dashboard/R/utils.R @@ -7,7 +7,8 @@ set_options <- function() { tsf.dashboard.methods = list( "ts" = c("Naive", "Seasonal Naive", "Rolling Average", "ETS", "Theta", "SARIMA", "TBATS", "STLM", "Prophet"), "ml" = c("Linear Regression", "Elastic Net", "MARS", "KNN", "SVM", "Random Forest", "Boosted Trees", "Cubist"), - "dl" = c("MLP", "NNETAR"), + "dl" = c("Feed-Forward", "COMING SOON!"), + "mix" = c("Feed-Forward AR", "ARIMA-Boost", "Prophet-Boost"), "ens" = c("Average", "Weighted Average", "Median", "Linear Regression") ), tsf.dashboard.methods_params = list( @@ -38,10 +39,26 @@ set_options <- function() { "SVM" = c("boundary", "cost", "margin"), "Random Forest" = c("rf_mtry", "rf_trees", "rf_min_n"), "Boosted Trees" = c( + "boost_method", "boost_mtry", "boost_trees", "boost_min_n", "boost_tree_depth", "boost_learn_rate", "boost_loss_reduction", "boost_sample_size" ), - "Cubist" = c("committees", "cub_neighbors", "max_rules") + "Cubist" = c("committees", "cub_neighbors", "max_rules"), + "Feed-Forward" = c("ff_hidden_units", "ff_penalty", "ff_epochs", "ff_dropout", "ff_learn_rate"), + "Feed-Forward AR" = c( + "ffar_non_seasonal_ar", "ffar_seasonal_ar", + "ffar_hidden_units", "ffar_penalty", "ffar_epochs", "ffar_num_networks" + ), + "ARIMA-Boost" = c( + "arima_boost_mtry", "arima_boost_trees", "arima_boost_min_n", + "arima_boost_tree_depth", "arima_boost_learn_rate", "arima_boost_loss_reduction", + "arima_boost_sample_size" + ), + "Prophet-Boost" = c( + "prophet_boost_mtry", "prophet_boost_trees", "prophet_boost_min_n", + "prophet_boost_tree_depth", "prophet_boost_learn_rate", "prophet_boost_loss_reduction", + "prophet_boost_sample_size" + ) ), tsf.dashboard.transfs = c("log", "boxcox", "norm", "stand", "diff", "sdiff"), tsf.dashboard.test_transfs = c("test_log", "test_diff", "test_sdiff") @@ -87,15 +104,19 @@ parse_frequency <- function(frequency) { parse_method <- function(method) { mtd <- getOption("tsf.dashboard.methods") - if (method %in% mtd$ts) { res <- "ts" } else if (method %in% mtd$ml) { res <- "ml" + } else if (method %in% mtd$dl) { + res <- "dl" + } else if (method %in% mtd$mix) { + res <- "mix" + } else if (method %in% mtd$ens) { + res <- "ens" } else { stop(paste("Unknown method", method)) } - return(res) } diff --git a/dashboard/tsf_dashboard.Rmd b/dashboard/tsf_dashboard.Rmd index 089420c..e5e0a37 100644 --- 
a/dashboard/tsf_dashboard.Rmd +++ b/dashboard/tsf_dashboard.Rmd @@ -17,16 +17,15 @@ runtime: shiny - - - - + + + @@ -63,8 +62,10 @@ datasets <- c( methods <- getOption("tsf.dashboard.methods") ts_methods <- methods$ts ml_methods <- methods$ml +dl_methods <- methods$dl +mix_methods <- methods$mix ens_methods <- methods$ens -methods <- c(ts_methods, ml_methods) +methods <- c(ts_methods, ml_methods, dl_methods, mix_methods) methods_params <- getOption("tsf.dashboard.methods_params") methods_params_cl <- methods_params |> @@ -541,7 +542,12 @@ dropdownButton( pickerInput( inputId = "method", label = h3("Forecast Algorithm"), multiple = FALSE, - choices = list(`Time Series` = ts_methods, `Machine Learning` = ml_methods), + choices = list( + `Time Series` = ts_methods, + `Machine Learning` = ml_methods, + `Deep Learning` = dl_methods, + `Mixed Algorithms` = mix_methods + ), selected = ts_methods[1] ) @@ -714,6 +720,11 @@ conditionalPanel( conditionalPanel( condition = "input.method == 'Boosted Trees'", h5("Algorithm hyperparameters: "), + prettyRadioButtons( + inputId = "boost_method", label = "Boosting Method", + choices = c("XGBoost", "LightGBM"), + inline = TRUE, selected = get_default("boost_method") + ), numericInput(inputId = "boost_mtry", label = "Random Predictors", value = get_default("boost_mtry"), min = 0, max = Inf, step = 1), numericInput(inputId = "boost_trees", label = "Trees", value = get_default("boost_trees"), min = 1, max = Inf, step = 1), numericInput(inputId = "boost_min_n", label = "Min Node Size", value = get_default("boost_min_n"), min = 1, max = Inf, step = 1), @@ -731,6 +742,61 @@ conditionalPanel( sliderInput(inputId = "cub_neighbors", label = "Neighbors", value = get_default("cub_neighbors"), min = 0, max = 9, step = 1), numericInput(inputId = "max_rules", label = "Max Rules", value = get_default("max_rules"), min = 1, max = Inf, step = 1) ) + +# Feed-Forward +conditionalPanel( + condition = "input.method == 'Feed-Forward'", + h5("Algorithm hyperparameters: "), + numericInput(inputId = "ff_hidden_units", label = "Hidden Units", value = get_default("ff_hidden_units"), min = 0, max = Inf, step = 1), + numericInput(inputId = "ff_penalty", label = "Decay", value = get_default("ff_penalty"), min = 0, max = 1), + numericInput(inputId = "ff_epochs", label = "Epochs", value = get_default("ff_epochs"), min = 1, max = Inf, step = 1), + numericInput(inputId = "ff_dropout", label = "Dropout", value = get_default("ff_dropout"), min = 0, max = 1), + numericInput(inputId = "ff_learn_rate", label = "Learning Rate", value = get_default("ff_learn_rate"), min = 0, max = 1), +) + +# Feed-Forward AR +conditionalPanel( + condition = "input.method == 'Feed-Forward AR'", + h5("Algorithm hyperparameters: "), + sliderInput(inputId = "ffar_non_seasonal_ar", label = "p", value = get_default("ffar_non_seasonal_ar"), min = 0, max = 5, step = 1), + sliderInput(inputId = "ffar_seasonal_ar", label = "P", value = get_default("ffar_seasonal_ar"), min = 0, max = 5, step = 1), + numericInput(inputId = "ffar_hidden_units", label = "Hidden Units", value = get_default("ffar_hidden_units"), min = 0, max = Inf, step = 1), + numericInput(inputId = "ffar_penalty", label = "Decay", value = get_default("ffar_penalty"), min = 0, max = 1), + numericInput(inputId = "ffar_epochs", label = "Epochs", value = get_default("ffar_epochs"), min = 1, max = Inf, step = 1), + numericInput(inputId = "ffar_num_networks", label = "Num Networks", value = get_default("ffar_num_networks"), min = 1, max = Inf, step = 1) +) + +# 
ARIMA-Boost +conditionalPanel( + condition = "input.method == 'ARIMA-Boost'", + h5("Algorithm hyperparameters: "), + numericInput(inputId = "arima_boost_mtry", label = "Random Predictors", value = get_default("arima_boost_mtry"), min = 0, max = Inf, step = 1), + numericInput(inputId = "arima_boost_trees", label = "Trees", value = get_default("arima_boost_trees"), min = 1, max = Inf, step = 1), + numericInput(inputId = "arima_boost_min_n", label = "Min Node Size", value = get_default("arima_boost_min_n"), min = 1, max = Inf, step = 1), + numericInput(inputId = "arima_boost_tree_depth", label = "Tree Depth", value = get_default("arima_boost_tree_depth"), min = 1, max = Inf), + numericInput(inputId = "arima_boost_learn_rate", label = "Learning Rate", value = get_default("arima_boost_learn_rate"), min = 0, max = 1), + numericInput(inputId = "arima_boost_loss_reduction", label = "Min Loss Reduction", value = get_default("arima_boost_loss_reduction"), min = 0, max = 1), + numericInput(inputId = "arima_boost_sample_size", label = "Sample Size", value = get_default("arima_boost_sample_size"), min = 0, max = 1) +) + +# Prophet-Boost +conditionalPanel( + condition = "input.method == 'Prophet-Boost'", + h5("Algorithm hyperparameters: "), + numericInput(inputId = "prophet_boost_mtry", label = "Random Predictors", value = get_default("prophet_boost_mtry"), min = 0, max = Inf, step = 1), + numericInput(inputId = "prophet_boost_trees", label = "Trees", value = get_default("prophet_boost_trees"), min = 1, max = Inf, step = 1), + numericInput(inputId = "prophet_boost_min_n", label = "Min Node Size", value = get_default("prophet_boost_min_n"), min = 1, max = Inf, step = 1), + numericInput(inputId = "prophet_boost_tree_depth", label = "Tree Depth", value = get_default("prophet_boost_tree_depth"), min = 1, max = Inf), + numericInput(inputId = "prophet_boost_learn_rate", label = "Learning Rate", value = get_default("prophet_boost_learn_rate"), min = 0, max = 1), + numericInput(inputId = "prophet_boost_loss_reduction", label = "Min Loss Reduction", value = get_default("prophet_boost_loss_reduction"), min = 0, max = 1), + numericInput(inputId = "prophet_boost_sample_size", label = "Sample Size", value = get_default("prophet_boost_sample_size"), min = 0, max = 1) +) + +# Coming Soon! 
+conditionalPanel( + condition = "input.method == 'COMING SOON!'", + h5("New Deep Learning algorithms will be released soon!") +) ``` ```{r} @@ -849,7 +915,7 @@ output$plot_resid_acf <- renderPlotly({ select(.index, .residuals) |> set_names(c("date", "value")) |> timetk::plot_acf_diagnostics( - .date_var = date, .value = value, + .date_var = date, .value = value, .lags = 60, .interactive = TRUE, .title = NULL, .y_lab = NULL, ) }) @@ -859,86 +925,85 @@ plotlyOutput(outputId = "plot_resid_acf") ```{r} # testing -# data_selected <- get_data(datasets[1]) -# ts_freq <- data_selected$frequency |> unique() |> parse_frequency() -# input <- list( -# n_future = 12, -# n_assess = 24, -# assess_type = "Rolling", -# method = "ETS", -# error = "auto", -# trend = "auto", -# season = "auto", -# damping = "auto", -# smooth_level = 0.1, -# smooth_trend = 0.1, -# smooth_season = 0.1 -# ) -# input <- list( -# n_future = 12, -# n_assess = 24, -# assess_type = "Rolling", -# method = "Elastic Net", -# penalty = 1, -# mixture = 0.5 -# ) -# input <- list( -# n_future = 12, -# n_assess = 24, -# assess_type = "Rolling", -# method = "Rolling Average", -# window_size = 12 -# ) -# -# fitted_model <- fit_model( -# data = data_selected, method = input$method, params = input, -# n_assess = input$n_assess, assess_type = input$assess_type, seed = 1992 -# ) -# -# forecast_results <- generate_forecast( -# fitted_model = fitted_model, data = data_selected, -# method = input$method, n_future = input$n_future, -# n_assess = input$n_assess, assess_type = input$assess_type -# ) -# -# data_selected |> -# fit_model( -# method = input$method, -# params = input, -# n_assess = input$n_assess, -# assess_type = input$assess_type, -# seed = 1992 -# ) |> -# generate_forecast( -# data = data_selected, -# method = input$method, -# n_future = input$n_future, -# n_assess = input$n_assess, -# assess_type = input$assess_type -# ) -# -# -# -# data = data_selected -# method = input$method -# params = input -# n_assess = input$n_assess -# assess_type = input$assess_type -# seed = 1992 -# data_splits = fitted_model$splits -# fitted_model = fitted_model$fit -# n_future = input$n_future -# -# forecast_results$splits |> -# tk_time_series_cv_plan() |> -# plot_time_series_cv_plan(date, value) -# forecast_results$fit -# forecast_results$residuals -# forecast_results$accuracy -# forecast_results$test_forecast |> plot_modeltime_forecast() -# forecast_results$oos_forecast |> plot_modeltime_forecast() -``` +data_selected <- get_data(datasets[1]) +ts_freq <- data_selected$frequency |> unique() |> parse_frequency() +input <- list( + n_future = 12, + n_assess = 24, + assess_type = "Rolling", + method = "ETS", + error = "auto", + trend = "auto", + season = "auto", + damping = "auto", + smooth_level = 0.1, + smooth_trend = 0.1, + smooth_season = 0.1 +) +input <- list( + n_future = 12, + n_assess = 24, + assess_type = "Rolling", + method = "Elastic Net", + penalty = 1, + mixture = 0.5 +) +input <- list( + n_future = 12, + n_assess = 24, + assess_type = "Rolling", + method = "Rolling Average", + window_size = 12 +) + +fitted_model <- fit_model( + data = data_selected, method = input$method, params = input, + n_assess = input$n_assess, assess_type = input$assess_type, seed = 1992 +) +forecast_results <- generate_forecast( + fitted_model = fitted_model, data = data_selected, + method = input$method, n_future = input$n_future, + n_assess = input$n_assess, assess_type = input$assess_type +) + +data_selected |> + fit_model( + method = input$method, + params = input, + 
n_assess = input$n_assess, + assess_type = input$assess_type, + seed = 1992 + ) |> + generate_forecast( + data = data_selected, + method = input$method, + n_future = input$n_future, + n_assess = input$n_assess, + assess_type = input$assess_type + ) + + + +data = data_selected +method = input$method +params = input +n_assess = input$n_assess +assess_type = input$assess_type +seed = 1992 +data_splits = fitted_model$splits +fitted_model = fitted_model$fit +n_future = input$n_future + +forecast_results$splits |> + tk_time_series_cv_plan() |> + plot_time_series_cv_plan(date, value) +forecast_results$fit +forecast_results$residuals +forecast_results$accuracy +forecast_results$test_forecast |> plot_modeltime_forecast() +forecast_results$oos_forecast |> plot_modeltime_forecast() +``` @@ -980,7 +1045,12 @@ dropdownButton( pickerInput( inputId = "tune_method", label = h3("Forecast Algorithm"), multiple = FALSE, - choices = list(`Time Series` = ts_methods, `Machine Learning` = ml_methods), + choices = list( + `Time Series` = ts_methods, + `Machine Learning` = ml_methods, + `Deep Learning` = dl_methods, + `Mixed Algorithms` = mix_methods + ), selected = ml_methods[2] ) @@ -1054,7 +1124,12 @@ dropdownButton( pickerInput( inputId = "comp_method", label = h3("Forecast Algorithm"), multiple = TRUE, - choices = list(`Time Series` = ts_methods, `Machine Learning` = ml_methods), + choices = list( + `Time Series` = ts_methods, + `Machine Learning` = ml_methods, + `Deep Learning` = dl_methods, + `Mixed Algorithms` = mix_methods + ), selected = ts_methods[c(4, 6)], options = list("actions-box" = TRUE) ) @@ -1115,7 +1190,12 @@ dropdownButton( pickerInput( inputId = "ens_method", label = h3("Forecast Algorithms"), - choices = list(`Time Series` = ts_methods, `Machine Learning` = ml_methods), + choices = list( + `Time Series` = ts_methods, + `Machine Learning` = ml_methods, + `Deep Learning` = dl_methods, + `Mixed Algorithms` = mix_methods + ), selected = ts_methods[c(4, 6)], multiple = TRUE, options = list( "actions-box" = FALSE, @@ -1187,8 +1267,13 @@ dropdownButton( ) pickerInput( - inputId = "scn_method", label = h3("Forecast Algorithms"), multiple = FALSE, - choices = list(`Time Series` = ts_methods, `Machine Learning` = ml_methods), + inputId = "scn_method", label = h3("Forecast Algorithm"), multiple = FALSE, + choices = list( + `Time Series` = ts_methods, + `Machine Learning` = ml_methods, + `Deep Learning` = dl_methods, + `Mixed Algorithms` = mix_methods + ), selected = ts_methods[4] )
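
A minimal sketch of how the new "mix" code path introduced above is meant to compose outside the Shiny app. It is not part of the patch: the `m4_monthly` toy series, running from the repo root, and the hand-built `params` list are assumptions for illustration only; the helper names (`set_options()`, `parse_method()`, `get_default()`, `generate_recipe_spec()`, `generate_model_spec()`), the parameter keys, and the `auto_arima_xgboost` engine come from `dashboard/R/utils.R` and `dashboard/R/fit_model.R` as patched.

```r
# Illustrative sketch only -- not part of the patch.
library(tidymodels)   # recipes, parsnip, workflows, rsample
library(modeltime)    # arima_boost()
library(timetk)       # step_timeseries_signature(), time_series_split()

# Assumes the working directory is the repo root.
source("dashboard/R/utils.R")      # set_options(), parse_method()
source("dashboard/R/fit_model.R")  # get_default(), generate_recipe_spec(), generate_model_spec()
set_options()                      # registers the "mix" method list used by parse_method()

# Toy series with the date/value columns the dashboard helpers expect
# (assumption: timetk::m4_monthly stands in for the dashboard datasets).
data_tbl <- timetk::m4_monthly |>
  dplyr::filter(id == "M750") |>
  dplyr::select(date, value)

splits    <- timetk::time_series_split(data_tbl, date_var = date, assess = 24, cumulative = TRUE)
train_tbl <- rsample::training(splits)

# Hand-built stand-in for the dashboard's input$* values, reusing get_default().
params <- list(
  arima_boost_mtry           = get_default("arima_boost_mtry"),
  arima_boost_trees          = get_default("arima_boost_trees"),
  arima_boost_min_n          = get_default("arima_boost_min_n"),
  arima_boost_tree_depth     = get_default("arima_boost_tree_depth"),
  arima_boost_learn_rate     = get_default("arima_boost_learn_rate"),
  arima_boost_loss_reduction = get_default("arima_boost_loss_reduction"),
  arima_boost_sample_size    = get_default("arima_boost_sample_size")
)

# "mix" recipe branch: keeps `date` for the ARIMA part, calendar features for the XGBoost part.
rcp_spec   <- generate_recipe_spec(train_tbl, "ARIMA-Boost")
model_spec <- generate_model_spec("ARIMA-Boost", params)

wrkfl_fit <- workflow() |>
  add_recipe(rcp_spec) |>
  add_model(model_spec) |>
  fit(train_tbl)

# Out-of-sample check on the held-out 24 months, mirroring the dashboard's test forecast.
modeltime_table(wrkfl_fit) |>
  modeltime_calibrate(new_data = rsample::testing(splits)) |>
  modeltime_accuracy()
```

The split between recipe branches follows the patch: the "ml"/"dl" branch converts `date` to a numeric trend and drops `index.num`, while the "mix" branch leaves `date` intact (only normalizing `date_index.num`) so that hybrid models such as `arima_boost()` and `prophet_boost()` can still see the date column for their time-series component.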