added test data for the function
vzhomeexperiments committed Oct 31, 2018
1 parent ad3c231 commit 56e77f0
Showing 4 changed files with 18 additions and 8 deletions.
9 changes: 5 additions & 4 deletions _RL/TradeTriggerRL.R
@@ -11,7 +11,7 @@
# packages used *** make sure to install these packages
library(tidyverse) #install.packages("tidyverse")
library(lubridate) #install.packages("lubridate")
library(ReinforcementLearning) #devtools::install_github("nproellochs/ReinforcementLearning")
library(ReinforcementLearning) #install.packages("ReinforcementLearning")
library(magrittr)

# ----------- Main Steps -----------------
@@ -76,7 +76,8 @@ for (i in 1:length(vector_systems)) {
# tryCatch() function will not abort the entire for loop in case of the error in one iteration
tryCatch({
# execute this code below for debugging:
# i <- 5
# i <- 7 #policy off
# i <- 2 #policy on

# extract current magic number id
trading_system <- vector_systems[i]
@@ -102,8 +103,8 @@ for (i in 1:length(vector_systems)) {
# NOTE: more research is required to find best parameters TDL TDL TDL
#control <- list(alpha = 0.5, gamma = 0.5, epsilon = 0.5)
#control <- list(alpha = 0.9, gamma = 0.9, epsilon = 0.9)
control <- list(alpha = 0.1, gamma = 0.2, epsilon = 0.5)
#control <- list(alpha = 0.3, gamma = 0.6, epsilon = 0.1)
#control <- list(alpha = 0.7, gamma = 0.5, epsilon = 0.9)
control <- list(alpha = 0.3, gamma = 0.6, epsilon = 0.1)

# perform reinforcement learning and return policy
policy_tr_systDF <- generate_RL_policy(trading_systemDF, states = states,actions = actions,
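In the control list above, alpha is the Q-learning rate, gamma the discount factor, and epsilon the exploration probability used by the ReinforcementLearning package. A minimal sketch of how two candidate control lists could be compared side by side; the four-row experience set below is invented for illustration and is not the repo's data:

    # compare two candidate control lists on a toy experience set;
    # State/Action/Reward/NextState mirror the columns generate_RL_policy() expects
    library(ReinforcementLearning)
    toy <- data.frame(State     = c("tradewin", "tradewin", "tradeloss", "tradeloss"),
                      Action    = c("ON", "OFF", "ON", "OFF"),
                      Reward    = c(10, 0, -12, 0),
                      NextState = c("tradewin", "tradeloss", "tradeloss", "tradewin"),
                      stringsAsFactors = FALSE)
    for (ctrl in list(list(alpha = 0.3, gamma = 0.6, epsilon = 0.1),
                      list(alpha = 0.7, gamma = 0.5, epsilon = 0.9))) {
      m <- ReinforcementLearning(toy, s = "State", a = "Action", r = "Reward",
                                 s_new = "NextState", control = ctrl)
      # higher alpha reacts faster to new rewards; higher epsilon explores more
      print(m$Q)
    }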
10 changes: 8 additions & 2 deletions _RL/generate_RL_policy.R
@@ -17,6 +17,7 @@
generate_RL_policy <- function(x, states, actions, control){
require(tidyverse)
require(ReinforcementLearning)
require(magrittr)
# uncomment to debug code inside the function
# x <- read_rds("_TEST_DATA/data_trades.rds")
# x <- trading_systemDF
@@ -63,15 +64,20 @@ generate_RL_policy <- function(x, states, actions, control){
s_new = "NextState", control = control, iter = 1, model = model)
#model$Q
#print(i)

}

#plot(model)

# extract custom policy from the obtained dataset
df_Q <- model$Q %>% as.data.frame() %>%
# create column with market periods
mutate(TradeState = row.names(.)) %>%
# interpret policy as defined logic, value at ON must be >= 0!
mutate(Policy = ifelse(ON <= 0, "OFF", ifelse(ON > OFF, "ON", ifelse(OFF > ON, "OFF", NA)))) %>%
select(TradeState, Policy)

select(TradeState, Policy)
# record this object for the function debugging
# write_rds(df_Q, "_TEST_DATA/TradeStatePolicy.rds")
#plot(model)
return(df_Q)

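The Policy column in the hunk above is derived purely from the Q-table: a state is switched ON only when its ON value is both positive and larger than its OFF value. A standalone sketch of that extraction step, using an invented two-row Q-table in place of model$Q:

    # reproduce the policy-extraction step on a hand-built Q-table
    # (values invented; in the function they come from model$Q)
    library(dplyr)
    q_fake <- data.frame(ON  = c(5.2, -1.3),
                         OFF = c(1.1,  2.0),
                         row.names = c("tradewin", "tradeloss"))
    df_Q <- q_fake %>%
      mutate(TradeState = row.names(.)) %>%
      # ON must be strictly positive, otherwise the state is forced OFF
      mutate(Policy = ifelse(ON <= 0, "OFF",
                      ifelse(ON > OFF, "ON",
                      ifelse(OFF > ON, "OFF", NA)))) %>%
      select(TradeState, Policy)
    df_Q # expected: tradewin -> ON, tradeloss -> OFF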
7 changes: 5 additions & 2 deletions _RL/record_policy.R
@@ -10,13 +10,16 @@
#' @param last_result - character vector of the last result of the trade
#'
#' @return nothing is returned
#' @export function creates csv file
#' @export - function creates csv file
#' @example record_policy(x = policy_tr_systDF, trading_system = trading_system, path_sandbox = path_T4)
#'
record_policy <- function(x, last_result, trading_system, path_sandbox){
require(tidyverse)
require(magrittr)
# debugging
# trading_system <- 8118105
# trading_system <- 8118101
# last_result <- "tradeloss"
# x <- read_rds("_TEST_DATA/TradeStatePolicy.rds")
# x <- policy_tr_systDF
# path_sandbox <- "C:/Program Files (x86)/FxPro - Terminal3/MQL4/Files/"
# derive which terminal should be enabled (using path to sandbox) and using variable 'addition'
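The body of record_policy() is truncated in this diff, so the following is only a hypothetical sketch of the CSV-writing step its documentation describes; the file-name convention and output layout are assumptions, not the repo's actual code:

    # hypothetical sketch only -- names and CSV layout are assumed
    record_policy_sketch <- function(x, last_result, trading_system, path_sandbox) {
      # pick the policy row matching the latest trade outcome
      decision <- x[x$TradeState == last_result, ]
      # assumed file-name convention: SystemControl<magic number>.csv
      write.csv(decision,
                file.path(path_sandbox, paste0("SystemControl", trading_system, ".csv")),
                row.names = FALSE, quote = FALSE)
    }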
Binary file added _TEST_DATA/TradeStatePolicy.rds
Binary file not shown.
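A quick way to inspect the new fixture, assuming it stores the df_Q policy frame produced by the commented write_rds() call in generate_RL_policy.R:

    library(readr)
    policy <- read_rds("_TEST_DATA/TradeStatePolicy.rds")
    str(policy) # expected columns: TradeState, Policy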
