# Practical exercise 2 - From verbal to formal models
The aim of this practical exercise is to go from verbal to formal models.
We will not just write a formula; we will implement the models as algorithms in R.
By implementing the models as algorithms,
* we are forced to make their assumptions fully explicit;
* we become able to simulate the models in a variety of situations, and therefore to better understand their implications.
So, the steps for today's exercise are:
1. Choose two of the models and formalize them, that is, produce an algorithm that enacts the strategy, so we can simulate them.
2. [Optional] Implement the algorithms as functions: taking an input and producing an output, so we can more easily reuse them across contexts (e.g. varying number of trials, varying input, etc.). See R for Data Science if you need a refresher: https://r4ds.had.co.nz/functions.html
3. Implement a Random Bias agent (choosing "head" 70% of the time) and get your agents to play against it for 120 trials (and save the data).
4. Implement a Win-Stay-Lose-Shift agent (keeping the same choice if it won, switching if it lost) and do the same.
5. Now scale up the simulation: have 100 agents for each of your strategies play against both the Random Bias and the Win-Stay-Lose-Shift agent, and save their data.
6. Figure out a good way to visualize the data to assess which strategy performs better, whether that changes over time, and generally to explore what the agents are doing.
## Defining general conditions
```{r setting general parameters}
pacman::p_load(tidyverse, patchwork)

trials <- 120 # number of trials per game
agents <- 100 # number of agents per strategy in the scaled-up simulation
```
## Implementing a random agent
Remember that a random agent is an agent that picks between "head" and "tail" at random, independently of what the opponent is doing.
A random agent might be perfectly random (50% chance of choosing "head", same for "tail") or biased. The variable "rate" determines the probability of choosing "head".
```{r implementing random agent}
rate <- 0.5
RandomAgent <- rbinom(trials, 1, rate) # we simply sample randomly from a binomial

# Now let's plot how it's choosing
d1 <- tibble(trial = seq(trials), choice = RandomAgent)
p1 <- ggplot(d1, aes(trial, choice)) +
  geom_line() +
  theme_classic()
p1

# What if we were to compare it to a biased agent?
rate <- 0.8
RandomAgent <- rbinom(trials, 1, rate) # same sampling, now with a biased rate

# Now let's plot how it's choosing
d2 <- tibble(trial = seq(trials), choice = RandomAgent)
p2 <- ggplot(d2, aes(trial, choice)) +
  geom_line() +
  theme_classic()
p1 + p2
# Tricky to see; let's plot the cumulative rate instead:
d1$cumulativerate <- cumsum(d1$choice) / seq_along(d1$choice)
d2$cumulativerate <- cumsum(d2$choice) / seq_along(d2$choice)

p3 <- ggplot(d1, aes(trial, cumulativerate)) +
  geom_line() +
  ylim(0, 1) +
  theme_classic()
p4 <- ggplot(d2, aes(trial, cumulativerate)) +
  geom_line() +
  ylim(0, 1) +
  theme_classic()
p3 + p4

## Now in the same plot
d1$rate <- 0.5
d2$rate <- 0.8
d <- rbind(d1, d2)
p5 <- ggplot(d, aes(trial, cumulativerate, color = rate, group = rate)) +
  geom_line() +
  ylim(0, 1) +
  theme_classic()
p5
# Now as a function
RandomAgent_f <- function(input, rate) {
  n <- length(input)
  choice <- rbinom(n, 1, rate)
  return(choice)
}

input <- rep(1, trials) # the content doesn't matter; only its length (the number of trials) is used
choice <- RandomAgent_f(input, rate)
d3 <- tibble(trial = seq(trials), choice)
ggplot(d3, aes(trial, choice)) + geom_line() + theme_classic()
## What if there's noise?
RandomAgentNoise_f <- function(input, rate, noise) {
  n <- length(input)
  choice <- rbinom(n, 1, rate)
  # on each trial, with probability `noise` the agent ignores its bias
  # and flips a fair coin instead
  noisyTrials <- rbinom(n, 1, noise) == 1
  choice[noisyTrials] <- rbinom(sum(noisyTrials), 1, 0.5)
  return(choice)
}
```
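The noisy agent is defined above but never demonstrated. Here is a minimal sketch of how one might check its effect; the noise level of 0.1 and the names `noisyChoice` and `d4` are our own arbitrary choices. The cumulative rate should be pulled slightly toward 0.5 compared to the noiseless biased agent.

```{r}
noisyChoice <- RandomAgentNoise_f(rep(1, trials), rate = 0.8, noise = 0.1)
d4 <- tibble(trial = seq(trials),
             cumulativerate = cumsum(noisyChoice) / seq_along(noisyChoice))
ggplot(d4, aes(trial, cumulativerate)) +
  geom_line() +
  ylim(0, 1) +
  theme_classic()
```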
## Implementing a Win-Stay-Lose-Shift agent
The Win-Stay-Lose-Shift (WSLS) agent keeps its previous choice after a win (feedback = 1) and switches to the other option after a loss (feedback = 0).
```{r}
# As a function
WSLSAgent_f <- function(prevChoice, Feedback) {
  if (Feedback == 1) {
    choice <- prevChoice # win: stay
  } else if (Feedback == 0) {
    choice <- 1 - prevChoice # lose: shift
  }
  return(choice)
}

WSLSAgentNoise_f <- function(prevChoice, Feedback, noise) {
  if (Feedback == 1) {
    choice <- prevChoice
  } else if (Feedback == 0) {
    choice <- 1 - prevChoice
  }
  # with probability `noise`, override the rule with a random choice
  if (rbinom(1, 1, noise) == 1) {choice <- rbinom(1, 1, 0.5)}
  return(choice)
}

WSLSAgent <- WSLSAgent_f(1, 0) # quick check: after losing with "1", the agent should switch to 0
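# The noisy variant can be sanity-checked the same way (a sketch; noise = 0.1
# is an arbitrary choice): with probability 0.1 the agent ignores the rule
# and flips a fair coin instead.
WSLSAgentNoise_f(1, 0, 0.1)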
# Against a random agent
Self <- rep(NA, trials)
Other <- rep(NA, trials)

Self[1] <- RandomAgent_f(1, 0.5) # first choice is random, as there is no previous feedback
Other <- RandomAgent_f(seq(trials), rate) # note: reuses the global rate (0.8) set above

for (i in 2:trials) {
  if (Self[i - 1] == Other[i - 1]) {
    Feedback <- 1 # the matcher (Self) wins when choices match
  } else {
    Feedback <- 0
  }
  Self[i] <- WSLSAgent_f(Self[i - 1], Feedback)
}

sum(Self == Other) # how many trials did Self win?
df <- tibble(Self, Other, trial = seq(trials), Feedback = as.numeric(Self == Other))

ggplot(df) + theme_classic() +
  geom_line(color = "red", aes(trial, Self)) +
  geom_line(color = "blue", aes(trial, Other))

ggplot(df) + theme_classic() +
  geom_line(color = "red", aes(trial, Feedback)) +
  geom_line(color = "blue", aes(trial, 1 - Feedback))

df$cumulativerateSelf <- cumsum(df$Feedback) / seq_along(df$Feedback)
df$cumulativerateOther <- cumsum(1 - df$Feedback) / seq_along(df$Feedback)

ggplot(df) + theme_classic() +
  geom_line(color = "red", aes(trial, cumulativerateSelf)) +
  geom_line(color = "blue", aes(trial, cumulativerateOther))
# Against another Win-Stay-Lose-Shift agent
Self <- rep(NA, trials)
Other <- rep(NA, trials)

Self[1] <- RandomAgent_f(1, 0.5)
Other[1] <- RandomAgent_f(1, 0.5)

for (i in 2:trials) {
  if (Self[i - 1] == Other[i - 1]) {
    Feedback <- 1
  } else {
    Feedback <- 0
  }
  Self[i] <- WSLSAgent_f(Self[i - 1], Feedback)
  Other[i] <- WSLSAgent_f(Other[i - 1], 1 - Feedback) # zero-sum: the mismatcher gets the opposite feedback
}

sum(Self == Other)
df <- tibble(Self, Other, trial = seq(trials), Feedback = as.numeric(Self == Other))

ggplot(df) + theme_classic() +
  geom_line(color = "red", aes(trial, Self)) +
  geom_line(color = "blue", aes(trial, Other))

ggplot(df) + theme_classic() +
  geom_line(color = "red", aes(trial, Feedback)) +
  geom_line(color = "blue", aes(trial, 1 - Feedback))

df$cumulativerateSelf <- cumsum(df$Feedback) / seq_along(df$Feedback)
df$cumulativerateOther <- cumsum(1 - df$Feedback) / seq_along(df$Feedback)

ggplot(df) + theme_classic() +
  geom_line(color = "red", aes(trial, cumulativerateSelf)) +
  geom_line(color = "blue", aes(trial, cumulativerateOther))
```
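Step 2 above suggests wrapping this kind of repeated game loop in a function. A minimal sketch of what that could look like is given below; the function name `play_WSLS_vs_random` and its argument list are our own invention, not part of the exercise.

```{r}
# A sketch of a reusable game loop: a WSLS agent (Self) plays against a biased
# random agent (Other) and a tibble of choices and feedback is returned.
play_WSLS_vs_random <- function(trials, rate) {
  Self <- rep(NA, trials)
  Other <- RandomAgent_f(seq(trials), rate)
  Self[1] <- RandomAgent_f(1, 0.5)
  for (i in 2:trials) {
    Feedback <- as.numeric(Self[i - 1] == Other[i - 1])
    Self[i] <- WSLSAgent_f(Self[i - 1], Feedback)
  }
  tibble(Self, Other, trial = seq(trials), Feedback = as.numeric(Self == Other))
}

# Usage, matching step 3 of the exercise:
# play_WSLS_vs_random(trials = 120, rate = 0.7)
```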
## Now we scale it up
```{r}
trials <- 120
agents <- 100

# WSLS playing against random agents with varying rates
for (rate in seq(from = 0.5, to = 1, by = 0.05)) {
  for (agent in seq(agents)) {
    Self <- rep(NA, trials)
    Other <- rep(NA, trials)
    Self[1] <- RandomAgent_f(1, 0.5)
    Other <- RandomAgent_f(seq(trials), rate)
    for (i in 2:trials) {
      if (Self[i - 1] == Other[i - 1]) {
        Feedback <- 1
      } else {
        Feedback <- 0
      }
      Self[i] <- WSLSAgent_f(Self[i - 1], Feedback)
    }
    temp <- tibble(Self, Other, trial = seq(trials), Feedback = as.numeric(Self == Other), agent, rate)
    if (agent == 1 & rate == 0.5) {df <- temp} else {df <- bind_rows(df, temp)}
  }
}
## WSLS playing against another WSLS
for (agent in seq(agents)) {
  Self <- rep(NA, trials)
  Other <- rep(NA, trials)
  Self[1] <- RandomAgent_f(1, 0.5)
  Other[1] <- RandomAgent_f(1, 0.5)
  for (i in 2:trials) {
    if (Self[i - 1] == Other[i - 1]) {
      Feedback <- 1
    } else {
      Feedback <- 0
    }
    Self[i] <- WSLSAgent_f(Self[i - 1], Feedback)
    Other[i] <- WSLSAgent_f(Other[i - 1], 1 - Feedback)
  }
  temp <- tibble(Self, Other, trial = seq(trials), Feedback = as.numeric(Self == Other), agent)
  if (agent == 1) {df1 <- temp} else {df1 <- bind_rows(df1, temp)}
}
```
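Steps 3-5 ask you to save the data, but the simulations above only keep it in memory. A minimal sketch, assuming CSV files are acceptable; the file names are arbitrary placeholders:

```{r}
# Save the simulated data to disk (file names are our own placeholders)
write_csv(df, "WSLS_vs_RandomBias.csv")
write_csv(df1, "WSLS_vs_WSLS.csv")
```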
### And we visualize it
```{r}
# Average performance of WSLS against random agents, split by the random agent's rate
ggplot(df, aes(trial, Feedback, group = rate, color = rate)) +
  geom_smooth(se = FALSE) + theme_classic()

# WSLS against WSLS: since the game is zero-sum, the two agents' curves mirror each other
ggplot(df1) +
  geom_smooth(aes(trial, Feedback), color = "red", se = FALSE) +
  geom_smooth(aes(trial, 1 - Feedback), color = "blue", se = FALSE) +
  theme_classic()

# Same plot, on the full 0-1 scale, to keep the size of the differences in perspective
ggplot(df1) +
  geom_smooth(aes(trial, Feedback), color = "red", se = FALSE) +
  geom_smooth(aes(trial, 1 - Feedback), color = "blue", se = FALSE) +
  ylim(0, 1) +
  theme_classic()
```
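Beyond the smoothed curves, step 6 asks which strategy performs better overall. One way to summarize this is to aggregate the mean win rate per opponent rate; the sketch below is one option among many, and the grouping choice and the name `meanWinRate` are ours, not prescribed by the exercise.

```{r}
# Mean win rate of WSLS per random-agent rate, aggregated over agents and trials
df %>%
  group_by(rate) %>%
  summarise(meanWinRate = mean(Feedback)) %>%
  ggplot(aes(rate, meanWinRate)) +
  geom_point() +
  geom_line() +
  ylim(0, 1) +
  theme_classic()
```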