-
Notifications
You must be signed in to change notification settings - Fork 2
/
application.conf
192 lines (164 loc) · 5.3 KB
/
application.conf
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
webservice {
port = 8000
interface = 0.0.0.0
instance.name = "reference"
}
akka {
loggers = ["akka.event.slf4j.Slf4jLogger"]
actor {
default-dispatcher {
fork-join-executor {
}
}
}
dispatchers {
# A dispatcher for actors performing blocking io operations
# Prevents the whole system from being slowed down when waiting for responses from external resources for instance
io-dispatcher {
type = Dispatcher
executor = "fork-join-executor"
# Using the forkjoin defaults, this can be tuned if we wish
}
# A dispatcher for actors handling API operations
# Keeps the API responsive regardless of the load of workflows being run
api-dispatcher {
type = Dispatcher
executor = "fork-join-executor"
}
# A dispatcher for engine actors
# Because backends behaviour is unpredictable (potentially blocking, slow) the engine runs
# on its own dispatcher to prevent backends from affecting its performance.
engine-dispatcher {
type = Dispatcher
executor = "fork-join-executor"
}
# A dispatcher used by supported backend actors
backend-dispatcher {
type = Dispatcher
executor = "fork-join-executor"
}
# Note that without further configuration, all other actors run on the default dispatcher
}
}
spray.can {
server {
request-timeout = 40s
}
client {
request-timeout = 40s
connecting-timeout = 40s
}
}
system {
// If 'true', a SIGINT will trigger Cromwell to attempt to abort all currently running jobs before exiting
abort-jobs-on-terminate = false
// Max number of retries per job that the engine will attempt in case of a retryable failure received from the backend
max-retries = 10
// If 'true' then when Cromwell starts up, it tries to restart incomplete workflows
workflow-restart = true
// Cromwell will cap the number of running workflows at N
max-concurrent-workflows = 5000
// Cromwell will launch up to N submitted workflows at a time, regardless of how many open workflow slots exist
max-workflow-launch-count = 50
// Number of seconds between workflow launches
new-workflow-poll-rate = 20
// Since the WorkflowLogCopyRouter is initialized in code, this is the number of workers
number-of-workflow-log-copy-workers = 10
}
workflow-options {
// These workflow options will be encrypted when stored in the database
encrypted-fields: []
// AES-256 key to use to encrypt the values in `encrypted-fields`
base64-encryption-key: "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA="
// Directory where to write per workflow logs
workflow-log-dir: "/scratch1/fs1/gtac-mgi/CLE/soma/cromwell-workflow-logs"
// When true, per workflow logs will be deleted after copying
workflow-log-temporary: true
// Workflow-failure-mode determines what happens to other calls when a call fails. Can be either ContinueWhilePossible or NoNewCalls.
// Can also be overridden in workflow options. Defaults to NoNewCalls. Uncomment to change:
//workflow-failure-mode: "ContinueWhilePossible"
}
call-caching {
enabled = false
invalidate-bad-cache-results = false
}
backend {
default = "LSF"
providers {
Local {
actor-factory = "cromwell.backend.impl.sfs.config.ConfigBackendLifecycleActorFactory"
config {
run-in-background = true
runtime-attributes = "String? docker"
submit = "/bin/bash ${script}"
submit-docker = "docker run --rm -v ${cwd}:${docker_cwd} -i ${docker} /bin/bash < ${script}"
root: "/scratch1/fs1/gtac-mgi/CLE/soma/cromwell-executions"
filesystems {
local {
localization: [
"hard-link", "soft-link", "copy"
]
}
}
}
}
LSF {
actor-factory = "cromwell.backend.impl.sfs.config.ConfigBackendLifecycleActorFactory"
config {
runtime-attributes = """
Int cpu = 1
Int memory_gb = 8
String? queue
String? docker_image
String? resource
String? job_group
String? dragen_env
"""
submit = """
${dragen_env} \
bsub \
-J ${job_name} \
-cwd ${cwd} \
-o ${out} \
-e ${err} \
-G 'compute-gtac-mgi' \
-a "${docker_image}" \
-q ${queue} \
-g ${job_group} \
-M ${memory_gb}G \
-n ${cpu} \
-R "span[hosts=1] select[mem>${memory_gb}G] rusage[mem=${memory_gb}G]" \
/bin/bash ${script}
"""
submit-docker = """
${dragen_env} \
bsub \
-J ${job_name} \
-cwd ${cwd} \
-o ${out} \
-e ${err} \
-G 'compute-gtac-mgi' \
-a "${docker_image}" \
-q ${queue} \
-g ${job_group} \
-M ${memory_gb}G \
-n ${cpu} \
-R "span[hosts=1] select[mem>${memory_gb}G] rusage[mem=${memory_gb}G]" \
/bin/bash ${script}
"""
kill = "bkill ${job_id}"
check-alive = "bjobs -noheader -o \"stat\" ${job_id} | /bin/grep 'PEND\\|RUN'"
job-id-regex = "Job <(\\d+)>.*"
root: "/scratch1/fs1/gtac-mgi/CLE/soma/cromwell-executions"
}
}
}
}
services {
KeyValue {
class = "cromwell.services.keyvalue.impl.SqlKeyValueServiceActor"
}
MetadataService {
class = "cromwell.services.metadata.impl.MetadataServiceActor"
}
}