forked from dschutterop/Graphite-graylog
-
Notifications
You must be signed in to change notification settings - Fork 0
/
elasticsearch_indices.py
149 lines (124 loc) · 4.96 KB
/
elasticsearch_indices.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
#!/usr/bin/python
#
# == Synopsis
#
# Script to get index data from elasticsearch
# through the ES API.
#
# Because I work with multiple scripts to push data
# into Carbon, I insert the data into a separate
# Carbon database (elasticsearch.index)
#
# === Workflow
# This script grabs JSON from your elasticsearch
# cluster (_cat/indices), transforms data into
# valid Carbon metrics and delivers it into carbon
#
# Carbon only needs three things:
# <metric> <value> <timestamp>
#
# So what we do is grab the elasticsearch
# key as the metric, grab the elasticsearch value
# as the value and make up our own timestamp. Well,
# actually, that's done through time()
#
# Author : D. Schutterop
# Email : daniel@schutterop.nl
# Version : v0.1
#
# === HowTo
#
# Please replace the value of elaHost with the hostname
# or IP address of one of your ElasticSearch hosts
# (or DNS RR, load balanced addresses or whatever)
#
# Replace the value of grpHost (and port) of the Carbon
# server and, if you want, change the grpDatabase to
# something that makes sense to you.
#
# Fire the script and see the data appear in Graphite
# (Creation of the database files may take some time...)
#
#
# Standard library imports, one per line (PEP 8).
import json
import os
import socket
import sys
import time

# Third-party.
import requests

# Seconds to sleep between collection runs.
runInterval = 15
# Elasticsearch host/port queried for per-index statistics.
elaHost = 'elasticsearch.localdomain'
elaPort = 9200
# Carbon (Graphite) host/port the metrics are delivered to.
grpHost = 'graphite.localdomain'
grpPort = 2003
# Metric path prefix under which all index metrics are stored.
grpDatabase = 'elasticsearch.index'

# Suppress the InsecureRequestWarning emitted because we call
# requests with verify=False when contacting elasticsearch.
requests.packages.urllib3.disable_warnings()
def elaGetData(elaHost,elaPort):
    """Return per-index statistics from elasticsearch.

    Calls the _cat/indices API with bytes=b (size columns as raw byte
    counts) and format=json, so the response parses into a list with
    one dict per index.

    Raises requests.HTTPError on a non-2xx response and
    requests.Timeout if the cluster does not answer in time.
    """
    # format=json: the _cat APIs return plain text unless JSON output is
    # requested explicitly (a request Content-type header describes the
    # request body; it does not select the response format).
    elaUrl = "http://%s:%s/_cat/indices?bytes=b&format=json" % (elaHost, elaPort)
    elaHeaders = {'Content-type': 'application/json'}
    # Timeout so a hung cluster cannot stall the collection loop forever.
    elaRequest = requests.get(elaUrl, verify=False, headers=elaHeaders,
                              timeout=30)
    elaRequest.raise_for_status()
    return elaRequest.json()
def grpPutMessage(grpMetricKey,grpMetricValue):
    """Format one Carbon plaintext-protocol record.

    Returns '<grpDatabase>.<grpMetricKey> <grpMetricValue> <now>' where
    <now> is the current Unix timestamp in whole seconds and
    grpDatabase is the module-level metric path prefix.
    """
    metricKey = "%s.%s" % (grpDatabase, grpMetricKey)
    # Carbon's plaintext format: <metric path> <value> <epoch seconds>
    return "%s %s %s" % (metricKey, grpMetricValue, int(time.time()))
def _elaAggregate(elaData):
    """Fold the per-index rows from _cat/indices into one totals dict.

    Each row is a dict with (at least) the keys docs.count,
    docs.deleted, store.size, pri.store.size, pri, rep and health.
    A None value in a numeric column (closed indices can report null —
    TODO confirm against your ES version) counts as 0 instead of
    crashing int().
    """
    totals = {
        'numDocs': 0,
        'docsDeleted': 0,
        'storeSizeB': 0,
        'priStoreSizeB': 0,
        'primaryShards': 0,
        'replicas': 0,
        'greenIndex': 0,
        'yellowIndex': 0,
        'redIndex': 0,
    }
    for listItem in elaData:
        totals['numDocs'] += int(listItem['docs.count'] or 0)
        totals['docsDeleted'] += int(listItem['docs.deleted'] or 0)
        totals['storeSizeB'] += int(listItem['store.size'] or 0)
        totals['priStoreSizeB'] += int(listItem['pri.store.size'] or 0)
        totals['primaryShards'] += int(listItem['pri'] or 0)
        totals['replicas'] += int(listItem['rep'] or 0)
        if listItem['health'] == 'green':
            totals['greenIndex'] += 1
        elif listItem['health'] == 'yellow':
            totals['yellowIndex'] += 1
        elif listItem['health'] == 'red':
            totals['redIndex'] += 1
    return totals

def run(runInterval):
    """Collection loop: every runInterval seconds, fetch index stats
    from elasticsearch, aggregate them and deliver the metrics to
    Carbon over a fresh plaintext-protocol socket.

    Runs forever; never returns.
    """
    while True:
        # Fetch BEFORE opening the Carbon socket, so a failed fetch
        # cannot leak an open connection (the original connected first).
        elaData = elaGetData(elaHost, elaPort)
        totals = _elaAggregate(elaData)

        storeSizeB = totals['storeSizeB']
        priStoreSizeB = totals['priStoreSizeB']
        repStoreSizeB = storeSizeB - priStoreSizeB

        # Float divisors keep the division exact under Python 2 as well.
        # 1 MB = 1048576 bytes, consistent with the binary (2**30)
        # gigabyte divisor; the original mixed 10**6 and 2**30.
        metrics = [
            ('total_indices', len(elaData)),
            ('green_index', totals['greenIndex']),
            ('yellow_index', totals['yellowIndex']),
            ('red_index', totals['redIndex']),
            ('num_docs', totals['numDocs']),
            ('deleted_docs', totals['docsDeleted']),
            ('total_store_size_bytes', storeSizeB),
            ('total_store_size_gigabytes', round(storeSizeB / 1073741824.0, 2)),
            ('total_store_size_megabytes', round(storeSizeB / 1048576.0, 2)),
            ('primary_store_size_bytes', priStoreSizeB),
            ('primary_store_size_gigabytes', round(priStoreSizeB / 1073741824.0, 2)),
            ('primary_store_size_megabytes', round(priStoreSizeB / 1048576.0, 2)),
            ('replica_store_size_bytes', repStoreSizeB),
            ('replica_store_size_gigabytes', round(repStoreSizeB / 1073741824.0, 2)),
            ('replica_store_size_megabytes', round(repStoreSizeB / 1048576.0, 2)),
            ('primary_shards', totals['primaryShards']),
            ('replicas', totals['replicas']),
        ]
        # One metric per line, newline-terminated — the shape Carbon's
        # plaintext receiver expects. (The original prepended "\n " per
        # metric, producing a leading blank line, per-line indentation
        # and an unterminated last line.)
        message = "\n".join(grpPutMessage(key, value) for key, value in metrics) + "\n"

        grpSocket = socket.socket()
        try:
            grpSocket.connect((grpHost, grpPort))
            # encode: Python 3 sockets send bytes, not str.
            grpSocket.sendall(message.encode('utf-8'))
        finally:
            # Always release the socket, even if connect/send fails.
            grpSocket.close()
        time.sleep(runInterval)
if __name__ == "__main__":
    # Daemonize with a single fork: the parent returns to the shell
    # immediately while the child stays behind running the loop.
    childPid = os.fork()
    if childPid:
        # Parent process (fork returned the child's pid): done here.
        sys.exit(0)
    print ("Running %s every %s seconds in the background." % (__file__,runInterval))
    run(runInterval)