
Commit

Merge pull request #6 from ofesseler/dev
v0.2.5
ofesseler authored Dec 14, 2016
2 parents 524b261 + 6842db0 commit 0e16052
Showing 7 changed files with 228 additions and 6 deletions.
18 changes: 13 additions & 5 deletions README.md
@@ -78,11 +78,19 @@ with `gluster volume info` this is obsolete
| volProfile.cumulativeStatus.duration | Count | implemented |
| volProfile.cumulativeStatus.totalRead | Count | implemented |
| volProfile.cumulativeStatus.totalWrite | Count | implemented |
- | volProfile.cumulativeStats.fopStats.fop.Name | WRITE, STATFS, FLUSH, OPENDIR, CREATE, LOOKUP, READDIR, FINODELK, ENTRYLK, FXATTROP | pending |
- | volProfile.cumulativeStats.fopStats.fop.hits | count | pending |
- | volProfile.cumulativeStats.fopStats.fop.avgLatency | Gauge | pending |
- | volProfile.cumulativeStats.fopStats.fop.minLatency | Gauge | pending |
- | volProfile.cumulativeStats.fopStats.fop.maxLatency | Gauge | pending |
+ | volProfile.cumulativeStats.fopStats.fop.Name | WRITE, STATFS, FLUSH, OPENDIR, CREATE, LOOKUP, READDIR, FINODELK, ENTRYLK, FXATTROP | pending |
+ | volProfile.cumulativeStats.fopStats.fop.hits | count | implemented |
+ | volProfile.cumulativeStats.fopStats.fop.avgLatency | Gauge | implemented |
+ | volProfile.cumulativeStats.fopStats.fop.minLatency | Gauge | implemented |
+ | volProfile.cumulativeStats.fopStats.fop.maxLatency | Gauge | implemented |


### Command `gluster volume status all detail`
| Name | type | Labels | impl. state |
|------|------|--------|-------------|
| volStatus.volumes.volume[].node[].sizeFree | Gauge | hostname, path, volume | implemented |
| volStatus.volumes.volume[].node[].sizeTotal | Gauge | hostname, path, volume | implemented |
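
As a usage sketch (the `gluster_` metric prefix is an assumption here; the exporter's namespace is defined in main.go, not in this diff), the used-space fraction per brick could be queried in PromQL as:

    1 - (gluster_node_size_free_bytes / gluster_node_size_total_bytes)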


## Troubleshooting
If the following message appears while trying to get information out of your Gluster setup, increase the scrape interval in `prometheus.yml` to at least 30s.
2 changes: 1 addition & 1 deletion VERSION
@@ -1 +1 @@
-0.2.4
+0.2.5
13 changes: 13 additions & 0 deletions gluster_client.go
@@ -77,3 +77,16 @@ func ExecVolumeProfileGvInfoCumulative(volumeName string) (structs.VolProfile, e
	}
	return volumeProfile.VolProfile, nil
}

// ExecVolumeStatusAllDetail executes "gluster volume status all detail" on the local machine
// and returns a VolumeStatusXML struct and an error.
func ExecVolumeStatusAllDetail() (structs.VolumeStatusXML, error) {
	args := []string{"volume", "status", "all", "detail"}
	bytesBuffer := execGlusterCommand(args...)
	volumeStatus, err := structs.VolumeStatusAllDetailXMLUnmarshall(bytesBuffer)
	if err != nil {
		log.Errorf("Something went wrong while unmarshalling xml: %v", err)
		return volumeStatus, err
	}
	return volumeStatus, nil
}
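
For context, `execGlusterCommand` is a helper defined earlier in gluster_client.go and is not part of this diff. A minimal sketch of what it plausibly does, assuming it shells out to the gluster binary with `--xml` appended and buffers stdout (`bytes` and `os/exec` imports assumed):

// Sketch only: the real helper lives earlier in gluster_client.go and may differ.
func execGlusterCommandSketch(arg ...string) *bytes.Buffer {
	stdoutBuffer := &bytes.Buffer{}
	argXML := append(arg, "--xml") // gluster emits XML output with this flag
	glusterExec := exec.Command("gluster", argXML...)
	glusterExec.Stdout = stdoutBuffer // capture stdout for unmarshalling
	if err := glusterExec.Run(); err != nil {
		log.Errorf("gluster %v: command failed: %v", argXML, err)
	}
	return stdoutBuffer
}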
33 changes: 33 additions & 0 deletions main.go
@@ -53,6 +53,18 @@ var (
[]string{"volume"}, nil,
)

	nodeSizeFreeBytes = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, "", "node_size_free_bytes"),
		"Free bytes reported for each node on each instance. Labels are to distinguish origins",
		[]string{"hostname", "path", "volume"}, nil,
	)

	nodeSizeTotalBytes = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, "", "node_size_total_bytes"),
		"Total bytes reported for each node on each instance. Labels are to distinguish origins",
		[]string{"hostname", "path", "volume"}, nil,
	)

	brickCount = prometheus.NewDesc(
		prometheus.BuildFQName(namespace, "", "brick_count"),
		"Number of bricks at last query.",
@@ -126,6 +138,8 @@ func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {
	ch <- brickDataRead
	ch <- brickDataWritten
	ch <- peersConnected
	ch <- nodeSizeFreeBytes
	ch <- nodeSizeTotalBytes
	ch <- brickFopHits
	ch <- brickFopLatencyAvg
	ch <- brickFopLatencyMin
@@ -229,6 +243,25 @@ func (e *Exporter) Collect(ch chan<- prometheus.Metric) {
			}
		}
	}

	// executes "gluster volume status all detail"
	volumeStatusAll, err := ExecVolumeStatusAllDetail()
	if err != nil {
		log.Errorf("couldn't parse xml of volume status: %v", err)
	}
	for _, vol := range volumeStatusAll.VolStatus.Volumes {
		for _, node := range vol.Volume.Node {
			if node.Status != 1 {
				// The original commit left this branch empty; skipping
				// offline bricks (status != 1) is an assumed intent.
				continue
			}
			// Sizes are point-in-time readings, so GaugeValue is used here
			// rather than the CounterValue of the original commit, matching
			// the "Gauge" type documented in the README table above.
			ch <- prometheus.MustNewConstMetric(
				nodeSizeTotalBytes, prometheus.GaugeValue, float64(node.SizeTotal), node.Hostname, node.Path, vol.Volume.VolName,
			)

			ch <- prometheus.MustNewConstMetric(
				nodeSizeFreeBytes, prometheus.GaugeValue, float64(node.SizeFree), node.Hostname, node.Path, vol.Volume.VolName,
			)
		}
	}
}
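
With the test fixture added below, these metrics would surface on the exporter's /metrics endpoint roughly as follows (the `gluster_` prefix assumes the package-level `namespace` constant, which this diff does not show):

    gluster_node_size_total_bytes{hostname="node1.example.local",path="/mnt/gluster/gv_test",volume="gv_test"} 2.050791424e+10
    gluster_node_size_free_bytes{hostname="node1.example.local",path="/mnt/gluster/gv_test",volume="gv_test"} 1.9517558784e+10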

// ContainsVolume checks whether a slice contains an element
44 changes: 44 additions & 0 deletions structs/xmlStructs.go
@@ -187,3 +187,47 @@ func VolumeProfileGvInfoCumulativeXMLUnmarshall(cmdOutBuff *bytes.Buffer) (Volum
	xml.Unmarshal(b, &vol)
	return vol, nil
}

// VolumeStatusXML maps the XML output of "gluster volume status all detail".
type VolumeStatusXML struct {
	XMLName   xml.Name `xml:"cliOutput"`
	OpRet     int      `xml:"opRet"`
	OpErrno   int      `xml:"opErrno"`
	OpErrstr  string   `xml:"opErrstr"`
	VolStatus struct {
		Volumes []struct {
			Volume struct {
				VolName   string `xml:"volName"`
				NodeCount int    `xml:"nodeCount"`
				Node      []struct {
					Hostname string `xml:"hostname"`
					Path     string `xml:"path"`
					PeerID   string `xml:"peerid"`
					Status   int    `xml:"status"`
					Port     int    `xml:"port"`
					Ports    struct {
						TCP  int    `xml:"tcp"`
						RDMA string `xml:"rdma"`
					} `xml:"ports"`
					Pid        int    `xml:"pid"`
					SizeTotal  uint64 `xml:"sizeTotal"`
					SizeFree   uint64 `xml:"sizeFree"`
					Device     string `xml:"device"`
					BlockSize  int    `xml:"blockSize"`
					MntOptions string `xml:"mntOptions"`
					FsName     string `xml:"fsName"`
				} `xml:"node"`
			} `xml:"volume"`
		} `xml:"volumes"`
	} `xml:"volStatus"`
}

// VolumeStatusAllDetailXMLUnmarshall unmarshalls the buffered XML output of
// "gluster volume status all detail" into a VolumeStatusXML struct.
func VolumeStatusAllDetailXMLUnmarshall(cmdOutBuff *bytes.Buffer) (VolumeStatusXML, error) {
	var vol VolumeStatusXML
	b, err := ioutil.ReadAll(cmdOutBuff)
	if err != nil {
		log.Error(err)
		return vol, err
	}
	if err := xml.Unmarshal(b, &vol); err != nil {
		// The original commit discarded this error; surfacing it is safer.
		log.Error(err)
		return vol, err
	}
	return vol, nil
}
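
A minimal usage sketch from outside the package (hypothetical snippet; `cliXMLOutput` stands for captured "gluster volume status all detail --xml" output):

buf := bytes.NewBufferString(cliXMLOutput)
status, err := structs.VolumeStatusAllDetailXMLUnmarshall(buf)
if err == nil && len(status.VolStatus.Volumes) > 0 {
	fmt.Println(status.VolStatus.Volumes[0].Volume.VolName) // e.g. "gv_test"
}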
38 changes: 38 additions & 0 deletions structs/xmlStructs_test.go
@@ -88,6 +88,44 @@ func TestPeerStatusXMLUnmarshall(t *testing.T) {
t.Log("gluster peer status test was successful.")
}

func TestVolumeStatusAllDetailXMLUnmarshall(t *testing.T) {
	testXMLPath := "../test/gluster_volume_status_all_detail.xml"
	t.Log("Test xml unmarshal for 'gluster volume status all detail' with file: ", testXMLPath)
	dat, err := ioutil.ReadFile(testXMLPath)
	if err != nil {
		t.Errorf("error reading testxml in Path: %v", testXMLPath)
	}
	volumeStatus, err := VolumeStatusAllDetailXMLUnmarshall(bytes.NewBuffer(dat))
	if err != nil {
		t.Error(err)
	}

	if volumeStatus.OpErrno != 0 {
		t.Error(volumeStatus.OpErrstr)
	}

	for _, vol := range volumeStatus.VolStatus.Volumes {
		if vol.Volume.NodeCount != 4 {
			t.Errorf("nodecount mismatch: %v instead of 4", vol.Volume.NodeCount)
		}

		for _, node := range vol.Volume.Node {
			if node.BlockSize != 4096 {
				t.Errorf("blockSize mismatch: %v instead of 4096", node.BlockSize)
			}
		}

		if vol.Volume.Node[0].SizeFree != 19517558784 {
			t.Errorf("SizeFree doesn't match 19517558784: %v", vol.Volume.Node[0].SizeFree)
		}

		if vol.Volume.Node[0].SizeTotal != 20507914240 {
			t.Errorf("SizeTotal doesn't match 20507914240: %v", vol.Volume.Node[0].SizeTotal)
		}
	}
}
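
Assuming the standard Go toolchain, the new test can be run in isolation with:

    go test ./structs -run TestVolumeStatusAllDetailXMLUnmarshall -v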

func TestVolumeProfileGvInfoCumulativeXMLUnmarshall(t *testing.T) {
	testXMLPath := "../test/gluster_volume_profile_gv_test_info_cumulative.xml"
	t.Log("Test xml unmarshal for 'gluster volume profile gv_test info' with file: ", testXMLPath)
86 changes: 86 additions & 0 deletions test/gluster_volume_status_all_detail.xml
@@ -0,0 +1,86 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<cliOutput>
  <opRet>0</opRet>
  <opErrno>0</opErrno>
  <opErrstr/>
  <volStatus>
    <volumes>
      <volume>
        <volName>gv_test</volName>
        <nodeCount>4</nodeCount>
        <node>
          <hostname>node1.example.local</hostname>
          <path>/mnt/gluster/gv_test</path>
          <peerid>a049c424-bd82-4436-abd4-ef3fc37c76ba</peerid>
          <status>1</status>
          <port>49153</port>
          <ports>
            <tcp>49153</tcp>
            <rdma>N/A</rdma>
          </ports>
          <pid>1342</pid>
          <sizeTotal>20507914240</sizeTotal>
          <sizeFree>19517558784</sizeFree>
          <device>/dev/loop0</device>
          <blockSize>4096</blockSize>
          <mntOptions>rw,relatime,data=ordered</mntOptions>
          <fsName>ext4</fsName>
        </node>
        <node>
          <hostname>node2.example.local</hostname>
          <path>/mnt/gluster/gv_test</path>
          <peerid>f6fa44e7-5139-4f6e-8404-6d2ce7d66231</peerid>
          <status>1</status>
          <port>49153</port>
          <ports>
            <tcp>49153</tcp>
            <rdma>N/A</rdma>
          </ports>
          <pid>1303</pid>
          <sizeTotal>20507914240</sizeTotal>
          <sizeFree>19517558784</sizeFree>
          <device>/dev/loop0</device>
          <blockSize>4096</blockSize>
          <mntOptions>rw,relatime,data=ordered</mntOptions>
          <fsName>ext4</fsName>
        </node>
        <node>
          <hostname>node3.example.local</hostname>
          <path>/mnt/gluster/gv_test</path>
          <peerid>073c4354-f8eb-4474-95b3-c2bc235ca44d</peerid>
          <status>1</status>
          <port>49153</port>
          <ports>
            <tcp>49153</tcp>
            <rdma>N/A</rdma>
          </ports>
          <pid>1284</pid>
          <sizeTotal>20507914240</sizeTotal>
          <sizeFree>19517558784</sizeFree>
          <device>/dev/loop0</device>
          <blockSize>4096</blockSize>
          <mntOptions>rw,relatime,data=ordered</mntOptions>
          <fsName>ext4</fsName>
        </node>
        <node>
          <hostname>node4.example.local</hostname>
          <path>/mnt/gluster/gv_test</path>
          <peerid>1d5d9c25-211c-4db6-8fd6-274cf3774d88</peerid>
          <status>1</status>
          <port>49153</port>
          <ports>
            <tcp>49153</tcp>
            <rdma>N/A</rdma>
          </ports>
          <pid>1312</pid>
          <sizeTotal>20507914240</sizeTotal>
          <sizeFree>19517566976</sizeFree>
          <device>/dev/loop0</device>
          <blockSize>4096</blockSize>
          <mntOptions>rw,relatime,data=ordered</mntOptions>
          <fsName>ext4</fsName>
        </node>
      </volume>
    </volumes>
  </volStatus>
</cliOutput>
