Skip to content

Commit

Permalink
[feature](merge-cloud) Add several cloud metrics
Browse files Browse the repository at this point in the history
  • Loading branch information
SWJTU-ZhangLei committed Mar 21, 2024
1 parent 5fcc563 commit 61e62b1
Show file tree
Hide file tree
Showing 14 changed files with 388 additions and 138 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -35,9 +35,8 @@
import org.apache.logging.log4j.Logger;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ForkJoinPool;

/*
* CloudTabletStatMgr is for collecting tablet(replica) statistics from backends.
Expand All @@ -46,10 +45,8 @@
public class CloudTabletStatMgr extends MasterDaemon {
private static final Logger LOG = LogManager.getLogger(CloudTabletStatMgr.class);

private ForkJoinPool taskPool = new ForkJoinPool(Runtime.getRuntime().availableProcessors());

// <(dbId, tableId) -> CloudTableStats>
private ConcurrentHashMap<Pair<Long, Long>, CloudTableStats> cloudTableStatsMap = new ConcurrentHashMap<>();
private volatile HashMap<Pair<Long, Long>, CloudTableStats> cloudTableStatsMap = new HashMap<>();

public CloudTabletStatMgr() {
super("cloud tablet stat mgr", Config.tablet_stat_update_interval_second * 1000);
Expand Down Expand Up @@ -136,7 +133,7 @@ protected void runAfterCatalogReady() {

// after update replica in all backends, update index row num
start = System.currentTimeMillis();
ConcurrentHashMap<Pair<Long, Long>, CloudTableStats> newCloudTableStatsMap = new ConcurrentHashMap<>();
HashMap<Pair<Long, Long>, CloudTableStats> newCloudTableStatsMap = new HashMap<>();
for (Long dbId : dbIds) {
Database db = Env.getCurrentInternalCatalog().getDbNullable(dbId);
if (db == null) {
Expand Down Expand Up @@ -241,7 +238,7 @@ private GetTabletStatsResponse getTabletStats(GetTabletStatsRequest request)
return response;
}

public ConcurrentHashMap<Pair<Long, Long>, CloudTableStats> getCloudTableStatsMap() {
// Returns the most recently published per-(dbId, tableId) stats snapshot.
// The backing field is volatile and is swapped wholesale by the daemon after
// each collection pass, so callers get a consistent snapshot reference;
// callers should treat the returned map as read-only.
public HashMap<Pair<Long, Long>, CloudTableStats> getCloudTableStatsMap() {
return this.cloudTableStatsMap;
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -28,9 +28,6 @@
import org.apache.doris.common.DdlException;
import org.apache.doris.common.util.MasterDaemon;
import org.apache.doris.ha.FrontendNodeType;
import org.apache.doris.metric.GaugeMetricImpl;
import org.apache.doris.metric.Metric.MetricUnit;
import org.apache.doris.metric.MetricLabel;
import org.apache.doris.metric.MetricRepo;
import org.apache.doris.resource.Tag;
import org.apache.doris.system.Backend;
Expand Down Expand Up @@ -110,7 +107,7 @@ private void checkToAddCluster(Map<String, ClusterPB> remoteClusterIdToPB, Set<S
ClusterStatus clusterStatus = remoteClusterIdToPB.get(addId).hasClusterStatus()
? remoteClusterIdToPB.get(addId).getClusterStatus() : ClusterStatus.NORMAL;
newTagMap.put(Tag.CLOUD_CLUSTER_STATUS, String.valueOf(clusterStatus));
MetricRepo.registerClusterMetrics(clusterName, clusterId);
MetricRepo.registerCloudMetrics(clusterId, clusterName);
//toAdd.forEach(i -> i.setTagMap(newTagMap));
List<Backend> toAdd = new ArrayList<>();
for (Cloud.NodeInfoPB node : remoteClusterIdToPB.get(addId).getNodesList()) {
Expand Down Expand Up @@ -474,33 +471,18 @@ private void updateCloudMetrics() {
Map<String, List<Backend>> clusterIdToBackend = cloudSystemInfoService.getCloudClusterIdToBackend();
Map<String, String> clusterNameToId = cloudSystemInfoService.getCloudClusterNameToId();
for (Map.Entry<String, String> entry : clusterNameToId.entrySet()) {
long aliveNum = 0L;
int aliveNum = 0;
List<Backend> bes = clusterIdToBackend.get(entry.getValue());
if (bes == null || bes.size() == 0) {
LOG.info("cant get be nodes by cluster {}, bes {}", entry, bes);
continue;
}
for (Backend backend : bes) {
MetricRepo.CLOUD_CLUSTER_BACKEND_ALIVE.computeIfAbsent(backend.getAddress(), key -> {
GaugeMetricImpl<Integer> backendAlive = new GaugeMetricImpl<>("backend_alive", MetricUnit.NOUNIT,
"backend alive or not");
backendAlive.addLabel(new MetricLabel("cluster_id", entry.getValue()));
backendAlive.addLabel(new MetricLabel("cluster_name", entry.getKey()));
backendAlive.addLabel(new MetricLabel("address", key));
MetricRepo.DORIS_METRIC_REGISTER.addMetrics(backendAlive);
return backendAlive;
}).setValue(backend.isAlive() ? 1 : 0);
MetricRepo.updateClusterBackendAlive(entry.getKey(), entry.getValue(),
backend.getAddress(), backend.isAlive());
aliveNum = backend.isAlive() ? aliveNum + 1 : aliveNum;
}

MetricRepo.CLOUD_CLUSTER_BACKEND_ALIVE_TOTAL.computeIfAbsent(entry.getKey(), key -> {
GaugeMetricImpl<Long> backendAliveTotal = new GaugeMetricImpl<>("backend_alive_total",
MetricUnit.NOUNIT, "backend alive num in cluster");
backendAliveTotal.addLabel(new MetricLabel("cluster_id", entry.getValue()));
backendAliveTotal.addLabel(new MetricLabel("cluster_name", key));
MetricRepo.DORIS_METRIC_REGISTER.addMetrics(backendAliveTotal);
return backendAliveTotal;
}).setValue(aliveNum);
MetricRepo.updateClusterBackendAliveTotal(entry.getKey(), entry.getValue(), aliveNum);
}
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -203,7 +203,7 @@ public void updateCloudClusterMap(List<Backend> toAdd, List<Backend> toDel) {
if (be == null) {
be = new ArrayList<>();
clusterIdToBackend.put(clusterId, be);
MetricRepo.registerClusterMetrics(clusterName, clusterId);
MetricRepo.registerCloudMetrics(clusterId, clusterName);
}
Set<String> existed = be.stream().map(i -> i.getHost() + ":" + i.getHeartbeatPort())
.collect(Collectors.toSet());
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -34,4 +34,8 @@ public M getOrAdd(String name) {
return nameToMetric.computeIfAbsent(name, metricSupplier);
}

// Returns the live internal name->metric map, NOT a defensive copy: entries
// added later via getOrAdd are visible through this reference, and mutations
// by the caller would affect this instance. NOTE(review): assumes callers
// only read/iterate this map — confirm no caller modifies it.
public Map<String, M> getMetrics() {
return nameToMetric;
}

}
110 changes: 110 additions & 0 deletions fe/fe-core/src/main/java/org/apache/doris/metric/CloudMetrics.java
Original file line number Diff line number Diff line change
@@ -0,0 +1,110 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

package org.apache.doris.metric;

import org.apache.doris.common.Config;
import org.apache.doris.metric.Metric.MetricUnit;

import com.codahale.metrics.Histogram;
import com.codahale.metrics.MetricRegistry;

/**
 * Holder for per-cluster cloud metrics. Each {@link AutoMappedMetric} lazily
 * creates one metric per key (cluster id, or the composite latency key) and
 * registers it with {@code MetricRepo.DORIS_METRIC_REGISTER} on first use.
 * All members are package-visible and populated by {@link #init()}.
 */
public class CloudMetrics {
    // Separator used to build the composite "clusterId + delimiter + clusterName"
    // key for CLUSTER_QUERY_LATENCY_HISTO; made final since it is a constant.
    protected static final String CLOUD_CLUSTER_DELIMITER = "@delimiter#";

    // Monotonically increasing per-cluster counters (source of the rate gauges).
    protected static AutoMappedMetric<LongCounterMetric> CLUSTER_REQUEST_ALL_COUNTER;
    protected static AutoMappedMetric<LongCounterMetric> CLUSTER_QUERY_ALL_COUNTER;
    protected static AutoMappedMetric<LongCounterMetric> CLUSTER_QUERY_ERR_COUNTER;

    // Per-cluster rates derived from the counters above by MetricCalculator.
    protected static AutoMappedMetric<GaugeMetricImpl<Double>> CLUSTER_REQUEST_PER_SECOND_GAUGE;
    protected static AutoMappedMetric<GaugeMetricImpl<Double>> CLUSTER_QUERY_PER_SECOND_GAUGE;
    protected static AutoMappedMetric<GaugeMetricImpl<Double>> CLUSTER_QUERY_ERR_RATE_GAUGE;

    // Query latency histogram, keyed by clusterId + CLOUD_CLUSTER_DELIMITER + clusterName.
    protected static AutoMappedMetric<Histogram> CLUSTER_QUERY_LATENCY_HISTO;

    // Backend liveness: per-backend 0/1 flag and per-cluster alive count.
    protected static AutoMappedMetric<GaugeMetricImpl<Integer>> CLUSTER_BACKEND_ALIVE;
    protected static AutoMappedMetric<GaugeMetricImpl<Integer>> CLUSTER_BACKEND_ALIVE_TOTAL;

    /**
     * Creates the metric holders above. No-op unless running in cloud mode, so
     * the fields stay null in non-cloud deployments — callers must be gated by
     * {@code Config.isCloudMode()} as well.
     */
    protected static void init() {
        if (!Config.isCloudMode()) {
            return;
        }
        CLUSTER_REQUEST_ALL_COUNTER = new AutoMappedMetric<>(name -> {
            LongCounterMetric counter = new LongCounterMetric("request_total", MetricUnit.REQUESTS,
                    "total request");
            MetricRepo.DORIS_METRIC_REGISTER.addMetrics(counter);
            return counter;
        });

        CLUSTER_QUERY_ALL_COUNTER = new AutoMappedMetric<>(name -> {
            LongCounterMetric counter = new LongCounterMetric("query_total", MetricUnit.REQUESTS,
                    "total query");
            MetricRepo.DORIS_METRIC_REGISTER.addMetrics(counter);
            return counter;
        });

        CLUSTER_QUERY_ERR_COUNTER = new AutoMappedMetric<>(name -> {
            LongCounterMetric counter = new LongCounterMetric("query_err", MetricUnit.REQUESTS,
                    "total error query");
            MetricRepo.DORIS_METRIC_REGISTER.addMetrics(counter);
            return counter;
        });

        CLUSTER_REQUEST_PER_SECOND_GAUGE = new AutoMappedMetric<>(name -> {
            GaugeMetricImpl<Double> gauge = new GaugeMetricImpl<>("rps", MetricUnit.NOUNIT,
                    "request per second");
            MetricRepo.DORIS_METRIC_REGISTER.addMetrics(gauge);
            return gauge;
        });

        CLUSTER_QUERY_PER_SECOND_GAUGE = new AutoMappedMetric<>(name -> {
            GaugeMetricImpl<Double> gauge = new GaugeMetricImpl<>("qps", MetricUnit.NOUNIT,
                    "query per second");
            MetricRepo.DORIS_METRIC_REGISTER.addMetrics(gauge);
            return gauge;
        });

        CLUSTER_QUERY_ERR_RATE_GAUGE = new AutoMappedMetric<>(name -> {
            GaugeMetricImpl<Double> gauge = new GaugeMetricImpl<>("query_err_rate", MetricUnit.NOUNIT,
                    "query error rate");
            MetricRepo.DORIS_METRIC_REGISTER.addMetrics(gauge);
            return gauge;
        });

        CLUSTER_BACKEND_ALIVE = new AutoMappedMetric<>(name -> {
            GaugeMetricImpl<Integer> gauge = new GaugeMetricImpl<>("backend_alive", MetricUnit.NOUNIT,
                    "backend alive or not");
            MetricRepo.DORIS_METRIC_REGISTER.addMetrics(gauge);
            return gauge;
        });

        CLUSTER_BACKEND_ALIVE_TOTAL = new AutoMappedMetric<>(name -> {
            GaugeMetricImpl<Integer> gauge = new GaugeMetricImpl<>("backend_alive_total", MetricUnit.NOUNIT,
                    "backend alive num in cluster");
            MetricRepo.DORIS_METRIC_REGISTER.addMetrics(gauge);
            return gauge;
        });

        // The histogram key carries both id and name so the metric name can
        // include both labels; split on the delimiter defined above.
        CLUSTER_QUERY_LATENCY_HISTO = new AutoMappedMetric<>(key -> {
            String[] values = key.split(CLOUD_CLUSTER_DELIMITER);
            String clusterId = values[0];
            String clusterName = values[1];
            String metricName = MetricRegistry.name("query", "latency", "ms", "cluster_id="
                    + clusterId, "cluster_name=" + clusterName);
            return MetricRepo.METRIC_REGISTER.histogram(metricName);
        });
    }
}
Original file line number Diff line number Diff line change
Expand Up @@ -192,7 +192,7 @@ private void setHistogramJsonMetric(StringBuilder sb, String metric, String quan
}

@Override
public void getNodeInfo() {
public void visitNodeInfo() {
if (Env.getCurrentEnv().isMaster()) {
setNodeInfo(sb, "node_info", "is_master", null, 1, false);
}
Expand All @@ -208,6 +208,11 @@ public void getNodeInfo() {
Env.getCurrentEnv().getBrokerMgr().getAllBrokers().stream().filter(b -> !b.isAlive).count(), true);
}

@Override
public void visitCloudTableStats() {
    // Intentionally empty: this visitor produces no cloud table stats output.
}

private void setNodeInfo(StringBuilder sb, String metric, String type,
String status, long value, boolean lastMetric) {
sb.append("{\n\t\"tags\":\n\t{\n");
Expand Down
5 changes: 5 additions & 0 deletions fe/fe-core/src/main/java/org/apache/doris/metric/Metric.java
Original file line number Diff line number Diff line change
Expand Up @@ -84,5 +84,10 @@ public List<MetricLabel> getLabels() {
return labels;
}

/**
 * Replaces this metric's label list with {@code newLabels}. The caller's list
 * is stored as-is (no defensive copy), so later mutations of it are visible
 * through {@code getLabels()}.
 *
 * @param newLabels the labels to attach to this metric
 * @return {@code this}, to allow call chaining
 */
public Metric<T> setLabels(List<MetricLabel> newLabels) {
this.labels = newLabels;
return this;
}

public abstract T getValue();
}
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,11 @@

package org.apache.doris.metric;

import org.apache.doris.common.Config;

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.TimerTask;

/*
Expand All @@ -30,6 +34,10 @@ public class MetricCalculator extends TimerTask {
private long lastRequestCounter = -1;
private long lastQueryErrCounter = -1;

private Map<String, Long> clusterLastRequestCounter = new HashMap<>();
private Map<String, Long> clusterLastQueryCounter = new HashMap<>();
private Map<String, Long> clusterLastQueryErrCounter = new HashMap<>();

@Override
public void run() {
update();
Expand All @@ -42,6 +50,7 @@ private void update() {
lastQueryCounter = MetricRepo.COUNTER_QUERY_ALL.getValue();
lastRequestCounter = MetricRepo.COUNTER_REQUEST_ALL.getValue();
lastQueryErrCounter = MetricRepo.COUNTER_QUERY_ERR.getValue();
initCloudMetrics();
return;
}

Expand All @@ -65,6 +74,7 @@ private void update() {
MetricRepo.GAUGE_QUERY_ERR_RATE.setValue(errRate < 0 ? 0.0 : errRate);
lastQueryErrCounter = currentErrCounter;

updateCloudMetrics(interval);
lastTs = currentTs;

// max tablet compaction score of all backends
Expand All @@ -77,4 +87,69 @@ private void update() {
}
MetricRepo.GAUGE_MAX_TABLET_COMPACTION_SCORE.setValue(maxCompactionScore);
}

/**
 * Seeds the per-cluster baseline maps with the current counter values so the
 * first rate computation in updateCloudMetrics starts from "now" instead of
 * from zero. No-op unless running in cloud mode.
 */
private void initCloudMetrics() {
    if (!Config.isCloudMode()) {
        return;
    }

    Map<String, LongCounterMetric> requestAll = CloudMetrics.CLUSTER_REQUEST_ALL_COUNTER.getMetrics();
    if (requestAll != null) {
        for (Map.Entry<String, LongCounterMetric> entry : requestAll.entrySet()) {
            clusterLastRequestCounter.put(entry.getKey(), entry.getValue().getValue());
        }
    }

    Map<String, LongCounterMetric> queryAll = CloudMetrics.CLUSTER_QUERY_ALL_COUNTER.getMetrics();
    if (queryAll != null) {
        for (Map.Entry<String, LongCounterMetric> entry : queryAll.entrySet()) {
            clusterLastQueryCounter.put(entry.getKey(), entry.getValue().getValue());
        }
    }

    Map<String, LongCounterMetric> queryErr = CloudMetrics.CLUSTER_QUERY_ERR_COUNTER.getMetrics();
    if (queryErr != null) {
        for (Map.Entry<String, LongCounterMetric> entry : queryErr.entrySet()) {
            clusterLastQueryErrCounter.put(entry.getKey(), entry.getValue().getValue());
        }
    }
}

/**
 * Computes per-cluster request/query/error rates from the counter deltas
 * accumulated since the previous tick and publishes them via MetricRepo,
 * then advances the per-cluster baselines. No-op outside cloud mode.
 *
 * @param interval elapsed time since the previous update, in seconds
 */
private void updateCloudMetrics(long interval) {
    if (!Config.isCloudMode()) {
        return;
    }
    // Guard against a zero-length interval (two ticks in the same second),
    // which would otherwise divide by zero below.
    if (interval <= 0) {
        return;
    }

    Map<String, LongCounterMetric> requestAllMetrics = CloudMetrics.CLUSTER_REQUEST_ALL_COUNTER.getMetrics();
    if (requestAllMetrics != null) {
        requestAllMetrics.forEach((clusterId, metric) -> {
            double rps = (double) (metric.getValue() - clusterLastRequestCounter.getOrDefault(clusterId, 0L))
                    / interval;
            rps = Double.max(rps, 0);
            MetricRepo.updateClusterRequestPerSecond(clusterId, rps, metric.getLabels());
            // put (not replace): replace() silently skips clusters first seen
            // after initCloudMetrics(), leaving their baseline stuck at 0.
            clusterLastRequestCounter.put(clusterId, metric.getValue());
        });
    }

    Map<String, LongCounterMetric> queryAllMetrics = CloudMetrics.CLUSTER_QUERY_ALL_COUNTER.getMetrics();
    if (queryAllMetrics != null) {
        queryAllMetrics.forEach((clusterId, metric) -> {
            double qps = (double) (metric.getValue() - clusterLastQueryCounter.getOrDefault(clusterId, 0L))
                    / interval;
            qps = Double.max(qps, 0);
            MetricRepo.updateClusterQueryPerSecond(clusterId, qps, metric.getLabels());
            clusterLastQueryCounter.put(clusterId, metric.getValue());
        });
    }

    Map<String, LongCounterMetric> queryErrMetrics = CloudMetrics.CLUSTER_QUERY_ERR_COUNTER.getMetrics();
    if (queryErrMetrics != null) {
        queryErrMetrics.forEach((clusterId, metric) -> {
            double errRate = (double) (metric.getValue() - clusterLastQueryErrCounter.getOrDefault(clusterId, 0L))
                    / interval;
            errRate = Double.max(errRate, 0);
            MetricRepo.updateClusterQueryErrRate(clusterId, errRate, metric.getLabels());
            // BUGFIX: this previously updated clusterLastQueryCounter, which
            // corrupted the QPS baseline and left the error baseline stale.
            clusterLastQueryErrCounter.put(clusterId, metric.getValue());
        });
    }
}
}
Loading

0 comments on commit 61e62b1

Please sign in to comment.