✨ feat(production): Add ELK stack for k8s
bmd1905 committed Sep 3, 2024
1 parent a2cf7cb commit 393738e
Showing 10 changed files with 318 additions and 8 deletions.
34 changes: 33 additions & 1 deletion README.md
@@ -476,6 +476,38 @@ Or, you can manually check the Discord channel.

This setup provides comprehensive monitoring capabilities for your Kubernetes cluster. With Prometheus collecting metrics and Grafana visualizing them, you can effectively track performance, set up alerts for potential issues, and gain valuable insights into your infrastructure and applications.

### Logging with Fluent Bit + Elasticsearch + Kibana

First, create a namespace for logging:

```bash
kubectl create ns logging
```
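
If you use `kubens`, you can also switch your current context to the new namespace, as `deployments/ELK/run.sh` does:

```bash
# Optional: make 'logging' the default namespace for subsequent kubectl commands
kubens logging
```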

**1. Install Fluent Bit**

```bash
helm upgrade --install -f ./deployments/monitoring/fluent-bit.expanded.yaml fluent-bit ./deployments/monitoring/fluent-bit -n logging
```
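
Fluent Bit runs as a DaemonSet, so a quick sanity check is to list the workloads in the `logging` namespace (a generic check; exact pod names depend on the release name):

```bash
# Expect one Fluent Bit pod per node once the DaemonSet has rolled out
kubectl get daemonset,pods -n logging
```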


**2. Install Elasticsearch**

```bash
helm upgrade --install -f ./deployments/monitoring/elasticsearch.expanded.yaml elasticsearch ./deployments/monitoring/elasticsearch -n logging
```
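
The chart generates a password for the built-in `elastic` user. Assuming the default secret name used by the official chart, `elasticsearch-master-credentials` (the same secret `deployments/ELK/run.sh` reads), you can wait for the cluster and fetch the password like this:

```bash
# Wait for the Elasticsearch pods to become Ready, then print the generated 'elastic' password
kubectl wait --for=condition=ready pod -l app=elasticsearch-master -n logging --timeout=300s
kubectl get secret elasticsearch-master-credentials -n logging \
  -o jsonpath='{.data.password}' | base64 -d
```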

**3. Install Kibana**

```bash
helm upgrade --install -f ./deployments/monitoring/kibana.expanded.yaml kibana ./deployments/monitoring/kibana -n logging
```

Then apply the additional Kibana manifests:

```bash
kubectl apply -f ./deployments/monitoring/kibana-simple
```
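
To reach the Kibana UI before an Ingress is in place, a port-forward is the quickest check (the service name `kibana-kibana` assumes the chart's default naming for a release called `kibana`):

```bash
# Forward local port 5601 to the Kibana service, then open http://localhost:5601
# The service name may differ; verify with: kubectl get svc -n logging
kubectl port-forward -n logging svc/kibana-kibana 5601:5601
```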


## Contributing
We welcome contributions to PromptAlchemy! Please see our CONTRIBUTING.md for more information on how to get started.

@@ -494,4 +526,4 @@ If you use PromptAlchemy in your research, please cite it as follows:
```

## Contact
For questions, issues, or collaborations, please open an issue on our GitHub repository or contact the maintainers directly.
For questions, issues, or collaborations, please open an issue on our GitHub repository or contact the maintainers directly.
6 changes: 3 additions & 3 deletions cluster.sh
@@ -301,9 +301,9 @@ main() {
deploy_nginx_ingress
configure_api_key_secret
grant_permissions
deploy_redis
deploy_litellm
deploy_open_webui
# deploy_redis
# deploy_litellm
# deploy_open_webui

success "Cluster setup complete! Access the Open WebUI at http://$EXTERNAL_IP.nip.io"
}
18 changes: 18 additions & 0 deletions deployments/ELK/elastic.expanded.yaml
@@ -0,0 +1,18 @@
replicas: 1
minimumMasterNodes: 1
resources:
requests:
cpu: 500m
memory: 1Gi
limits:
cpu: 1000m
memory: 2Gi
volumeClaimTemplate:
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 100Gi
http:
enabled: true
tls:
enabled: true
19 changes: 19 additions & 0 deletions deployments/ELK/filebeat.expanded.yaml
@@ -0,0 +1,19 @@
daemonset:
  filebeatConfig:
    filebeat.yml: |
      filebeat.inputs:
      - type: container
        paths:
          - /var/log/containers/*.log
        processors:
          - add_kubernetes_metadata:
              host: ${NODE_NAME}
              matchers:
              - logs_path:
                  logs_path: "/var/log/containers/"
      output.logstash:
        hosts: ["elk-logstash-logstash-headless.logging.svc.cluster.local:5044"]
        timeout: 15
      logging.level: info
12 changes: 12 additions & 0 deletions deployments/ELK/kibana.expanded.yaml
@@ -0,0 +1,12 @@
elasticsearchHosts: "https://elasticsearch-master:9200"
replicas: 1
resources:
  requests:
    cpu: "200m"
    memory: "200Mi"
  limits:
    cpu: "1000m"
    memory: "2Gi"
kibanaConfig:
  kibana.yml: |
    elasticsearch.ssl.verificationMode: none
165 changes: 165 additions & 0 deletions deployments/ELK/kube-logging.txt
@@ -0,0 +1,165 @@
Folder Structure
--------------------------------------------------
./
filebeat.expanded.yaml
run.sh
elastic.expanded.yaml
logstash.expanded.yaml
kibana.expanded.yaml


File Contents
--------------------------------------------------


./filebeat.expanded.yaml
File type: .yaml
daemonset:
  filebeatConfig:
    filebeat.yml: |
      filebeat.inputs:
      - type: container
        paths:
          - /var/log/containers/*.log
        processors:
          - add_kubernetes_metadata:
              host: ${NODE_NAME}
              matchers:
              - logs_path:
                  logs_path: "/var/log/containers/"

      output.logstash:
        hosts: ["elk-logstash-logstash-headless.logging.svc.cluster.local:5044"]
        timeout: 15

      logging.level: info

--------------------------------------------------
File End
--------------------------------------------------


./run.sh
File type: .sh
#!/bin/bash

# Create namespace and switch context
kubectl create ns logging
kubens logging

# Install Elasticsearch
helm install elk-elasticsearch elastic/elasticsearch -f elastic.expanded.yaml --namespace logging --create-namespace

# Wait for Elasticsearch to be ready
echo "Waiting for Elasticsearch to be ready..."
kubectl wait --for=condition=ready pod -l app=elasticsearch-master --timeout=300s

# Create a secret for Logstash to access Elasticsearch
kubectl create secret generic logstash-elasticsearch-credentials \
--from-literal=username=elastic \
--from-literal=password=$(kubectl get secrets --namespace=logging elasticsearch-master-credentials -ojsonpath='{.data.password}' | base64 -d)

# Install Kibana
helm install elk-kibana elastic/kibana -f kibana.expanded.yaml

# Install Logstash
helm install elk-logstash elastic/logstash -f logstash.expanded.yaml

# Install Filebeat
helm install elk-filebeat elastic/filebeat -f filebeat.expanded.yaml

echo "ELK stack installation complete."
echo "Elasticsearch credentials are stored in the 'logstash-elasticsearch-credentials' secret"

--------------------------------------------------
File End
--------------------------------------------------


./elastic.expanded.yaml
File type: .yaml
replicas: 1
minimumMasterNodes: 1
resources:
requests:
cpu: 500m
memory: 1Gi
limits:
cpu: 1000m
memory: 2Gi
volumeClaimTemplate:
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 100Gi
http:
enabled: true
tls:
enabled: true

--------------------------------------------------
File End
--------------------------------------------------


./logstash.expanded.yaml
File type: .yaml
logstashConfig:
  logstash.yml: |
    http.host: 0.0.0.0
    xpack.monitoring.enabled: false

logstashPipeline:
  logstash.conf: |
    input {
      beats {
        port => 5044
      }
    }
    output {
      elasticsearch {
        hosts => ["https://elasticsearch-master-headless.logging.svc.cluster.local:9200"]
        manage_template => false
        index => "%{[@metadata][beat]}-%{+YYYY.MM.dd}"
        document_type => "%{[@metadata][type]}"
        ssl => true
        ssl_certificate_verification => false
        user => '${ELASTICSEARCH_USERNAME}'
        password => '${ELASTICSEARCH_PASSWORD}'
      }
    }
extraEnvs:
  - name: ELASTICSEARCH_USERNAME
    valueFrom:
      secretKeyRef:
        name: logstash-elasticsearch-credentials
        key: username
  - name: ELASTICSEARCH_PASSWORD
    valueFrom:
      secretKeyRef:
        name: logstash-elasticsearch-credentials
        key: password

--------------------------------------------------
File End
--------------------------------------------------


./kibana.expanded.yaml
File type: .yaml
elasticsearchHosts: "https://elasticsearch-master:9200"
replicas: 1
resources:
  requests:
    cpu: "200m"
    memory: "200Mi"
  limits:
    cpu: "1000m"
    memory: "2Gi"
kibanaConfig:
  kibana.yml: |
    elasticsearch.ssl.verificationMode: none

--------------------------------------------------
File End
--------------------------------------------------
35 changes: 35 additions & 0 deletions deployments/ELK/logstash.expanded.yaml
@@ -0,0 +1,35 @@
logstashConfig:
  logstash.yml: |
    http.host: 0.0.0.0
    xpack.monitoring.enabled: false
logstashPipeline:
  logstash.conf: |
    input {
      beats {
        port => 5044
      }
    }
    output {
      elasticsearch {
        hosts => ["https://elasticsearch-master-headless.logging.svc.cluster.local:9200"]
        manage_template => false
        index => "%{[@metadata][beat]}-%{+YYYY.MM.dd}"
        document_type => "%{[@metadata][type]}"
        ssl => true
        ssl_certificate_verification => false
        user => '${ELASTICSEARCH_USERNAME}'
        password => '${ELASTICSEARCH_PASSWORD}'
      }
    }
extraEnvs:
  - name: ELASTICSEARCH_USERNAME
    valueFrom:
      secretKeyRef:
        name: logstash-elasticsearch-credentials
        key: username
  - name: ELASTICSEARCH_PASSWORD
    valueFrom:
      secretKeyRef:
        name: logstash-elasticsearch-credentials
        key: password
29 changes: 29 additions & 0 deletions deployments/ELK/run.sh
@@ -0,0 +1,29 @@
#!/bin/bash

# Create namespace and switch context
kubectl create ns logging
kubens logging

# Install Elasticsearch
helm install elk-elasticsearch elastic/elasticsearch -f elastic.expanded.yaml --namespace logging --create-namespace

# Wait for Elasticsearch to be ready
echo "Waiting for Elasticsearch to be ready..."
kubectl wait --for=condition=ready pod -l app=elasticsearch-master --timeout=300s

# Create a secret for Logstash to access Elasticsearch
kubectl create secret generic logstash-elasticsearch-credentials \
--from-literal=username=elastic \
--from-literal=password=$(kubectl get secrets --namespace=logging elasticsearch-master-credentials -ojsonpath='{.data.password}' | base64 -d)

# Install Kibana
helm install elk-kibana elastic/kibana -f kibana.expanded.yaml

# Install Logstash
helm install elk-logstash elastic/logstash -f logstash.expanded.yaml

# Install Filebeat
helm install elk-filebeat elastic/filebeat -f filebeat.expanded.yaml

echo "ELK stack installation complete."
echo "Elasticsearch credentials are stored in the 'logstash-elasticsearch-credentials' secret"
4 changes: 2 additions & 2 deletions iac/terraform/main.tf
@@ -27,11 +27,11 @@ resource "google_container_cluster" "primary" {
enable_autopilot = false

// Specify the initial number of nodes
initial_node_count = 2
initial_node_count = 3

// Node configuration
node_config {
machine_type = "e2-highcpu-4" // 4 vCPUs, 4 GB RAM
machine_type = "e2-standard-2" // 2 vCPUs, 8 GB RAM
disk_size_gb = 30
}
}
4 changes: 2 additions & 2 deletions open-webui/kubernetes/manifest/base/webui-ingress.yaml
@@ -9,10 +9,10 @@ metadata:
spec:
# tls:
# - hosts:
# - 34.142.157.145.nip.io
# - 0.0.0.0.nip.io
# secretName: webui-tls
rules:
- host: 34.142.157.145.nip.io
- host: 0.0.0.0.nip.io
http:
paths:
- path: /
