diff --git a/infrastructure/cluster/flux-system/kustomization.yaml b/infrastructure/cluster/flux-system/kustomization.yaml
index 986fa64..f3b734f 100644
--- a/infrastructure/cluster/flux-system/kustomization.yaml
+++ b/infrastructure/cluster/flux-system/kustomization.yaml
@@ -4,3 +4,4 @@ resources:
 - gotk-components.yaml
 - gotk-sync.yaml
 - platform.yaml
+- services.yaml
diff --git a/infrastructure/cluster/flux-system/services.yaml b/infrastructure/cluster/flux-system/services.yaml
new file mode 100644
index 0000000..cb11b20
--- /dev/null
+++ b/infrastructure/cluster/flux-system/services.yaml
@@ -0,0 +1,14 @@
+apiVersion: kustomize.toolkit.fluxcd.io/v1
+kind: Kustomization
+metadata:
+  name: services
+  namespace: flux-system
+spec:
+  interval: 10m0s
+  path: ./infrastructure/services
+  prune: true
+  dependsOn:
+    - name: platform
+  sourceRef:
+    kind: GitRepository
+    name: flux-system
\ No newline at end of file
diff --git a/infrastructure/services/openai-chat-service/deployment.yaml b/infrastructure/services/openai-chat-service/deployment.yaml
new file mode 100644
index 0000000..a27a53f
--- /dev/null
+++ b/infrastructure/services/openai-chat-service/deployment.yaml
@@ -0,0 +1,54 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: openai-chat-service
+  namespace: default
+  labels:
+    app: openai-chat-service
+spec:
+  replicas: 2
+  selector:
+    matchLabels:
+      app: openai-chat-service
+  template:
+    metadata:
+      labels:
+        app: openai-chat-service
+    spec:
+      containers:
+        - name: openai-chat-service
+          image: ghcr.io/lreimer/k8s-native-java-ai-openai-chat-service:main
+          imagePullPolicy: Always
+          ports:
+            - name: http
+              containerPort: 8080
+            - name: management
+              containerPort: 9000
+          resources:
+            requests:
+              memory: "128Mi"
+              cpu: "250m"
+            limits:
+              memory: "256Mi"
+              cpu: "500m"
+          readinessProbe:
+            httpGet:
+              path: /q/health/ready
+              port: management
+            initialDelaySeconds: 10
+            periodSeconds: 10
+          livenessProbe:
+            httpGet:
+              path: /q/health/live
+              port: management
+            initialDelaySeconds: 10
+            periodSeconds: 10
+          env:
+            - name: QUARKUS_LANGCHAIN4J_OPENAI_API_KEY
+              valueFrom:
+                secretKeyRef:
+                  name: openai-api-key
+                  key: OPENAI_API_KEY
+            # - name: QUARKUS_LANGCHAIN4J_OPENAI_BASE_URL
+            #   value: "http://openai-proxy-service.default.svc.cluster.local:10000"
+
\ No newline at end of file
diff --git a/infrastructure/services/openai-chat-service/kustomization.yaml b/infrastructure/services/openai-chat-service/kustomization.yaml
new file mode 100644
index 0000000..dffc32f
--- /dev/null
+++ b/infrastructure/services/openai-chat-service/kustomization.yaml
@@ -0,0 +1,5 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+resources:
+- deployment.yaml
+- service.yaml
diff --git a/infrastructure/services/openai-chat-service/service.yaml b/infrastructure/services/openai-chat-service/service.yaml
new file mode 100644
index 0000000..fa724be
--- /dev/null
+++ b/infrastructure/services/openai-chat-service/service.yaml
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: openai-chat-service
+  namespace: default
+spec:
+  selector:
+    app: openai-chat-service
+  ports:
+    - protocol: TCP
+      port: 8080
+      targetPort: http
+  type: LoadBalancer
\ No newline at end of file