chore(deployment): preparation to remove objects.py
idan-starkware committed Dec 17, 2024
1 parent 02520dc commit 9d9e090
Showing 12 changed files with 118 additions and 116 deletions.
1 change: 1 addition & 0 deletions config/sequencer/presets/config-batcher.json
@@ -4,4 +4,5 @@
"strk_fee_token_address": "0x7",
"batcher_config.storage.db_config.path_prefix": "/data",
"batcher_config.storage.db_config.enforce_file_exists": false,
"validator_id" : "0x1"
}
1 change: 0 additions & 1 deletion deployments/images/base/Dockerfile
@@ -7,7 +7,6 @@
# More info on Cargo Chef: https://github.com/LukeMathWalker/cargo-chef

FROM ubuntu:22.04 AS base
# WORKDIR /app

COPY scripts/install_build_tools.sh .
COPY scripts/dependencies.sh .
11 changes: 6 additions & 5 deletions deployments/images/sequencer/Dockerfile
@@ -1,21 +1,22 @@
# syntax = devthefuture/dockerfile-x
#syntax = devthefuture/dockerfile-x

INCLUDE deployments/images/base/Dockerfile


# Compile the sequencer_node crate in release mode, ensuring dependencies are locked.
FROM base AS builder
WORKDIR /app
COPY . .
RUN cargo build --release --package starknet_sequencer_node
RUN cargo build --bin starknet_sequencer_node

FROM base AS sequencer

ENV ID=1000
WORKDIR /app
COPY --from=builder /target/release/starknet_sequencer_node /app/target/release/starknet_sequencer_node
COPY --from=builder /app/target/debug/starknet_sequencer_node ./target/debug/starknet_sequencer_node

# Copy sequencer config
COPY config/sequencer config/sequencer
COPY config/sequencer/default_config.json /config/sequencer/

# Create a new user "sequencer".
RUN set -ex; \
@@ -30,4 +31,4 @@ EXPOSE 8080 8081 8082
USER ${ID}

# Set the entrypoint to use tini to manage the process.
ENTRYPOINT ["tini", "--", "/app/target/release/starknet_sequencer_node"]
ENTRYPOINT ["tini", "--", "/app/target/debug/starknet_sequencer_node"]
178 changes: 88 additions & 90 deletions deployments/sequencer/app/service.py
@@ -1,11 +1,10 @@
import json
import typing

from itertools import chain
from constructs import Construct
from cdk8s import Names, ApiObjectMetadata
from cdk8s import Names
from imports import k8s
from imports.com.google import cloud as google
from imports.k8s import IngressTls

from services import topology, const

@@ -17,22 +16,23 @@ def __init__(
id: str,
*,
namespace: str,
topology: topology.ServiceTopology,
service_topology: topology.ServiceTopology,
):
super().__init__(scope, id)

self.namespace = namespace
self.label = {"app": Names.to_label_value(self, include_hash=False)}
self.topology = topology
self.node_config = topology.config.get_config()
self.host = f"{self.node.id}.{self.namespace}.sw-dev.io"
self.service_topology = service_topology
self.node_config = service_topology.config.get_merged_config()

k8s.KubeNamespace(self, "namespace", metadata=k8s.ObjectMeta(name=self.namespace))

k8s.KubeConfigMap(
self,
"configmap",
metadata=k8s.ObjectMeta(name=f"{self.node.id}-config"),
data=dict(config=json.dumps(self.topology.config.get_config())),
data=dict(config=json.dumps(self.service_topology.config.get_config())),
)

k8s.KubeService(
@@ -49,64 +49,27 @@
self,
"deployment",
spec=k8s.DeploymentSpec(
replicas=self.topology.deployment.replicas,
replicas=self.service_topology.deployment.replicas,
selector=k8s.LabelSelector(match_labels=self.label),
template=k8s.PodTemplateSpec(
metadata=k8s.ObjectMeta(labels=self.label),
spec=k8s.PodSpec(
security_context=k8s.PodSecurityContext(fs_group=1000),
volumes=self._get_volumes(),
containers=[
k8s.Container(
name=self.node.id,
image=container.image,
image=self.service_topology.images.get("sequencer"),
image_pull_policy="IfNotPresent",
# command=["sleep", "infinity"],
args=container.args,
args=const.CONTAINER_ARGS,
ports=self._get_container_ports(),
startup_probe=self._get_http_probe(),
readiness_probe=self._get_http_probe(),
liveness_probe=self._get_http_probe(),
volume_mounts=[
k8s.VolumeMount(
name=mount.name,
mount_path=mount.mount_path,
read_only=mount.read_only,
)
for mount in container.volume_mounts
],
volume_mounts=self._get_volume_mounts(),
)
for container in self.topology.deployment.containers
],
volumes=list(
chain(
(
(
k8s.Volume(
name=f"{self.node.id}-{volume.name}",
config_map=k8s.ConfigMapVolumeSource(
name=f"{self.node.id}-{volume.name}"
),
)
for volume in self.topology.deployment.configmap_volumes
)
if self.topology.deployment.configmap_volumes is not None
else None
),
(
(
k8s.Volume(
name=f"{self.node.id}-{volume.name}",
persistent_volume_claim=k8s.PersistentVolumeClaimVolumeSource(
claim_name=f"{self.node.id}-{volume.name}",
read_only=volume.read_only,
),
)
for volume in self.topology.deployment.pvc_volumes
)
if self.topology.deployment is not None
else None
),
)
),
),
),
),
@@ -118,37 +81,18 @@
metadata=k8s.ObjectMeta(
name=f"{self.node.id}-ingress",
labels=self.label,
annotations=self.topology.ingress.annotations,
annotations={
"kubernetes.io/tls-acme": "true",
"cert-manager.io/common-name": self.host,
"cert-manager.io/issue-temporary-certificate": "true",
"cert-manager.io/issuer": "letsencrypt-prod",
"acme.cert-manager.io/http01-edit-in-place": "true",
},
),
spec=k8s.IngressSpec(
ingress_class_name=self.topology.ingress.class_name,
tls=[
k8s.IngressTls(hosts=tls.hosts, secret_name=f"{self.node.id}-tls")
for tls in self.topology.ingress.tls or []
],
rules=[
k8s.IngressRule(
host=rule.host,
http=k8s.HttpIngressRuleValue(
paths=[
k8s.HttpIngressPath(
path=path.path,
path_type=path.path_type,
backend=k8s.IngressBackend(
service=k8s.IngressServiceBackend(
name=path.backend_service_name,
port=k8s.ServiceBackendPort(
number=path.backend_service_port_number
),
)
),
)
for path in rule.paths or []
]
),
)
for rule in self.topology.ingress.rules or []
],
ingress_class_name="premium-rwo",
tls=self._get_ingress_tls(),
rules=self._get_ingress_rules()
),
)

@@ -157,11 +101,11 @@ def __init__(
"pvc",
metadata=k8s.ObjectMeta(name=f"{self.node.id}-data", labels=self.label),
spec=k8s.PersistentVolumeClaimSpec(
storage_class_name=self.topology.pvc.storage_class_name,
access_modes=self.topology.pvc.access_modes,
volume_mode=self.topology.pvc.volume_mode,
storage_class_name=self.service_topology.pvc.storage_class_name,
access_modes=self.service_topology.pvc.access_modes,
volume_mode=self.service_topology.pvc.volume_mode,
resources=k8s.ResourceRequirements(
requests={"storage": k8s.Quantity.from_string(self.topology.pvc.storage)}
requests={"storage": k8s.Quantity.from_string(self.service_topology.pvc.storage)}
),
),
)
@@ -181,7 +125,7 @@ def _get_container_ports(self):
) for port in ["http_server_config.port", "monitoring_endpoint_config.port"]
]

def _get_container_resources(self):
def _get_container_resources(self): # TODO: implement method to calc resources based on config
pass

def _get_service_ports(self):
@@ -213,12 +157,66 @@ def _get_http_probe(
timeout_seconds=timeout_seconds,
)

def _get_ingress_rules(self):
pass
def _get_volume_mounts(self) -> typing.List[k8s.VolumeMount]:
return [
k8s.VolumeMount(
name=f"{self.node.id}-config",
mount_path="/config/sequencer/presets/",
read_only=True
),
k8s.VolumeMount(
name=f"{self.node.id}-data",
mount_path="/data",
read_only=False
)
]

def _get_ingress_paths(self):
pass
def _get_volumes(self):
return [
k8s.Volume(
name=f"{self.node.id}-config",
config_map=k8s.ConfigMapVolumeSource(
name=f"{self.node.id}-config"
)
),
k8s.Volume(
name=f"{self.node.id}-data",
persistent_volume_claim=k8s.PersistentVolumeClaimVolumeSource(
claim_name=f"{self.node.id}-data",
read_only=False
)
)
]

def _get_ingress_rules(self) -> typing.List[k8s.IngressRule]:
return [
k8s.IngressRule(
host=self.host,
http=k8s.HttpIngressRuleValue(
paths=[
k8s.HttpIngressPath(
path="/monitoring",
path_type="Prefix",
backend=k8s.IngressBackend(
service=k8s.IngressServiceBackend(
name=f"{self.node.id}-service",
port=k8s.ServiceBackendPort(
number=self._get_config_attr("monitoring_endpoint_config.port")
),
)
),
)
]
),
)
]

def _get_ingress_tls(self) -> typing.List[IngressTls]:
return [
k8s.IngressTls(
hosts=[self.host],
secret_name=f"{self.node.id}-tls"
)
]

def _get_ingress_tls(self):
pass

5 changes: 2 additions & 3 deletions deployments/sequencer/config/sequencer.py
@@ -10,7 +10,7 @@


class SequencerDevConfig(Config):
def __init__(self, mount_path: str, config_file_path: str = ""):
def __init__(self, config_file_path: str = ""):
super().__init__(
global_config=json.loads(
open(os.path.join(CONFIG_DIR, "default_config.json"), "r").read()
@@ -19,8 +19,7 @@ def __init__(self, mount_path: str, config_file_path: str = ""):
json.loads(open(os.path.join(CONFIG_DIR, "presets", "config.json"), "r").read())
if not config_file_path
else json.loads(open(os.path.abspath(config_file_path)).read())
),
mount_path=mount_path,
)
)

def validate(self):
Binary file modified deployments/sequencer/imports/k8s/_jsii/k8s@0.0.0.jsii.tgz
6 changes: 3 additions & 3 deletions deployments/sequencer/main.py
@@ -25,10 +25,10 @@ def __post_init__(self):

class SequencerNode(Chart):
def __init__(
self, scope: Construct, name: str, namespace: str, topology: topology.ServiceTopology
self, scope: Construct, name: str, namespace: str, service_topology: topology.ServiceTopology
):
super().__init__(scope, name, disable_resource_name_hashes=True, namespace=namespace)
self.service = ServiceApp(self, name, namespace=namespace, topology=topology)
self.service = ServiceApp(self, name, namespace=namespace, service_topology=service_topology)


def main():
@@ -45,7 +45,7 @@ def main():
scope=app,
name="sequencer-node",
namespace=args.namespace,
topology=system_preset,
service_topology=system_preset,
)

app.synth()
2 changes: 2 additions & 0 deletions deployments/sequencer/services/const.py
@@ -20,3 +20,5 @@ class ServiceType(str, Enum):
PROBE_FAILURE_THRESHOLD = 5
PROBE_PERIOD_SECONDS = 10
PROBE_TIMEOUT_SECONDS = 5

CONTAINER_ARGS = ["--config_file", "/config/sequencer/presets/config"]
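
For context, the new constant lines up with the configmap wiring added in app/service.py: the configmap's "config" key is projected as a file under /config/sequencer/presets/, which is the path the container arguments reference. A minimal sketch of that relationship (the names mirror the diff; the snippet itself is illustrative and not part of the commit):

# Illustrative only: how the mount path from _get_volume_mounts() and the
# configmap data key ("config") combine into the file passed via CONTAINER_ARGS.
CONFIG_MOUNT_PATH = "/config/sequencer/presets/"   # mount_path in _get_volume_mounts()
CONFIGMAP_DATA_KEY = "config"                      # data=dict(config=...) in KubeConfigMap
CONTAINER_ARGS = ["--config_file", CONFIG_MOUNT_PATH + CONFIGMAP_DATA_KEY]
assert CONTAINER_ARGS == ["--config_file", "/config/sequencer/presets/config"]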
6 changes: 4 additions & 2 deletions deployments/sequencer/services/objects.py
@@ -43,16 +43,18 @@ class PersistentVolumeClaim:
class Config:
global_config: Dict[Any, Any]
config: Dict[Any, Any]
mount_path: str

def _merged_config(self) -> Dict[Any, Any]:
_config = self.global_config.copy()
_config.update(self.config)
return _config

def get_config(self):
def get_merged_config(self):
return self._merged_config()

def get_config(self):
return self.config

def validate(self):
pass

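
A minimal usage sketch of the resulting split between get_config() (preset overrides only) and get_merged_config() (defaults merged with overrides). The class body mirrors the diff above; the example values are made up:

from dataclasses import dataclass
from typing import Any, Dict


@dataclass
class Config:
    global_config: Dict[Any, Any]
    config: Dict[Any, Any]

    def _merged_config(self) -> Dict[Any, Any]:
        _config = self.global_config.copy()
        _config.update(self.config)
        return _config

    def get_merged_config(self):
        return self._merged_config()

    def get_config(self):
        return self.config


cfg = Config(
    global_config={"validator_id": "0x0", "strk_fee_token_address": "0x7"},
    config={"validator_id": "0x1"},
)
print(cfg.get_config())         # {'validator_id': '0x1'} - overrides only
print(cfg.get_merged_config())  # defaults with the override applied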
7 changes: 3 additions & 4 deletions deployments/sequencer/services/topology.py
@@ -7,9 +7,9 @@

@dataclasses.dataclass
class ServiceTopology:
cluster_name: topology_helpers.cluster_name
images: topology_helpers.images

images: typing.Optional[typing.Mapping] = dataclasses.field(
default_factory=topology_helpers.get_images
)
deployment: typing.Optional[objects.Deployment] = dataclasses.field(
default_factory=topology_helpers.get_deployment
)
@@ -26,7 +26,6 @@ class ServiceTopology:
default_factory=topology_helpers.get_ingress
)


class SequencerDev(ServiceTopology):
pass

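Assuming every ServiceTopology field keeps a default factory as the visible hunks suggest (the config field is not shown in this diff), the dev preset can be built without arguments. A sketch of how the new images mapping would be consumed, not necessarily how main.py constructs the preset:

from services import topology

# Assumes default factories for all fields; SequencerDev inherits them unchanged.
system_preset = topology.SequencerDev()
sequencer_image = system_preset.images.get("sequencer")  # read by app/service.py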
