diff --git a/service_capacity_modeling/capacity_planner.py b/service_capacity_modeling/capacity_planner.py
index 118ef4b..497b92a 100644
--- a/service_capacity_modeling/capacity_planner.py
+++ b/service_capacity_modeling/capacity_planner.py
@@ -504,10 +504,8 @@ def _plan_certain(
             allowed_drives.update(hardware.drives.keys())
 
         # Get current instance object if exists
-        if desires.current_capacity.current_instance_type is not "":
-            for instance in hardware.instances.values():
-                if instance.name == desires.current_capacity.current_instance_type:
-                    desires.current_capacity.current_instance = instance
+        if desires.current_cluster_capacity:
+            desires.current_cluster_capacity.cluster_instance = hardware.instances[desires.current_cluster_capacity.cluster_instance_name]
 
         plans = []
         if model.run_hardware_simulation():
diff --git a/service_capacity_modeling/interface.py b/service_capacity_modeling/interface.py
index 27e354c..2424ef3 100644
--- a/service_capacity_modeling/interface.py
+++ b/service_capacity_modeling/interface.py
@@ -619,11 +619,11 @@ class DataShape(ExcludeUnsetModel):
     )
 
 
-class CurrentCapacity(ExcludeUnsetModel):
-    current_instance_type: str = ""
-    current_cluster_size: int = 0
-    current_instance: Instance = None  # type: ignore
-    cpu_utilization: Interval = certain_float(0.0)
+class CurrentClusterCapacity(ExcludeUnsetModel):
+    cluster_instance_name: str
+    cluster_instance: Optional[Instance]
+    cluster_instance_count: int
+    cpu_utilization: Interval
 
 
 class CapacityDesires(ExcludeUnsetModel):
@@ -641,7 +641,7 @@ class CapacityDesires(ExcludeUnsetModel):
     data_shape: DataShape = DataShape()
 
     # What is the current microarchitectural/system configuration of the system
-    current_capacity: CurrentCapacity = CurrentCapacity()
+    current_cluster_capacity: Optional[CurrentClusterCapacity]
 
     # When users are providing latency estimates, what is the typical
     # instance core frequency we are comparing to. Databases use i3s a lot
diff --git a/service_capacity_modeling/models/org/netflix/cassandra.py b/service_capacity_modeling/models/org/netflix/cassandra.py
index f5b7530..16f8d9a 100644
--- a/service_capacity_modeling/models/org/netflix/cassandra.py
+++ b/service_capacity_modeling/models/org/netflix/cassandra.py
@@ -78,11 +78,11 @@ def _estimate_cassandra_requirement(
     return the zonal capacity requirement
     """
     # Keep half of the cores free for background work (compaction, backup, repair)
-    if desires.current_capacity.cpu_utilization.high is not None \
-            and desires.current_capacity.current_instance is not None \
-            and required_cluster_size is not None:
-        needed_cores = (desires.current_capacity.current_instance.cpu * required_cluster_size *
-                        zones_per_region) * (desires.current_capacity.cpu_utilization.high / 20)
+    if desires.current_cluster_capacity is not None \
+            and desires.current_cluster_capacity.cluster_instance is not None \
+            and required_cluster_size is not None:
+        needed_cores = (desires.current_cluster_capacity.cluster_instance.cpu * required_cluster_size *
+                        zones_per_region) * (desires.current_cluster_capacity.cpu_utilization.high / 20)
     else:
         needed_cores = sqrt_staffed_cores(desires) * 2
     # Keep half of the bandwidth available for backup
diff --git a/tests/netflix/test_cassandra.py b/tests/netflix/test_cassandra.py
index 21d4a18..80c7709 100644
--- a/tests/netflix/test_cassandra.py
+++ b/tests/netflix/test_cassandra.py
@@ -1,5 +1,5 @@
 from service_capacity_modeling.capacity_planner import planner
-from service_capacity_modeling.interface import AccessConsistency, CurrentCapacity
+from service_capacity_modeling.interface import AccessConsistency, CurrentClusterCapacity
 from service_capacity_modeling.interface import CapacityDesires
 from service_capacity_modeling.interface import certain_float
 from service_capacity_modeling.interface import certain_int
@@ -310,9 +310,9 @@ def test_plan_certain():
     """
     worn_desire = CapacityDesires(
         service_tier=1,
-        current_capacity=CurrentCapacity(
-            current_instance_type="i4i.8xlarge",
-            current_cluster_size=8,
+        current_cluster_capacity=CurrentClusterCapacity(
+            cluster_instance_name="i4i.8xlarge",
+            cluster_instance_count=8,
             cpu_utilization=Interval(
                 low=10.12, mid=13.2, high=14.194801291058118, confidence=1
            ),
@@ -348,7 +348,6 @@ def test_plan_certain():
             "required_cluster_size": 8,
         },
     )
-    print(cap_plan)
 
     lr_clusters = cap_plan[0].candidate_clusters.zonal[0]
     assert lr_clusters.count == 8
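Note (illustrative, not part of the patch): the new sizing path in _estimate_cassandra_requirement scales the cores currently provisioned across the region by the ratio of the observed peak CPU utilization to a 20% utilization target; when the current cluster, its resolved cluster_instance, or required_cluster_size is missing, it falls back to sqrt_staffed_cores(desires) * 2. A standalone sketch of that arithmetic, using the i4i.8xlarge / 8-node scenario from test_plan_certain and an assumed 3 zones per region (all names below are local to the sketch, not library symbols):

    # Illustrative recomputation of the utilization-based core estimate above.
    instance_cpu = 32                # vCPUs per i4i.8xlarge node (assumed)
    required_cluster_size = 8        # nodes per zone, as in test_plan_certain
    zones_per_region = 3             # assumed zonal replication factor
    cpu_utilization_high = 14.194801291058118  # observed peak CPU %, from the test

    provisioned_cores = instance_cpu * required_cluster_size * zones_per_region
    # Scale so the same observed load would land at roughly 20% CPU,
    # leaving headroom for background work (compaction, backup, repair).
    needed_cores = provisioned_cores * (cpu_utilization_high / 20)

    print(provisioned_cores, round(needed_cores))  # 768 545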