Merge pull request #39 from maheshrayas/ignore-kube-saver
feat: Ignore downscale/upscale if ignore annotation is set
maheshrayas committed Apr 10, 2024
2 parents e450832 + d9ef0b3 commit f7a7eb2
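
Note: with this change, any resource annotated kubesaver.com/ignore: "true" is skipped by both the downscale and the upscale paths. Below is a minimal standalone sketch of that gate, assuming annotations arrive as an Option<BTreeMap<String, String>> as the diff suggests; is_ignored is an illustrative name, not the crate's API.

use std::collections::BTreeMap;

// Hypothetical helper mirroring the check added in scaling_machinery:
// any resource annotated kubesaver.com/ignore: "true" is left untouched.
fn is_ignored(annotations: Option<&BTreeMap<String, String>>) -> bool {
    annotations
        .and_then(|a| a.get("kubesaver.com/ignore"))
        .map(|v| v == "true")
        .unwrap_or(false)
}

fn main() {
    let mut annotated = BTreeMap::new();
    annotated.insert("kubesaver.com/ignore".to_string(), "true".to_string());
    assert!(is_ignored(Some(&annotated)));
    assert!(!is_ignored(None)); // unannotated resources are still scaled
}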
Showing 4 changed files with 139 additions and 61 deletions.
147 changes: 86 additions & 61 deletions src/downscaler/resource/common.rs
@@ -23,80 +23,105 @@ pub struct ScalingMachinery {
}

impl ScalingMachinery {
    fn should_downscale(&self) -> bool {
        if let Some(annotations) = self.annotations.as_ref() {
            if let Some(is_downscaled) = annotations.get("kubesaver.com/is_downscaled") {
                if is_downscaled == "false" {
                    return true;
                }
            }
        }
        false
    }

    fn should_upscale(&self) -> Option<i32> {
        if let Some(annotations) = self.annotations.as_ref() {
            if let Some(is_downscaled) = annotations.get("kubesaver.com/is_downscaled") {
                if is_downscaled == "true" {
                    if let Some(original_count) = annotations.get("kubesaver.com/original_count") {
                        if let Ok(scale_up) = original_count.parse::<i32>() {
                            return Some(scale_up);
                        }
                    }
                }
            }
        }
        None
    }

    async fn action_for_downscale(&self, c: Client) -> Result<Option<ScaledResources>, Error> {
        info!("downscaling {} : {}", &self.resource_type, &self.name);
        let patch_result = self
            .patching(
                c.clone(),
                &self.original_replicas,
                self.tobe_replicas,
                "true",
                self.scale_state.clone(),
            )
            .await?;
        Ok(Some(patch_result))
    }

    async fn action_for_upscale(
        &self,
        c: Client,
        scale_up: i32,
    ) -> Result<Option<ScaledResources>, Error> {
        info!("upscaling {} : {}", &self.resource_type, &self.name);
        let patch_result = self
            .patching(
                c.clone(),
                &scale_up.to_string(),
                Some(scale_up),
                "false",
                self.scale_state.clone(),
            )
            .await?;
        Ok(Some(patch_result))
    }

    fn should_downscale_first_time(&self) -> bool {
        self.annotations.is_none()
            || self
                .annotations
                .as_ref()
                .map_or(true, |a| a.get("kubesaver.com/is_downscaled").is_none())
    }

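    // Decision order below (the kubesaver.com/ignore gate is new in this PR):
    //   1. kubesaver.com/ignore == "true"         -> leave the resource alone
    //   2. downtime, no is_downscaled annotation  -> first-time downscale
    //   3. downtime, is_downscaled == "false"     -> downscale again
    //   4. uptime,   is_downscaled == "true"      -> upscale to original_count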
    pub async fn scaling_machinery(
        &self,
        c: Client,
        is_uptime: bool,
    ) -> Result<Option<ScaledResources>, Error> {
        // check if the resource has an annotation kubesaver.com/ignore:"true"
        if let Some(ignore_annotations) = self
            .annotations
            .as_ref()
            .and_then(|a| a.get("kubesaver.com/ignore"))
        {
            if ignore_annotations.eq("true") {
                return Ok(None);
            }
        }
        if !is_uptime {
            if self.should_downscale_first_time() {
                // first time action
                info!("downscaling {} : {}", &self.resource_type, &self.name);
                let patch_result = self
                    .patching(
                        c.clone(),
                        &self.original_replicas,
                        self.tobe_replicas,
                        "true",
                        self.scale_state.clone(),
                    )
                    .await?;
                return Ok(Some(patch_result));
            } else if self.should_downscale() {
                return self.action_for_downscale(c.clone()).await;
            }
        } else if let Some(scale_up) = self.should_upscale() {
            return self.action_for_upscale(c, scale_up).await;
        }
        Ok(None)
    }
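
Note: the refactor above reduces the flow to a small state machine over the kubesaver.com/is_downscaled and kubesaver.com/original_count annotations. The following self-contained sketch simulates that machine on plain data; step is a hypothetical stand-in for scaling_machinery, and the hard-coded Some(0) models the rule's configured target replicas, which the real code takes from tobe_replicas.

use std::collections::BTreeMap;

// Illustrative-only simulation: downtime records the original count and marks
// the resource downscaled; uptime restores the count and flips the flag back.
fn step(a: &mut BTreeMap<String, String>, is_uptime: bool, current: i32) -> Option<i32> {
    if a.get("kubesaver.com/ignore").map(String::as_str) == Some("true") {
        return None; // ignored resources are never touched
    }
    if !is_uptime {
        let first_time = !a.contains_key("kubesaver.com/is_downscaled");
        let marked_up = a.get("kubesaver.com/is_downscaled").map(String::as_str) == Some("false");
        if first_time || marked_up {
            a.insert("kubesaver.com/original_count".into(), current.to_string());
            a.insert("kubesaver.com/is_downscaled".into(), "true".into());
            return Some(0); // downscale to the configured replica count
        }
    } else if a.get("kubesaver.com/is_downscaled").map(String::as_str) == Some("true") {
        let original = a.get("kubesaver.com/original_count")?.parse().ok()?;
        a.insert("kubesaver.com/is_downscaled".into(), "false".into());
        return Some(original); // upscale back to the remembered count
    }
    None
}

fn main() {
    let mut a = BTreeMap::new();
    assert_eq!(step(&mut a, false, 2), Some(0)); // first downtime: 2 -> 0
    assert_eq!(step(&mut a, true, 0), Some(2));  // uptime: restored to 2
    assert_eq!(step(&mut a, false, 2), Some(0)); // next downtime: down again
}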
30 changes: 30 additions & 0 deletions tests/data/test-ignore-downscale-14.yaml
@@ -0,0 +1,30 @@
apiVersion: v1
kind: Namespace
metadata:
  name: kuber14
---
apiVersion: apps/v1
kind: Deployment
metadata:
  namespace: kuber14
  name: test-kuber14-deploy1
  annotations:
    kubesaver.com/ignore: "true"
  labels:
    app: go-app-kuber14
spec:
  replicas: 2
  selector:
    matchLabels:
      app: go-app
  template:
    metadata:
      labels:
        app: go-app
    spec:
      containers:
        - name: go-app
          image: maheshrayas/goapp:1.0
          ports:
            - containerPort: 8090
---
16 changes: 16 additions & 0 deletions tests/downscaler/processor.rs
@@ -348,3 +348,19 @@ async fn test4_hpa_scale_all_resources_replicas_1() {
    //Deployment must be scaled back to original replicas
    assert_eq!(d.spec.unwrap().replicas, Some(3));
}

#[tokio::test]
async fn test5_check_if_ignored() {
    let f = File::open("tests/rules/rules14.yaml").unwrap();
    let r: Rules = serde_yaml::from_reader(f).unwrap();
    let client = Client::try_default()
        .await
        .expect("Failed to read kubeconfig");
    r.process_rules(client.clone(), None, None, SCALED_STATE.clone())
        .await
        .ok();
    // kube-saver must ignore this deployment
    let api: Api<Deployment> = Api::namespaced(client.clone(), "kuber14");
    let d = api.get("test-kuber14-deploy1").await.unwrap();
    assert_eq!(d.spec.unwrap().replicas, Some(2));
}
7 changes: 7 additions & 0 deletions tests/rules/rules14.yaml
@@ -0,0 +1,7 @@
rules:
  - id: rules-downscale-kuber14
    uptime: Mon-Sun 22:59-23:00 Australia/Sydney
    jmespath: "metadata.name == 'kuber14'"
    resource:
      - Namespace
    replicas: 0
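
Note: the uptime window above is deliberately only one minute (22:59-23:00 Australia/Sydney), so the kuber14 namespace is in downtime almost all day and test5 exercises the downscale path; the deployment keeps its 2 replicas only because of its kubesaver.com/ignore annotation. As a quick parse check, the file round-trips through serde_yaml; a minimal sketch with field names assumed from the YAML keys above (the crate's real Rules type, imported by the test, may differ):

use serde::Deserialize;

// Field names are assumptions read off the YAML keys; the real types may differ.
#[derive(Debug, Deserialize)]
struct Rule {
    id: String,
    uptime: String,
    jmespath: String,
    resource: Vec<String>,
    replicas: i32,
}

#[derive(Debug, Deserialize)]
struct Rules {
    rules: Vec<Rule>,
}

fn main() {
    let f = std::fs::File::open("tests/rules/rules14.yaml").unwrap();
    let r: Rules = serde_yaml::from_reader(f).unwrap();
    assert_eq!(r.rules[0].id, "rules-downscale-kuber14");
    assert_eq!(r.rules[0].replicas, 0);
}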
