Skip to content

Commit

Permalink
Suppress extraneous updates of node status runtime readiness with no …
Browse files Browse the repository at this point in the history
…status change (#160)

* frugal node status runtime readiness updates

* refactored runtime service readiness setter to make it consistent with other setters

* initialize runtime condition only if runtime status is probed

* initialize runtime conditions with unknown readiness

* when initializing, the order of runtime conditions matters - vm first, container next

Co-authored-by: Howell Chen <howell@example.test>
Co-authored-by: Xiaoning Ding <xiaoning.ding@futurewei.com>
  • Loading branch information
3 people authored Apr 5, 2020
1 parent 096725b commit ede0558
Show file tree
Hide file tree
Showing 2 changed files with 154 additions and 69 deletions.
125 changes: 56 additions & 69 deletions pkg/kubelet/nodestatus/setters.go
Original file line number Diff line number Diff line change
Expand Up @@ -536,99 +536,86 @@ func RuntimeServiceCondition(nowFunc func() time.Time, // typically Kubelet.cloc
) Setter {
return func(node *v1.Node) error {
currentTime := metav1.NewTime(nowFunc())
var containerRuntimeCondition *v1.NodeCondition
var vmRuntimeCondition *v1.NodeCondition

// Check if node runtime ready condition already exists and if it does, just pick it up for update.
for i := range node.Status.Conditions {
if node.Status.Conditions[i].Type == v1.NodeContainerRuntimeReady {
containerRuntimeCondition = &node.Status.Conditions[i]
}
if node.Status.Conditions[i].Type == v1.NodeVmRuntimeReady {
vmRuntimeCondition = &node.Status.Conditions[i]
}
}

newContainerCondition := false
newVmCondition := false

// If the NodeVmRuntimeReady condition doesn't exist, create one
if vmRuntimeCondition == nil {
vmRuntimeCondition = &v1.NodeCondition{
Type: v1.NodeVmRuntimeReady,
Status: v1.ConditionUnknown,
}
newVmCondition = true
}

if containerRuntimeCondition == nil {
containerRuntimeCondition = &v1.NodeCondition{
Type: v1.NodeContainerRuntimeReady,
Status: v1.ConditionUnknown,
}
newContainerCondition = true
}

// Update the heartbeat time
containerRuntimeCondition.LastHeartbeatTime = currentTime
vmRuntimeCondition.LastHeartbeatTime = currentTime

runtimeStatuses, err := runtimeServiceStateFunc()
if err != nil {
return err
}

// todo: we may need to lower log level to reduce unimportant logging data in production
klog.Infof("runtime service status map: %v", runtimeStatuses)

// get the runtime status by workload types
// get the runtime status of container & vm
var containerRuntimeStatus, vmRuntimeStatus map[string]bool
for workloadType, runtimeServicesStatus := range runtimeStatuses {
klog.Infof("runtime service [%s] map: [%v]", workloadType, runtimeServicesStatus)
switch {
case workloadType == "container":
containerRuntimeCondition = getCurrentRuntimeReadiness(containerRuntimeCondition, workloadType, runtimeServicesStatus,
recordEventFunc, currentTime)

containerRuntimeStatus = runtimeServicesStatus
case workloadType == "vm":
vmRuntimeCondition = getCurrentRuntimeReadiness(vmRuntimeCondition, workloadType, runtimeServicesStatus,
recordEventFunc, currentTime)
vmRuntimeStatus = runtimeServicesStatus
}
}

if newVmCondition {
vmRuntimeCondition.LastTransitionTime = currentTime
node.Status.Conditions = append(node.Status.Conditions, *vmRuntimeCondition)
}
if newContainerCondition {
containerRuntimeCondition.LastTransitionTime = currentTime
node.Status.Conditions = append(node.Status.Conditions, *containerRuntimeCondition)
// subConditionSetter updates (or creates) one runtime-readiness node
// condition for the given workload type. Status, Reason and
// LastTransitionTime are only written when readiness actually changes, so
// repeated probes with an unchanged result refresh nothing but the
// heartbeat — suppressing extraneous node status updates.
subConditionSetter := func(node *v1.Node, conditionType v1.NodeConditionType, workloadType string, runtimeStatus map[string]bool) {
	var condition *v1.NodeCondition

	// Check if node runtime ready condition already exists and if it does, just pick it up for update.
	for i := range node.Status.Conditions {
		if node.Status.Conditions[i].Type == conditionType {
			condition = &node.Status.Conditions[i]
		}
	}

	newCondition := false
	// If the specified condition doesn't exist, create one with unknown readiness.
	if condition == nil {
		condition = &v1.NodeCondition{
			Type:               conditionType,
			Status:             v1.ConditionUnknown,
			LastTransitionTime: currentTime,
		}

		newCondition = true
	}

	// Update the heartbeat time on every pass, even with no status change.
	condition.LastHeartbeatTime = currentTime

	// A nil runtimeStatus means this workload type was not probed this
	// round; leave the condition's readiness untouched.
	if runtimeStatus != nil {
		if runtimeIsReady(runtimeStatus) {
			if condition.Status != v1.ConditionTrue {
				condition.Status = v1.ConditionTrue
				condition.Reason = fmt.Sprintf("At least one %s runtime is ready", workloadType)
				condition.LastTransitionTime = currentTime
				recordEventFunc(v1.EventTypeNormal, fmt.Sprintf("%s is ready", conditionType))
			}
		} else if condition.Status != v1.ConditionFalse {
			// Fix: the status assignment was duplicated here; assign once.
			condition.Status = v1.ConditionFalse
			condition.Reason = fmt.Sprintf("None of %s runtime is ready", workloadType)
			condition.LastTransitionTime = currentTime
			recordEventFunc(v1.EventTypeNormal, fmt.Sprintf("%s is not ready", conditionType))
		}
	}

	// Freshly created conditions are appended once, after being filled in.
	if newCondition {
		node.Status.Conditions = append(node.Status.Conditions, *condition)
	}
}

subConditionSetter(node, v1.NodeVmRuntimeReady, "vm", vmRuntimeStatus)
subConditionSetter(node, v1.NodeContainerRuntimeReady, "container", containerRuntimeStatus)
return nil
}
}

func getCurrentRuntimeReadiness(runtimeCondition *v1.NodeCondition, workloadType string,
runtimeServiceStatus map[string]bool, recordEventFunc func(eventType, event string),
currentTime metav1.Time) *v1.NodeCondition {
statusSet := false
// runtimeIsReady reports whether at least one runtime service in the given
// status map (service name -> readiness) is ready. A nil or empty map
// yields false.
func runtimeIsReady(runtimeServiceStatus map[string]bool) bool {
	for _, status := range runtimeServiceStatus {
		// Idiomatic boolean test; `status == true` is flagged by staticcheck (S1002).
		if status {
			return true
		}
	}
	return false
}

// MemoryPressureCondition returns a Setter that updates the v1.NodeMemoryPressure condition on the node.
Expand Down
98 changes: 98 additions & 0 deletions pkg/kubelet/nodestatus/setters_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -52,6 +52,93 @@ const (
testKubeletHostname = "127.0.0.1"
)



// TestRuntimeServiceCondition verifies the runtime readiness setter:
// a fresh node gets Unknown conditions, an unchanged probe result only
// refreshes LastHeartbeatTime, and a changed result flips Status, Reason
// and LastTransitionTime.
func TestRuntimeServiceCondition(t *testing.T) {
	zeroTime := time.Time{}
	checkTime := time.Date(2019, 10, 24, 0, 0, 0, 0, time.UTC)
	recordEventFunc := func(eventType, event string) {}
	nowFunc := func() time.Time { return checkTime }

	cases := []struct {
		desc                    string
		node                    *v1.Node
		runtimeServiceStateFunc func() (map[string]map[string]bool, error)
		expectedConditions      []v1.NodeCondition
	}{
		{
			desc: "fresh empty node status should get unknown conditions",
			node: &v1.Node{
				Status: v1.NodeStatus{
					Conditions: []v1.NodeCondition{},
				},
			},
			runtimeServiceStateFunc: func() (map[string]map[string]bool, error) {
				return map[string]map[string]bool{}, nil
			},
			expectedConditions: []v1.NodeCondition{
				makeRuntimeServiceCondition("VmRuntimeReady", v1.ConditionUnknown, "", "", checkTime, checkTime),
				makeRuntimeServiceCondition("ContainerRuntimeReady", v1.ConditionUnknown, "", "", checkTime, checkTime),
			},
		},
		{
			desc: "condition should keep the same except for LastHeartbeatTime",
			node: &v1.Node{
				Status: v1.NodeStatus{
					Conditions: []v1.NodeCondition{
						makeRuntimeServiceCondition("VmRuntimeReady", v1.ConditionFalse, "test runtime turned off", "", zeroTime, zeroTime),
						makeRuntimeServiceCondition("ContainerRuntimeReady", v1.ConditionTrue, "test runtime turned on", "", zeroTime, zeroTime),
					},
				},
			},
			runtimeServiceStateFunc: func() (map[string]map[string]bool, error) {
				return map[string]map[string]bool{
					"container": {"fake-container": true},
					"vm":        {"fake-vm": false},
				}, nil
			},
			expectedConditions: []v1.NodeCondition{
				makeRuntimeServiceCondition("VmRuntimeReady", v1.ConditionFalse, "test runtime turned off", "", zeroTime, checkTime),
				makeRuntimeServiceCondition("ContainerRuntimeReady", v1.ConditionTrue, "test runtime turned on", "", zeroTime, checkTime),
			},
		},
		{
			desc: "condition should all change",
			node: &v1.Node{
				Status: v1.NodeStatus{
					Conditions: []v1.NodeCondition{
						makeRuntimeServiceCondition("ContainerRuntimeReady", v1.ConditionTrue, "test runtime turned on", "", zeroTime, zeroTime),
						makeRuntimeServiceCondition("VmRuntimeReady", v1.ConditionFalse, "test runtime turned off", "", zeroTime, zeroTime),
					},
				},
			},
			runtimeServiceStateFunc: func() (map[string]map[string]bool, error) {
				return map[string]map[string]bool{
					"container": {"fake-container": false},
					"vm":        {"fake-vm": true},
				}, nil
			},
			expectedConditions: []v1.NodeCondition{
				makeRuntimeServiceCondition("ContainerRuntimeReady", v1.ConditionFalse, "None of container runtime is ready", "", checkTime, checkTime),
				makeRuntimeServiceCondition("VmRuntimeReady", v1.ConditionTrue, "At least one vm runtime is ready", "", checkTime, checkTime),
			},
		},
	}

	for _, c := range cases {
		t.Run(c.desc, func(t *testing.T) {
			setter := RuntimeServiceCondition(nowFunc, c.runtimeServiceStateFunc, recordEventFunc)
			if err := setter(c.node); err != nil {
				// Fix: the original used Ruby-style "#{err}" interpolation,
				// which Go prints literally; use %v with the error argument.
				t.Fatalf("unexpected error: %v", err)
			}

			assert.True(t, apiequality.Semantic.DeepEqual(c.expectedConditions, c.node.Status.Conditions), "%s", diff.ObjectDiff(c.expectedConditions, c.node.Status.Conditions))
		})
	}
}

// TODO(mtaufen): below is ported from the old kubelet_node_status_test.go code, potentially add more test coverage for NodeAddress setter in future
func TestNodeAddress(t *testing.T) {
cases := []struct {
Expand Down Expand Up @@ -1717,3 +1804,14 @@ func makeDiskPressureCondition(pressure bool, transition, heartbeat time.Time) *
LastHeartbeatTime: metav1.NewTime(heartbeat),
}
}

// makeRuntimeServiceCondition builds a v1.NodeCondition for runtime
// readiness tests, converting the supplied transition and heartbeat
// instants into metav1.Time values.
func makeRuntimeServiceCondition(typ v1.NodeConditionType, status v1.ConditionStatus, reason, message string, transition, heartbeat time.Time) v1.NodeCondition {
	condition := v1.NodeCondition{
		Type:    typ,
		Status:  status,
		Reason:  reason,
		Message: message,
	}
	condition.LastTransitionTime = metav1.NewTime(transition)
	condition.LastHeartbeatTime = metav1.NewTime(heartbeat)
	return condition
}

0 comments on commit ede0558

Please sign in to comment.