Commit 07b218b

Merge 6c749ce into 3125877
jbtk authored Jul 9, 2024
2 parents 3125877 + 6c749ce commit 07b218b
Showing 6 changed files with 178 additions and 1,744 deletions.
cluster/gce/manifests/cluster-autoscaler.manifest (2 changes: 1 addition & 1 deletion)

@@ -22,7 +22,7 @@
       "containers": [
         {
           "name": "cluster-autoscaler",
-          "image": "registry.k8s.io/autoscaling/cluster-autoscaler:v1.26.1",
+          "image": "registry.k8s.io/autoscaling/cluster-autoscaler:v1.30.0",
           "livenessProbe": {
             "httpGet": {
               "path": "/health-check",
test/e2e/autoscaling/autoscaling_timer.go (45 changes: 14 additions & 31 deletions)

@@ -18,7 +18,6 @@ package autoscaling

 import (
 	"context"
-	"strings"
 	"time"

 	v1 "k8s.io/api/core/v1"
@@ -31,7 +30,6 @@ import (
 	admissionapi "k8s.io/pod-security-admission/api"

 	"github.com/onsi/ginkgo/v2"
-	"github.com/onsi/gomega"
 	"github.com/onsi/gomega/gmeasure"
 )
@@ -52,39 +50,22 @@ var _ = SIGDescribe(feature.ClusterSizeAutoscalingScaleUp, framework.WithSlow(),
 	})

 	ginkgo.Context("from 1 pod and 3 nodes to 8 pods and >=4 nodes", func() {
-		const nodesNum = 3       // Expect there to be 3 nodes before and after the test.
-		var nodeGroupName string // Set by BeforeEach, used by AfterEach to scale this node group down after the test.
-		var nodes *v1.NodeList   // Set by BeforeEach, used by Measure to calculate CPU request based on node's sizes.
+		const nodesNum = 3 // Expect there to be 3 nodes before and after the test.

 		ginkgo.BeforeEach(func(ctx context.Context) {
-			// Make sure there is only 1 node group, otherwise this test becomes useless.
-			nodeGroups := strings.Split(framework.TestContext.CloudConfig.NodeInstanceGroup, ",")
-			if len(nodeGroups) != 1 {
-				e2eskipper.Skipf("test expects 1 node group, found %d", len(nodeGroups))
-			}
-			nodeGroupName = nodeGroups[0]
-
-			// Make sure the node group has exactly 'nodesNum' nodes, otherwise this test becomes useless.
-			nodeGroupSize, err := framework.GroupSize(nodeGroupName)
-			framework.ExpectNoError(err)
-			if nodeGroupSize != nodesNum {
-				e2eskipper.Skipf("test expects %d nodes, found %d", nodesNum, nodeGroupSize)
-			}
-
 			// Make sure all nodes are schedulable, otherwise we are in some kind of a problem state.
-			nodes, err = e2enode.GetReadySchedulableNodes(ctx, f.ClientSet)
+			nodes, err := e2enode.GetReadySchedulableNodes(ctx, f.ClientSet)
 			framework.ExpectNoError(err)
-			gomega.Expect(nodes.Items).To(gomega.HaveLen(nodeGroupSize), "not all nodes are schedulable")
-		})
-
-		ginkgo.AfterEach(func(ctx context.Context) {
-			// Attempt cleanup only if a node group was targeted for scale up.
-			// Otherwise the test was probably skipped and we'll get a gcloud error due to invalid parameters.
-			if len(nodeGroupName) > 0 {
-				// Scale down back to only 'nodesNum' nodes, as expected at the start of the test.
-				framework.ExpectNoError(framework.ResizeGroup(nodeGroupName, nodesNum))
-				framework.ExpectNoError(e2enode.WaitForReadyNodes(ctx, f.ClientSet, nodesNum, 15*time.Minute))
+			nodeCount := len(nodes.Items)
+			if nodeCount != nodesNum {
+				e2eskipper.Skipf("test expects %d schedulable nodes, found %d", nodesNum, nodeCount)
 			}
+			// As the last deferred cleanup ensure that the state is restored.
+			// AfterEach does not allow for this because it runs before other deferred
+			// cleanups happen, and they are blocking cluster restoring its initial size.
+			ginkgo.DeferCleanup(func(ctx context.Context) {
+				ginkgo.By("Waiting for scale down after test")
+				framework.ExpectNoError(e2enode.WaitForReadyNodes(ctx, f.ClientSet, nodeCount, 15*time.Minute))
+			})
 		})

 		ginkgo.It("takes less than 15 minutes", func(ctx context.Context) {
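Note on the hunk above: the scale-down wait moves from an AfterEach into a DeferCleanup registered in BeforeEach. Per the new comment (and Ginkgo v2's documented ordering), DeferCleanup callbacks run after the AfterEach nodes, in last-in, first-out order, so a cleanup registered before the spec body runs after the cleanups the spec itself registers later (such as deleting the test workload), letting the cluster actually scale down before the wait begins. A minimal, self-contained sketch of that ordering; the package, spec names, and print statements are illustrative, not from this commit:

// ordering_test.go: a sketch of Ginkgo v2 cleanup ordering (illustrative).
package ordering_test

import (
	"fmt"
	"testing"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
)

func TestCleanupOrdering(t *testing.T) {
	gomega.RegisterFailHandler(ginkgo.Fail)
	ginkgo.RunSpecs(t, "cleanup ordering")
}

var _ = ginkgo.Describe("DeferCleanup ordering", func() {
	ginkgo.BeforeEach(func() {
		// Registered first, so it runs last among deferred cleanups: by then
		// the spec's later cleanups (the workload deletion below) have already
		// run, mirroring the "wait for scale down" step in this commit.
		ginkgo.DeferCleanup(func() { fmt.Println("3: wait for scale down") })
	})

	ginkgo.AfterEach(func() {
		// AfterEach runs before the DeferCleanup callbacks, which is why the
		// old AfterEach-based wait started too early.
		fmt.Println("1: AfterEach")
	})

	ginkgo.It("runs cleanups last-in, first-out", func() {
		// Registered last, so it runs first, like deleting the test's pods.
		ginkgo.DeferCleanup(func() { fmt.Println("2: delete workload") })
	})
})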
Expand All @@ -94,6 +75,8 @@ var _ = SIGDescribe(feature.ClusterSizeAutoscalingScaleUp, framework.WithSlow(),
// Calculate the CPU request of the service.
// This test expects that 8 pods will not fit in 'nodesNum' nodes, but will fit in >='nodesNum'+1 nodes.
// Make it so that 'nodesNum' pods fit perfectly per node.
nodes, err := e2enode.GetReadySchedulableNodes(ctx, f.ClientSet)
framework.ExpectNoError(err)
nodeCpus := nodes.Items[0].Status.Allocatable[v1.ResourceCPU]
nodeCPUMillis := (&nodeCpus).MilliValue()
cpuRequestMillis := int64(nodeCPUMillis / nodesNum)
Expand Down
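To put numbers on the request calculation above: the per-pod request is one third of a node's allocatable CPU, so at most three such pods fit on a completely empty node; in practice system pods already consume part of each node's allocatable, so fewer fit and the eight replicas force a fourth node (hence ">='nodesNum'+1 nodes"). A standalone sketch of the arithmetic, assuming a hypothetical node with 4 allocatable CPUs (the real value comes from Status.Allocatable):

// A sketch of the test's request math; the 4-CPU node is an assumed example.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	const nodesNum = 3

	nodeCpus := resource.MustParse("4") // hypothetical allocatable CPU
	nodeCPUMillis := nodeCpus.MilliValue()

	// Same formula as the test: 'nodesNum' pods fit perfectly per node.
	cpuRequestMillis := nodeCPUMillis / nodesNum

	fmt.Printf("per pod: %dm, 8 pods: %dm, %d nodes: %dm\n",
		cpuRequestMillis,        // 1333m
		8*cpuRequestMillis,      // 10664m requested in total
		nodesNum,
		nodesNum*nodeCPUMillis,  // 12000m on paper, less once system pods are counted
	)
}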
(Diffs for the remaining 4 changed files were not loaded.)

0 comments on commit 07b218b
