Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

Run e2e test in parallel #767

Open
wants to merge 7 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
14 changes: 10 additions & 4 deletions e2e/Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -2,11 +2,13 @@ KIND_VERSION = 0.23.0
KUBERNETES_VERSION = 1.31.0
CERT_MANAGER_VERSION = 1.15.3
MYSQL_VERSION = 8.4.3
GINKGO_VERSION = 2.20.2

KIND := $(dir $(shell pwd))/bin/kind
KUBECTL := $(dir $(shell pwd))/bin/kubectl
KUSTOMIZE := $(dir $(shell pwd))/bin/kustomize
KUBECTL_MOCO := $(dir $(shell pwd))/bin/kubectl-moco
GINKGO := $(dir $(shell pwd))/bin/ginkgo
KUBECONFIG := $(shell pwd)/.kubeconfig
export MYSQL_VERSION KUBECTL KUBECONFIG

Expand Down Expand Up @@ -53,14 +55,14 @@ endif
$(KUBECTL) wait --timeout=90s --for=condition=Ready --all pods

.PHONY: test
test:
test: $(GINKGO)
env PATH="$$(pwd)/../bin:$$PATH" RUN_E2E=1 \
go test -v -race -timeout 90m . -ginkgo.v -ginkgo.fail-fast -ginkgo.randomize-all -ginkgo.timeout 90m
$(GINKGO) -v --procs 5 --fail-fast --randomize-all --timeout 90m .

.PHONY: test-upgrade
test-upgrade:
test-upgrade: $(GINKGO)
env PATH=$$(pwd)/../bin:$$PATH RUN_E2E=1 UPGRADE=1 \
go test -v -race -timeout 15m . -ginkgo.v -ginkgo.fail-fast -ginkgo.randomize-all
$(GINKGO) -v --procs 5 --fail-fast --randomize-all --timeout 90m .

.PHONY: logs
logs:
Expand Down Expand Up @@ -92,3 +94,7 @@ $(KUSTOMIZE):
$(KUBECTL_MOCO):
mkdir -p ../bin
cd ..; GOBIN=$$(pwd)/bin go install ./cmd/kubectl-moco

# Install the pinned ginkgo CLI (version GINKGO_VERSION, declared at the top
# of this Makefile) into ../bin via GOBIN, so the `test` and `test-upgrade`
# targets can run suites in parallel with `--procs`.
# NOTE(review): recipe lines must begin with a hard TAB in the real file; the
# diff rendering here has stripped leading whitespace — verify against source.
$(GINKGO):
mkdir -p ../bin
cd ..; GOBIN=$$(pwd)/bin go install github.com/onsi/ginkgo/v2/ginkgo@v$(GINKGO_VERSION)
45 changes: 24 additions & 21 deletions e2e/backup_with_env_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,9 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

//go:embed testdata/makebucket_with_env.yaml
var makeBucketWithEnvYAML string

//go:embed testdata/backup_with_env.yaml
var backupWithEnvYAML string

Expand All @@ -31,66 +34,66 @@ var _ = Context("backup with ObjectBucketName is set in environments variables",
var restorePoint time.Time

It("should create a bucket", func() {
kubectlSafe([]byte(makeBucketYAML), "apply", "-f", "-")
kubectlSafe([]byte(makeBucketWithEnvYAML), "apply", "-f", "-")
Eventually(func(g Gomega) {
out, err := kubectl(nil, "get", "jobs", "make-bucket", "-o", "json")
out, err := kubectl(nil, "get", "jobs", "make-bucket-with-env", "-o", "json")
g.Expect(err).NotTo(HaveOccurred())
job := &batchv1.Job{}
err = json.Unmarshal(out, job)
g.Expect(err).NotTo(HaveOccurred())
condComplete, err := getJobCondition(job, batchv1.JobComplete)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(condComplete.Status).To(Equal(corev1.ConditionTrue), "make-bucket has not been finished")
g.Expect(condComplete.Status).To(Equal(corev1.ConditionTrue), "make-bucket-with-env has not been finished")
}).Should(Succeed())
})

It("should construct a source cluster", func() {
kubectlSafe(fillTemplate(backupWithEnvYAML), "apply", "-f", "-")
Eventually(func(g Gomega) {
cluster, err := getCluster("backup", "source")
cluster, err := getCluster("backup-with-env", "source")
g.Expect(err).NotTo(HaveOccurred())
condHealthy, err := getClusterCondition(cluster, mocov1beta2.ConditionHealthy)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(condHealthy.Status).To(Equal(metav1.ConditionTrue))
}).Should(Succeed())

kubectlSafe(nil, "moco", "-n", "backup", "mysql", "-u", "moco-writable", "source", "--",
kubectlSafe(nil, "moco", "-n", "backup-with-env", "mysql", "-u", "moco-writable", "source", "--",
"-e", "CREATE DATABASE test")
kubectlSafe(nil, "moco", "-n", "backup", "mysql", "-u", "moco-writable", "source", "--",
kubectlSafe(nil, "moco", "-n", "backup-with-env", "mysql", "-u", "moco-writable", "source", "--",
"-D", "test", "-e", "CREATE TABLE t (id INT NOT NULL AUTO_INCREMENT, data VARCHAR(32) NOT NULL, PRIMARY KEY (id), KEY key1 (data), KEY key2 (data, id)) ENGINE=InnoDB")
kubectlSafe(nil, "moco", "-n", "backup", "mysql", "-u", "moco-writable", "source", "--",
kubectlSafe(nil, "moco", "-n", "backup-with-env", "mysql", "-u", "moco-writable", "source", "--",
"-D", "test", "--init_command=SET autocommit=1", "-e", "INSERT INTO t (data) VALUES ('aaa')")
})

It("should take a full dump", func() {
kubectlSafe(nil, "-n", "backup", "create", "job", "--from=cronjob/moco-backup-source", "backup-1")
kubectlSafe(nil, "-n", "backup-with-env", "create", "job", "--from=cronjob/moco-backup-source", "backup-with-env-1")
Eventually(func(g Gomega) {
out, err := kubectl(nil, "-n", "backup", "get", "jobs", "backup-1", "-o", "json")
out, err := kubectl(nil, "-n", "backup-with-env", "get", "jobs", "backup-with-env-1", "-o", "json")
g.Expect(err).NotTo(HaveOccurred())
job := &batchv1.Job{}
err = json.Unmarshal(out, job)
g.Expect(err).NotTo(HaveOccurred())
condComplete, err := getJobCondition(job, batchv1.JobComplete)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(condComplete.Status).To(Equal(corev1.ConditionTrue), "backup-1 has not been finished")
g.Expect(condComplete.Status).To(Equal(corev1.ConditionTrue), "backup-with-env-1 has not been finished")
}).Should(Succeed())
})

It("should take an incremental backup", func() {
kubectlSafe(nil, "moco", "-n", "backup", "mysql", "-u", "moco-writable", "source", "--",
kubectlSafe(nil, "moco", "-n", "backup-with-env", "mysql", "-u", "moco-writable", "source", "--",
"-D", "test", "--init_command=SET autocommit=1", "-e", "INSERT INTO t (data) VALUES ('bbb')")
time.Sleep(1100 * time.Millisecond)
restorePoint = time.Now().UTC()
time.Sleep(1100 * time.Millisecond)
kubectlSafe(nil, "moco", "-n", "backup", "mysql", "-u", "moco-admin", "source", "--",
kubectlSafe(nil, "moco", "-n", "backup-with-env", "mysql", "-u", "moco-admin", "source", "--",
"-D", "test", "--init_command=SET autocommit=1", "-e", "FLUSH LOCAL BINARY LOGS")
kubectlSafe(nil, "moco", "-n", "backup", "mysql", "-u", "moco-writable", "source", "--",
kubectlSafe(nil, "moco", "-n", "backup-with-env", "mysql", "-u", "moco-writable", "source", "--",
"-D", "test", "--init_command=SET autocommit=1", "-e", "INSERT INTO t (data) VALUES ('ccc')")
time.Sleep(100 * time.Millisecond)

kubectlSafe(nil, "-n", "backup", "create", "job", "--from=cronjob/moco-backup-source", "backup-2")
kubectlSafe(nil, "-n", "backup-with-env", "create", "job", "--from=cronjob/moco-backup-source", "backup-2")
Eventually(func(g Gomega) {
out, err := kubectl(nil, "-n", "backup", "get", "jobs", "backup-2", "-o", "json")
out, err := kubectl(nil, "-n", "backup-with-env", "get", "jobs", "backup-2", "-o", "json")
g.Expect(err).NotTo(HaveOccurred())
job := &batchv1.Job{}
err = json.Unmarshal(out, job)
Expand All @@ -100,13 +103,13 @@ var _ = Context("backup with ObjectBucketName is set in environments variables",
g.Expect(condComplete.Status).To(Equal(corev1.ConditionTrue), "backup-2 has not been finished")
}).Should(Succeed())

cluster, err := getCluster("backup", "source")
cluster, err := getCluster("backup-with-env", "source")
Expect(err).NotTo(HaveOccurred())
Expect(cluster.Status.Backup.BinlogSize).NotTo(Equal(int64(0)))
})

It("should destroy the source then restore the backup data", func() {
kubectlSafe(nil, "-n", "backup", "delete", "mysqlclusters", "source")
kubectlSafe(nil, "-n", "backup-with-env", "delete", "mysqlclusters", "source")

tmpl, err := template.New("").Parse(restoreWithEnvYAML)
Expect(err).NotTo(HaveOccurred())
Expand All @@ -122,22 +125,22 @@ var _ = Context("backup with ObjectBucketName is set in environments variables",

kubectlSafe(buf.Bytes(), "apply", "-f", "-")
Eventually(func(g Gomega) {
cluster, err := getCluster("backup", "target")
cluster, err := getCluster("backup-with-env", "target")
g.Expect(err).NotTo(HaveOccurred())
condHealthy, err := getClusterCondition(cluster, mocov1beta2.ConditionHealthy)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(condHealthy.Status).To(Equal(metav1.ConditionTrue), "target is not healthy")
}).Should(Succeed())

out := kubectlSafe(nil, "moco", "-n", "backup", "mysql", "target", "--",
out := kubectlSafe(nil, "moco", "-n", "backup-with-env", "mysql", "target", "--",
"-N", "-D", "test", "-e", "SELECT COUNT(*) FROM t")
count, err := strconv.Atoi(strings.TrimSpace(string(out)))
Expect(err).NotTo(HaveOccurred())
Expect(count).To(Equal(2))
})

It("should delete clusters", func() {
kubectlSafe(nil, "delete", "-n", "backup", "mysqlclusters", "--all")
verifyAllPodsDeleted("backup")
kubectlSafe(nil, "delete", "-n", "backup-with-env", "mysqlclusters", "--all")
verifyAllPodsDeleted("backup-with-env")
})
})
2 changes: 1 addition & 1 deletion e2e/prevent_delete_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -80,7 +80,7 @@ func setSourceDelay(index, delay int) {
}
}

var _ = Context("PreventDelete", func() {
var _ = Context("PreventDelete", Serial, func() {
if doUpgrade {
return
}
Expand Down
4 changes: 2 additions & 2 deletions e2e/suite_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,6 @@ func TestE2e(t *testing.T) {
//go:embed testdata/client.yaml
var clientYAML string

var _ = BeforeSuite(func() {
var _ = SynchronizedBeforeSuite(func() {
kubectlSafe(fillTemplate(clientYAML), "apply", "-f", "-")
})
}, func() {})
14 changes: 7 additions & 7 deletions e2e/testdata/backup_with_env.yaml
Original file line number Diff line number Diff line change
@@ -1,26 +1,26 @@
apiVersion: v1
kind: Namespace
metadata:
name: backup
name: backup-with-env
---
apiVersion: v1
kind: ConfigMap
metadata:
namespace: backup
namespace: backup-with-env
name: mycnf
data:
innodb_log_file_size: "10M"
---
apiVersion: v1
kind: ServiceAccount
metadata:
namespace: backup
namespace: backup-with-env
name: backup-owner
---
apiVersion: moco.cybozu.com/v1beta2
kind: BackupPolicy
metadata:
namespace: backup
namespace: backup-with-env
name: daily
spec:
schedule: "@daily"
Expand All @@ -46,7 +46,7 @@ spec:
apiVersion: moco.cybozu.com/v1beta2
kind: MySQLCluster
metadata:
namespace: backup
namespace: backup-with-env
name: source
spec:
mysqlConfigMapName: mycnf
Expand All @@ -69,7 +69,7 @@ spec:
apiVersion: v1
kind: ConfigMap
metadata:
namespace: backup
namespace: backup-with-env
name: bucket-name
data:
BUCKET_NAME: moco
BUCKET_NAME: moco-with-env
22 changes: 22 additions & 0 deletions e2e/testdata/makebucket_with_env.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
# One-shot Job that creates the "moco-with-env" S3 bucket in the in-cluster
# MinIO, so the backup-with-env e2e suite can run in parallel with the plain
# backup suite (which uses its own "moco" bucket) without colliding.
# NOTE(review): the diff rendering has flattened YAML indentation; the nesting
# below (metadata/spec/template/containers) must be restored in the real file.
apiVersion: batch/v1
kind: Job
metadata:
name: make-bucket-with-env
namespace: default
spec:
template:
spec:
# OnFailure: retry until MinIO is reachable and the bucket is created.
restartPolicy: OnFailure
containers:
- command:
- s3cmd
- --host=minio.default.svc:9000
- --host-bucket=minio.default.svc:9000
# Default MinIO dev credentials — test-only, not a secret leak.
- --access_key=minioadmin
- --secret_key=minioadmin
- --no-ssl
# `mb` = make bucket; target bucket is suite-specific to avoid clashes.
- mb
- s3://moco-with-env
image: moco-backup:dev
imagePullPolicy: IfNotPresent
name: make-bucket
8 changes: 4 additions & 4 deletions e2e/testdata/restore_with_env.yaml
Original file line number Diff line number Diff line change
@@ -1,14 +1,14 @@
apiVersion: moco.cybozu.com/v1beta2
kind: MySQLCluster
metadata:
namespace: backup
namespace: backup-with-env
name: target
spec:
mysqlConfigMapName: mycnf
replicas: 1
restore:
sourceName: source
sourceNamespace: backup
sourceNamespace: backup-with-env
restorePoint: "{{ .RestorePoint }}"
jobConfig:
serviceAccountName: backup-owner
Expand Down Expand Up @@ -45,7 +45,7 @@ spec:
apiVersion: v1
kind: ConfigMap
metadata:
namespace: backup
namespace: backup-with-env
name: bucket-name
data:
BUCKET_NAME: moco
BUCKET_NAME: moco-with-env