# kind-e2e.yaml
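# End-to-end test: build the azad-kube-proxy image, deploy it into a local KinD
# cluster via its Helm chart, and verify access both with a raw bearer token and
# through the kubectl-azad-proxy plugin. Runs on pushes and pull requests to
# main, plus a nightly schedule at 04:00 UTC.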
name: KinD End-to-End
on:
  pull_request:
    branches: [main]
  push:
    branches: [main]
  schedule:
    - cron: "0 4 * * *"
env:
  NAME: "azad-kube-proxy"
jobs:
  end-to-end:
    runs-on: ubuntu-latest
    steps:
      - name: Clone repo
        uses: actions/checkout@v3
      - name: Setup go
        uses: actions/setup-go@v4
        with:
          go-version: "^1.20"
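      # Derive image metadata: VERSION is the short commit SHA for branch builds,
      # or the tag name when building from a tag; BUILD_DATE is the UTC build time.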
      - name: Prepare
        id: prep
        run: |
          VERSION=sha-${GITHUB_SHA::8}
          if [[ $GITHUB_REF == refs/tags/* ]]; then
            VERSION=${GITHUB_REF/refs\/tags\//}
          fi
          echo BUILD_DATE=$(date -u +'%Y-%m-%dT%H:%M:%SZ') >> $GITHUB_OUTPUT
          echo VERSION=${VERSION} >> $GITHUB_OUTPUT
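      # Persist the /tmp/.buildx-cache directory between runs, keyed on the commit
      # SHA with a per-OS prefix fallback for restoring caches from earlier commits.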
      - name: Cache container layers
        uses: actions/cache@v3.3.1
        with:
          path: /tmp/.buildx-cache
          key: ${{ runner.os }}-buildx-${{ github.sha }}
          restore-keys: |
            ${{ runner.os }}-buildx-
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v2.1.0
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2.5.0
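      # Create a single-node test cluster; this assumes the kind CLI is already
      # available on the ubuntu-latest runner image.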
      - name: Set up KinD
        id: kind
        run: |
          kind create cluster --image=kindest/node:v1.25.3
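      # Build the image for the runner's architecture only; --load puts it in the
      # local Docker daemon so it can be side-loaded into KinD without a registry.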
      - name: Build and load (current arch)
        run: |
          docker buildx build --load -t ${{ env.NAME }}:${{ steps.prep.outputs.VERSION }} .
          kind load docker-image ${{ env.NAME }}:${{ steps.prep.outputs.VERSION }}
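      # Issue a self-signed TLS certificate for the node IP, install the chart,
      # grant the test service principal cluster-admin, and create a test pod.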
      - name: Install helm chart
        env:
          CLIENT_ID: ${{ secrets.CLIENT_ID }}
          CLIENT_SECRET: ${{ secrets.CLIENT_SECRET }}
          TENANT_ID: ${{ secrets.TENANT_ID }}
          TEST_USER_SP_OBJECT_ID: ${{ secrets.TEST_USER_SP_OBJECT_ID }}
        run: |
          set -x
          set -e
          mkdir -p tmp
          NODE_IP=$(kubectl get node -o json | jq -r '.items[0].status.addresses[] | select(.type=="InternalIP").address')
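          # Generate a self-signed certificate with the node IP in its SAN; the
          # proxy serves TLS on the NodePort with it.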
          cat <<EOF > tmp/csr-config
          [req]
          distinguished_name=req
          [san]
          subjectAltName=@alt_names
          [alt_names]
          IP.1 = ${NODE_IP}
          DNS.1 = ${NODE_IP}
          EOF
          openssl req -newkey rsa:4096 -x509 -sha256 -days 3650 -nodes -out tmp/cert.crt -keyout tmp/cert.key -subj "/C=SE/ST=LOCALHOST/L=LOCALHOST/O=LOCALHOST/OU=LOCALHOST/CN=${NODE_IP}" -extensions san -config tmp/csr-config
          kubectl create namespace ${{ env.NAME }}
          kubectl --namespace ${{ env.NAME }} create secret tls test-cert --cert=tmp/cert.crt --key=tmp/cert.key
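          # Drop exit-on-error around the install so a failure can dump pod logs
          # before the job exits non-zero.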
          set +e
          helm upgrade --wait --timeout 120s --namespace ${{ env.NAME }} --install ${{ env.NAME }} --values test/test-values.yaml --set secret.CLIENT_ID=${CLIENT_ID} --set secret.CLIENT_SECRET=${CLIENT_SECRET} --set secret.TENANT_ID=${TENANT_ID} --set image.repository=${{ env.NAME }} --set image.tag=${{ steps.prep.outputs.VERSION }} ./charts/azad-kube-proxy
          EXIT_CODE=$?
          set -e
          if [ $EXIT_CODE -ne 0 ]; then
            kubectl --namespace ${{ env.NAME }} logs -l app.kubernetes.io/name=${{ env.NAME }}
            echo "helm install failed" 1>&2
            exit 1
          fi
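          # Bind the test service principal's object ID to cluster-admin so its
          # proxied requests are authorized by the cluster.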
          cat <<EOF | kubectl apply -f -
          apiVersion: rbac.authorization.k8s.io/v1
          kind: ClusterRoleBinding
          metadata:
            name: ${TEST_USER_SP_OBJECT_ID}-admin
          subjects:
            - kind: User
              name: ${TEST_USER_SP_OBJECT_ID}
              apiGroup: rbac.authorization.k8s.io
          roleRef:
            kind: ClusterRole
            name: cluster-admin
            apiGroup: rbac.authorization.k8s.io
          EOF
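          # Create the pod that both test steps below expect to find as the first
          # pod in the default namespace.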
          cat <<EOF | kubectl apply -f -
          apiVersion: v1
          kind: Pod
          metadata:
            name: test
            namespace: default
          spec:
            containers:
              - image: busybox
                name: test
                command: ["sleep"]
                args: ["3000"]
          EOF
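      # Fetch an Azure AD access token for the test service principal and call the
      # Kubernetes API through the proxy's NodePort directly with curl.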
      - name: Test azad-kube-proxy
        env:
          TENANT_ID: ${{ secrets.TENANT_ID }}
          TEST_USER_SP_CLIENT_ID: ${{ secrets.TEST_USER_SP_CLIENT_ID }}
          TEST_USER_SP_CLIENT_SECRET: ${{ secrets.TEST_USER_SP_CLIENT_SECRET }}
          TEST_USER_SP_RESOURCE: ${{ secrets.TEST_USER_SP_RESOURCE }}
        run: |
          set -e
          az login --service-principal --username ${TEST_USER_SP_CLIENT_ID} --password ${TEST_USER_SP_CLIENT_SECRET} --tenant ${TENANT_ID} --allow-no-subscriptions 1>/dev/null
          TOKEN=$(az account get-access-token --resource ${TEST_USER_SP_RESOURCE} --query accessToken --output tsv)
          NODE_IP=$(kubectl get node -o json | jq -r '.items[0].status.addresses[] | select(.type=="InternalIP").address')
          NODE_PORT=$(kubectl --namespace ${{ env.NAME }} get service ${{ env.NAME }} -o json | jq -r '.spec.ports[0].nodePort')
          RESPONSE=$(curl -s -k -H "Authorization: Bearer ${TOKEN}" https://${NODE_IP}:${NODE_PORT}/api/v1/namespaces/default/pods | jq -r '.items[0].metadata.name')
          if [[ "${RESPONSE}" != "test" ]]; then
            echo "Expected response to be test. Was: ${RESPONSE}" 1>&2
            exit 1
          fi
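      # Build the kubectl plugin, generate a kubeconfig pointing at the proxy, and
      # verify that plain kubectl works end to end through it.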
      - name: Test kubectl-azad-proxy
        env:
          TENANT_ID: ${{ secrets.TENANT_ID }}
          TEST_USER_SP_CLIENT_ID: ${{ secrets.TEST_USER_SP_CLIENT_ID }}
          TEST_USER_SP_CLIENT_SECRET: ${{ secrets.TEST_USER_SP_CLIENT_SECRET }}
          TEST_USER_SP_RESOURCE: ${{ secrets.TEST_USER_SP_RESOURCE }}
          EXCLUDE_ENVIRONMENT_AUTH: "true"
          EXCLUDE_MSI_AUTH: "true"
        run: |
          set -e
          az login --service-principal --username ${TEST_USER_SP_CLIENT_ID} --password ${TEST_USER_SP_CLIENT_SECRET} --tenant ${TENANT_ID} --allow-no-subscriptions 1>/dev/null
          make build-plugin
          sudo mv bin/kubectl-azad_proxy /usr/local/bin/
          NODE_IP=$(kubectl get node -o json | jq -r '.items[0].status.addresses[] | select(.type=="InternalIP").address')
          NODE_PORT=$(kubectl --namespace ${{ env.NAME }} get service ${{ env.NAME }} -o json | jq -r '.spec.ports[0].nodePort')
          kubectl azad-proxy generate --cluster-name local-test --kubeconfig ~/.kube/test --proxy-url https://${NODE_IP}:${NODE_PORT} --tls-insecure-skip-verify=true --overwrite --resource ${TEST_USER_SP_RESOURCE}
          RESPONSE=$(kubectl --kubeconfig ~/.kube/test --namespace default get pods -o json | jq -r '.items[0].metadata.name')
          if [[ "${RESPONSE}" != "test" ]]; then
            echo "Expected response to be test. Was: ${RESPONSE}" 1>&2
            exit 1
          fi