-
Notifications
You must be signed in to change notification settings - Fork 63
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
test(DRIVERS-2657): break out lambda scripts (#325)
* fix: always allow all cleanup to run on lambda * test(DRIVERS-2657): break out lambda scripts * test: update scripts * chore: revert old script * test: update scripts * test: dont create cluster in run script * test: check cluster in setup * test: allow supplying mongodb version * test: check every 15, wait 20 min * test: tweak cluster settings * test: more config tweaks * test: auto scaling required * test: add read preference back * Update .evergreen/aws_lambda/setup-atlas-cluster.sh Co-authored-by: Bailey Pearson <bailey.pearson@gmail.com> * test: try more expansions * test: update suggestions * Update .evergreen/aws_lambda/setup-atlas-cluster.sh Co-authored-by: Andreas Braun <alcaeus@users.noreply.github.com> * test: move atlas scripts --------- Co-authored-by: Bailey Pearson <bailey.pearson@gmail.com> Co-authored-by: Andreas Braun <alcaeus@users.noreply.github.com>
- Loading branch information
1 parent
2608a86
commit 797f71c
Showing
3 changed files
with
250 additions
and
0 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,128 @@ | ||
#!/bin/bash
set -o errexit # Exit the script with error if any of the commands fail

# Explanation of required environment variables:
#
# DRIVERS_ATLAS_PUBLIC_API_KEY: The public Atlas key for the drivers org.
# DRIVERS_ATLAS_PRIVATE_API_KEY: The private Atlas key for the drivers org.
# DRIVERS_ATLAS_GROUP_ID: The id of the individual projects under the drivers org, per language.
# DRIVERS_ATLAS_LAMBDA_USER: The user for the lambda cluster.
# DRIVERS_ATLAS_LAMBDA_PASSWORD: The password for the user.
# LAMBDA_STACK_NAME: The name of the stack on lambda "dbx-<language>-lambda"
# MONGODB_VERSION: The major version of the cluster to deploy. Defaults to 6.0.

# Explanation of generated variables:
#
# MONGODB_URI: The URI for the created Atlas cluster during this script.
# FUNCTION_NAME: Uses the stack name plus the current commit sha to create a unique cluster and function.
# CREATE_CLUSTER_JSON: The JSON used to create a cluster via the Atlas API.
# ATLAS_BASE_URL: Where the Atlas API root resides.

# Fail fast with a clear message if any required variable is missing.
: "${DRIVERS_ATLAS_PUBLIC_API_KEY:?DRIVERS_ATLAS_PUBLIC_API_KEY must be set}"
: "${DRIVERS_ATLAS_PRIVATE_API_KEY:?DRIVERS_ATLAS_PRIVATE_API_KEY must be set}"
: "${DRIVERS_ATLAS_GROUP_ID:?DRIVERS_ATLAS_GROUP_ID must be set}"
: "${DRIVERS_ATLAS_LAMBDA_USER:?DRIVERS_ATLAS_LAMBDA_USER must be set}"
: "${DRIVERS_ATLAS_LAMBDA_PASSWORD:?DRIVERS_ATLAS_LAMBDA_PASSWORD must be set}"
: "${LAMBDA_STACK_NAME:?LAMBDA_STACK_NAME must be set}"

# The Atlas API version
ATLAS_API_VERSION="v1.0"
# The base Atlas API url. We use the API directly as the CLI does not yet
# support testing cluster outages.
ATLAS_BASE_URL="https://cloud.mongodb.com/api/atlas/$ATLAS_API_VERSION"

# Add git commit to name of function and cluster.
FUNCTION_NAME="${LAMBDA_STACK_NAME}-$(git rev-parse --short HEAD)"

# The cluster server version.
VERSION="${MONGODB_VERSION:-6.0}"

# Set the create cluster configuration.
CREATE_CLUSTER_JSON=$(cat <<EOF
{
  "autoScaling" : {
    "autoIndexingEnabled" : false,
    "compute" : {
      "enabled" : true,
      "scaleDownEnabled" : true
    },
    "diskGBEnabled" : true
  },
  "backupEnabled" : false,
  "biConnector" : {
    "enabled" : false,
    "readPreference" : "secondary"
  },
  "clusterType" : "REPLICASET",
  "diskSizeGB" : 10.0,
  "encryptionAtRestProvider" : "NONE",
  "mongoDBMajorVersion" : "${VERSION}",
  "name" : "${FUNCTION_NAME}",
  "numShards" : 1,
  "paused" : false,
  "pitEnabled" : false,
  "providerBackupEnabled" : false,
  "providerSettings" : {
    "providerName" : "AWS",
    "autoScaling" : {
      "compute" : {
        "maxInstanceSize" : "M20",
        "minInstanceSize" : "M10"
      }
    },
    "diskIOPS" : 3000,
    "encryptEBSVolume" : true,
    "instanceSizeName" : "M10",
    "regionName" : "US_EAST_1",
    "volumeType" : "STANDARD"
  },
  "replicationFactor" : 3,
  "rootCertType" : "ISRGROOTX1",
  "terminationProtectionEnabled" : false,
  "versionReleaseSystem" : "LTS"
}
EOF
)

# Create an Atlas M10 cluster - this returns immediately so we'll need to poll until
# the cluster is created.
create_cluster ()
{
  echo "Creating new Atlas Cluster..."
  curl \
    --digest -u "${DRIVERS_ATLAS_PUBLIC_API_KEY}:${DRIVERS_ATLAS_PRIVATE_API_KEY}" \
    -d "${CREATE_CLUSTER_JSON}" \
    -H 'Content-Type: application/json' \
    -X POST \
    "${ATLAS_BASE_URL}/groups/${DRIVERS_ATLAS_GROUP_ID}/clusters?pretty=true"
}

# Check if cluster has a srv address, and assume once it does, it can be used.
# Polls the Atlas API every 15 seconds for at most 20 minutes (80 iterations),
# then exports MONGODB_URI and writes it to atlas-expansion.yml on success.
check_cluster ()
{
  local count=0
  local srv_address="null"
  # Quoted comparisons: an empty API response must not break the [ ] test.
  while [ "$srv_address" = "null" ] && [ "$count" -le 80 ]; do
    echo "Checking every 15 seconds for cluster to be created..."
    # Poll every 15 seconds to check the cluster creation.
    sleep 15
    srv_address=$(curl \
      --digest -u "${DRIVERS_ATLAS_PUBLIC_API_KEY}:${DRIVERS_ATLAS_PRIVATE_API_KEY}" \
      -X GET \
      "${ATLAS_BASE_URL}/groups/${DRIVERS_ATLAS_GROUP_ID}/clusters/${FUNCTION_NAME}" \
      | jq -r '.srvAddress')
    count=$((count + 1))
    echo "$srv_address"
  done

  if [ "$srv_address" = "null" ]; then
    echo "No cluster could be created in the 20 minute timeframe or error occurred." >&2
    exit 1
  fi

  echo "Setting MONGODB_URI in the environment to the new cluster."
  # Strip the scheme (everything through "//") to get the bare host name.
  local host="${srv_address#*//}"
  MONGODB_URI="mongodb+srv://${DRIVERS_ATLAS_LAMBDA_USER}:${DRIVERS_ATLAS_LAMBDA_PASSWORD}@${host}"
  # Put the MONGODB_URI in an expansions yml for later evergreen steps to consume.
  echo "MONGODB_URI: \"${MONGODB_URI}\"" > atlas-expansion.yml
}

create_cluster

check_cluster
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,30 @@ | ||
#!/bin/bash
set -o errexit # Exit the script with error if any of the commands fail

# Explanation of required environment variables:
#
# DRIVERS_ATLAS_PUBLIC_API_KEY: The public Atlas key for the drivers org.
# DRIVERS_ATLAS_PRIVATE_API_KEY: The private Atlas key for the drivers org.
# DRIVERS_ATLAS_GROUP_ID: The id of the individual projects under the drivers org, per language.
# LAMBDA_STACK_NAME: The name of the stack on lambda "dbx-<language>-lambda"

# Explanation of generated variables:
#
# FUNCTION_NAME: Uses the stack name plus the current commit sha to create a unique cluster and function.
# ATLAS_BASE_URL: Where the Atlas API root resides.

# Fail fast with a clear message if any required variable is missing.
: "${DRIVERS_ATLAS_PUBLIC_API_KEY:?DRIVERS_ATLAS_PUBLIC_API_KEY must be set}"
: "${DRIVERS_ATLAS_PRIVATE_API_KEY:?DRIVERS_ATLAS_PRIVATE_API_KEY must be set}"
: "${DRIVERS_ATLAS_GROUP_ID:?DRIVERS_ATLAS_GROUP_ID must be set}"
: "${LAMBDA_STACK_NAME:?LAMBDA_STACK_NAME must be set}"

# The Atlas API version
ATLAS_API_VERSION="v1.0"
# The base Atlas API url. We use the API directly as the CLI does not yet
# support testing cluster outages.
ATLAS_BASE_URL="https://cloud.mongodb.com/api/atlas/$ATLAS_API_VERSION"

# Add git commit to name of function and cluster.
FUNCTION_NAME="${LAMBDA_STACK_NAME}-$(git rev-parse --short HEAD)"

# Delete the cluster. Credentials are quoted so keys containing special
# characters cannot be word-split into extra curl arguments.
echo "Deleting Atlas Cluster..."
curl \
  --digest -u "${DRIVERS_ATLAS_PUBLIC_API_KEY}:${DRIVERS_ATLAS_PRIVATE_API_KEY}" \
  -X DELETE \
  "${ATLAS_BASE_URL}/groups/${DRIVERS_ATLAS_GROUP_ID}/clusters/${FUNCTION_NAME}?pretty=true"
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,92 @@ | ||
#!/bin/bash
set -o errexit # Exit the script with error if any of the commands fail

# Explanation of required environment variables:
#
# TEST_LAMBDA_DIRECTORY: The root of the project's Lambda sam project.
# DRIVERS_ATLAS_PUBLIC_API_KEY: The public Atlas key for the drivers org.
# DRIVERS_ATLAS_PRIVATE_API_KEY: The private Atlas key for the drivers org.
# DRIVERS_ATLAS_GROUP_ID: The id of the individual projects under the drivers org, per language.
# LAMBDA_STACK_NAME: The name of the stack on lambda "dbx-<language>-lambda"
# AWS_REGION: The region for the function - generally us-east-1
# MONGODB_URI: The URI of the Atlas cluster to test against.

# Explanation of generated variables:
#
# FUNCTION_NAME: Uses the stack name plus the current commit sha to create a unique cluster and function.
# ATLAS_BASE_URL: Where the Atlas API root resides.

# Fail fast with a clear message if any required variable is missing.
: "${TEST_LAMBDA_DIRECTORY:?TEST_LAMBDA_DIRECTORY must be set}"
: "${DRIVERS_ATLAS_PUBLIC_API_KEY:?DRIVERS_ATLAS_PUBLIC_API_KEY must be set}"
: "${DRIVERS_ATLAS_PRIVATE_API_KEY:?DRIVERS_ATLAS_PRIVATE_API_KEY must be set}"
: "${DRIVERS_ATLAS_GROUP_ID:?DRIVERS_ATLAS_GROUP_ID must be set}"
: "${LAMBDA_STACK_NAME:?LAMBDA_STACK_NAME must be set}"
: "${AWS_REGION:?AWS_REGION must be set}"
: "${MONGODB_URI:?MONGODB_URI must be set}"

# The base Atlas API url. We use the API directly as the CLI does not yet
# support testing cluster outages.
ATLAS_BASE_URL="https://cloud.mongodb.com/api/atlas/v1.0"

# Add git commit to name of function and cluster.
FUNCTION_NAME="${LAMBDA_STACK_NAME}-$(git rev-parse --short HEAD)"

# Restarts the cluster's primary node.
restart_cluster_primary ()
{
  echo "Testing Atlas primary restart..."
  curl \
    --digest -u "${DRIVERS_ATLAS_PUBLIC_API_KEY}:${DRIVERS_ATLAS_PRIVATE_API_KEY}" \
    -X POST \
    "${ATLAS_BASE_URL}/groups/${DRIVERS_ATLAS_GROUP_ID}/clusters/${FUNCTION_NAME}/restartPrimaries"
}

# Deploys a lambda function to the set stack name.
deploy_lambda_function ()
{
  echo "Deploying Lambda function..."
  sam deploy \
    --stack-name "${FUNCTION_NAME}" \
    --capabilities CAPABILITY_IAM \
    --resolve-s3 \
    --parameter-overrides "MongoDbUri=${MONGODB_URI}" \
    --region "${AWS_REGION}"
}

# Get the ARN for the Lambda function we created and export it.
get_lambda_function_arn ()
{
  echo "Getting Lambda function ARN..."
  LAMBDA_FUNCTION_ARN=$(sam list stack-outputs \
    --stack-name "${FUNCTION_NAME}" \
    --region "${AWS_REGION}" \
    --output json | jq '.[] | select(.OutputKey == "MongoDBFunction") | .OutputValue' | tr -d '"'
  )
  echo "Lambda function ARN: $LAMBDA_FUNCTION_ARN"
  export LAMBDA_FUNCTION_ARN
}

# Delete the lambda cloud formation stack.
delete_lambda_stack ()
{
  echo "Deleting Lambda Stack..."
  # Use the same region the stack was deployed to (was hardcoded us-east-1,
  # which would fail to find the stack when AWS_REGION differs).
  sam delete --stack-name "${FUNCTION_NAME}" --no-prompts --region "${AWS_REGION}"
}

cd "${TEST_LAMBDA_DIRECTORY}"

sam build

deploy_lambda_function

get_lambda_function_arn

aws lambda invoke --function-name "${LAMBDA_FUNCTION_ARN}" --log-type Tail lambda-invoke-standard.json
tail lambda-invoke-standard.json

echo "Sleeping 1 minute to build up some streaming protocol heartbeats..."
sleep 60
aws lambda invoke --function-name "${LAMBDA_FUNCTION_ARN}" --log-type Tail lambda-invoke-frozen.json
tail lambda-invoke-frozen.json

restart_cluster_primary

echo "Sleeping 1 minute to build up some streaming protocol heartbeats..."
sleep 60
aws lambda invoke --function-name "${LAMBDA_FUNCTION_ARN}" --log-type Tail lambda-invoke-outage.json
tail lambda-invoke-outage.json

# Best-effort cleanup: never fail the task because stack deletion failed.
delete_lambda_stack || true