diff --git a/Dockerfile b/Dockerfile
index 400ea95a..038b88d0 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -13,7 +13,8 @@ RUN apk add --update 'mariadb-client>10.3.15' mariadb-connector-c bash python3 p
 RUN groupadd -g 1005 appuser && \
     useradd -r -u 1005 -g appuser appuser
 # ensure smb stuff works correctly
-RUN mkdir -p /var/cache/samba && chmod 0755 /var/cache/samba && chown appuser /var/cache/samba && chown appuser /var/lib/samba/private
+RUN mkdir -p /var/cache/samba && chmod 0755 /var/cache/samba && chown appuser /var/cache/samba && chown appuser /var/lib/samba/private && \
+    mkdir -p /home/appuser && chown -R appuser /home/appuser
 USER appuser
 
 # install the entrypoint
diff --git a/README.md b/README.md
index c97e4390..d0f0cf27 100644
--- a/README.md
+++ b/README.md
@@ -52,6 +52,7 @@ __You should consider the [use of `--env-file=`](https://docs.docker.com/engine/
 * Local: If the value of `DB_DUMP_TARGET` starts with a `/` character, will dump to a local path, which should be volume-mounted.
 * SMB: If the value of `DB_DUMP_TARGET` is a URL of the format `smb://hostname/share/path/` then it will connect via SMB.
 * S3: If the value of `DB_DUMP_TARGET` is a URL of the format `s3://bucketname/path` then it will connect via awscli.
+* OSS: If the value of `DB_DUMP_TARGET` is a URL of the format `oss://bucketname/path` then it will also connect via awscli, since [OSS is compatible with S3](https://www.alibabacloud.com/help/en/oss/developer-reference/compatibility-with-amazon-s3-1/?spm=a2c63.p38356.0.0.97ba41a0Ft7InT).
 * Multiple: If the value of `DB_DUMP_TARGET` contains multiple targets, the targets should be separated by a whitespace **and** the value surrounded by quotes, e.g. `"/db s3://bucketname/path"`.
 * `DB_DUMP_SAFECHARS`: The dump filename usually includes the character `:` in the date, to comply with RFC3339. Some systems and shells don't like that character. If this environment variable is set, it will replace all `:` with `-`.
 * `AWS_ACCESS_KEY_ID`: AWS Key ID
@@ -60,6 +61,10 @@ __You should consider the [use of `--env-file=`](https://docs.docker.com/engine/
 * `AWS_ENDPOINT_URL`: Specify an alternative endpoint for s3 interopable systems e.g. Digitalocean
 * `AWS_CLI_OPTS`: Additional arguments to be passed to the `aws` part of the `aws s3 cp` command, click [here](https://docs.aws.amazon.com/cli/latest/reference/#options) for a list. _Be careful_, as you can break something!
 * `AWS_CLI_S3_CP_OPTS`: Additional arguments to be passed to the `s3 cp` part of the `aws s3 cp` command, click [here](https://docs.aws.amazon.com/cli/latest/reference/s3/cp.html#options) for a list. If you are using AWS KMS, `sse`, `sse-kms-key-id`, etc., may be of interest.
+* `OSS_REGION`: Alibaba Cloud OSS Region ID, e.g. `oss-cn-beijing`, `oss-cn-hangzhou`
+* `OSS_ENDPOINT_URL`: Alibaba Cloud OSS Endpoint URL, e.g. `https://oss-cn-beijing.aliyuncs.com`, `https://oss-cn-hangzhou.aliyuncs.com`
+* `OSS_ACCESS_KEY_ID`: Alibaba Cloud OSS Access Key ID
+* `OSS_ACCESS_KEY_SECRET`: Alibaba Cloud OSS Access Key Secret
 * `SMB_USER`: SMB username. May also be specified in `DB_DUMP_TARGET` with an `smb://` url. If both specified, this variable overrides the value in the URL.
 * `SMB_PASS`: SMB password. May also be specified in `DB_DUMP_TARGET` with an `smb://` url. If both specified, this variable overrides the value in the URL.
 * `COMPRESSION`: Compression to use. Supported are: `gzip` (default), `bzip2`
diff --git a/entrypoint b/entrypoint
index 7301621f..dec8af12 100755
--- a/entrypoint
+++ b/entrypoint
@@ -32,6 +32,11 @@
 file_env "AWS_ACCESS_KEY_ID"
 file_env "AWS_SECRET_ACCESS_KEY"
 file_env "AWS_DEFAULT_REGION"
 
+file_env "OSS_REGION"
+file_env "OSS_ENDPOINT_URL"
+file_env "OSS_ACCESS_KEY_ID"
+file_env "OSS_ACCESS_KEY_SECRET"
+
 file_env "SMB_USER"
 file_env "SMB_PASS"
@@ -97,6 +102,12 @@
 TMPRESTORE="${TMP_PATH}/restorefile"
 
 # this is global, so has to be set outside
 declare -A uri
 
+# if OSS_ACCESS_KEY_ID and OSS_ACCESS_KEY_SECRET are set, add oss profile
+if [[ -n "$OSS_ACCESS_KEY_ID" && -n "$OSS_ACCESS_KEY_SECRET" ]]; then
+  aws configure set aws_access_key_id "$OSS_ACCESS_KEY_ID" --profile oss
+  aws configure set aws_secret_access_key "$OSS_ACCESS_KEY_SECRET" --profile oss
+  aws configure set s3.addressing_style virtual --profile oss
+fi
 if [[ -n "$DB_RESTORE_TARGET" ]]; then
@@ -116,6 +127,9 @@
   elif [[ "${uri[schema]}" == "s3" ]]; then
     [[ -n "$AWS_ENDPOINT_URL" ]] && AWS_ENDPOINT_OPT="--endpoint-url $AWS_ENDPOINT_URL"
     aws ${AWS_CLI_OPTS} ${AWS_ENDPOINT_OPT} s3 cp ${AWS_CLI_S3_CP_OPTS} "${DB_RESTORE_TARGET}" $TMPRESTORE
+  elif [[ "${uri[schema]}" == "oss" ]]; then
+    DB_RESTORE_TARGET=${DB_RESTORE_TARGET/oss:\/\//s3:\/\/}
+    aws --profile oss --region "$OSS_REGION" --endpoint-url "$OSS_ENDPOINT_URL" s3 cp "${DB_RESTORE_TARGET}" $TMPRESTORE
  elif [[ "${uri[schema]}" == "smb" ]]; then
     if [[ -n "$SMB_USER" ]]; then
       UPASSARG="-U"
diff --git a/functions.sh b/functions.sh
index 4c910a74..c5a4eb57 100644
--- a/functions.sh
+++ b/functions.sh
@@ -221,6 +221,10 @@ function backup_target() {
       [[ -n "$AWS_ENDPOINT_URL" ]] && AWS_ENDPOINT_OPT="--endpoint-url $AWS_ENDPOINT_URL"
       aws ${AWS_CLI_OPTS} ${AWS_ENDPOINT_OPT} s3 cp ${AWS_CLI_S3_CP_OPTS} ${TMPDIR}/${SOURCE} "${target}/${TARGET}"
       ;;
+    "oss")
+      target=${target/oss:\/\//s3:\/\/}
+      aws --profile oss --region "$OSS_REGION" --endpoint-url "$OSS_ENDPOINT_URL" s3 cp ${TMPDIR}/${SOURCE} "${target}/${TARGET}"
+      ;;
     "smb")
       if [[ -n "$SMB_USER" ]]; then
         UPASSARG="-U"
diff --git a/test/test_dump.sh b/test/test_dump.sh
index a97a5a89..0df33c3a 100755
--- a/test/test_dump.sh
+++ b/test/test_dump.sh
@@ -15,6 +15,7 @@ targets=(
 "smb://user:pass@smb/auth/SEQ/data"
 "smb://CONF;user:pass@smb/auth/SEQ/data"
 "s3://mybucket/SEQ/data"
+"oss://mybucket/SEQ/data"
 "file:///backups/SEQ/data file:///backups/SEQ/data"
 )
 
diff --git a/test/test_source_target.sh b/test/test_source_target.sh
index 5662c632..e5ef9eb5 100755
--- a/test/test_source_target.sh
+++ b/test/test_source_target.sh
@@ -13,6 +13,7 @@ targets=(
 "file:///backups/SEQ/data"
 "smb://user:pass@smb/auth/SEQ/data"
 "s3://mybucket/SEQ/data"
+"oss://mybucket/SEQ/data"
 )
 
 # we need to run through each each target and test the backup.
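
For review context, the effect of the entrypoint and functions.sh changes can be condensed into a few lines of shell. This is an illustrative sketch only: the bucket, region, endpoint, and file names below are invented placeholders, not values taken from this patch.

    # once at startup: keep the Alibaba credentials under a dedicated "oss"
    # profile, and use virtual-hosted addressing (bucket.<endpoint>), which OSS expects
    aws configure set aws_access_key_id "$OSS_ACCESS_KEY_ID" --profile oss
    aws configure set aws_secret_access_key "$OSS_ACCESS_KEY_SECRET" --profile oss
    aws configure set s3.addressing_style virtual --profile oss

    # per dump: rewrite the oss:// scheme to s3:// and upload through the
    # S3-compatible API, pointing awscli at the configured OSS endpoint
    target="oss://mybucket/backups"
    target=${target/oss:\/\//s3:\/\/}
    aws --profile oss --region "$OSS_REGION" --endpoint-url "$OSS_ENDPOINT_URL" \
        s3 cp /tmp/db_backup.tgz "${target}/db_backup.tgz"

Using a separate profile keeps the OSS credentials from clobbering any `AWS_*` credentials that may already be configured for an `s3://` target in the same container.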
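An end-to-end invocation with the new target might then look like the following. The image name, database host, credentials, bucket, and keys are all placeholders for this example; the `DB_*` connection variables are the ones the project's README already documents.

    docker run -d \
      -e DB_SERVER=my-db-host \
      -e DB_USER=backup \
      -e DB_PASS=secret \
      -e DB_DUMP_TARGET=oss://mybucket/backups \
      -e OSS_REGION=oss-cn-hangzhou \
      -e OSS_ENDPOINT_URL=https://oss-cn-hangzhou.aliyuncs.com \
      -e OSS_ACCESS_KEY_ID=my-key-id \
      -e OSS_ACCESS_KEY_SECRET=my-key-secret \
      mysql-backup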