Skip to content

Commit

Permalink
Use DOOD strategy to keep supporting ubuntu18.04
Browse files Browse the repository at this point in the history
See NVIDIA/cccl#1779

Signed-off-by: Jordan Jacobelli <jjacobelli@nvidia.com>
  • Loading branch information
jjacobelli committed Oct 25, 2024
1 parent 51fabee commit af3d6a1
Show file tree
Hide file tree
Showing 15 changed files with 381 additions and 57 deletions.
2 changes: 1 addition & 1 deletion .devcontainer/cuda11.1-gcc7/devcontainer.json
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
{
"shutdownAction": "stopContainer",
"image": "rapidsai/devcontainers:24.12-cpp-gcc7-cuda11.1-ubuntu20.04",
"image": "rapidsai/devcontainers:24.12-cpp-gcc7-cuda11.1-ubuntu18.04",
"hostRequirements": {
"gpu": "optional"
},
Expand Down
2 changes: 1 addition & 1 deletion .devcontainer/cuda11.1-gcc8/devcontainer.json
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
{
"shutdownAction": "stopContainer",
"image": "rapidsai/devcontainers:24.12-cpp-gcc8-cuda11.1-ubuntu20.04",
"image": "rapidsai/devcontainers:24.12-cpp-gcc8-cuda11.1-ubuntu18.04",
"hostRequirements": {
"gpu": "optional"
},
Expand Down
2 changes: 1 addition & 1 deletion .devcontainer/cuda11.1-gcc9/devcontainer.json
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
{
"shutdownAction": "stopContainer",
"image": "rapidsai/devcontainers:24.12-cpp-gcc9-cuda11.1-ubuntu20.04",
"image": "rapidsai/devcontainers:24.12-cpp-gcc9-cuda11.1-ubuntu18.04",
"hostRequirements": {
"gpu": "optional"
},
Expand Down
2 changes: 1 addition & 1 deletion .devcontainer/cuda11.1-llvm9/devcontainer.json
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
{
"shutdownAction": "stopContainer",
"image": "rapidsai/devcontainers:24.12-cpp-llvm9-cuda11.1-ubuntu20.04",
"image": "rapidsai/devcontainers:24.12-cpp-llvm9-cuda11.1-ubuntu18.04",
"hostRequirements": {
"gpu": "optional"
},
Expand Down
49 changes: 49 additions & 0 deletions .devcontainer/docker-entrypoint.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,49 @@
#!/usr/bin/env bash

# Container entrypoint run as root. If the host user's UID/GID (passed in via
# NEW_UID/NEW_GID) differ from the container's non-root user, rewrite
# /etc/passwd and /etc/group and re-own the home directory so bind-mounted
# files stay editable, then drop privileges and hand off to the real
# entrypoint (.devcontainer/nvbench-entrypoint.sh).

# Maybe change the UID/GID of the container's non-root user to match the host's UID/GID

# Defaults: REMOTE_USER falls back to "coder"; the UID/GID variables default
# to empty so the checks below can detect "not provided".
: "${REMOTE_USER:="coder"}";
: "${OLD_UID:=}";
: "${OLD_GID:=}";
: "${NEW_UID:=}";
: "${NEW_GID:=}";

# Parse /etc/passwd for REMOTE_USER's current UID, GID, and home directory.
# passwd format: name:passwd:uid:gid:gecos:home:shell
eval "$(sed -n "s/${REMOTE_USER}:[^:]*:\([^:]*\):\([^:]*\):[^:]*:\([^:]*\).*/OLD_UID=\1;OLD_GID=\2;HOME_FOLDER=\3/p" /etc/passwd)";
# Does some OTHER account already own the target UID? (would collide on rename)
eval "$(sed -n "s/\([^:]*\):[^:]*:${NEW_UID}:.*/EXISTING_USER=\1/p" /etc/passwd)";
# Does some existing group already own the target GID?
eval "$(sed -n "s/\([^:]*\):[^:]*:${NEW_GID}:.*/EXISTING_GROUP=\1/p" /etc/group)";

if [ -z "$OLD_UID" ]; then
# REMOTE_USER does not exist in this image: nothing to remap, just exec.
echo "Remote user not found in /etc/passwd ($REMOTE_USER).";
exec "$(pwd)/.devcontainer/nvbench-entrypoint.sh" "$@";
elif [ "$OLD_UID" = "$NEW_UID" ] && [ "$OLD_GID" = "$NEW_GID" ]; then
# Host and container IDs already match: no remap needed.
echo "UIDs and GIDs are the same ($NEW_UID:$NEW_GID).";
exec "$(pwd)/.devcontainer/nvbench-entrypoint.sh" "$@";
elif [ "$OLD_UID" != "$NEW_UID" ] && [ -n "$EXISTING_USER" ]; then
# Target UID is taken by another account: skip the remap to avoid a clash.
echo "User with UID exists ($EXISTING_USER=$NEW_UID).";
exec "$(pwd)/.devcontainer/nvbench-entrypoint.sh" "$@";
else
if [ "$OLD_GID" != "$NEW_GID" ] && [ -n "$EXISTING_GROUP" ]; then
# Target GID already exists: keep the user's current group instead of
# renumbering, so we don't merge two distinct groups.
echo "Group with GID exists ($EXISTING_GROUP=$NEW_GID).";
NEW_GID="$OLD_GID";
fi
echo "Updating UID:GID from $OLD_UID:$OLD_GID to $NEW_UID:$NEW_GID.";
# Rewrite REMOTE_USER's uid:gid fields in /etc/passwd in place.
sed -i -e "s/\(${REMOTE_USER}:[^:]*:\)[^:]*:[^:]*/\1${NEW_UID}:${NEW_GID}/" /etc/passwd;
if [ "$OLD_GID" != "$NEW_GID" ]; then
# Renumber the group itself in /etc/group to match.
sed -i -e "s/\([^:]*:[^:]*:\)${OLD_GID}:/\1${NEW_GID}:/" /etc/group;
fi

# Fast parallel `chown -R`
# Only touches files not already owned by REMOTE_USER; -print0/-0 keeps
# paths with spaces safe, -P runs one chown per file across all cores.
find "$HOME_FOLDER/" -not -user "$REMOTE_USER" -print0 \
| xargs -0 -r -n1 -P"$(nproc --all)" chown "$NEW_UID:$NEW_GID"

# Run the container command as $REMOTE_USER, preserving the container startup environment.
#
# We cannot use `su -w` because that's not supported by the `su` in Ubuntu18.04, so we reset the following
# environment variables to the expected values, then pass through everything else from the startup environment.
export HOME="$HOME_FOLDER";
export XDG_CACHE_HOME="$HOME_FOLDER/.cache";
export XDG_CONFIG_HOME="$HOME_FOLDER/.config";
export XDG_STATE_HOME="$HOME_FOLDER/.local/state";
export PYTHONHISTFILE="$HOME_FOLDER/.local/state/.python_history";
# `su -p` preserves the environment; everything after `--` is the user name
# followed by arguments handed to the user's shell (the entrypoint script).
exec su -p "$REMOTE_USER" -- "$(pwd)/.devcontainer/nvbench-entrypoint.sh" "$@";
fi
215 changes: 196 additions & 19 deletions .devcontainer/launch.sh
Original file line number Diff line number Diff line change
Expand Up @@ -11,17 +11,46 @@ print_help() {
echo "the top-level devcontainer in .devcontainer/devcontainer.json will be used."
echo ""
echo "Options:"
echo " -c, --cuda Specify the CUDA version. E.g., 12.2"
echo " -H, --host Specify the host compiler. E.g., gcc12"
echo " -d, --docker Launch the development environment in Docker directly without using VSCode."
echo " -h, --help Display this help message and exit."
echo " -c, --cuda Specify the CUDA version. E.g., 12.2"
echo " -H, --host Specify the host compiler. E.g., gcc12"
echo " -d, --docker Launch the development environment in Docker directly without using VSCode."
echo " --gpus gpu-request GPU devices to add to the container ('all' to pass all GPUs)."
echo " -e, --env list Set additional container environment variables."
echo " -v, --volume list Bind mount a volume."
echo " -h, --help Display this help message and exit."
}

# Assign a value (or array of values) to a variable in the caller's scope.
# Usage: local "$1" && _upvar $1 "value(s)"
# Param: $1 Name of the variable to assign
# Param: $* Value(s) to assign; two args total means a scalar assignment,
#        more than two means an array assignment.
# Relies on bash dynamic scoping: `unset -v` removes the callee-visible
# local so the eval'd assignment lands one scope up.
# See: http://fvue.nl/wiki/Bash:_Passing_variables_by_reference
_upvar() {
  if unset -v "$1"; then
    if (( $# == 2 )); then
      eval "$1=\"\$2\""
    else
      eval "$1=(\"\${@:2}\")"
    fi
  fi
}

parse_options() {
local OPTIONS=c:H:dh
local LONG_OPTIONS=cuda:,host:,docker,help
local -;
set -euo pipefail;

# Read the name of the variable in which to return unparsed arguments
local UNPARSED="${!#}";
# Splice the unparsed arguments variable name from the arguments list
set -- "${@:1:$#-1}";

local OPTIONS=c:e:H:dhv
local LONG_OPTIONS=cuda:,env:,host:,gpus:,volume:,docker,help
# shellcheck disable=SC2155
local PARSED_OPTIONS=$(getopt -n "$0" -o "${OPTIONS}" --long "${LONG_OPTIONS}" -- "$@")

# shellcheck disable=SC2181
if [[ $? -ne 0 ]]; then
exit 1
fi
Expand All @@ -34,10 +63,18 @@ parse_options() {
cuda_version="$2"
shift 2
;;
-e|--env)
env_vars+=("$1" "$2")
shift 2
;;
-H|--host)
host_compiler="$2"
shift 2
;;
--gpus)
gpu_request="$2"
shift 2
;;
-d|--docker)
docker_mode=true
shift
Expand All @@ -46,8 +83,13 @@ parse_options() {
print_help
exit 0
;;
-v|--volume)
volumes+=("$1" "$2")
shift 2
;;
--)
shift
_upvar "${UNPARSED}" "${@}"
break
;;
*)
Expand All @@ -59,20 +101,153 @@ parse_options() {
done
}

# shellcheck disable=SC2155
launch_docker() {
DOCKER_IMAGE=$(grep "image" "${path}/devcontainer.json" | sed 's/.*: "\(.*\)",/\1/')
echo "Found image: ${DOCKER_IMAGE}"
docker pull ${DOCKER_IMAGE}
docker run \
-it --rm \
--user coder \
--workdir /home/coder/cccl \
--mount type=bind,src="$(pwd)",dst='/home/coder/cccl' \
${DOCKER_IMAGE} \
/bin/bash
local -;
set -euo pipefail

inline_vars() {
cat - \
`# inline local workspace folder` \
| sed "s@\${localWorkspaceFolder}@$(pwd)@g" \
`# inline local workspace folder basename` \
| sed "s@\${localWorkspaceFolderBasename}@$(basename "$(pwd)")@g" \
`# inline container workspace folder` \
| sed "s@\${containerWorkspaceFolder}@${WORKSPACE_FOLDER:-}@g" \
`# inline container workspace folder basename` \
| sed "s@\${containerWorkspaceFolderBasename}@$(basename "${WORKSPACE_FOLDER:-}")@g" \
`# translate local envvars to shell syntax` \
| sed -r 's/\$\{localEnv:([^\:]*):?(.*)\}/${\1:-\2}/g'
}

args_to_path() {
local -a keys=("${@}")
keys=("${keys[@]/#/[}")
keys=("${keys[@]/%/]}")
echo "$(IFS=; echo "${keys[*]}")"
}

json_string() {
python3 -c "import json,sys; print(json.load(sys.stdin)$(args_to_path "${@}"))" 2>/dev/null | inline_vars
}

json_array() {
python3 -c "import json,sys; [print(f'\"{x}\"') for x in json.load(sys.stdin)$(args_to_path "${@}")]" 2>/dev/null | inline_vars
}

json_map() {
python3 -c "import json,sys; [print(f'{k}=\"{v}\"') for k,v in json.load(sys.stdin)$(args_to_path "${@}").items()]" 2>/dev/null | inline_vars
}

devcontainer_metadata_json() {
docker inspect --type image --format '{{json .Config.Labels}}' "$DOCKER_IMAGE" \
| json_string '"devcontainer.metadata"'
}

###
# Read relevant values from devcontainer.json
###

local devcontainer_json="${path}/devcontainer.json";

# Read image
local DOCKER_IMAGE="$(json_string '"image"' < "${devcontainer_json}")"
# Always pull the latest copy of the image
docker pull "$DOCKER_IMAGE"

# Read workspaceFolder
local WORKSPACE_FOLDER="$(json_string '"workspaceFolder"' < "${devcontainer_json}")"
# Read remoteUser
local REMOTE_USER="$(json_string '"remoteUser"' < "${devcontainer_json}")"
# If remoteUser isn't in our devcontainer.json, read it from the image's "devcontainer.metadata" label
if test -z "${REMOTE_USER:-}"; then
REMOTE_USER="$(devcontainer_metadata_json | json_string "-1" '"remoteUser"')"
fi
# Read runArgs
local -a RUN_ARGS="($(json_array '"runArgs"' < "${devcontainer_json}"))"
# Read initializeCommand
local -a INITIALIZE_COMMAND="($(json_array '"initializeCommand"' < "${devcontainer_json}"))"
# Read containerEnv
local -a ENV_VARS="($(json_map '"containerEnv"' < "${devcontainer_json}" | sed -r 's/(.*)=(.*)/--env \1=\2/'))"
# Read mounts
local -a MOUNTS="($(
tee < "${devcontainer_json}" \
1>/dev/null \
>(json_array '"mounts"') \
>(json_string '"workspaceMount"') \
| xargs -r -I% echo --mount '%'
))"

###
# Update run arguments and container environment variables
###

# Only pass `-it` if the shell is a tty
if ! ${CI:-'false'} && tty >/dev/null 2>&1 && (exec </dev/tty); then
RUN_ARGS+=("-it")
fi

for flag in rm init; do
if [[ " ${RUN_ARGS[*]} " != *" --${flag} "* ]]; then
RUN_ARGS+=("--${flag}")
fi
done

# Prefer the user-provided --gpus argument
if test -n "${gpu_request:-}"; then
RUN_ARGS+=(--gpus "${gpu_request}")
else
# Otherwise read and infer from hostRequirements.gpu
local GPU_REQUEST="$(json_string '"hostRequirements"' '"gpu"' < "${devcontainer_json}")"
if test "${GPU_REQUEST:-false}" = true; then
RUN_ARGS+=(--gpus all)
elif test "${GPU_REQUEST:-false}" = optional && \
command -v nvidia-container-runtime >/dev/null 2>&1; then
RUN_ARGS+=(--gpus all)
fi
fi

RUN_ARGS+=(--workdir "${WORKSPACE_FOLDER:-/home/coder/cccl}")

if test -n "${REMOTE_USER:-}"; then
ENV_VARS+=(--env NEW_UID="$(id -u)")
ENV_VARS+=(--env NEW_GID="$(id -g)")
ENV_VARS+=(--env REMOTE_USER="$REMOTE_USER")
RUN_ARGS+=(-u root:root)
RUN_ARGS+=(--entrypoint "${WORKSPACE_FOLDER:-/home/coder/cccl}/.devcontainer/docker-entrypoint.sh")
fi

if test -n "${SSH_AUTH_SOCK:-}"; then
ENV_VARS+=(--env "SSH_AUTH_SOCK=/tmp/ssh-auth-sock")
MOUNTS+=(--mount "source=${SSH_AUTH_SOCK},target=/tmp/ssh-auth-sock,type=bind")
fi

# Append user-provided volumes
if test -v volumes && test ${#volumes[@]} -gt 0; then
MOUNTS+=("${volumes[@]}")
fi

# Append user-provided envvars
if test -v env_vars && test ${#env_vars[@]} -gt 0; then
ENV_VARS+=("${env_vars[@]}")
fi

# Run the initialize command before starting the container
if test "${#INITIALIZE_COMMAND[@]}" -gt 0; then
eval "${INITIALIZE_COMMAND[*]@Q}"
fi

exec docker run \
"${RUN_ARGS[@]}" \
"${ENV_VARS[@]}" \
"${MOUNTS[@]}" \
"${DOCKER_IMAGE}" \
"$@"
}

launch_vscode() {
local -;
set -euo pipefail;
# Since Visual Studio Code allows only one instance per `devcontainer.json`,
# this code prepares a unique temporary directory structure for each launch of a devcontainer.
# By doing so, it ensures that multiple instances of the same environment can be run
Expand All @@ -85,7 +260,7 @@ launch_vscode() {
mkdir -p "${tmpdir}"
mkdir -p "${tmpdir}/.devcontainer"
cp -arL "${path}/devcontainer.json" "${tmpdir}/.devcontainer"
sed -i 's@\\${localWorkspaceFolder}@$(pwd)@g' "${tmpdir}/.devcontainer/devcontainer.json"
sed -i "s@\\${localWorkspaceFolder}@$(pwd)@g" "${tmpdir}/.devcontainer/devcontainer.json"
local path="${tmpdir}"
local hash="$(echo -n "${path}" | xxd -pu - | tr -d '[:space:]')"
local url="vscode://vscode-remote/dev-container+${hash}/home/coder/cccl"
Expand All @@ -105,7 +280,9 @@ launch_vscode() {
}

main() {
parse_options "$@"
local -a unparsed;
parse_options "$@" unparsed;
set -- "${unparsed[@]}";

# If no CTK/Host compiler are provided, just use the default environment
if [[ -z ${cuda_version:-} ]] && [[ -z ${host_compiler:-} ]]; then
Expand All @@ -120,7 +297,7 @@ main() {
fi

if ${docker_mode:-'false'}; then
launch_docker
launch_docker "$@"
else
launch_vscode
fi
Expand Down
17 changes: 17 additions & 0 deletions .devcontainer/nvbench-entrypoint.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
#!/usr/bin/env bash
# Inner entrypoint: run the devcontainer lifecycle hooks, then replace this
# shell with the requested command (or an interactive login bash if none).

# shellcheck disable=SC1091

set -e

# Devcontainer lifecycle hooks (provided by the rapidsai/devcontainers image).
devcontainer-utils-post-create-command
devcontainer-utils-init-git
devcontainer-utils-post-attach-command

cd /home/coder/nvbench/

# Hand off: exec the user's command if one was given, else an interactive shell.
if (( $# > 0 )); then
  exec "$@"
else
  exec /bin/bash -li
fi
6 changes: 0 additions & 6 deletions .github/actions/configure_cccl_sccache/action.yml
Original file line number Diff line number Diff line change
Expand Up @@ -3,12 +3,6 @@ description: "Set up AWS credentials and environment variables for sccache"
runs:
using: "composite"
steps:
- name: Get AWS credentials for sccache bucket
uses: aws-actions/configure-aws-credentials@v2
with:
role-to-assume: arn:aws:iam::279114543810:role/gha-oidc-NVIDIA
aws-region: us-east-2
role-duration-seconds: 43200 # 12 hours
- name: Set environment variables
run: |
echo "SCCACHE_BUCKET=rapids-sccache-devs" >> $GITHUB_ENV
Expand Down
Loading

0 comments on commit af3d6a1

Please sign in to comment.