diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 6476e39b..7e48f006 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -33,17 +33,11 @@ jobs:
test: kerberos
- image: gpdb-6
test: gpdb-6
- - image: hdp2.6-hive-kerberized-2
- image: hive3.1-hive
platforms: linux/amd64,linux/arm64
test: hive3.1-hive
- - image: hdp2.6-hive-kerberized
- test: hdp2.6-hive
- image: hdp3.1-hive-kerberized
test: hdp3.1-hive
- - image: cdh5.15-hive-kerberized
- test: cdh5.15-hive
- - image: cdh5.15-hive-kerberized-kms
# TODO add test https://github.com/trinodb/trino/issues/14543
- image: phoenix5
platforms: linux/amd64,linux/arm64
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 094a41a5..c2e91aea 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -73,13 +73,7 @@ jobs:
)
single_arch=(
testing/accumulo
- testing/cdh5.15-hive
- testing/cdh5.15-hive-kerberized
- testing/cdh5.15-hive-kerberized-kms
testing/gpdb-6
- testing/hdp2.6-hive
- testing/hdp2.6-hive-kerberized
- testing/hdp2.6-hive-kerberized-2
testing/hdp3.1-hive
testing/hdp3.1-hive-kerberized
testing/hdp3.1-hive-kerberized-2
diff --git a/Makefile b/Makefile
index f8bd0926..2a62f1b4 100644
--- a/Makefile
+++ b/Makefile
@@ -34,7 +34,7 @@ FLAGDIR=$(BUILDDIR)/flags
# Also, find on Mac doesn't support -exec {} +
#
# Note that the generated .d files also include reverse dependencies so that
-# you can e.g. `make hdp2.6-base.dependants' and hdp2.6-hive, and all of its
+# you can e.g. run `make hdp3.1-base.dependants', and hdp3.1-hive and all of its
# dependent images will be rebuilt. This is used in .travis.yml to break the
# build up into pieces based on image that have a large number of direct and
# indirect children.
diff --git a/README.md b/README.md
index 6689ec27..93812dd7 100644
--- a/README.md
+++ b/README.md
@@ -3,9 +3,9 @@
## Docker Image Names
The docker images in this repository are expected to be given names of the form
-testing/hdp2.6-hive. The Dockerfile and other files needed to build the
-testing/hdp2.6-hive image are located in the directory
-testing/hdp2.6-hive.
+testing/hdp3.1-hive. The Dockerfile and other files needed to build the
+testing/hdp3.1-hive image are located in the directory
+testing/hdp3.1-hive.
Generally speaking, the images should *not* be built manually with docker
build.
@@ -13,17 +13,17 @@ build.
## Building docker images
The docker images should be built using `make`. To build the docker image named
-`testing/hdp2.6-hive`, run `make testing/hdp2.6-hive`. Make will build
+`testing/hdp3.1-hive`, run `make testing/hdp3.1-hive`. Make will build
the image and its dependencies in the correct order.
If you want to build a base image and all the images depending on it,
you can use the `*.dependants` targets. E.g.
```
-make testing/hdp2.6-base.dependants
+make testing/hdp3.1-base.dependants
```
-will build the `hdp2.6-base` and all the images depending on it (transitively).
+will build the `hdp3.1-base` image and all the images depending on it (transitively).
## Releasing (pushing) docker image
@@ -36,9 +36,9 @@ It will:
All of the docker images in the repository share the same version number. This
is because most of the images depend on a parent image that is also in the
-repository (e.g. testing/hdp2.6-hive is FROM testing/hdp2.6-base),
-or are meant to be used together in testing (testing/hdp2.6-hive and
-testing/hdp2.6-hive-kerberized).
+repository (e.g. testing/hdp3.1-hive is FROM testing/hdp3.1-base),
+or are meant to be used together in testing (testing/hdp3.1-hive and
+testing/hdp3.1-hive-kerberized).
Having all of the images on the same version number make troubleshooting easy:
If all of the docker images you are using have the same version number then
diff --git a/archived/cdh5.12-hive-jdk8/Dockerfile b/archived/cdh5.12-hive-jdk8/Dockerfile
deleted file mode 100644
index e9f7b7ba..00000000
--- a/archived/cdh5.12-hive-jdk8/Dockerfile
+++ /dev/null
@@ -1,77 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-FROM testing/centos6-oj8:unlabelled
-
-# Change default timezone
-RUN ln -snf "/usr/share/zoneinfo/Asia/Kathmandu" /etc/localtime && echo "Asia/Kathmandu" > /etc/timezone
-
-# Setup CDH repo, pin the CDH distribution to a concrete version
-RUN wget -nv https://archive.cloudera.com/cdh5/redhat/6/x86_64/cdh/cloudera-cdh5.repo -P /etc/yum.repos.d \
- && rpm --import https://archive.cloudera.com/cdh5/redhat/6/x86_64/cdh/RPM-GPG-KEY-cloudera \
- && sed -i '/^baseurl=/c\baseurl=https://archive.cloudera.com/cdh5/redhat/6/x86_64/cdh/5.12.2/' /etc/yum.repos.d/cloudera-cdh5.repo
-
-# Install Hadoop, Hive (w/ MySQL)
-RUN \
- set -xeu && \
- yum install -y \
- hadoop-hdfs-namenode \
- hadoop-hdfs-secondarynamenode \
- hadoop-hdfs-datanode \
- \
- hadoop-mapreduce \
- \
- hadoop-yarn-resourcemanager \
- hadoop-yarn-nodemanager \
- \
- hadoop-client \
- hadoop-conf-pseudo \
- \
- hive \
- hive-metastore \
- hive-server2 \
- \
- mysql-server mysql-connector-java \
- \
- # Cleanup
- && yum -y clean all && rm -rf /tmp/* /var/tmp/* \
- && ln -s /usr/share/java/mysql-connector-java.jar /usr/lib/hive/lib/mysql-connector-java.jar
-
-# Copy configuration files
-COPY ./files /
-
-# Run setup script
-RUN /root/setup.sh \
- && rm -rf /tmp/* /var/tmp/*
-
-# Setup sock proxy
-RUN yum install -y openssh openssh-clients openssh-server && yum -y clean all
-RUN ssh-keygen -t rsa -b 4096 -C "automation@teradata.com" -N "" -f /root/.ssh/id_rsa \
- && ssh-keygen -t rsa -b 4096 -N "" -f /etc/ssh/ssh_host_rsa_key \
- && ssh-keygen -t dsa -b 1024 -N "" -f /etc/ssh/ssh_host_dsa_key \
- && cp /root/.ssh/id_rsa.pub /root/.ssh/authorized_keys
-RUN chmod 755 /root && chmod 700 /root/.ssh
-RUN passwd --unlock root
-
-# HDFS ports
-EXPOSE 1004 1006 8020 50010 50020 50070 50075 50470
-
-# YARN ports
-EXPOSE 8030 8031 8032 8033 8040 8041 8042 8088 10020 19888
-
-# HIVE port
-EXPOSE 9083 10000
-
-# SOCKS port
-EXPOSE 1180
-
-CMD supervisord -c /etc/supervisord.conf
diff --git a/archived/cdh5.12-hive-jdk8/files/etc/hadoop/conf/capacity-scheduler.xml b/archived/cdh5.12-hive-jdk8/files/etc/hadoop/conf/capacity-scheduler.xml
deleted file mode 100644
index 0e15fb6b..00000000
--- a/archived/cdh5.12-hive-jdk8/files/etc/hadoop/conf/capacity-scheduler.xml
+++ /dev/null
@@ -1,100 +0,0 @@
-
-
-
-
-
- yarn.scheduler.capacity.maximum-applications
- 10000
-
- Maximum number of applications that can be pending and running.
-
-
-
-
- yarn.scheduler.capacity.maximum-am-resource-percent
- 1
-
- Maximum percent of resources in the cluster which can be used to run
- application masters i.e. controls number of concurrent running
- applications.
-
-
-
-
- yarn.scheduler.capacity.root.queues
- default
-
- The queues at the this level (root is the root queue).
-
-
-
-
- yarn.scheduler.capacity.root.default.capacity
- 100
- Default queue target capacity.
-
-
-
- yarn.scheduler.capacity.root.default.maximum-capacity
- 100
-
- The maximum capacity of the default queue.
-
-
-
-
- yarn.scheduler.capacity.root.default.state
- RUNNING
-
- The state of the default queue. State can be one of RUNNING or STOPPED.
-
-
-
-
- yarn.scheduler.capacity.root.default.acl_submit_applications
- *
-
- The ACL of who can submit jobs to the default queue.
-
-
-
-
- yarn.scheduler.capacity.root.default.user-limit-factor
- 1
-
- Default queue user limit a percentage from 0.0 to 1.0.
-
-
-
-
- yarn.scheduler.capacity.root.default.acl_administer_queue
- *
-
- The ACL of who can administer jobs on the default queue.
-
-
-
-
- yarn.scheduler.capacity.node-locality-delay
- -1
-
- Number of missed scheduling opportunities after which the CapacityScheduler
- attempts to schedule rack-local containers.
- Typically this should be set to number of racks in the cluster, this
- feature is disabled by default, set to -1.
-
-
-
-
diff --git a/archived/cdh5.12-hive-jdk8/files/etc/hadoop/conf/core-site.xml b/archived/cdh5.12-hive-jdk8/files/etc/hadoop/conf/core-site.xml
deleted file mode 100644
index 4dbdd062..00000000
--- a/archived/cdh5.12-hive-jdk8/files/etc/hadoop/conf/core-site.xml
+++ /dev/null
@@ -1,97 +0,0 @@
-
-
-
-
-
- fs.defaultFS
- hdfs://hadoop-master:9000
-
-
-
-
- hadoop.proxyuser.oozie.hosts
- *
-
-
- hadoop.proxyuser.oozie.groups
- *
-
-
-
-
- hadoop.proxyuser.httpfs.hosts
- *
-
-
- hadoop.proxyuser.httpfs.groups
- *
-
-
-
-
- hadoop.proxyuser.llama.hosts
- *
-
-
- hadoop.proxyuser.llama.groups
- *
-
-
-
-
- hadoop.proxyuser.hue.hosts
- *
-
-
- hadoop.proxyuser.hue.groups
- *
-
-
-
-
- hadoop.proxyuser.mapred.hosts
- *
-
-
- hadoop.proxyuser.mapred.groups
- *
-
-
-
-
- hadoop.proxyuser.hive.hosts
- *
-
-
-
- hadoop.proxyuser.hive.groups
- *
-
-
-
-
- hadoop.proxyuser.hdfs.groups
- *
-
-
-
- hadoop.proxyuser.hdfs.hosts
- *
-
-
-
diff --git a/archived/cdh5.12-hive-jdk8/files/etc/hadoop/conf/hadoop-env.sh b/archived/cdh5.12-hive-jdk8/files/etc/hadoop/conf/hadoop-env.sh
deleted file mode 100644
index 809937b4..00000000
--- a/archived/cdh5.12-hive-jdk8/files/etc/hadoop/conf/hadoop-env.sh
+++ /dev/null
@@ -1,27 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Set Hadoop-specific environment variables here.
-# Forcing YARN-based mapreduce implementaion.
-# Make sure to comment out if you want to go back to the default or
-# if you want this to be tweakable on a per-user basis
-# export HADOOP_MAPRED_HOME=/usr/lib/hadoop-mapreduce
-
-# The maximum amount of heap to use, in MB. Default is 1000.
-export HADOOP_HEAPSIZE=512
-
-# Extra Java runtime options. Empty by default.
-export HADOOP_NAMENODE_OPTS="$HADOOP_NAMENODE_OPTS -Xmx512m"
-export YARN_OPTS="$YARN_OPTS -Xmx256m"
diff --git a/archived/cdh5.12-hive-jdk8/files/etc/hadoop/conf/hdfs-site.xml b/archived/cdh5.12-hive-jdk8/files/etc/hadoop/conf/hdfs-site.xml
deleted file mode 100644
index 9d1e71aa..00000000
--- a/archived/cdh5.12-hive-jdk8/files/etc/hadoop/conf/hdfs-site.xml
+++ /dev/null
@@ -1,33 +0,0 @@
-
-
-
-
-
- dfs.namenode.name.dir
- /var/lib/hadoop-hdfs/cache/name/
-
-
-
- dfs.datanode.data.dir
- /var/lib/hadoop-hdfs/cache/data/
-
-
-
- fs.viewfs.mounttable.hadoop-viewfs.link./default
- hdfs://hadoop-master:9000/user/hive/warehouse
-
-
-
diff --git a/archived/cdh5.12-hive-jdk8/files/etc/hadoop/conf/mapred-site.xml b/archived/cdh5.12-hive-jdk8/files/etc/hadoop/conf/mapred-site.xml
deleted file mode 100644
index b99d8620..00000000
--- a/archived/cdh5.12-hive-jdk8/files/etc/hadoop/conf/mapred-site.xml
+++ /dev/null
@@ -1,41 +0,0 @@
-
-
-
-
-
- mapred.job.tracker
- hadoop-master:8021
-
-
-
- mapreduce.jobhistory.address
- hadoop-master:10020
-
-
-
- mapreduce.jobhistory.webapp.address
- hadoop-master:19888
-
-
-
- To set the value of tmp directory for map and reduce tasks.
- mapreduce.task.tmp.dir
- /var/lib/hadoop-mapreduce/cache/${user.name}/tasks
-
-
-
diff --git a/archived/cdh5.12-hive-jdk8/files/etc/hadoop/conf/yarn-site.xml b/archived/cdh5.12-hive-jdk8/files/etc/hadoop/conf/yarn-site.xml
deleted file mode 100644
index 5f3757c5..00000000
--- a/archived/cdh5.12-hive-jdk8/files/etc/hadoop/conf/yarn-site.xml
+++ /dev/null
@@ -1,109 +0,0 @@
-
-
-
-
-
- yarn.nodemanager.aux-services
- mapreduce_shuffle
-
-
-
- yarn.nodemanager.aux-services.mapreduce_shuffle.class
- org.apache.hadoop.mapred.ShuffleHandler
-
-
-
- yarn.log-aggregation-enable
- true
-
-
-
- yarn.dispatcher.exit-on-error
- true
-
-
-
- List of directories to store localized files in.
- yarn.nodemanager.local-dirs
- /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir
-
-
-
- Where to store container logs.
- yarn.nodemanager.log-dirs
- /var/log/hadoop-yarn/containers
-
-
-
- Where to aggregate logs to.
- yarn.nodemanager.remote-app-log-dir
- /var/log/hadoop-yarn/apps
-
-
-
- Classpath for typical applications.
- yarn.application.classpath
-
- /etc/hadoop/conf,
- /usr/lib/hadoop/*,
- /usr/lib/hadoop/lib/*,
- /usr/lib/hadoop-hdfs/*,
- /usr/lib/hadoop-hdfs/lib/*,
- /usr/lib/hadoop-yarn/*,
- /usr/lib/hadoop-yarn/lib/*,
- /usr/lib/hadoop-mapreduce/*,
- /usr/lib/hadoop-mapreduce/lib/*
-
-
-
-
- yarn.resourcemanager.hostname
- hadoop-master
-
-
-
- yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage
- 100
-
-
-
- yarn.nodemanager.pmem-check-enabled
- false
-
-
-
- yarn.nodemanager.vmem-check-enabled
- false
-
-
-
- yarn.nodemanager.resource.memory.enforced
- false
-
-
-
- yarn.nodemanager.elastic-memory-control.enabled
- false
-
-
-
- yarn.log.server.url
- http://hadoop-master:19888/jobhistory/logs
-
-
-
diff --git a/archived/cdh5.12-hive-jdk8/files/etc/hive/conf/hive-env.sh b/archived/cdh5.12-hive-jdk8/files/etc/hive/conf/hive-env.sh
deleted file mode 100644
index 034db442..00000000
--- a/archived/cdh5.12-hive-jdk8/files/etc/hive/conf/hive-env.sh
+++ /dev/null
@@ -1 +0,0 @@
-export HADOOP_OPTS="$HADOOP_OPTS -Dhive.root.logger=INFO,console"
diff --git a/archived/cdh5.12-hive-jdk8/files/etc/hive/conf/hive-site.xml b/archived/cdh5.12-hive-jdk8/files/etc/hive/conf/hive-site.xml
deleted file mode 100644
index 4dfaa2a5..00000000
--- a/archived/cdh5.12-hive-jdk8/files/etc/hive/conf/hive-site.xml
+++ /dev/null
@@ -1,67 +0,0 @@
-
-
-
-
-
- javax.jdo.option.ConnectionURL
- jdbc:mysql://localhost/metastore
- the URL of the MySQL database
-
-
-
- javax.jdo.option.ConnectionDriverName
- com.mysql.jdbc.Driver
-
-
-
- javax.jdo.option.ConnectionUserName
- root
-
-
-
- javax.jdo.option.ConnectionPassword
- root
-
-
-
- datanucleus.autoCreateSchema
- false
-
-
-
- datanucleus.fixedDatastore
- true
-
-
-
- datanucleus.autoStartMechanism
- SchemaTable
-
-
-
- hive.security.authorization.createtable.owner.grants
- ALL
- The set of privileges automatically granted to the owner whenever a table gets created.
-
-
-
- hive.users.in.admin.role
- hdfs,hive
-
-
-
diff --git a/archived/cdh5.12-hive-jdk8/files/etc/supervisord.conf b/archived/cdh5.12-hive-jdk8/files/etc/supervisord.conf
deleted file mode 100644
index 2ac8dba5..00000000
--- a/archived/cdh5.12-hive-jdk8/files/etc/supervisord.conf
+++ /dev/null
@@ -1,21 +0,0 @@
-[supervisord]
-logfile = /var/log/supervisord.log
-logfile_maxbytes = 50MB
-logfile_backups=10
-loglevel = info
-pidfile = /var/run/supervisord.pid
-nodaemon = true
-directory = /tmp
-strip_ansi = false
-
-[unix_http_server]
-file = /tmp/supervisor.sock
-
-[rpcinterface:supervisor]
-supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
-
-[supervisorctl]
-serverurl = unix:///tmp/supervisor.sock
-
-[include]
-files = /etc/supervisord.d/*.conf
diff --git a/archived/cdh5.12-hive-jdk8/files/etc/supervisord.d/hdfs-datanode.conf b/archived/cdh5.12-hive-jdk8/files/etc/supervisord.d/hdfs-datanode.conf
deleted file mode 100644
index 87fbcbed..00000000
--- a/archived/cdh5.12-hive-jdk8/files/etc/supervisord.d/hdfs-datanode.conf
+++ /dev/null
@@ -1,8 +0,0 @@
-[program:hdfs-datanode]
-command=hdfs datanode
-startsecs=2
-stopwaitsecs=10
-user=hdfs
-redirect_stderr=true
-stdout_logfile=/var/log/hadoop-hdfs/hadoop-hdfs-datanode.log
-autostart=true
diff --git a/archived/cdh5.12-hive-jdk8/files/etc/supervisord.d/hdfs-namenode.conf b/archived/cdh5.12-hive-jdk8/files/etc/supervisord.d/hdfs-namenode.conf
deleted file mode 100644
index 0d8de8db..00000000
--- a/archived/cdh5.12-hive-jdk8/files/etc/supervisord.d/hdfs-namenode.conf
+++ /dev/null
@@ -1,8 +0,0 @@
-[program:hdfs-namenode]
-command=hdfs namenode
-startsecs=2
-stopwaitsecs=10
-user=hdfs
-redirect_stderr=true
-stdout_logfile=/var/log/hadoop-hdfs/hadoop-hdfs-namenode.log
-autostart=true
diff --git a/archived/cdh5.12-hive-jdk8/files/etc/supervisord.d/hive-metastore.conf b/archived/cdh5.12-hive-jdk8/files/etc/supervisord.d/hive-metastore.conf
deleted file mode 100644
index f5aaf8ca..00000000
--- a/archived/cdh5.12-hive-jdk8/files/etc/supervisord.d/hive-metastore.conf
+++ /dev/null
@@ -1,9 +0,0 @@
-[program:hive-metastore]
-# Add `--debug:port=5006` for debugging
-command=hive --service metastore
-startsecs=2
-stopwaitsecs=10
-user=hive
-redirect_stderr=true
-stdout_logfile=/var/log/hive/hive-metastore.log
-autostart=true
diff --git a/archived/cdh5.12-hive-jdk8/files/etc/supervisord.d/hive-server2.conf b/archived/cdh5.12-hive-jdk8/files/etc/supervisord.d/hive-server2.conf
deleted file mode 100644
index d090496f..00000000
--- a/archived/cdh5.12-hive-jdk8/files/etc/supervisord.d/hive-server2.conf
+++ /dev/null
@@ -1,8 +0,0 @@
-[program:hive-server2]
-command=hive --service hiveserver2
-startsecs=2
-stopwaitsecs=10
-user=hive
-redirect_stderr=true
-stdout_logfile=/var/log/hive/hive-server2.log
-autostart=true
diff --git a/archived/cdh5.12-hive-jdk8/files/etc/supervisord.d/mysql-metastore.conf b/archived/cdh5.12-hive-jdk8/files/etc/supervisord.d/mysql-metastore.conf
deleted file mode 100644
index e95544e5..00000000
--- a/archived/cdh5.12-hive-jdk8/files/etc/supervisord.d/mysql-metastore.conf
+++ /dev/null
@@ -1,8 +0,0 @@
-[program:mysql-metastore]
-command=/usr/bin/pidproxy /var/run/mysqld/mysqld.pid /usr/bin/mysqld_safe
-startsecs=2
-stopwaitsecs=10
-user=mysql
-redirect_stderr=true
-stdout_logfile=/var/log/mysql/mysql.log
-autostart=true
diff --git a/archived/cdh5.12-hive-jdk8/files/etc/supervisord.d/socks-proxy.conf b/archived/cdh5.12-hive-jdk8/files/etc/supervisord.d/socks-proxy.conf
deleted file mode 100644
index 43602d26..00000000
--- a/archived/cdh5.12-hive-jdk8/files/etc/supervisord.d/socks-proxy.conf
+++ /dev/null
@@ -1,9 +0,0 @@
-[program:socks-proxy]
-command=/usr/bin/ssh -o StrictHostKeyChecking=no -v -N -D 0.0.0.0:1180 localhost
-startsecs=2
-stopwaitsecs=10
-startretries=30
-user=root
-redirect_stderr=true
-stdout_logfile=/var/log/socks-proxy
-autostart=true
diff --git a/archived/cdh5.12-hive-jdk8/files/etc/supervisord.d/sshd.conf b/archived/cdh5.12-hive-jdk8/files/etc/supervisord.d/sshd.conf
deleted file mode 100644
index 3930b4c0..00000000
--- a/archived/cdh5.12-hive-jdk8/files/etc/supervisord.d/sshd.conf
+++ /dev/null
@@ -1,9 +0,0 @@
-[program:sshd]
-command=/usr/sbin/sshd -D -e
-startsecs=2
-stopwaitsecs=10
-startretries=30
-user=root
-redirect_stderr=true
-stdout_logfile=/var/log/sshd
-autostart=true
diff --git a/archived/cdh5.12-hive-jdk8/files/etc/supervisord.d/yarn-nodemanager.conf b/archived/cdh5.12-hive-jdk8/files/etc/supervisord.d/yarn-nodemanager.conf
deleted file mode 100644
index 8f81e3c7..00000000
--- a/archived/cdh5.12-hive-jdk8/files/etc/supervisord.d/yarn-nodemanager.conf
+++ /dev/null
@@ -1,8 +0,0 @@
-[program:yarn-nodemanager]
-command=yarn nodemanager
-startsecs=2
-stopwaitsecs=10
-user=yarn
-redirect_stderr=true
-stdout_logfile=/var/log/hadoop-yarn/hadoop-yarn-nodemanager.log
-autostart=true
diff --git a/archived/cdh5.12-hive-jdk8/files/etc/supervisord.d/yarn-resourcemanager.conf b/archived/cdh5.12-hive-jdk8/files/etc/supervisord.d/yarn-resourcemanager.conf
deleted file mode 100644
index 2ef16026..00000000
--- a/archived/cdh5.12-hive-jdk8/files/etc/supervisord.d/yarn-resourcemanager.conf
+++ /dev/null
@@ -1,8 +0,0 @@
-[program:yarn-resourcemanager]
-command=yarn resourcemanager
-startsecs=2
-stopwaitsecs=10
-user=yarn
-redirect_stderr=true
-stdout_logfile=/var/log/hadoop-yarn/hadoop-yarn-resourcemanager.log
-autostart=true
diff --git a/archived/cdh5.12-hive-jdk8/files/root/setup.sh b/archived/cdh5.12-hive-jdk8/files/root/setup.sh
deleted file mode 100755
index 365f9ff4..00000000
--- a/archived/cdh5.12-hive-jdk8/files/root/setup.sh
+++ /dev/null
@@ -1,50 +0,0 @@
-#!/bin/bash -ex
-
-# make file system hostname resolvable
-echo "127.0.0.1 hadoop-master" >> /etc/hosts
-
-# format namenode
-chown hdfs:hdfs /var/lib/hadoop-hdfs/cache/
-
-# workaround for 'could not open session' bug as suggested here:
-# https://github.com/docker/docker/issues/7056#issuecomment-49371610
-rm -f /etc/security/limits.d/hdfs.conf
-su -c "echo 'N' | hdfs namenode -format" hdfs
-
-# start hdfs
-su -c "hdfs datanode 2>&1 > /var/log/hadoop-hdfs/hadoop-hdfs-datanode.log" hdfs&
-su -c "hdfs namenode 2>&1 > /var/log/hadoop-hdfs/hadoop-hdfs-namenode.log" hdfs&
-
-# wait for process starting
-sleep 10
-
-# remove a broken symlink created by cdh installer so that init-hdfs.sh does no blow up on it
-# (hbase-annotations.jar seems not needed in our case)
-rm /usr/lib/hive/lib/hbase-annotations.jar
-
-# 4 exec cloudera hdfs init script
-/usr/lib/hadoop/libexec/init-hdfs.sh
-
-# init hive directories
-su -s /bin/bash hdfs -c '/usr/bin/hadoop fs -mkdir /user/hive/warehouse'
-su -s /bin/bash hdfs -c '/usr/bin/hadoop fs -chmod 1777 /user/hive/warehouse'
-su -s /bin/bash hdfs -c '/usr/bin/hadoop fs -chown hive /user/hive/warehouse'
-
-# stop hdfs
-killall java
-
-# setup metastore
-mysql_install_db
-
-/usr/bin/mysqld_safe &
-sleep 10s
-
-cd /usr/lib/hive/scripts/metastore/upgrade/mysql/
-echo "GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' WITH GRANT OPTION; FLUSH PRIVILEGES;" | mysql
-echo "CREATE DATABASE metastore; USE metastore; SOURCE hive-schema-1.1.0.mysql.sql;" | mysql
-/usr/bin/mysqladmin -u root password 'root'
-
-killall mysqld
-sleep 10s
-mkdir /var/log/mysql/
-chown mysql:mysql /var/log/mysql/
diff --git a/archived/cdh5.12-hive-kerberized/Dockerfile b/archived/cdh5.12-hive-kerberized/Dockerfile
deleted file mode 100644
index 2cc3c74f..00000000
--- a/archived/cdh5.12-hive-kerberized/Dockerfile
+++ /dev/null
@@ -1,95 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-FROM testing/cdh5.12-hive:unlabelled
-
-# INSTALL KERBEROS
-RUN yum install -y krb5-libs krb5-server krb5-workstation
-
-# COPY CONFIGURATION
-COPY ./files /
-
-# Apply configuration overrides and remove them so they don't get reapplied
-RUN /usr/local/bin/apply-all-site-xml-overrides /overrides && rm -Rf /overrides
-
-# CREATE KERBEROS DATABASE
-RUN /usr/sbin/kdb5_util create -s -P password
-
-# ADD HADOOP PRINCIPALS
-RUN /usr/sbin/kadmin.local -q "addprinc -randkey hdfs/hadoop-master@LABS.TERADATA.COM" \
- && /usr/sbin/kadmin.local -q "addprinc -randkey mapred/hadoop-master@LABS.TERADATA.COM" \
- && /usr/sbin/kadmin.local -q "addprinc -randkey yarn/hadoop-master@LABS.TERADATA.COM" \
- && /usr/sbin/kadmin.local -q "addprinc -randkey HTTP/hadoop-master@LABS.TERADATA.COM"
-
-# CREATE HADOOP KEYTAB FILES
-RUN /usr/sbin/kadmin.local -q "xst -norandkey -k /etc/hadoop/conf/hdfs.keytab hdfs/hadoop-master HTTP/hadoop-master" \
- && /usr/sbin/kadmin.local -q "xst -norandkey -k /etc/hadoop/conf/mapred.keytab mapred/hadoop-master HTTP/hadoop-master" \
- && /usr/sbin/kadmin.local -q "xst -norandkey -k /etc/hadoop/conf/yarn.keytab yarn/hadoop-master HTTP/hadoop-master" \
- && /usr/sbin/kadmin.local -q "xst -norandkey -k /etc/hadoop/conf/HTTP.keytab HTTP/hadoop-master"
-RUN chown hdfs:hadoop /etc/hadoop/conf/hdfs.keytab \
- && chown mapred:hadoop /etc/hadoop/conf/mapred.keytab \
- && chown yarn:hadoop /etc/hadoop/conf/yarn.keytab \
- && chown hdfs:hadoop /etc/hadoop/conf/HTTP.keytab \
- && chmod 644 /etc/hadoop/conf/*.keytab
-
-# CREATE HIVE PRINCIPAL AND KEYTAB
-RUN /usr/sbin/kadmin.local -q "addprinc -randkey hive/hadoop-master@LABS.TERADATA.COM" \
- && /usr/sbin/kadmin.local -q "xst -norandkey -k /etc/hive/conf/hive.keytab hive/hadoop-master"
-RUN chown hive:hadoop /etc/hive/conf/hive.keytab \
- && chmod 644 /etc/hive/conf/hive.keytab
-
-# YARN SECURITY SETTINGS
-RUN chmod 6050 /etc/hadoop/conf/container-executor.cfg
-
-# Create legacy Presto and updated Trino principals and add them to keytabs
-RUN set -xeu && \
- for hostname in presto-master trino-coordinator presto-worker trino-worker presto-worker-1 trino-worker-1 presto-worker-2 trino-worker-2; do \
- /usr/sbin/kadmin.local -q "addprinc -randkey presto-server/${hostname}.docker.cluster@LABS.TERADATA.COM" \
- && /usr/sbin/kadmin.local -q "addprinc -randkey trino-server/${hostname}.docker.cluster@LABS.TERADATA.COM" \
- && /usr/sbin/kadmin.local -q "addprinc -randkey HTTP/${hostname}.docker.cluster@LABS.TERADATA.COM" \
- && /usr/sbin/kadmin.local -q "addprinc -randkey presto-client/${hostname}.docker.cluster@LABS.TERADATA.COM" \
- && /usr/sbin/kadmin.local -q "addprinc -randkey trino-client/${hostname}.docker.cluster@LABS.TERADATA.COM" \
- && /usr/sbin/kadmin.local -q "addprinc -randkey hive/${hostname}.docker.cluster@LABS.TERADATA.COM" \
- && mkdir -p /etc/trino/conf \
- && /usr/sbin/kadmin.local -q "xst -norandkey -k /etc/trino/conf/presto-server.keytab presto-server/${hostname}.docker.cluster" \
- && /usr/sbin/kadmin.local -q "xst -norandkey -k /etc/trino/conf/trino-server.keytab trino-server/${hostname}.docker.cluster" \
- && /usr/sbin/kadmin.local -q "xst -norandkey -k /etc/trino/conf/presto-server-HTTP.keytab HTTP/${hostname}.docker.cluster" \
- && /usr/sbin/kadmin.local -q "xst -norandkey -k /etc/trino/conf/trino-client.keytab trino-client/${hostname}.docker.cluster" \
- && /usr/sbin/kadmin.local -q "xst -norandkey -k /etc/trino/conf/presto-client.keytab presto-client/${hostname}.docker.cluster" \
- && /usr/sbin/kadmin.local -q "xst -norandkey -k /etc/trino/conf/hive-presto-master.keytab hive/${hostname}.docker.cluster"; \
- done && echo "OK" && \
- chmod 644 /etc/trino/conf/*.keytab
-
-# CREATE SSL KEYSTORE
-RUN keytool -genkeypair \
- -alias presto \
- -keyalg RSA \
- -keystore /etc/trino/conf/keystore.jks \
- -keypass password \
- -storepass password \
- -dname "CN=presto-master, OU=, O=, L=, S=, C=" \
- -validity 100000 && \
- keytool -genkeypair \
- -alias trino \
- -keyalg RSA \
- -keystore /etc/trino/conf/keystore.jks \
- -keypass password \
- -storepass password \
- -dname "CN=trino-coordinator, OU=, O=, L=, S=, C=" \
- -validity 100000
-RUN chmod 644 /etc/trino/conf/keystore.jks
-
-# EXPOSE KERBEROS PORTS
-EXPOSE 88
-EXPOSE 749
-
-CMD supervisord -c /etc/supervisord.conf
diff --git a/archived/cdh5.12-hive-kerberized/files/etc/hadoop/conf/container-executor.cfg b/archived/cdh5.12-hive-kerberized/files/etc/hadoop/conf/container-executor.cfg
deleted file mode 100644
index b790f86a..00000000
--- a/archived/cdh5.12-hive-kerberized/files/etc/hadoop/conf/container-executor.cfg
+++ /dev/null
@@ -1,5 +0,0 @@
-yarn.nodemanager.local-dirs=/var/lib/hadoop-yarn/cache/yarn/nm-local-dir
-yarn.nodemanager.linux-container-executor.group=yarn
-yarn.nodemanager.log-dirs=/var/log/hadoop-yarn/containers
-banned.users=yarn,mapred,bin
-min.user.id=400
diff --git a/archived/cdh5.12-hive-kerberized/files/etc/hadoop/conf/taskcontroller.cfg b/archived/cdh5.12-hive-kerberized/files/etc/hadoop/conf/taskcontroller.cfg
deleted file mode 100644
index 4f09350d..00000000
--- a/archived/cdh5.12-hive-kerberized/files/etc/hadoop/conf/taskcontroller.cfg
+++ /dev/null
@@ -1,4 +0,0 @@
-hadoop.log.dir=/var/log/hadoop-mapreduce
-mapreduce.tasktracker.group=mapred
-banned.users=mapred,bin
-min.user.id=400
diff --git a/archived/cdh5.12-hive-kerberized/files/etc/hive/conf/hiveserver2-site.xml b/archived/cdh5.12-hive-kerberized/files/etc/hive/conf/hiveserver2-site.xml
deleted file mode 100644
index 41280243..00000000
--- a/archived/cdh5.12-hive-kerberized/files/etc/hive/conf/hiveserver2-site.xml
+++ /dev/null
@@ -1,38 +0,0 @@
-
-
-
-
-
-
- hive.security.authenticator.manager
- org.apache.hadoop.hive.ql.security.SessionStateUserAuthenticator
-
-
-
-
- hive.security.authorization.manager
- org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory
- SQL standards based Hive authorization
-
-
-
- hive.security.authorization.enabled
- true
-
-
-
diff --git a/archived/cdh5.12-hive-kerberized/files/etc/krb5.conf b/archived/cdh5.12-hive-kerberized/files/etc/krb5.conf
deleted file mode 100644
index 699ebc6b..00000000
--- a/archived/cdh5.12-hive-kerberized/files/etc/krb5.conf
+++ /dev/null
@@ -1,17 +0,0 @@
-[logging]
- default = FILE:/var/log/krb5libs.log
- kdc = FILE:/var/log/krb5kdc.log
- admin_server = FILE:/var/log/kadmind.log
-
-[libdefaults]
- default_realm = LABS.TERADATA.COM
- dns_lookup_realm = false
- dns_lookup_kdc = false
- forwardable = true
- allow_weak_crypto = true
-
-[realms]
- LABS.TERADATA.COM = {
- kdc = hadoop-master
- admin_server = hadoop-master
- }
diff --git a/archived/cdh5.12-hive-kerberized/files/etc/supervisord.d/kdc.conf b/archived/cdh5.12-hive-kerberized/files/etc/supervisord.d/kdc.conf
deleted file mode 100644
index 93f87dc4..00000000
--- a/archived/cdh5.12-hive-kerberized/files/etc/supervisord.d/kdc.conf
+++ /dev/null
@@ -1,15 +0,0 @@
-[program:krb5kdc]
-command=/bin/bash -c "exec /usr/sbin/krb5kdc -r LABS.TERADATA.COM -P /var/run/krb5kdc.pid -n"
-autostart=true
-autorestart=true
-redirect_stderr=true
-stdout_logfile=/dev/stdout
-stdout_logfile_maxbytes=0
-
-[program:kadmind]
-command=/bin/bash -c "exec /usr/sbin/kadmind -r LABS.TERADATA.COM -P /var/run/kadmind.pid -nofork"
-autostart=true
-autorestart=true
-redirect_stderr=true
-stdout_logfile=/dev/stdout
-stdout_logfile_maxbytes=0
diff --git a/archived/cdh5.12-hive-kerberized/files/overrides/etc/hadoop/conf/core-site.xml b/archived/cdh5.12-hive-kerberized/files/overrides/etc/hadoop/conf/core-site.xml
deleted file mode 100644
index 6b7143be..00000000
--- a/archived/cdh5.12-hive-kerberized/files/overrides/etc/hadoop/conf/core-site.xml
+++ /dev/null
@@ -1,42 +0,0 @@
-
-
-
-
-
-
- hadoop.proxyuser.presto-server.groups
- *
-
-
-
- hadoop.proxyuser.presto-server.hosts
- *
-
-
-
-
- hadoop.security.authentication
- kerberos
-
-
-
- hadoop.security.authorization
- true
-
-
-
diff --git a/archived/cdh5.12-hive-kerberized/files/overrides/etc/hadoop/conf/hdfs-site.xml b/archived/cdh5.12-hive-kerberized/files/overrides/etc/hadoop/conf/hdfs-site.xml
deleted file mode 100644
index dff383ab..00000000
--- a/archived/cdh5.12-hive-kerberized/files/overrides/etc/hadoop/conf/hdfs-site.xml
+++ /dev/null
@@ -1,94 +0,0 @@
-
-
-
-
-
-
- dfs.block.access.token.enable
- true
-
-
-
-
- dfs.namenode.keytab.file
- /etc/hadoop/conf/hdfs.keytab
-
-
-
- dfs.namenode.kerberos.principal
- hdfs/hadoop-master@LABS.TERADATA.COM
-
-
-
- dfs.namenode.kerberos.internal.spnego.principal
- HTTP/hadoop-master@LABS.TERADATA.COM
-
-
-
-
- dfs.secondary.namenode.keytab.file
- /etc/hadoop/conf/hdfs.keytab
-
-
-
- dfs.secondary.namenode.kerberos.principal
- hdfs/hadoop-master@LABS.TERADATA.COM
-
-
-
- dfs.secondary.namenode.kerberos.internal.spnego.principal
- HTTP/hadoop-master@LABS.TERADATA.COM
-
-
-
-
- dfs.datanode.keytab.file
- /etc/hadoop/conf/hdfs.keytab
-
-
-
- dfs.datanode.kerberos.principal
- hdfs/hadoop-master@LABS.TERADATA.COM
-
-
-
-
- dfs.webhdfs.enabled
- true
-
-
-
-
- dfs.web.authentication.kerberos.principal
- HTTP/hadoop-master@LABS.TERADATA.COM
-
-
-
- dfs.web.authentication.kerberos.keytab
- /etc/hadoop/conf/HTTP.keytab
-
-
-
- ignore.secure.ports.for.testing
- true
-
-
-
- dfs.http.policy
- HTTP_ONLY
-
-
-
diff --git a/archived/cdh5.12-hive-kerberized/files/overrides/etc/hadoop/conf/mapred-site.xml b/archived/cdh5.12-hive-kerberized/files/overrides/etc/hadoop/conf/mapred-site.xml
deleted file mode 100644
index be1d4224..00000000
--- a/archived/cdh5.12-hive-kerberized/files/overrides/etc/hadoop/conf/mapred-site.xml
+++ /dev/null
@@ -1,63 +0,0 @@
-
-
-
-
-
-
- mapreduce.jobhistory.keytab
- /etc/hadoop/conf/mapred.keytab
-
-
-
- mapreduce.jobhistory.principal
- mapred/hadoop-master@LABS.TERADATA.COM
-
-
-
-
- mapreduce.jobtracker.kerberos.principal
- mapred/hadoop-master@LABS.TERADATA.COM
-
-
-
- mapreduce.jobtracker.keytab.file
- /etc/hadoop/conf/mapred.keytab
-
-
-
-
- mapreduce.tasktracker.kerberos.principal
- mapred/hadoop-master@LABS.TERADATA.COM
-
-
-
- mapreduce.tasktracker.keytab.file
- /etc/hadoop/conf/mapred.keytab
-
-
-
- mapred.task.tracker.task-controller
- org.apache.hadoop.mapred.LinuxTaskController
-
-
-
- mapreduce.tasktracker.group
- mapred
-
-
-
diff --git a/archived/cdh5.12-hive-kerberized/files/overrides/etc/hadoop/conf/yarn-site.xml b/archived/cdh5.12-hive-kerberized/files/overrides/etc/hadoop/conf/yarn-site.xml
deleted file mode 100644
index 8be503a4..00000000
--- a/archived/cdh5.12-hive-kerberized/files/overrides/etc/hadoop/conf/yarn-site.xml
+++ /dev/null
@@ -1,58 +0,0 @@
-
-
-
-
-
-
-
- yarn.resourcemanager.keytab
- /etc/hadoop/conf/yarn.keytab
-
-
-
- yarn.resourcemanager.principal
- yarn/hadoop-master@LABS.TERADATA.COM
-
-
-
-
- yarn.nodemanager.keytab
- /etc/hadoop/conf/yarn.keytab
-
-
-
- yarn.nodemanager.principal
- yarn/hadoop-master@LABS.TERADATA.COM
-
-
-
- yarn.nodemanager.container-executor.class
- org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor
-
-
-
- yarn.nodemanager.linux-container-executor.path
- /usr/lib/hadoop-yarn/bin/container-executor
-
-
-
- yarn.nodemanager.linux-container-executor.group
- yarn
-
-
-
diff --git a/archived/cdh5.12-hive-kerberized/files/overrides/etc/hive/conf/hive-site.xml b/archived/cdh5.12-hive-kerberized/files/overrides/etc/hive/conf/hive-site.xml
deleted file mode 100644
index 5039bc29..00000000
--- a/archived/cdh5.12-hive-kerberized/files/overrides/etc/hive/conf/hive-site.xml
+++ /dev/null
@@ -1,67 +0,0 @@
-
-
-
-
-
-
- hive.server2.authentication
- KERBEROS
-
-
-
- hive.server2.enable.impersonation
- true
-
-
-
- hive.server2.authentication.kerberos.principal
- hive/hadoop-master@LABS.TERADATA.COM
-
-
-
- hive.server2.authentication.kerberos.keytab
- /etc/hive/conf/hive.keytab
-
-
-
- hive.metastore.sasl.enabled
- true
-
-
-
- hive.metastore.kerberos.keytab.file
- /etc/hive/conf/hive.keytab
-
-
-
- hive.metastore.kerberos.principal
- hive/hadoop-master@LABS.TERADATA.COM
-
-
-
-
- hive.security.authorization.manager
- org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdConfOnlyAuthorizerFactory
-
-
-
- hive.security.authorization.task.factory
- org.apache.hadoop.hive.ql.parse.authorization.HiveAuthorizationTaskFactoryImpl
-
-
-
diff --git a/archived/cdh5.12-hive-kerberized/files/var/kerberos/krb5kdc/kadm5.acl b/archived/cdh5.12-hive-kerberized/files/var/kerberos/krb5kdc/kadm5.acl
deleted file mode 100644
index 0530526a..00000000
--- a/archived/cdh5.12-hive-kerberized/files/var/kerberos/krb5kdc/kadm5.acl
+++ /dev/null
@@ -1 +0,0 @@
-*/admin@LABS.TERADATA.COM *
diff --git a/archived/cdh5.12-hive-kerberized/files/var/kerberos/krb5kdc/kdc.conf b/archived/cdh5.12-hive-kerberized/files/var/kerberos/krb5kdc/kdc.conf
deleted file mode 100644
index 2df4f49e..00000000
--- a/archived/cdh5.12-hive-kerberized/files/var/kerberos/krb5kdc/kdc.conf
+++ /dev/null
@@ -1,12 +0,0 @@
-[kdcdefaults]
- kdc_ports = 88
- kdc_tcp_ports = 88
-
-[realms]
- LABS.TERADATA.COM = {
- #master_key_type = aes256-cts
- acl_file = /var/kerberos/krb5kdc/kadm5.acl
- dict_file = /usr/share/dict/words
- admin_keytab = /var/kerberos/krb5kdc/kadm5.keytab
- supported_enctypes = aes128-cts:normal des3-hmac-sha1:normal arcfour-hmac:normal des-hmac-sha1:normal des-cbc-md5:normal des-cbc-crc:normal
- }
diff --git a/archived/cdh5.12-hive/Dockerfile b/archived/cdh5.12-hive/Dockerfile
deleted file mode 100644
index a14b3929..00000000
--- a/archived/cdh5.12-hive/Dockerfile
+++ /dev/null
@@ -1,43 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Cloudera removed an access to CDH5 repositories in April 2021.
-# In order to build new image version we use last released version as base.
-# Previous Dockerfile version is archived in archived/cdh5.12-hive-jdk8.
-FROM ghcr.io/trinodb/testing/cdh5.12-hive:38
-
-COPY ./files /
-
-RUN \
- set -xeu && \
- # Remove unaccessible CDH5 repos so yum is still usable
- rm /etc/yum.repos.d/cloudera-cdh5.repo && \
- # Install Zulu JDK 17.0.4
- rpm -i https://cdn.azul.com/zulu/bin/zulu17.36.13-ca-jdk17.0.4-linux.x86_64.rpm && \
- # Set JDK 17 as a default one
- alternatives --set java /usr/lib/jvm/zulu-17/bin/java && \
- alternatives --set javac /usr/lib/jvm/zulu-17/bin/javac && \
- echo "Done"
-
-# HDFS ports
-EXPOSE 1004 1006 8020 50010 50020 50070 50075 50470
-
-# YARN ports
-EXPOSE 8030 8031 8032 8033 8040 8041 8042 8088 10020 19888
-
-# HIVE port
-EXPOSE 9083 10000
-
-# SOCKS port
-EXPOSE 1180
-
-CMD supervisord -c /etc/supervisord.conf
diff --git a/archived/cdh5.12-hive/files/etc/hadoop/conf/capacity-scheduler.xml b/archived/cdh5.12-hive/files/etc/hadoop/conf/capacity-scheduler.xml
deleted file mode 100644
index 0e15fb6b..00000000
--- a/archived/cdh5.12-hive/files/etc/hadoop/conf/capacity-scheduler.xml
+++ /dev/null
@@ -1,100 +0,0 @@
-
-
-
-
-
- yarn.scheduler.capacity.maximum-applications
- 10000
-
- Maximum number of applications that can be pending and running.
-
-
-
-
- yarn.scheduler.capacity.maximum-am-resource-percent
- 1
-
- Maximum percent of resources in the cluster which can be used to run
- application masters i.e. controls number of concurrent running
- applications.
-
-
-
-
- yarn.scheduler.capacity.root.queues
- default
-
- The queues at the this level (root is the root queue).
-
-
-
-
- yarn.scheduler.capacity.root.default.capacity
- 100
- Default queue target capacity.
-
-
-
- yarn.scheduler.capacity.root.default.maximum-capacity
- 100
-
- The maximum capacity of the default queue.
-
-
-
-
- yarn.scheduler.capacity.root.default.state
- RUNNING
-
- The state of the default queue. State can be one of RUNNING or STOPPED.
-
-
-
-
- yarn.scheduler.capacity.root.default.acl_submit_applications
- *
-
- The ACL of who can submit jobs to the default queue.
-
-
-
-
- yarn.scheduler.capacity.root.default.user-limit-factor
- 1
-
- Default queue user limit a percentage from 0.0 to 1.0.
-
-
-
-
- yarn.scheduler.capacity.root.default.acl_administer_queue
- *
-
- The ACL of who can administer jobs on the default queue.
-
-
-
-
- yarn.scheduler.capacity.node-locality-delay
- -1
-
- Number of missed scheduling opportunities after which the CapacityScheduler
- attempts to schedule rack-local containers.
- Typically this should be set to number of racks in the cluster, this
- feature is disabled by default, set to -1.
-
-
-
-
diff --git a/archived/cdh5.12-hive/files/etc/hadoop/conf/core-site.xml b/archived/cdh5.12-hive/files/etc/hadoop/conf/core-site.xml
deleted file mode 100644
index 4dbdd062..00000000
--- a/archived/cdh5.12-hive/files/etc/hadoop/conf/core-site.xml
+++ /dev/null
@@ -1,97 +0,0 @@
-
-
-
-
-
- fs.defaultFS
- hdfs://hadoop-master:9000
-
-
-
-
- hadoop.proxyuser.oozie.hosts
- *
-
-
- hadoop.proxyuser.oozie.groups
- *
-
-
-
-
- hadoop.proxyuser.httpfs.hosts
- *
-
-
- hadoop.proxyuser.httpfs.groups
- *
-
-
-
-
- hadoop.proxyuser.llama.hosts
- *
-
-
- hadoop.proxyuser.llama.groups
- *
-
-
-
-
- hadoop.proxyuser.hue.hosts
- *
-
-
- hadoop.proxyuser.hue.groups
- *
-
-
-
-
- hadoop.proxyuser.mapred.hosts
- *
-
-
- hadoop.proxyuser.mapred.groups
- *
-
-
-
-
- hadoop.proxyuser.hive.hosts
- *
-
-
-
- hadoop.proxyuser.hive.groups
- *
-
-
-
-
- hadoop.proxyuser.hdfs.groups
- *
-
-
-
- hadoop.proxyuser.hdfs.hosts
- *
-
-
-
diff --git a/archived/cdh5.12-hive/files/etc/hadoop/conf/hadoop-env.sh b/archived/cdh5.12-hive/files/etc/hadoop/conf/hadoop-env.sh
deleted file mode 100644
index 809937b4..00000000
--- a/archived/cdh5.12-hive/files/etc/hadoop/conf/hadoop-env.sh
+++ /dev/null
@@ -1,27 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Set Hadoop-specific environment variables here.
-# Forcing YARN-based mapreduce implementaion.
-# Make sure to comment out if you want to go back to the default or
-# if you want this to be tweakable on a per-user basis
-# export HADOOP_MAPRED_HOME=/usr/lib/hadoop-mapreduce
-
-# The maximum amount of heap to use, in MB. Default is 1000.
-export HADOOP_HEAPSIZE=512
-
-# Extra Java runtime options. Empty by default.
-export HADOOP_NAMENODE_OPTS="$HADOOP_NAMENODE_OPTS -Xmx512m"
-export YARN_OPTS="$YARN_OPTS -Xmx256m"
diff --git a/archived/cdh5.12-hive/files/etc/hadoop/conf/hdfs-site.xml b/archived/cdh5.12-hive/files/etc/hadoop/conf/hdfs-site.xml
deleted file mode 100644
index 9d1e71aa..00000000
--- a/archived/cdh5.12-hive/files/etc/hadoop/conf/hdfs-site.xml
+++ /dev/null
@@ -1,33 +0,0 @@
-
-
-
-
-
- dfs.namenode.name.dir
- /var/lib/hadoop-hdfs/cache/name/
-
-
-
- dfs.datanode.data.dir
- /var/lib/hadoop-hdfs/cache/data/
-
-
-
- fs.viewfs.mounttable.hadoop-viewfs.link./default
- hdfs://hadoop-master:9000/user/hive/warehouse
-
-
-
diff --git a/archived/cdh5.12-hive/files/etc/hadoop/conf/mapred-site.xml b/archived/cdh5.12-hive/files/etc/hadoop/conf/mapred-site.xml
deleted file mode 100644
index b99d8620..00000000
--- a/archived/cdh5.12-hive/files/etc/hadoop/conf/mapred-site.xml
+++ /dev/null
@@ -1,41 +0,0 @@
-
-
-
-
-
- mapred.job.tracker
- hadoop-master:8021
-
-
-
- mapreduce.jobhistory.address
- hadoop-master:10020
-
-
-
- mapreduce.jobhistory.webapp.address
- hadoop-master:19888
-
-
-
- To set the value of tmp directory for map and reduce tasks.
- mapreduce.task.tmp.dir
- /var/lib/hadoop-mapreduce/cache/${user.name}/tasks
-
-
-
diff --git a/archived/cdh5.12-hive/files/etc/hadoop/conf/yarn-site.xml b/archived/cdh5.12-hive/files/etc/hadoop/conf/yarn-site.xml
deleted file mode 100644
index 5f3757c5..00000000
--- a/archived/cdh5.12-hive/files/etc/hadoop/conf/yarn-site.xml
+++ /dev/null
@@ -1,109 +0,0 @@
-
-
-
-
-
- yarn.nodemanager.aux-services
- mapreduce_shuffle
-
-
-
- yarn.nodemanager.aux-services.mapreduce_shuffle.class
- org.apache.hadoop.mapred.ShuffleHandler
-
-
-
- yarn.log-aggregation-enable
- true
-
-
-
- yarn.dispatcher.exit-on-error
- true
-
-
-
- List of directories to store localized files in.
- yarn.nodemanager.local-dirs
- /var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir
-
-
-
- Where to store container logs.
- yarn.nodemanager.log-dirs
- /var/log/hadoop-yarn/containers
-
-
-
- Where to aggregate logs to.
- yarn.nodemanager.remote-app-log-dir
- /var/log/hadoop-yarn/apps
-
-
-
- Classpath for typical applications.
- yarn.application.classpath
-
- /etc/hadoop/conf,
- /usr/lib/hadoop/*,
- /usr/lib/hadoop/lib/*,
- /usr/lib/hadoop-hdfs/*,
- /usr/lib/hadoop-hdfs/lib/*,
- /usr/lib/hadoop-yarn/*,
- /usr/lib/hadoop-yarn/lib/*,
- /usr/lib/hadoop-mapreduce/*,
- /usr/lib/hadoop-mapreduce/lib/*
-
-
-
-
- yarn.resourcemanager.hostname
- hadoop-master
-
-
-
- yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage
- 100
-
-
-
- yarn.nodemanager.pmem-check-enabled
- false
-
-
-
- yarn.nodemanager.vmem-check-enabled
- false
-
-
-
- yarn.nodemanager.resource.memory.enforced
- false
-
-
-
- yarn.nodemanager.elastic-memory-control.enabled
- false
-
-
-
- yarn.log.server.url
- http://hadoop-master:19888/jobhistory/logs
-
-
-
diff --git a/archived/cdh5.12-hive/files/etc/hive/conf/hive-env.sh b/archived/cdh5.12-hive/files/etc/hive/conf/hive-env.sh
deleted file mode 100644
index 034db442..00000000
--- a/archived/cdh5.12-hive/files/etc/hive/conf/hive-env.sh
+++ /dev/null
@@ -1 +0,0 @@
-export HADOOP_OPTS="$HADOOP_OPTS -Dhive.root.logger=INFO,console"
diff --git a/archived/cdh5.12-hive/files/etc/hive/conf/hive-site.xml b/archived/cdh5.12-hive/files/etc/hive/conf/hive-site.xml
deleted file mode 100644
index 4dfaa2a5..00000000
--- a/archived/cdh5.12-hive/files/etc/hive/conf/hive-site.xml
+++ /dev/null
@@ -1,67 +0,0 @@
-
-
-
-
-
- javax.jdo.option.ConnectionURL
- jdbc:mysql://localhost/metastore
- the URL of the MySQL database
-
-
-
- javax.jdo.option.ConnectionDriverName
- com.mysql.jdbc.Driver
-
-
-
- javax.jdo.option.ConnectionUserName
- root
-
-
-
- javax.jdo.option.ConnectionPassword
- root
-
-
-
- datanucleus.autoCreateSchema
- false
-
-
-
- datanucleus.fixedDatastore
- true
-
-
-
- datanucleus.autoStartMechanism
- SchemaTable
-
-
-
- hive.security.authorization.createtable.owner.grants
- ALL
- The set of privileges automatically granted to the owner whenever a table gets created.
-
-
-
- hive.users.in.admin.role
- hdfs,hive
-
-
-
diff --git a/archived/cdh5.12-hive/files/etc/supervisord.conf b/archived/cdh5.12-hive/files/etc/supervisord.conf
deleted file mode 100644
index 2ac8dba5..00000000
--- a/archived/cdh5.12-hive/files/etc/supervisord.conf
+++ /dev/null
@@ -1,21 +0,0 @@
-[supervisord]
-logfile = /var/log/supervisord.log
-logfile_maxbytes = 50MB
-logfile_backups=10
-loglevel = info
-pidfile = /var/run/supervisord.pid
-nodaemon = true
-directory = /tmp
-strip_ansi = false
-
-[unix_http_server]
-file = /tmp/supervisor.sock
-
-[rpcinterface:supervisor]
-supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
-
-[supervisorctl]
-serverurl = unix:///tmp/supervisor.sock
-
-[include]
-files = /etc/supervisord.d/*.conf
diff --git a/archived/cdh5.12-hive/files/etc/supervisord.d/hdfs-datanode.conf b/archived/cdh5.12-hive/files/etc/supervisord.d/hdfs-datanode.conf
deleted file mode 100644
index 87fbcbed..00000000
--- a/archived/cdh5.12-hive/files/etc/supervisord.d/hdfs-datanode.conf
+++ /dev/null
@@ -1,8 +0,0 @@
-[program:hdfs-datanode]
-command=hdfs datanode
-startsecs=2
-stopwaitsecs=10
-user=hdfs
-redirect_stderr=true
-stdout_logfile=/var/log/hadoop-hdfs/hadoop-hdfs-datanode.log
-autostart=true
diff --git a/archived/cdh5.12-hive/files/etc/supervisord.d/hdfs-namenode.conf b/archived/cdh5.12-hive/files/etc/supervisord.d/hdfs-namenode.conf
deleted file mode 100644
index 0d8de8db..00000000
--- a/archived/cdh5.12-hive/files/etc/supervisord.d/hdfs-namenode.conf
+++ /dev/null
@@ -1,8 +0,0 @@
-[program:hdfs-namenode]
-command=hdfs namenode
-startsecs=2
-stopwaitsecs=10
-user=hdfs
-redirect_stderr=true
-stdout_logfile=/var/log/hadoop-hdfs/hadoop-hdfs-namenode.log
-autostart=true
diff --git a/archived/cdh5.12-hive/files/etc/supervisord.d/hive-metastore.conf b/archived/cdh5.12-hive/files/etc/supervisord.d/hive-metastore.conf
deleted file mode 100644
index f5aaf8ca..00000000
--- a/archived/cdh5.12-hive/files/etc/supervisord.d/hive-metastore.conf
+++ /dev/null
@@ -1,9 +0,0 @@
-[program:hive-metastore]
-# Add `--debug:port=5006` for debugging
-command=hive --service metastore
-startsecs=2
-stopwaitsecs=10
-user=hive
-redirect_stderr=true
-stdout_logfile=/var/log/hive/hive-metastore.log
-autostart=true
diff --git a/archived/cdh5.12-hive/files/etc/supervisord.d/hive-server2.conf b/archived/cdh5.12-hive/files/etc/supervisord.d/hive-server2.conf
deleted file mode 100644
index d090496f..00000000
--- a/archived/cdh5.12-hive/files/etc/supervisord.d/hive-server2.conf
+++ /dev/null
@@ -1,8 +0,0 @@
-[program:hive-server2]
-command=hive --service hiveserver2
-startsecs=2
-stopwaitsecs=10
-user=hive
-redirect_stderr=true
-stdout_logfile=/var/log/hive/hive-server2.log
-autostart=true
diff --git a/archived/cdh5.12-hive/files/etc/supervisord.d/mysql-metastore.conf b/archived/cdh5.12-hive/files/etc/supervisord.d/mysql-metastore.conf
deleted file mode 100644
index e95544e5..00000000
--- a/archived/cdh5.12-hive/files/etc/supervisord.d/mysql-metastore.conf
+++ /dev/null
@@ -1,8 +0,0 @@
-[program:mysql-metastore]
-command=/usr/bin/pidproxy /var/run/mysqld/mysqld.pid /usr/bin/mysqld_safe
-startsecs=2
-stopwaitsecs=10
-user=mysql
-redirect_stderr=true
-stdout_logfile=/var/log/mysql/mysql.log
-autostart=true
diff --git a/archived/cdh5.12-hive/files/etc/supervisord.d/socks-proxy.conf b/archived/cdh5.12-hive/files/etc/supervisord.d/socks-proxy.conf
deleted file mode 100644
index 43602d26..00000000
--- a/archived/cdh5.12-hive/files/etc/supervisord.d/socks-proxy.conf
+++ /dev/null
@@ -1,9 +0,0 @@
-[program:socks-proxy]
-command=/usr/bin/ssh -o StrictHostKeyChecking=no -v -N -D 0.0.0.0:1180 localhost
-startsecs=2
-stopwaitsecs=10
-startretries=30
-user=root
-redirect_stderr=true
-stdout_logfile=/var/log/socks-proxy
-autostart=true
diff --git a/archived/cdh5.12-hive/files/etc/supervisord.d/sshd.conf b/archived/cdh5.12-hive/files/etc/supervisord.d/sshd.conf
deleted file mode 100644
index 3930b4c0..00000000
--- a/archived/cdh5.12-hive/files/etc/supervisord.d/sshd.conf
+++ /dev/null
@@ -1,9 +0,0 @@
-[program:sshd]
-command=/usr/sbin/sshd -D -e
-startsecs=2
-stopwaitsecs=10
-startretries=30
-user=root
-redirect_stderr=true
-stdout_logfile=/var/log/sshd
-autostart=true
diff --git a/archived/cdh5.12-hive/files/etc/supervisord.d/yarn-nodemanager.conf b/archived/cdh5.12-hive/files/etc/supervisord.d/yarn-nodemanager.conf
deleted file mode 100644
index 8f81e3c7..00000000
--- a/archived/cdh5.12-hive/files/etc/supervisord.d/yarn-nodemanager.conf
+++ /dev/null
@@ -1,8 +0,0 @@
-[program:yarn-nodemanager]
-command=yarn nodemanager
-startsecs=2
-stopwaitsecs=10
-user=yarn
-redirect_stderr=true
-stdout_logfile=/var/log/hadoop-yarn/hadoop-yarn-nodemanager.log
-autostart=true
diff --git a/archived/cdh5.12-hive/files/etc/supervisord.d/yarn-resourcemanager.conf b/archived/cdh5.12-hive/files/etc/supervisord.d/yarn-resourcemanager.conf
deleted file mode 100644
index 2ef16026..00000000
--- a/archived/cdh5.12-hive/files/etc/supervisord.d/yarn-resourcemanager.conf
+++ /dev/null
@@ -1,8 +0,0 @@
-[program:yarn-resourcemanager]
-command=yarn resourcemanager
-startsecs=2
-stopwaitsecs=10
-user=yarn
-redirect_stderr=true
-stdout_logfile=/var/log/hadoop-yarn/hadoop-yarn-resourcemanager.log
-autostart=true
diff --git a/archived/cdh5.12-hive/files/root/setup.sh b/archived/cdh5.12-hive/files/root/setup.sh
deleted file mode 100755
index 9391f99f..00000000
--- a/archived/cdh5.12-hive/files/root/setup.sh
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/bin/bash -ex
-
-# format namenode
-chown hdfs:hdfs /var/lib/hadoop-hdfs/cache/
-
-# workaround for 'could not open session' bug as suggested here:
-# https://github.com/docker/docker/issues/7056#issuecomment-49371610
-rm -f /etc/security/limits.d/hdfs.conf
-su -c "echo 'N' | hdfs namenode -format" hdfs
-
-# start hdfs
-su -c "hdfs datanode 2>&1 > /var/log/hadoop-hdfs/hadoop-hdfs-datanode.log" hdfs&
-su -c "hdfs namenode 2>&1 > /var/log/hadoop-hdfs/hadoop-hdfs-namenode.log" hdfs&
-
-# wait for process starting
-sleep 10
-
-# remove a broken symlink created by cdh installer so that init-hdfs.sh does no blow up on it
-# (hbase-annotations.jar seems not needed in our case)
-rm /usr/lib/hive/lib/hbase-annotations.jar
-
-# 4 exec cloudera hdfs init script
-/usr/lib/hadoop/libexec/init-hdfs.sh
-
-# init hive directories
-su -s /bin/bash hdfs -c '/usr/bin/hadoop fs -mkdir /user/hive/warehouse'
-su -s /bin/bash hdfs -c '/usr/bin/hadoop fs -chmod 1777 /user/hive/warehouse'
-su -s /bin/bash hdfs -c '/usr/bin/hadoop fs -chown hive /user/hive/warehouse'
-
-# stop hdfs
-killall java
-
-# setup metastore
-mysql_install_db
-
-/usr/bin/mysqld_safe &
-sleep 10s
-
-cd /usr/lib/hive/scripts/metastore/upgrade/mysql/
-echo "GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' WITH GRANT OPTION; FLUSH PRIVILEGES;" | mysql
-echo "CREATE DATABASE metastore; USE metastore; SOURCE hive-schema-1.1.0.mysql.sql;" | mysql
-/usr/bin/mysqladmin -u root password 'root'
-
-killall mysqld
-sleep 10s
-mkdir /var/log/mysql/
-chown mysql:mysql /var/log/mysql/
diff --git a/archived/cdh5.15-hive-kerberized-kms/Dockerfile b/archived/cdh5.15-hive-kerberized-kms/Dockerfile
deleted file mode 100644
index 61050884..00000000
--- a/archived/cdh5.15-hive-kerberized-kms/Dockerfile
+++ /dev/null
@@ -1,49 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-FROM testing/cdh5.15-hive-kerberized:unlabelled
-
-RUN set -xeu \
- && yum install -y \
- hadoop-kms \
- # Cleanup
- && yum -y clean all && rm -rf /tmp/* /var/tmp/*
-
-# COPY CONFIGURATION
-COPY ./files /
-
-# add users and group for testing purposes
-RUN set -xeu && \
- for username in alice bob charlie; do \
- groupadd "${username}_group" && \
- useradd -g "${username}_group" "${username}" && \
- /usr/sbin/kadmin.local -q "addprinc -randkey ${username}/hadoop-master@LABS.TERADATA.COM" && \
- /usr/sbin/kadmin.local -q "xst -norandkey -k /etc/hive/conf/${username}.keytab ${username}/hadoop-master"; \
- done && \
- echo OK
-
-RUN set -x && \
- install --directory --owner=kms --group=kms /var/run/hadoop-kms && \
- # $JAVA_HOME/jre/lib/security/java.security is used by default and in our Java it prevents KMS code from accessing its own keystore
- sed -e 's@-Dcatalina.base="$CATALINA_BASE"@\0 -Djceks.key.serialFilter="**"@' -i /usr/lib/bigtop-tomcat/bin/catalina.sh && \
- /root/setup_kms.sh && \
- # Purge Kerberos credential cache of root user
- kdestroy && \
- echo OK
-
-RUN set -x && \
- find /var/log -type f -name \*.log -printf "truncate %p\n" -exec truncate --size 0 {} \; && \
- # Purge /tmp, this includes credential caches of other users
- find /tmp -mindepth 1 -maxdepth 1 -exec rm -rf {} + && \
- echo OK
-
-CMD supervisord -c /etc/supervisord.conf
diff --git a/archived/cdh5.15-hive-kerberized-kms/files/etc/hadoop-kms/conf/core-site.xml b/archived/cdh5.15-hive-kerberized-kms/files/etc/hadoop-kms/conf/core-site.xml
deleted file mode 100644
index ddf829a1..00000000
--- a/archived/cdh5.15-hive-kerberized-kms/files/etc/hadoop-kms/conf/core-site.xml
+++ /dev/null
@@ -1,9 +0,0 @@
-
-
-
-
- fs.defaultFS
- hdfs://hadoop-master:9000
-
-
-
diff --git a/archived/cdh5.15-hive-kerberized-kms/files/etc/hadoop-kms/conf/kms-acls.xml b/archived/cdh5.15-hive-kerberized-kms/files/etc/hadoop-kms/conf/kms-acls.xml
deleted file mode 100644
index 0a31b900..00000000
--- a/archived/cdh5.15-hive-kerberized-kms/files/etc/hadoop-kms/conf/kms-acls.xml
+++ /dev/null
@@ -1,35 +0,0 @@
-
-
-
- default.key.acl.ALL
- *
-
-
-
- default.key.acl.MANAGEMENT
- *
-
-
-
- default.key.acl.READ
- *
-
-
-
- default.key.acl.GENERATE_EEK
- *
-
-
-
- default.key.acl.DECRYPT_EEK
- *
-
-
-
diff --git a/archived/cdh5.15-hive-kerberized-kms/files/etc/hadoop-kms/conf/kms-site.xml b/archived/cdh5.15-hive-kerberized-kms/files/etc/hadoop-kms/conf/kms-site.xml
deleted file mode 100644
index 52ee4b51..00000000
--- a/archived/cdh5.15-hive-kerberized-kms/files/etc/hadoop-kms/conf/kms-site.xml
+++ /dev/null
@@ -1,29 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<configuration>
-  <property>
-    <name>hadoop.kms.key.provider.uri</name>
-    <value>jceks://file@/${user.home}/kms.keystore</value>
-  </property>
-  <property>
-    <name>hadoop.kms.authentication.type</name>
-    <value>kerberos</value>
-  </property>
-  <property>
-    <name>hadoop.kms.authentication.kerberos.keytab</name>
-    <value>/etc/hadoop/conf/HTTP.keytab</value>
-  </property>
-  <property>
-    <name>hadoop.kms.authentication.kerberos.principal</name>
-    <value>HTTP/hadoop-master</value>
-  </property>
-  <property>
-    <name>hadoop.kms.authentication.kerberos.name.rules</name>
-    <value>DEFAULT</value>
-  </property>
-</configuration>
diff --git a/archived/cdh5.15-hive-kerberized-kms/files/etc/hadoop-kms/conf/passwordfile b/archived/cdh5.15-hive-kerberized-kms/files/etc/hadoop-kms/conf/passwordfile
deleted file mode 100644
index 7d8381bf..00000000
--- a/archived/cdh5.15-hive-kerberized-kms/files/etc/hadoop-kms/conf/passwordfile
+++ /dev/null
@@ -1 +0,0 @@
-abc1234
diff --git a/archived/cdh5.15-hive-kerberized-kms/files/etc/hadoop/conf/core-site.xml b/archived/cdh5.15-hive-kerberized-kms/files/etc/hadoop/conf/core-site.xml
deleted file mode 100644
index 32c85a75..00000000
--- a/archived/cdh5.15-hive-kerberized-kms/files/etc/hadoop/conf/core-site.xml
+++ /dev/null
@@ -1,75 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<configuration>
-  <property>
-    <name>fs.defaultFS</name>
-    <value>hdfs://hadoop-master:9000</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.httpfs.hosts</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.httpfs.groups</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.hive.hosts</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.hive.groups</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.hdfs.groups</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.hdfs.hosts</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.trino-server.groups</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.trino-server.hosts</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.security.authentication</name>
-    <value>kerberos</value>
-  </property>
-  <property>
-    <name>hadoop.security.authorization</name>
-    <value>true</value>
-  </property>
-  <property>
-    <name>hadoop.security.key.provider.path</name>
-    <value>kms://http@hadoop-master:16000/kms</value>
-  </property>
-  <property>
-    <name>dfs.encryption.key.provider.uri</name>
-    <value>kms://http@hadoop-master:16000/kms</value>
-  </property>
-</configuration>
diff --git a/archived/cdh5.15-hive-kerberized-kms/files/etc/hadoop/conf/hdfs-site.xml b/archived/cdh5.15-hive-kerberized-kms/files/etc/hadoop/conf/hdfs-site.xml
deleted file mode 100644
index 185d814f..00000000
--- a/archived/cdh5.15-hive-kerberized-kms/files/etc/hadoop/conf/hdfs-site.xml
+++ /dev/null
@@ -1,123 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<configuration>
-  <property>
-    <name>dfs.namenode.name.dir</name>
-    <value>/var/lib/hadoop-hdfs/cache/name/</value>
-  </property>
-  <property>
-    <name>dfs.datanode.data.dir</name>
-    <value>/var/lib/hadoop-hdfs/cache/data/</value>
-  </property>
-  <property>
-    <name>fs.viewfs.mounttable.hadoop-viewfs.link./default</name>
-    <value>hdfs://hadoop-master:9000/user/hive/warehouse</value>
-  </property>
-  <property>
-    <name>dfs.block.access.token.enable</name>
-    <value>true</value>
-  </property>
-  <property>
-    <name>dfs.namenode.keytab.file</name>
-    <value>/etc/hadoop/conf/hdfs.keytab</value>
-  </property>
-  <property>
-    <name>dfs.namenode.kerberos.principal</name>
-    <value>hdfs/hadoop-master@LABS.TERADATA.COM</value>
-  </property>
-  <property>
-    <name>dfs.namenode.kerberos.internal.spnego.principal</name>
-    <value>HTTP/hadoop-master@LABS.TERADATA.COM</value>
-  </property>
-  <property>
-    <name>dfs.secondary.namenode.keytab.file</name>
-    <value>/etc/hadoop/conf/hdfs.keytab</value>
-  </property>
-  <property>
-    <name>dfs.secondary.namenode.kerberos.principal</name>
-    <value>hdfs/hadoop-master@LABS.TERADATA.COM</value>
-  </property>
-  <property>
-    <name>dfs.secondary.namenode.kerberos.internal.spnego.principal</name>
-    <value>HTTP/hadoop-master@LABS.TERADATA.COM</value>
-  </property>
-  <property>
-    <name>dfs.datanode.keytab.file</name>
-    <value>/etc/hadoop/conf/hdfs.keytab</value>
-  </property>
-  <property>
-    <name>dfs.datanode.kerberos.principal</name>
-    <value>hdfs/hadoop-master@LABS.TERADATA.COM</value>
-  </property>
-  <property>
-    <name>dfs.webhdfs.enabled</name>
-    <value>true</value>
-  </property>
-  <property>
-    <name>dfs.web.authentication.kerberos.principal</name>
-    <value>HTTP/hadoop-master@LABS.TERADATA.COM</value>
-  </property>
-  <property>
-    <name>dfs.web.authentication.kerberos.keytab</name>
-    <value>/etc/hadoop/conf/HTTP.keytab</value>
-  </property>
-  <property>
-    <name>ignore.secure.ports.for.testing</name>
-    <value>true</value>
-  </property>
-  <property>
-    <name>dfs.http.policy</name>
-    <value>HTTP_ONLY</value>
-  </property>
-  <property>
-    <name>dfs.namenode.acls.enabled</name>
-    <value>true</value>
-  </property>
-  <property>
-    <name>dfs.permissions</name>
-    <value>true</value>
-  </property>
-</configuration>
diff --git a/archived/cdh5.15-hive-kerberized-kms/files/etc/hadoop/conf/taskcontroller.cfg b/archived/cdh5.15-hive-kerberized-kms/files/etc/hadoop/conf/taskcontroller.cfg
deleted file mode 100644
index 2384a21e..00000000
--- a/archived/cdh5.15-hive-kerberized-kms/files/etc/hadoop/conf/taskcontroller.cfg
+++ /dev/null
@@ -1,5 +0,0 @@
-hadoop.log.dir=/var/log/hadoop-mapreduce
-mapreduce.tasktracker.group=mapred
-banned.users=mapred,bin
-min.user.id=0
-allowed.system.users=nobody,hive
diff --git a/archived/cdh5.15-hive-kerberized-kms/files/etc/hive/conf/hive-site.xml b/archived/cdh5.15-hive-kerberized-kms/files/etc/hive/conf/hive-site.xml
deleted file mode 100644
index 7629fb4b..00000000
--- a/archived/cdh5.15-hive-kerberized-kms/files/etc/hive/conf/hive-site.xml
+++ /dev/null
@@ -1,85 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<configuration>
-  <property>
-    <name>javax.jdo.option.ConnectionURL</name>
-    <value>jdbc:mysql://localhost/metastore</value>
-  </property>
-  <property>
-    <name>javax.jdo.option.ConnectionDriverName</name>
-    <value>com.mysql.jdbc.Driver</value>
-  </property>
-  <property>
-    <name>javax.jdo.option.ConnectionUserName</name>
-    <value>root</value>
-  </property>
-  <property>
-    <name>javax.jdo.option.ConnectionPassword</name>
-    <value>root</value>
-  </property>
-  <property>
-    <name>datanucleus.autoCreateSchema</name>
-    <value>false</value>
-  </property>
-  <property>
-    <name>datanucleus.fixedDatastore</name>
-    <value>true</value>
-  </property>
-  <property>
-    <name>datanucleus.autoStartMechanism</name>
-    <value>SchemaTable</value>
-  </property>
-  <property>
-    <name>hive.security.authorization.createtable.owner.grants</name>
-    <value>ALL</value>
-  </property>
-  <property>
-    <name>hive.users.in.admin.role</name>
-    <value>hdfs,hive</value>
-  </property>
-  <property>
-    <name>hive.server2.authentication</name>
-    <value>KERBEROS</value>
-  </property>
-  <property>
-    <name>hive.server2.enable.impersonation</name>
-    <value>false</value>
-  </property>
-  <property>
-    <name>hive.server2.authentication.kerberos.principal</name>
-    <value>hive/hadoop-master@LABS.TERADATA.COM</value>
-  </property>
-  <property>
-    <name>hive.server2.authentication.kerberos.keytab</name>
-    <value>/etc/hive/conf/hive.keytab</value>
-  </property>
-  <property>
-    <name>hive.metastore.sasl.enabled</name>
-    <value>true</value>
-  </property>
-  <property>
-    <name>hive.metastore.kerberos.keytab.file</name>
-    <value>/etc/hive/conf/hive.keytab</value>
-  </property>
-  <property>
-    <name>hive.metastore.kerberos.principal</name>
-    <value>hive/hadoop-master@LABS.TERADATA.COM</value>
-  </property>
-</configuration>
diff --git a/archived/cdh5.15-hive-kerberized-kms/files/etc/hive/conf/hiveserver2-site.xml b/archived/cdh5.15-hive-kerberized-kms/files/etc/hive/conf/hiveserver2-site.xml
deleted file mode 100644
index 520cd41d..00000000
--- a/archived/cdh5.15-hive-kerberized-kms/files/etc/hive/conf/hiveserver2-site.xml
+++ /dev/null
@@ -1,16 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<configuration>
-  <property>
-    <name>hive.metastore.uris</name>
-    <value>thrift://localhost:9083</value>
-  </property>
-  <property>
-    <name>hive.security.authenticator.manager</name>
-    <value>org.apache.hadoop.hive.ql.security.SessionStateUserAuthenticator</value>
-  </property>
-</configuration>
diff --git a/archived/cdh5.15-hive-kerberized-kms/files/etc/supervisord.d/kms.conf b/archived/cdh5.15-hive-kerberized-kms/files/etc/supervisord.d/kms.conf
deleted file mode 100644
index 3000c274..00000000
--- a/archived/cdh5.15-hive-kerberized-kms/files/etc/supervisord.d/kms.conf
+++ /dev/null
@@ -1,10 +0,0 @@
-[program:kms]
-environment=HADOOP_KEYSTORE_PASSWORD="abc1234"
-command=bash -c '. /etc/default/hadoop; . /etc/default/hadoop-kms; . /usr/lib/hadoop-kms/tomcat-deployment.sh; export KMS_SILENT=false KMS_LOG=/var/log/hadoop-kms/; exec /usr/lib/hadoop-kms/sbin/kms.sh run'
-user=kms
-autostart=true
-autorestart=true
-redirect_stderr=true
-##### stdout_logfile=/var/log/hadoop-kms/kms.log
-stdout_logfile=/dev/stdout
-stdout_logfile_maxbytes=0
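
The KMS is just another supervised program, so once a container is running it can be inspected and bounced the same way as the Hadoop daemons. A minimal sketch from a shell inside the container (the `kms` program name comes from the section header above; the log path matches the supervisord.conf shipped with these images):

```
supervisorctl status kms          # should report RUNNING
supervisorctl restart kms
tail -f /var/log/supervisord.log  # supervisord's own log
```
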
diff --git a/archived/cdh5.15-hive-kerberized-kms/files/root/setup_kms.sh b/archived/cdh5.15-hive-kerberized-kms/files/root/setup_kms.sh
deleted file mode 100755
index e99c46ce..00000000
--- a/archived/cdh5.15-hive-kerberized-kms/files/root/setup_kms.sh
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/bin/bash
-
-set -euo pipefail
-
-function retry() {
- END=$(($(date +%s) + 600))
-
- while (( $(date +%s) < $END )); do
- set +e
- "$@"
- EXIT_CODE=$?
- set -e
-
- if [[ ${EXIT_CODE} == 0 ]]; then
- break
- fi
- sleep 5
- done
-
- return ${EXIT_CODE}
-}
-
-echo 127.0.0.2 `# must be different than localhost IP` hadoop-master >> /etc/hosts
-supervisord -c /etc/supervisord.conf &
-
-retry kinit -kt /etc/hadoop/conf/hdfs.keytab hdfs/hadoop-master@LABS.TERADATA.COM
-retry hdfs dfsadmin -safemode leave
-
-set -x
-hadoop key create key1 -size 256
-hdfs crypto -createZone -keyName key1 -path /user/hive/warehouse
-hdfs crypto -listZones
-
-supervisorctl stop all
-killall supervisord
-wait
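
After the image is built, the encryption zone created by this script can be re-checked from a root shell in the container; a sketch, reusing the keytab, key name, and warehouse path from the script above:

```
kinit -kt /etc/hadoop/conf/hdfs.keytab hdfs/hadoop-master@LABS.TERADATA.COM
hadoop key list         # expect key1 to be listed
hdfs crypto -listZones  # expect /user/hive/warehouse backed by key1
kdestroy                # drop the credential cache again
```
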
diff --git a/archived/cdh5.15-hive/Dockerfile b/archived/cdh5.15-hive/Dockerfile
deleted file mode 100644
index b1634b28..00000000
--- a/archived/cdh5.15-hive/Dockerfile
+++ /dev/null
@@ -1,77 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-FROM testing/centos6-oj8:unlabelled
-
-# Change default timezone
-RUN ln -snf "/usr/share/zoneinfo/Asia/Kathmandu" /etc/localtime && echo "Asia/Kathmandu" > /etc/timezone
-
-# Setup CDH repo, pin the CDH distribution to a concrete version
-RUN wget -nv https://archive.cloudera.com/cdh5/redhat/6/x86_64/cdh/cloudera-cdh5.repo -P /etc/yum.repos.d \
- && rpm --import https://archive.cloudera.com/cdh5/redhat/6/x86_64/cdh/RPM-GPG-KEY-cloudera \
- && sed -i '/^baseurl=/c\baseurl=https://archive.cloudera.com/cdh5/redhat/6/x86_64/cdh/5.15.1/' /etc/yum.repos.d/cloudera-cdh5.repo
-
-# Install Hadoop, Hive (w/ MySQL)
-RUN \
- set -xeu && \
- yum install -y \
- hadoop-hdfs-namenode \
- hadoop-hdfs-secondarynamenode \
- hadoop-hdfs-datanode \
- \
- hadoop-mapreduce \
- \
- hadoop-yarn-resourcemanager \
- hadoop-yarn-nodemanager \
- \
- hadoop-client \
- hadoop-conf-pseudo \
- \
- hive \
- hive-metastore \
- hive-server2 \
- \
- mysql-server mysql-connector-java \
- \
- # Cleanup
- && yum -y clean all && rm -rf /tmp/* /var/tmp/* \
- && ln -s /usr/share/java/mysql-connector-java.jar /usr/lib/hive/lib/mysql-connector-java.jar
-
-# Copy configuration files
-COPY ./files /
-
-# Run setup script
-RUN /root/setup.sh \
- && rm -rf /tmp/* /var/tmp/*
-
-# Setup sock proxy
-RUN yum install -y openssh openssh-clients openssh-server && yum -y clean all
-RUN ssh-keygen -t rsa -b 4096 -C "automation@teradata.com" -N "" -f /root/.ssh/id_rsa \
- && ssh-keygen -t rsa -b 4096 -N "" -f /etc/ssh/ssh_host_rsa_key \
- && ssh-keygen -t dsa -b 1024 -N "" -f /etc/ssh/ssh_host_dsa_key \
- && cp /root/.ssh/id_rsa.pub /root/.ssh/authorized_keys
-RUN chmod 755 /root && chmod 700 /root/.ssh
-RUN passwd --unlock root
-
-# HDFS ports
-EXPOSE 1004 1006 8020 50010 50020 50070 50075 50470
-
-# YARN ports
-EXPOSE 8030 8031 8032 8033 8040 8041 8042 8088 10020 19888
-
-# HIVE port
-EXPOSE 9083 10000
-
-# SOCKS port
-EXPOSE 1180
-
-CMD supervisord -c /etc/supervisord.conf
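
For ad-hoc local use the archived image could be started directly; in CI the containers were brought up through the compose files removed further down. A sketch only (the `:latest` tag matches the compose file, and the published ports are a subset of the EXPOSE list above):

```
docker run -d --name hadoop-master --hostname hadoop-master \
  -p 9083:9083 -p 10000:10000 -p 1180:1180 \
  testing/cdh5.15-hive:latest
```
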
diff --git a/archived/cdh5.15-hive/files/etc/hadoop/conf/capacity-scheduler.xml b/archived/cdh5.15-hive/files/etc/hadoop/conf/capacity-scheduler.xml
deleted file mode 100644
index 0e15fb6b..00000000
--- a/archived/cdh5.15-hive/files/etc/hadoop/conf/capacity-scheduler.xml
+++ /dev/null
@@ -1,100 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<configuration>
-  <property>
-    <name>yarn.scheduler.capacity.maximum-applications</name>
-    <value>10000</value>
-    <description>
-      Maximum number of applications that can be pending and running.
-    </description>
-  </property>
-  <property>
-    <name>yarn.scheduler.capacity.maximum-am-resource-percent</name>
-    <value>1</value>
-    <description>
-      Maximum percent of resources in the cluster which can be used to run
-      application masters i.e. controls number of concurrent running
-      applications.
-    </description>
-  </property>
-  <property>
-    <name>yarn.scheduler.capacity.root.queues</name>
-    <value>default</value>
-    <description>
-      The queues at the this level (root is the root queue).
-    </description>
-  </property>
-  <property>
-    <name>yarn.scheduler.capacity.root.default.capacity</name>
-    <value>100</value>
-    <description>Default queue target capacity.</description>
-  </property>
-  <property>
-    <name>yarn.scheduler.capacity.root.default.maximum-capacity</name>
-    <value>100</value>
-    <description>
-      The maximum capacity of the default queue.
-    </description>
-  </property>
-  <property>
-    <name>yarn.scheduler.capacity.root.default.state</name>
-    <value>RUNNING</value>
-    <description>
-      The state of the default queue. State can be one of RUNNING or STOPPED.
-    </description>
-  </property>
-  <property>
-    <name>yarn.scheduler.capacity.root.default.acl_submit_applications</name>
-    <value>*</value>
-    <description>
-      The ACL of who can submit jobs to the default queue.
-    </description>
-  </property>
-  <property>
-    <name>yarn.scheduler.capacity.root.default.user-limit-factor</name>
-    <value>1</value>
-    <description>
-      Default queue user limit a percentage from 0.0 to 1.0.
-    </description>
-  </property>
-  <property>
-    <name>yarn.scheduler.capacity.root.default.acl_administer_queue</name>
-    <value>*</value>
-    <description>
-      The ACL of who can administer jobs on the default queue.
-    </description>
-  </property>
-  <property>
-    <name>yarn.scheduler.capacity.node-locality-delay</name>
-    <value>-1</value>
-    <description>
-      Number of missed scheduling opportunities after which the CapacityScheduler
-      attempts to schedule rack-local containers.
-      Typically this should be set to number of racks in the cluster, this
-      feature is disabled by default, set to -1.
-    </description>
-  </property>
-</configuration>
diff --git a/archived/cdh5.15-hive/files/etc/hadoop/conf/core-site.xml b/archived/cdh5.15-hive/files/etc/hadoop/conf/core-site.xml
deleted file mode 100644
index 4dbdd062..00000000
--- a/archived/cdh5.15-hive/files/etc/hadoop/conf/core-site.xml
+++ /dev/null
@@ -1,97 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<configuration>
-  <property>
-    <name>fs.defaultFS</name>
-    <value>hdfs://hadoop-master:9000</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.oozie.hosts</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.oozie.groups</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.httpfs.hosts</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.httpfs.groups</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.llama.hosts</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.llama.groups</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.hue.hosts</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.hue.groups</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.mapred.hosts</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.mapred.groups</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.hive.hosts</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.hive.groups</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.hdfs.groups</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.hdfs.hosts</name>
-    <value>*</value>
-  </property>
-</configuration>
diff --git a/archived/cdh5.15-hive/files/etc/hadoop/conf/hadoop-env.sh b/archived/cdh5.15-hive/files/etc/hadoop/conf/hadoop-env.sh
deleted file mode 100644
index 809937b4..00000000
--- a/archived/cdh5.15-hive/files/etc/hadoop/conf/hadoop-env.sh
+++ /dev/null
@@ -1,27 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Set Hadoop-specific environment variables here.
-# Forcing YARN-based mapreduce implementation.
-# Make sure to comment out if you want to go back to the default or
-# if you want this to be tweakable on a per-user basis
-# export HADOOP_MAPRED_HOME=/usr/lib/hadoop-mapreduce
-
-# The maximum amount of heap to use, in MB. Default is 1000.
-export HADOOP_HEAPSIZE=512
-
-# Extra Java runtime options. Empty by default.
-export HADOOP_NAMENODE_OPTS="$HADOOP_NAMENODE_OPTS -Xmx512m"
-export YARN_OPTS="$YARN_OPTS -Xmx256m"
diff --git a/archived/cdh5.15-hive/files/etc/hadoop/conf/hdfs-site.xml b/archived/cdh5.15-hive/files/etc/hadoop/conf/hdfs-site.xml
deleted file mode 100644
index 9d1e71aa..00000000
--- a/archived/cdh5.15-hive/files/etc/hadoop/conf/hdfs-site.xml
+++ /dev/null
@@ -1,33 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<configuration>
-  <property>
-    <name>dfs.namenode.name.dir</name>
-    <value>/var/lib/hadoop-hdfs/cache/name/</value>
-  </property>
-  <property>
-    <name>dfs.datanode.data.dir</name>
-    <value>/var/lib/hadoop-hdfs/cache/data/</value>
-  </property>
-  <property>
-    <name>fs.viewfs.mounttable.hadoop-viewfs.link./default</name>
-    <value>hdfs://hadoop-master:9000/user/hive/warehouse</value>
-  </property>
-</configuration>
diff --git a/archived/cdh5.15-hive/files/etc/hadoop/conf/mapred-site.xml b/archived/cdh5.15-hive/files/etc/hadoop/conf/mapred-site.xml
deleted file mode 100644
index b99d8620..00000000
--- a/archived/cdh5.15-hive/files/etc/hadoop/conf/mapred-site.xml
+++ /dev/null
@@ -1,41 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<configuration>
-  <property>
-    <name>mapred.job.tracker</name>
-    <value>hadoop-master:8021</value>
-  </property>
-  <property>
-    <name>mapreduce.jobhistory.address</name>
-    <value>hadoop-master:10020</value>
-  </property>
-  <property>
-    <name>mapreduce.jobhistory.webapp.address</name>
-    <value>hadoop-master:19888</value>
-  </property>
-  <property>
-    <description>To set the value of tmp directory for map and reduce tasks.</description>
-    <name>mapreduce.task.tmp.dir</name>
-    <value>/var/lib/hadoop-mapreduce/cache/${user.name}/tasks</value>
-  </property>
-</configuration>
diff --git a/archived/cdh5.15-hive/files/etc/hadoop/conf/yarn-site.xml b/archived/cdh5.15-hive/files/etc/hadoop/conf/yarn-site.xml
deleted file mode 100644
index 5f3757c5..00000000
--- a/archived/cdh5.15-hive/files/etc/hadoop/conf/yarn-site.xml
+++ /dev/null
@@ -1,109 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<configuration>
-  <property>
-    <name>yarn.nodemanager.aux-services</name>
-    <value>mapreduce_shuffle</value>
-  </property>
-  <property>
-    <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
-    <value>org.apache.hadoop.mapred.ShuffleHandler</value>
-  </property>
-  <property>
-    <name>yarn.log-aggregation-enable</name>
-    <value>true</value>
-  </property>
-  <property>
-    <name>yarn.dispatcher.exit-on-error</name>
-    <value>true</value>
-  </property>
-  <property>
-    <description>List of directories to store localized files in.</description>
-    <name>yarn.nodemanager.local-dirs</name>
-    <value>/var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir</value>
-  </property>
-  <property>
-    <description>Where to store container logs.</description>
-    <name>yarn.nodemanager.log-dirs</name>
-    <value>/var/log/hadoop-yarn/containers</value>
-  </property>
-  <property>
-    <description>Where to aggregate logs to.</description>
-    <name>yarn.nodemanager.remote-app-log-dir</name>
-    <value>/var/log/hadoop-yarn/apps</value>
-  </property>
-  <property>
-    <description>Classpath for typical applications.</description>
-    <name>yarn.application.classpath</name>
-    <value>
-      /etc/hadoop/conf,
-      /usr/lib/hadoop/*,
-      /usr/lib/hadoop/lib/*,
-      /usr/lib/hadoop-hdfs/*,
-      /usr/lib/hadoop-hdfs/lib/*,
-      /usr/lib/hadoop-yarn/*,
-      /usr/lib/hadoop-yarn/lib/*,
-      /usr/lib/hadoop-mapreduce/*,
-      /usr/lib/hadoop-mapreduce/lib/*
-    </value>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.hostname</name>
-    <value>hadoop-master</value>
-  </property>
-  <property>
-    <name>yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage</name>
-    <value>100</value>
-  </property>
-  <property>
-    <name>yarn.nodemanager.pmem-check-enabled</name>
-    <value>false</value>
-  </property>
-  <property>
-    <name>yarn.nodemanager.vmem-check-enabled</name>
-    <value>false</value>
-  </property>
-  <property>
-    <name>yarn.nodemanager.resource.memory.enforced</name>
-    <value>false</value>
-  </property>
-  <property>
-    <name>yarn.nodemanager.elastic-memory-control.enabled</name>
-    <value>false</value>
-  </property>
-  <property>
-    <name>yarn.log.server.url</name>
-    <value>http://hadoop-master:19888/jobhistory/logs</value>
-  </property>
-</configuration>
diff --git a/archived/cdh5.15-hive/files/etc/hive/conf/hive-env.sh b/archived/cdh5.15-hive/files/etc/hive/conf/hive-env.sh
deleted file mode 100644
index 034db442..00000000
--- a/archived/cdh5.15-hive/files/etc/hive/conf/hive-env.sh
+++ /dev/null
@@ -1 +0,0 @@
-export HADOOP_OPTS="$HADOOP_OPTS -Dhive.root.logger=INFO,console"
diff --git a/archived/cdh5.15-hive/files/etc/hive/conf/hive-site.xml b/archived/cdh5.15-hive/files/etc/hive/conf/hive-site.xml
deleted file mode 100644
index 4dfaa2a5..00000000
--- a/archived/cdh5.15-hive/files/etc/hive/conf/hive-site.xml
+++ /dev/null
@@ -1,67 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<configuration>
-  <property>
-    <name>javax.jdo.option.ConnectionURL</name>
-    <value>jdbc:mysql://localhost/metastore</value>
-    <description>the URL of the MySQL database</description>
-  </property>
-  <property>
-    <name>javax.jdo.option.ConnectionDriverName</name>
-    <value>com.mysql.jdbc.Driver</value>
-  </property>
-  <property>
-    <name>javax.jdo.option.ConnectionUserName</name>
-    <value>root</value>
-  </property>
-  <property>
-    <name>javax.jdo.option.ConnectionPassword</name>
-    <value>root</value>
-  </property>
-  <property>
-    <name>datanucleus.autoCreateSchema</name>
-    <value>false</value>
-  </property>
-  <property>
-    <name>datanucleus.fixedDatastore</name>
-    <value>true</value>
-  </property>
-  <property>
-    <name>datanucleus.autoStartMechanism</name>
-    <value>SchemaTable</value>
-  </property>
-  <property>
-    <name>hive.security.authorization.createtable.owner.grants</name>
-    <value>ALL</value>
-    <description>The set of privileges automatically granted to the owner whenever a table gets created.</description>
-  </property>
-  <property>
-    <name>hive.users.in.admin.role</name>
-    <value>hdfs,hive</value>
-  </property>
-</configuration>
diff --git a/archived/cdh5.15-hive/files/etc/supervisord.conf b/archived/cdh5.15-hive/files/etc/supervisord.conf
deleted file mode 100644
index 2ac8dba5..00000000
--- a/archived/cdh5.15-hive/files/etc/supervisord.conf
+++ /dev/null
@@ -1,21 +0,0 @@
-[supervisord]
-logfile = /var/log/supervisord.log
-logfile_maxbytes = 50MB
-logfile_backups=10
-loglevel = info
-pidfile = /var/run/supervisord.pid
-nodaemon = true
-directory = /tmp
-strip_ansi = false
-
-[unix_http_server]
-file = /tmp/supervisor.sock
-
-[rpcinterface:supervisor]
-supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
-
-[supervisorctl]
-serverurl = unix:///tmp/supervisor.sock
-
-[include]
-files = /etc/supervisord.d/*.conf
diff --git a/archived/cdh5.15-hive/files/etc/supervisord.d/hdfs-datanode.conf b/archived/cdh5.15-hive/files/etc/supervisord.d/hdfs-datanode.conf
deleted file mode 100644
index 87fbcbed..00000000
--- a/archived/cdh5.15-hive/files/etc/supervisord.d/hdfs-datanode.conf
+++ /dev/null
@@ -1,8 +0,0 @@
-[program:hdfs-datanode]
-command=hdfs datanode
-startsecs=2
-stopwaitsecs=10
-user=hdfs
-redirect_stderr=true
-stdout_logfile=/var/log/hadoop-hdfs/hadoop-hdfs-datanode.log
-autostart=true
diff --git a/archived/cdh5.15-hive/files/etc/supervisord.d/hdfs-namenode.conf b/archived/cdh5.15-hive/files/etc/supervisord.d/hdfs-namenode.conf
deleted file mode 100644
index 0d8de8db..00000000
--- a/archived/cdh5.15-hive/files/etc/supervisord.d/hdfs-namenode.conf
+++ /dev/null
@@ -1,8 +0,0 @@
-[program:hdfs-namenode]
-command=hdfs namenode
-startsecs=2
-stopwaitsecs=10
-user=hdfs
-redirect_stderr=true
-stdout_logfile=/var/log/hadoop-hdfs/hadoop-hdfs-namenode.log
-autostart=true
diff --git a/archived/cdh5.15-hive/files/etc/supervisord.d/hive-metastore.conf b/archived/cdh5.15-hive/files/etc/supervisord.d/hive-metastore.conf
deleted file mode 100644
index f5aaf8ca..00000000
--- a/archived/cdh5.15-hive/files/etc/supervisord.d/hive-metastore.conf
+++ /dev/null
@@ -1,9 +0,0 @@
-[program:hive-metastore]
-# Add `--debug:port=5006` for debugging
-command=hive --service metastore
-startsecs=2
-stopwaitsecs=10
-user=hive
-redirect_stderr=true
-stdout_logfile=/var/log/hive/hive-metastore.log
-autostart=true
diff --git a/archived/cdh5.15-hive/files/etc/supervisord.d/hive-server2.conf b/archived/cdh5.15-hive/files/etc/supervisord.d/hive-server2.conf
deleted file mode 100644
index d090496f..00000000
--- a/archived/cdh5.15-hive/files/etc/supervisord.d/hive-server2.conf
+++ /dev/null
@@ -1,8 +0,0 @@
-[program:hive-server2]
-command=hive --service hiveserver2
-startsecs=2
-stopwaitsecs=10
-user=hive
-redirect_stderr=true
-stdout_logfile=/var/log/hive/hive-server2.log
-autostart=true
diff --git a/archived/cdh5.15-hive/files/etc/supervisord.d/mysql-metastore.conf b/archived/cdh5.15-hive/files/etc/supervisord.d/mysql-metastore.conf
deleted file mode 100644
index e95544e5..00000000
--- a/archived/cdh5.15-hive/files/etc/supervisord.d/mysql-metastore.conf
+++ /dev/null
@@ -1,8 +0,0 @@
-[program:mysql-metastore]
-command=/usr/bin/pidproxy /var/run/mysqld/mysqld.pid /usr/bin/mysqld_safe
-startsecs=2
-stopwaitsecs=10
-user=mysql
-redirect_stderr=true
-stdout_logfile=/var/log/mysql/mysql.log
-autostart=true
diff --git a/archived/cdh5.15-hive/files/etc/supervisord.d/socks-proxy.conf b/archived/cdh5.15-hive/files/etc/supervisord.d/socks-proxy.conf
deleted file mode 100644
index 43602d26..00000000
--- a/archived/cdh5.15-hive/files/etc/supervisord.d/socks-proxy.conf
+++ /dev/null
@@ -1,9 +0,0 @@
-[program:socks-proxy]
-command=/usr/bin/ssh -o StrictHostKeyChecking=no -v -N -D 0.0.0.0:1180 localhost
-startsecs=2
-stopwaitsecs=10
-startretries=30
-user=root
-redirect_stderr=true
-stdout_logfile=/var/log/socks-proxy
-autostart=true
diff --git a/archived/cdh5.15-hive/files/etc/supervisord.d/sshd.conf b/archived/cdh5.15-hive/files/etc/supervisord.d/sshd.conf
deleted file mode 100644
index 3930b4c0..00000000
--- a/archived/cdh5.15-hive/files/etc/supervisord.d/sshd.conf
+++ /dev/null
@@ -1,9 +0,0 @@
-[program:sshd]
-command=/usr/sbin/sshd -D -e
-startsecs=2
-stopwaitsecs=10
-startretries=30
-user=root
-redirect_stderr=true
-stdout_logfile=/var/log/sshd
-autostart=true
diff --git a/archived/cdh5.15-hive/files/etc/supervisord.d/yarn-nodemanager.conf b/archived/cdh5.15-hive/files/etc/supervisord.d/yarn-nodemanager.conf
deleted file mode 100644
index 8f81e3c7..00000000
--- a/archived/cdh5.15-hive/files/etc/supervisord.d/yarn-nodemanager.conf
+++ /dev/null
@@ -1,8 +0,0 @@
-[program:yarn-nodemanager]
-command=yarn nodemanager
-startsecs=2
-stopwaitsecs=10
-user=yarn
-redirect_stderr=true
-stdout_logfile=/var/log/hadoop-yarn/hadoop-yarn-nodemanager.log
-autostart=true
diff --git a/archived/cdh5.15-hive/files/etc/supervisord.d/yarn-resourcemanager.conf b/archived/cdh5.15-hive/files/etc/supervisord.d/yarn-resourcemanager.conf
deleted file mode 100644
index 2ef16026..00000000
--- a/archived/cdh5.15-hive/files/etc/supervisord.d/yarn-resourcemanager.conf
+++ /dev/null
@@ -1,8 +0,0 @@
-[program:yarn-resourcemanager]
-command=yarn resourcemanager
-startsecs=2
-stopwaitsecs=10
-user=yarn
-redirect_stderr=true
-stdout_logfile=/var/log/hadoop-yarn/hadoop-yarn-resourcemanager.log
-autostart=true
diff --git a/archived/cdh5.15-hive/files/root/setup.sh b/archived/cdh5.15-hive/files/root/setup.sh
deleted file mode 100755
index 365f9ff4..00000000
--- a/archived/cdh5.15-hive/files/root/setup.sh
+++ /dev/null
@@ -1,50 +0,0 @@
-#!/bin/bash -ex
-
-# make file system hostname resolvable
-echo "127.0.0.1 hadoop-master" >> /etc/hosts
-
-# format namenode
-chown hdfs:hdfs /var/lib/hadoop-hdfs/cache/
-
-# workaround for 'could not open session' bug as suggested here:
-# https://github.com/docker/docker/issues/7056#issuecomment-49371610
-rm -f /etc/security/limits.d/hdfs.conf
-su -c "echo 'N' | hdfs namenode -format" hdfs
-
-# start hdfs
-su -c "hdfs datanode 2>&1 > /var/log/hadoop-hdfs/hadoop-hdfs-datanode.log" hdfs&
-su -c "hdfs namenode 2>&1 > /var/log/hadoop-hdfs/hadoop-hdfs-namenode.log" hdfs&
-
-# wait for process starting
-sleep 10
-
-# remove a broken symlink created by cdh installer so that init-hdfs.sh does not blow up on it
-# (hbase-annotations.jar seems not needed in our case)
-rm /usr/lib/hive/lib/hbase-annotations.jar
-
-# 4 exec cloudera hdfs init script
-/usr/lib/hadoop/libexec/init-hdfs.sh
-
-# init hive directories
-su -s /bin/bash hdfs -c '/usr/bin/hadoop fs -mkdir /user/hive/warehouse'
-su -s /bin/bash hdfs -c '/usr/bin/hadoop fs -chmod 1777 /user/hive/warehouse'
-su -s /bin/bash hdfs -c '/usr/bin/hadoop fs -chown hive /user/hive/warehouse'
-
-# stop hdfs
-killall java
-
-# setup metastore
-mysql_install_db
-
-/usr/bin/mysqld_safe &
-sleep 10s
-
-cd /usr/lib/hive/scripts/metastore/upgrade/mysql/
-echo "GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' WITH GRANT OPTION; FLUSH PRIVILEGES;" | mysql
-echo "CREATE DATABASE metastore; USE metastore; SOURCE hive-schema-1.1.0.mysql.sql;" | mysql
-/usr/bin/mysqladmin -u root password 'root'
-
-killall mysqld
-sleep 10s
-mkdir /var/log/mysql/
-chown mysql:mysql /var/log/mysql/
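
A quick way to confirm the metastore schema this script loads is to start MySQL and list the tables; a sketch run inside the container, using the root/root credentials the script itself configures:

```
/usr/bin/mysqld_safe &
sleep 10
echo "USE metastore; SHOW TABLES;" | mysql -u root -proot
```
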
diff --git a/archived/etc/compose/cdh5.12-hive/docker-compose.yml b/archived/etc/compose/cdh5.12-hive/docker-compose.yml
deleted file mode 100644
index 0dd3eedd..00000000
--- a/archived/etc/compose/cdh5.12-hive/docker-compose.yml
+++ /dev/null
@@ -1,5 +0,0 @@
-version: '2.0'
-services:
- hadoop-master:
- hostname: hadoop-master
- image: testing/cdh5.12-hive:latest
diff --git a/archived/hdp2.6-hive/Dockerfile b/archived/hdp2.6-hive/Dockerfile
deleted file mode 100644
index 3de87a92..00000000
--- a/archived/hdp2.6-hive/Dockerfile
+++ /dev/null
@@ -1,95 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-FROM testing/centos6-oj8:unlabelled
-
-# Change default timezone
-RUN ln -snf "/usr/share/zoneinfo/Asia/Kathmandu" /etc/localtime && echo "Asia/Kathmandu" > /etc/timezone
-
-# Install HDP repo
-RUN set -xeu; \
- wget -nv http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.6.5.0/hdp.repo -P /etc/yum.repos.d; \
- wget -nv http://public-repo-1.hortonworks.com/HDP-GPL/centos6/2.x/updates/2.6.5.0/hdp.gpl.repo -P /etc/yum.repos.d; \
- echo OK
-
-# Install Hadoop, Hive (w/ MySQL)
-RUN yum install -y \
- hadoop-hdfs-namenode \
- hadoop-hdfs-secondarynamenode \
- hadoop-hdfs-datanode \
-
- hadoop-yarn-resourcemanager \
- hadoop-yarn-nodemanager \
-
- hive \
- hive-metastore \
- hive-server2 \
-
- hadooplzo \
- hadooplzo-native \
- lzo \
- lzo-devel \
- lzop \
-
- mysql-server mysql-connector-java \
- libxslt \
-
-# Cleanup
- && yum -y clean all && rm -rf /tmp/* /var/tmp/* \
- && ln -s /usr/share/java/mysql-connector-java.jar /usr/hdp/current/hive-metastore/lib/mysql-connector-java.jar
-
-# Delete original configuration
-RUN rm -r /etc/hadoop/conf/* \
- && rm -r /etc/hive/conf/*
-
-# Copy configuration files
-COPY ./files /
-
-# Run setup script
-RUN /root/setup.sh \
- && rm -rf /tmp/* /var/tmp/*
-
-# Setup sock proxy
-RUN yum install -y openssh openssh-clients openssh-server && yum -y clean all
-RUN ssh-keygen -t rsa -b 4096 -C "automation@trino.io" -N "" -f /root/.ssh/id_rsa \
- && ssh-keygen -t rsa -b 4096 -N "" -f /etc/ssh/ssh_host_rsa_key \
- && ssh-keygen -t dsa -b 1024 -N "" -f /etc/ssh/ssh_host_dsa_key \
- && cp /root/.ssh/id_rsa.pub /root/.ssh/authorized_keys
-RUN chmod 755 /root && chmod 700 /root/.ssh
-RUN passwd --unlock root
-
-# Provide convenience bash history
-RUN set -xeu; \
- echo "supervisorctl restart all" >> ~root/.bash_history; \
- for user in root hive hdfs; do \
- # ~hive might be owned by root
- chown -R "${user}:" "$(eval echo ~"${user}")"; \
- sudo -u "${user}" bash -c ' echo "netstat -ltnp" >> ~/.bash_history '; \
- sudo -u "${user}" bash -c ' echo "beeline -u jdbc:hive2://localhost:10000/default -n hive" >> ~/.bash_history '; \
- sudo -u "${user}" bash -c ' echo "hadoop dfs -ls -R /user/hive/warehouse" >> ~/.bash_history '; \
- sudo -u "${user}" bash -c ' mkdir -p ~/.beeline '; \
- sudo -u "${user}" bash -c ' echo "SELECT current_user();" >> ~/.beeline/history '; \
- done
-
-# HDFS ports
-EXPOSE 1004 1006 8020 50010 50020 50070 50075 50470
-
-# YARN ports
-EXPOSE 8030 8031 8032 8033 8040 8041 8042 8088 10020 19888
-
-# HIVE ports
-EXPOSE 9083 10000
-
-# SOCKS port
-EXPOSE 1180
-
-CMD supervisord -c /etc/supervisord.conf
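
The commands seeded into the bash history above double as a smoke test; from a shell inside a running container they can be executed as-is, for example:

```
beeline -u jdbc:hive2://localhost:10000/default -n hive -e "SELECT current_user();"
hadoop fs -ls -R /user/hive/warehouse
supervisorctl restart all
```
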
diff --git a/archived/hdp2.6-hive/files/etc/hadoop/conf/capacity-scheduler.xml b/archived/hdp2.6-hive/files/etc/hadoop/conf/capacity-scheduler.xml
deleted file mode 100644
index 0e15fb6b..00000000
--- a/archived/hdp2.6-hive/files/etc/hadoop/conf/capacity-scheduler.xml
+++ /dev/null
@@ -1,100 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<configuration>
-  <property>
-    <name>yarn.scheduler.capacity.maximum-applications</name>
-    <value>10000</value>
-    <description>
-      Maximum number of applications that can be pending and running.
-    </description>
-  </property>
-  <property>
-    <name>yarn.scheduler.capacity.maximum-am-resource-percent</name>
-    <value>1</value>
-    <description>
-      Maximum percent of resources in the cluster which can be used to run
-      application masters i.e. controls number of concurrent running
-      applications.
-    </description>
-  </property>
-  <property>
-    <name>yarn.scheduler.capacity.root.queues</name>
-    <value>default</value>
-    <description>
-      The queues at the this level (root is the root queue).
-    </description>
-  </property>
-  <property>
-    <name>yarn.scheduler.capacity.root.default.capacity</name>
-    <value>100</value>
-    <description>Default queue target capacity.</description>
-  </property>
-  <property>
-    <name>yarn.scheduler.capacity.root.default.maximum-capacity</name>
-    <value>100</value>
-    <description>
-      The maximum capacity of the default queue.
-    </description>
-  </property>
-  <property>
-    <name>yarn.scheduler.capacity.root.default.state</name>
-    <value>RUNNING</value>
-    <description>
-      The state of the default queue. State can be one of RUNNING or STOPPED.
-    </description>
-  </property>
-  <property>
-    <name>yarn.scheduler.capacity.root.default.acl_submit_applications</name>
-    <value>*</value>
-    <description>
-      The ACL of who can submit jobs to the default queue.
-    </description>
-  </property>
-  <property>
-    <name>yarn.scheduler.capacity.root.default.user-limit-factor</name>
-    <value>1</value>
-    <description>
-      Default queue user limit a percentage from 0.0 to 1.0.
-    </description>
-  </property>
-  <property>
-    <name>yarn.scheduler.capacity.root.default.acl_administer_queue</name>
-    <value>*</value>
-    <description>
-      The ACL of who can administer jobs on the default queue.
-    </description>
-  </property>
-  <property>
-    <name>yarn.scheduler.capacity.node-locality-delay</name>
-    <value>-1</value>
-    <description>
-      Number of missed scheduling opportunities after which the CapacityScheduler
-      attempts to schedule rack-local containers.
-      Typically this should be set to number of racks in the cluster, this
-      feature is disabled by default, set to -1.
-    </description>
-  </property>
-</configuration>
diff --git a/archived/hdp2.6-hive/files/etc/hadoop/conf/core-site.xml b/archived/hdp2.6-hive/files/etc/hadoop/conf/core-site.xml
deleted file mode 100644
index 4dbdd062..00000000
--- a/archived/hdp2.6-hive/files/etc/hadoop/conf/core-site.xml
+++ /dev/null
@@ -1,97 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<configuration>
-  <property>
-    <name>fs.defaultFS</name>
-    <value>hdfs://hadoop-master:9000</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.oozie.hosts</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.oozie.groups</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.httpfs.hosts</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.httpfs.groups</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.llama.hosts</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.llama.groups</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.hue.hosts</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.hue.groups</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.mapred.hosts</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.mapred.groups</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.hive.hosts</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.hive.groups</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.hdfs.groups</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.hdfs.hosts</name>
-    <value>*</value>
-  </property>
-</configuration>
diff --git a/archived/hdp2.6-hive/files/etc/hadoop/conf/hadoop-env.sh b/archived/hdp2.6-hive/files/etc/hadoop/conf/hadoop-env.sh
deleted file mode 100644
index cb995f10..00000000
--- a/archived/hdp2.6-hive/files/etc/hadoop/conf/hadoop-env.sh
+++ /dev/null
@@ -1,27 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Set Hadoop-specific environment variables here.
-# Forcing YARN-based mapreduce implementation.
-# Make sure to comment out if you want to go back to the default or
-# if you want this to be tweakable on a per-user basis
-# export HADOOP_MAPRED_HOME=/usr/lib/hadoop-mapreduce
-
-# The maximum amount of heap to use, in MB. Default is 1000.
-export HADOOP_HEAPSIZE=256
-
-# Extra Java runtime options. Empty by default.
-export HADOOP_NAMENODE_OPTS="$HADOOP_NAMENODE_OPTS -Xmx512m"
-export YARN_OPTS="$YARN_OPTS -Xmx256m"
diff --git a/archived/hdp2.6-hive/files/etc/hadoop/conf/hdfs-site.xml b/archived/hdp2.6-hive/files/etc/hadoop/conf/hdfs-site.xml
deleted file mode 100644
index 9d1e71aa..00000000
--- a/archived/hdp2.6-hive/files/etc/hadoop/conf/hdfs-site.xml
+++ /dev/null
@@ -1,33 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<configuration>
-  <property>
-    <name>dfs.namenode.name.dir</name>
-    <value>/var/lib/hadoop-hdfs/cache/name/</value>
-  </property>
-  <property>
-    <name>dfs.datanode.data.dir</name>
-    <value>/var/lib/hadoop-hdfs/cache/data/</value>
-  </property>
-  <property>
-    <name>fs.viewfs.mounttable.hadoop-viewfs.link./default</name>
-    <value>hdfs://hadoop-master:9000/user/hive/warehouse</value>
-  </property>
-</configuration>
diff --git a/archived/hdp2.6-hive/files/etc/hadoop/conf/mapred-site.xml b/archived/hdp2.6-hive/files/etc/hadoop/conf/mapred-site.xml
deleted file mode 100644
index aff356cf..00000000
--- a/archived/hdp2.6-hive/files/etc/hadoop/conf/mapred-site.xml
+++ /dev/null
@@ -1,46 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<configuration>
-  <property>
-    <name>mapred.job.tracker</name>
-    <value>hadoop-master:8021</value>
-  </property>
-  <property>
-    <name>mapreduce.framework.name</name>
-    <value>yarn</value>
-  </property>
-  <property>
-    <name>mapreduce.jobhistory.address</name>
-    <value>hadoop-master:10020</value>
-  </property>
-  <property>
-    <name>mapreduce.jobhistory.webapp.address</name>
-    <value>hadoop-master:19888</value>
-  </property>
-  <property>
-    <description>To set the value of tmp directory for map and reduce tasks.</description>
-    <name>mapreduce.task.tmp.dir</name>
-    <value>/var/lib/hadoop-mapreduce/cache/${user.name}/tasks</value>
-  </property>
-</configuration>
diff --git a/archived/hdp2.6-hive/files/etc/hadoop/conf/yarn-site.xml b/archived/hdp2.6-hive/files/etc/hadoop/conf/yarn-site.xml
deleted file mode 100644
index e7b04a6a..00000000
--- a/archived/hdp2.6-hive/files/etc/hadoop/conf/yarn-site.xml
+++ /dev/null
@@ -1,110 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<configuration>
-  <property>
-    <name>yarn.nodemanager.aux-services</name>
-    <value>mapreduce_shuffle</value>
-  </property>
-  <property>
-    <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
-    <value>org.apache.hadoop.mapred.ShuffleHandler</value>
-  </property>
-  <property>
-    <name>yarn.log-aggregation-enable</name>
-    <value>true</value>
-  </property>
-  <property>
-    <name>yarn.dispatcher.exit-on-error</name>
-    <value>true</value>
-  </property>
-  <property>
-    <description>List of directories to store localized files in.</description>
-    <name>yarn.nodemanager.local-dirs</name>
-    <value>/var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir</value>
-  </property>
-  <property>
-    <description>Where to store container logs.</description>
-    <name>yarn.nodemanager.log-dirs</name>
-    <value>/var/log/hadoop-yarn/containers</value>
-  </property>
-  <property>
-    <description>Where to aggregate logs to.</description>
-    <name>yarn.nodemanager.remote-app-log-dir</name>
-    <value>/var/log/hadoop-yarn/apps</value>
-  </property>
-  <property>
-    <description>Classpath for typical applications.</description>
-    <name>yarn.application.classpath</name>
-    <value>
-      /etc/hadoop/conf,
-      /usr/hdp/current/hadoop-client/*,
-      /usr/hdp/current/hadoop-client/lib/*,
-      /usr/hdp/current/hadoop-hdfs-client/*,
-      /usr/hdp/current/hadoop-hdfs-client/lib/*,
-      /usr/hdp/current/hadoop-yarn-client/*,
-      /usr/hdp/current/hadoop-yarn-client/lib/*,
-      /usr/hdp/current/hadoop-mapreduce-client/*,
-      /usr/hdp/current/hadoop-mapreduce-client/lib/*
-    </value>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.hostname</name>
-    <value>hadoop-master</value>
-  </property>
-  <property>
-    <name>yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage</name>
-    <value>100</value>
-  </property>
-  <property>
-    <name>yarn.nodemanager.pmem-check-enabled</name>
-    <value>false</value>
-  </property>
-  <property>
-    <name>yarn.nodemanager.vmem-check-enabled</name>
-    <value>false</value>
-  </property>
-  <property>
-    <name>yarn.nodemanager.resource.memory.enforced</name>
-    <value>false</value>
-  </property>
-  <property>
-    <name>yarn.nodemanager.elastic-memory-control.enabled</name>
-    <value>false</value>
-  </property>
-  <property>
-    <name>yarn.log.server.url</name>
-    <value>http://hadoop-master:19888/jobhistory/logs</value>
-  </property>
-</configuration>
diff --git a/archived/hdp2.6-hive/files/etc/hive/conf/hive-site.xml b/archived/hdp2.6-hive/files/etc/hive/conf/hive-site.xml
deleted file mode 100644
index 4dfaa2a5..00000000
--- a/archived/hdp2.6-hive/files/etc/hive/conf/hive-site.xml
+++ /dev/null
@@ -1,67 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<configuration>
-  <property>
-    <name>javax.jdo.option.ConnectionURL</name>
-    <value>jdbc:mysql://localhost/metastore</value>
-    <description>the URL of the MySQL database</description>
-  </property>
-  <property>
-    <name>javax.jdo.option.ConnectionDriverName</name>
-    <value>com.mysql.jdbc.Driver</value>
-  </property>
-  <property>
-    <name>javax.jdo.option.ConnectionUserName</name>
-    <value>root</value>
-  </property>
-  <property>
-    <name>javax.jdo.option.ConnectionPassword</name>
-    <value>root</value>
-  </property>
-  <property>
-    <name>datanucleus.autoCreateSchema</name>
-    <value>false</value>
-  </property>
-  <property>
-    <name>datanucleus.fixedDatastore</name>
-    <value>true</value>
-  </property>
-  <property>
-    <name>datanucleus.autoStartMechanism</name>
-    <value>SchemaTable</value>
-  </property>
-  <property>
-    <name>hive.security.authorization.createtable.owner.grants</name>
-    <value>ALL</value>
-    <description>The set of privileges automatically granted to the owner whenever a table gets created.</description>
-  </property>
-  <property>
-    <name>hive.users.in.admin.role</name>
-    <value>hdfs,hive</value>
-  </property>
-</configuration>
diff --git a/archived/hdp2.6-hive/files/etc/supervisord.conf b/archived/hdp2.6-hive/files/etc/supervisord.conf
deleted file mode 100644
index 2ac8dba5..00000000
--- a/archived/hdp2.6-hive/files/etc/supervisord.conf
+++ /dev/null
@@ -1,21 +0,0 @@
-[supervisord]
-logfile = /var/log/supervisord.log
-logfile_maxbytes = 50MB
-logfile_backups=10
-loglevel = info
-pidfile = /var/run/supervisord.pid
-nodaemon = true
-directory = /tmp
-strip_ansi = false
-
-[unix_http_server]
-file = /tmp/supervisor.sock
-
-[rpcinterface:supervisor]
-supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
-
-[supervisorctl]
-serverurl = unix:///tmp/supervisor.sock
-
-[include]
-files = /etc/supervisord.d/*.conf
diff --git a/archived/hdp2.6-hive/files/etc/supervisord.d/hdfs-datanode.conf b/archived/hdp2.6-hive/files/etc/supervisord.d/hdfs-datanode.conf
deleted file mode 100644
index 87fbcbed..00000000
--- a/archived/hdp2.6-hive/files/etc/supervisord.d/hdfs-datanode.conf
+++ /dev/null
@@ -1,8 +0,0 @@
-[program:hdfs-datanode]
-command=hdfs datanode
-startsecs=2
-stopwaitsecs=10
-user=hdfs
-redirect_stderr=true
-stdout_logfile=/var/log/hadoop-hdfs/hadoop-hdfs-datanode.log
-autostart=true
diff --git a/archived/hdp2.6-hive/files/etc/supervisord.d/hdfs-namenode.conf b/archived/hdp2.6-hive/files/etc/supervisord.d/hdfs-namenode.conf
deleted file mode 100644
index 0d8de8db..00000000
--- a/archived/hdp2.6-hive/files/etc/supervisord.d/hdfs-namenode.conf
+++ /dev/null
@@ -1,8 +0,0 @@
-[program:hdfs-namenode]
-command=hdfs namenode
-startsecs=2
-stopwaitsecs=10
-user=hdfs
-redirect_stderr=true
-stdout_logfile=/var/log/hadoop-hdfs/hadoop-hdfs-namenode.log
-autostart=true
diff --git a/archived/hdp2.6-hive/files/etc/supervisord.d/hive-metastore.conf b/archived/hdp2.6-hive/files/etc/supervisord.d/hive-metastore.conf
deleted file mode 100644
index f5aaf8ca..00000000
--- a/archived/hdp2.6-hive/files/etc/supervisord.d/hive-metastore.conf
+++ /dev/null
@@ -1,9 +0,0 @@
-[program:hive-metastore]
-# Add `--debug:port=5006` for debugging
-command=hive --service metastore
-startsecs=2
-stopwaitsecs=10
-user=hive
-redirect_stderr=true
-stdout_logfile=/var/log/hive/hive-metastore.log
-autostart=true
diff --git a/archived/hdp2.6-hive/files/etc/supervisord.d/hive-server2.conf b/archived/hdp2.6-hive/files/etc/supervisord.d/hive-server2.conf
deleted file mode 100644
index d090496f..00000000
--- a/archived/hdp2.6-hive/files/etc/supervisord.d/hive-server2.conf
+++ /dev/null
@@ -1,8 +0,0 @@
-[program:hive-server2]
-command=hive --service hiveserver2
-startsecs=2
-stopwaitsecs=10
-user=hive
-redirect_stderr=true
-stdout_logfile=/var/log/hive/hive-server2.log
-autostart=true
diff --git a/archived/hdp2.6-hive/files/etc/supervisord.d/mysql-metastore.conf b/archived/hdp2.6-hive/files/etc/supervisord.d/mysql-metastore.conf
deleted file mode 100644
index e95544e5..00000000
--- a/archived/hdp2.6-hive/files/etc/supervisord.d/mysql-metastore.conf
+++ /dev/null
@@ -1,8 +0,0 @@
-[program:mysql-metastore]
-command=/usr/bin/pidproxy /var/run/mysqld/mysqld.pid /usr/bin/mysqld_safe
-startsecs=2
-stopwaitsecs=10
-user=mysql
-redirect_stderr=true
-stdout_logfile=/var/log/mysql/mysql.log
-autostart=true
diff --git a/archived/hdp2.6-hive/files/etc/supervisord.d/socks-proxy.conf b/archived/hdp2.6-hive/files/etc/supervisord.d/socks-proxy.conf
deleted file mode 100644
index 43602d26..00000000
--- a/archived/hdp2.6-hive/files/etc/supervisord.d/socks-proxy.conf
+++ /dev/null
@@ -1,9 +0,0 @@
-[program:socks-proxy]
-command=/usr/bin/ssh -o StrictHostKeyChecking=no -v -N -D 0.0.0.0:1180 localhost
-startsecs=2
-stopwaitsecs=10
-startretries=30
-user=root
-redirect_stderr=true
-stdout_logfile=/var/log/socks-proxy
-autostart=true
diff --git a/archived/hdp2.6-hive/files/etc/supervisord.d/sshd.conf b/archived/hdp2.6-hive/files/etc/supervisord.d/sshd.conf
deleted file mode 100644
index 3930b4c0..00000000
--- a/archived/hdp2.6-hive/files/etc/supervisord.d/sshd.conf
+++ /dev/null
@@ -1,9 +0,0 @@
-[program:sshd]
-command=/usr/sbin/sshd -D -e
-startsecs=2
-stopwaitsecs=10
-startretries=30
-user=root
-redirect_stderr=true
-stdout_logfile=/var/log/sshd
-autostart=true
diff --git a/archived/hdp2.6-hive/files/etc/supervisord.d/yarn-nodemanager.conf b/archived/hdp2.6-hive/files/etc/supervisord.d/yarn-nodemanager.conf
deleted file mode 100644
index 8f81e3c7..00000000
--- a/archived/hdp2.6-hive/files/etc/supervisord.d/yarn-nodemanager.conf
+++ /dev/null
@@ -1,8 +0,0 @@
-[program:yarn-nodemanager]
-command=yarn nodemanager
-startsecs=2
-stopwaitsecs=10
-user=yarn
-redirect_stderr=true
-stdout_logfile=/var/log/hadoop-yarn/hadoop-yarn-nodemanager.log
-autostart=true
diff --git a/archived/hdp2.6-hive/files/etc/supervisord.d/yarn-resourcemanager.conf b/archived/hdp2.6-hive/files/etc/supervisord.d/yarn-resourcemanager.conf
deleted file mode 100644
index 2ef16026..00000000
--- a/archived/hdp2.6-hive/files/etc/supervisord.d/yarn-resourcemanager.conf
+++ /dev/null
@@ -1,8 +0,0 @@
-[program:yarn-resourcemanager]
-command=yarn resourcemanager
-startsecs=2
-stopwaitsecs=10
-user=yarn
-redirect_stderr=true
-stdout_logfile=/var/log/hadoop-yarn/hadoop-yarn-resourcemanager.log
-autostart=true
diff --git a/archived/hdp2.6-hive/files/root/setup.sh b/archived/hdp2.6-hive/files/root/setup.sh
deleted file mode 100755
index 5dab21ea..00000000
--- a/archived/hdp2.6-hive/files/root/setup.sh
+++ /dev/null
@@ -1,54 +0,0 @@
-#!/bin/bash -ex
-
-# make file system hostname resolvable
-echo "127.0.0.1 hadoop-master" >> /etc/hosts
-
-# format namenode
-chown hdfs:hdfs /var/lib/hadoop-hdfs/cache/
-
-# workaround for 'could not open session' bug as suggested here:
-# https://github.com/docker/docker/issues/7056#issuecomment-49371610
-rm -f /etc/security/limits.d/hdfs.conf
-su -c "echo 'N' | hdfs namenode -format" hdfs
-
-# start hdfs
-su -c "hdfs namenode 2>&1 > /var/log/hadoop-hdfs/hadoop-hdfs-namenode.log" hdfs&
-
-# wait for process starting
-sleep 15
-
-# init basic hdfs directories
-/usr/hdp/current/hadoop-client/libexec/init-hdfs.sh
-
-# 4.1 Create an hdfs home directory for the yarn user. For some reason, init-hdfs doesn't do so.
-su -s /bin/bash hdfs -c '/usr/bin/hadoop fs -mkdir /user/yarn && /usr/bin/hadoop fs -chown yarn:yarn /user/yarn'
-su -s /bin/bash hdfs -c '/usr/bin/hadoop fs -chmod -R 1777 /tmp/hadoop-yarn'
-su -s /bin/bash hdfs -c '/usr/bin/hadoop fs -mkdir /tmp/hadoop-yarn/staging && /usr/bin/hadoop fs -chown mapred:mapred /tmp/hadoop-yarn/staging && /usr/bin/hadoop fs -chmod -R 1777 /tmp/hadoop-yarn/staging'
-su -s /bin/bash hdfs -c '/usr/bin/hadoop fs -mkdir /tmp/hadoop-yarn/staging/history && /usr/bin/hadoop fs -chown mapred:mapred /tmp/hadoop-yarn/staging/history && /usr/bin/hadoop fs -chmod -R 1777 /tmp/hadoop-yarn/staging/history'
-
-# init hive directories
-su -s /bin/bash hdfs -c '/usr/bin/hadoop fs -mkdir /user/hive/warehouse'
-su -s /bin/bash hdfs -c '/usr/bin/hadoop fs -chmod 1777 /user/hive/warehouse'
-su -s /bin/bash hdfs -c '/usr/bin/hadoop fs -chown hive /user/hive/warehouse'
-
-# stop hdfs
-killall java
-
-# setup metastore
-mysql_install_db
-
-/usr/bin/mysqld_safe &
-sleep 10s
-
-cd /usr/hdp/current/hive-metastore/scripts/metastore/upgrade/mysql/
-echo "GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' WITH GRANT OPTION; FLUSH PRIVILEGES;" | mysql
-echo "CREATE DATABASE metastore; USE metastore; SOURCE hive-schema-1.2.1000.mysql.sql;" | mysql
-/usr/bin/mysqladmin -u root password 'root'
-
-killall mysqld
-sleep 10s
-mkdir /var/log/mysql/
-chown mysql:mysql /var/log/mysql/
-
-# Additional libs
-cp -av /usr/hdp/current/hadoop-client/lib/native/Linux-amd64-64/* /usr/lib64/
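
The native libraries copied in the last step (together with the hadooplzo packages from the Dockerfile) can be sanity-checked from inside the container; a sketch, where the `gplcompression` library name is an assumption about what the copied files are called:

```
hadoop checknative -a                    # shows which native codecs Hadoop picked up
ls /usr/lib64/ | grep -i gplcompression  # the LZO natives copied above, if present under their usual name
```
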
diff --git a/etc/compose/cdh5.15-hive/docker-compose.yml b/etc/compose/cdh5.15-hive/docker-compose.yml
deleted file mode 100644
index e70fb6bd..00000000
--- a/etc/compose/cdh5.15-hive/docker-compose.yml
+++ /dev/null
@@ -1,5 +0,0 @@
-version: '2.0'
-services:
- hadoop-master:
- hostname: hadoop-master
- image: testing/cdh5.15-hive:latest
diff --git a/etc/compose/hdp2.6-hive/docker-compose.yml b/etc/compose/hdp2.6-hive/docker-compose.yml
deleted file mode 100644
index ee0ca5b3..00000000
--- a/etc/compose/hdp2.6-hive/docker-compose.yml
+++ /dev/null
@@ -1,5 +0,0 @@
-version: '2.0'
-services:
- hadoop-master:
- hostname: hadoop-master
- image: testing/hdp2.6-hive:latest
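
Each of these one-service compose files was used the same way; a sketch of the usual invocation against the file removed here:

```
docker-compose -f etc/compose/hdp2.6-hive/docker-compose.yml up -d
docker-compose -f etc/compose/hdp2.6-hive/docker-compose.yml logs -f hadoop-master
docker-compose -f etc/compose/hdp2.6-hive/docker-compose.yml down
```
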
diff --git a/testing/cdh5.15-hive-kerberized-kms/Dockerfile b/testing/cdh5.15-hive-kerberized-kms/Dockerfile
deleted file mode 100644
index 6ee88cbe..00000000
--- a/testing/cdh5.15-hive-kerberized-kms/Dockerfile
+++ /dev/null
@@ -1,62 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Cloudera removed access to the CDH5 repositories in April 2021.
-# In order to build a new image version we use the last released image version to extract files from it.
-# The previous Dockerfile version is archived in archived/cdh5.15-hive.
-FROM ghcr.io/trinodb/testing/cdh5.15-hive-kerberized-kms:38 AS builder
-COPY ./files /
-RUN /usr/bin/extract_rpms.sh hadoop-kms
-
-FROM testing/cdh5.15-hive-kerberized:unlabelled
-ARG ADDPRINC_ARGS="-maxrenewlife \"10 days\" +allow_renewable"
-COPY --from=builder /rpms/ /rpms/
-
-RUN set -xeu \
- && rpm -i -U /rpms/*.rpm \
- # Cleanup
- && yum -y clean all && rm -rf /tmp/* /var/tmp/*
-
-# buildx doesn't allow copying files into linked directories, so we need to squash those first
-RUN unlink /etc/hadoop-kms/conf && \
- unlink /etc/hadoop-kms/tomcat-conf && \
- mv /etc/hadoop-kms/tomcat-conf.http /etc/hadoop-kms/tomcat-conf
-
-# COPY CONFIGURATION
-COPY ./files /
-
-# add users and group for testing purposes
-RUN set -xeu && \
- for username in alice bob charlie; do \
- groupadd "${username}_group" && \
- useradd -g "${username}_group" "${username}" && \
- /usr/sbin/kadmin.local -q "addprinc ${ADDPRINC_ARGS} -randkey ${username}/hadoop-master@LABS.TERADATA.COM" && \
- /usr/sbin/kadmin.local -q "xst -norandkey -k /etc/hive/conf/${username}.keytab ${username}/hadoop-master"; \
- done && \
- echo OK
-
-RUN set -x && \
- install --directory --owner=kms --group=kms /var/run/hadoop-kms && \
- # $JAVA_HOME/jre/lib/security/java.security is used by default and in our Java it prevents KMS code from accessing its own keystore
- sed -e 's@-Dcatalina.base="$CATALINA_BASE"@\0 -Djceks.key.serialFilter="**"@' -i /usr/lib/bigtop-tomcat/bin/catalina.sh && \
- /root/setup_kms.sh && \
- # Purge Kerberos credential cache of root user
- kdestroy && \
- echo OK
-
-RUN set -x && \
- find /var/log -type f -name \*.log -printf "truncate %p\n" -exec truncate --size 0 {} \; && \
- # Purge /tmp, this includes credential caches of other users
- find /tmp -mindepth 1 -maxdepth 1 -exec rm -rf {} + && \
- echo OK
-
-CMD supervisord -c /etc/supervisord.conf
diff --git a/testing/cdh5.15-hive-kerberized-kms/files/etc/hadoop-kms/conf/core-site.xml b/testing/cdh5.15-hive-kerberized-kms/files/etc/hadoop-kms/conf/core-site.xml
deleted file mode 100644
index ddf829a1..00000000
--- a/testing/cdh5.15-hive-kerberized-kms/files/etc/hadoop-kms/conf/core-site.xml
+++ /dev/null
@@ -1,9 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<configuration>
-  <property>
-    <name>fs.defaultFS</name>
-    <value>hdfs://hadoop-master:9000</value>
-  </property>
-</configuration>
diff --git a/testing/cdh5.15-hive-kerberized-kms/files/etc/hadoop-kms/conf/kms-acls.xml b/testing/cdh5.15-hive-kerberized-kms/files/etc/hadoop-kms/conf/kms-acls.xml
deleted file mode 100644
index 0a31b900..00000000
--- a/testing/cdh5.15-hive-kerberized-kms/files/etc/hadoop-kms/conf/kms-acls.xml
+++ /dev/null
@@ -1,35 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<configuration>
-  <property>
-    <name>default.key.acl.ALL</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>default.key.acl.MANAGEMENT</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>default.key.acl.READ</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>default.key.acl.GENERATE_EEK</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>default.key.acl.DECRYPT_EEK</name>
-    <value>*</value>
-  </property>
-</configuration>
diff --git a/testing/cdh5.15-hive-kerberized-kms/files/etc/hadoop-kms/conf/kms-site.xml b/testing/cdh5.15-hive-kerberized-kms/files/etc/hadoop-kms/conf/kms-site.xml
deleted file mode 100644
index 52ee4b51..00000000
--- a/testing/cdh5.15-hive-kerberized-kms/files/etc/hadoop-kms/conf/kms-site.xml
+++ /dev/null
@@ -1,29 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<configuration>
-  <property>
-    <name>hadoop.kms.key.provider.uri</name>
-    <value>jceks://file@/${user.home}/kms.keystore</value>
-  </property>
-  <property>
-    <name>hadoop.kms.authentication.type</name>
-    <value>kerberos</value>
-  </property>
-  <property>
-    <name>hadoop.kms.authentication.kerberos.keytab</name>
-    <value>/etc/hadoop/conf/HTTP.keytab</value>
-  </property>
-  <property>
-    <name>hadoop.kms.authentication.kerberos.principal</name>
-    <value>HTTP/hadoop-master</value>
-  </property>
-  <property>
-    <name>hadoop.kms.authentication.kerberos.name.rules</name>
-    <value>DEFAULT</value>
-  </property>
-</configuration>
diff --git a/testing/cdh5.15-hive-kerberized-kms/files/etc/hadoop-kms/conf/passwordfile b/testing/cdh5.15-hive-kerberized-kms/files/etc/hadoop-kms/conf/passwordfile
deleted file mode 100644
index 7d8381bf..00000000
--- a/testing/cdh5.15-hive-kerberized-kms/files/etc/hadoop-kms/conf/passwordfile
+++ /dev/null
@@ -1 +0,0 @@
-abc1234
diff --git a/testing/cdh5.15-hive-kerberized-kms/files/etc/hadoop/conf/core-site.xml b/testing/cdh5.15-hive-kerberized-kms/files/etc/hadoop/conf/core-site.xml
deleted file mode 100644
index 3efd6479..00000000
--- a/testing/cdh5.15-hive-kerberized-kms/files/etc/hadoop/conf/core-site.xml
+++ /dev/null
@@ -1,75 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<configuration>
-  <property>
-    <name>fs.defaultFS</name>
-    <value>hdfs://hadoop-master:9000</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.httpfs.hosts</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.httpfs.groups</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.hive.hosts</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.hive.groups</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.hdfs.groups</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.hdfs.hosts</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.presto-server.groups</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.presto-server.hosts</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.security.authentication</name>
-    <value>kerberos</value>
-  </property>
-  <property>
-    <name>hadoop.security.authorization</name>
-    <value>true</value>
-  </property>
-  <property>
-    <name>hadoop.security.key.provider.path</name>
-    <value>kms://http@hadoop-master:16000/kms</value>
-  </property>
-  <property>
-    <name>dfs.encryption.key.provider.uri</name>
-    <value>kms://http@hadoop-master:16000/kms</value>
-  </property>
-</configuration>
diff --git a/testing/cdh5.15-hive-kerberized-kms/files/etc/hadoop/conf/hdfs-site.xml b/testing/cdh5.15-hive-kerberized-kms/files/etc/hadoop/conf/hdfs-site.xml
deleted file mode 100644
index 185d814f..00000000
--- a/testing/cdh5.15-hive-kerberized-kms/files/etc/hadoop/conf/hdfs-site.xml
+++ /dev/null
@@ -1,123 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<configuration>
-  <property>
-    <name>dfs.namenode.name.dir</name>
-    <value>/var/lib/hadoop-hdfs/cache/name/</value>
-  </property>
-  <property>
-    <name>dfs.datanode.data.dir</name>
-    <value>/var/lib/hadoop-hdfs/cache/data/</value>
-  </property>
-  <property>
-    <name>fs.viewfs.mounttable.hadoop-viewfs.link./default</name>
-    <value>hdfs://hadoop-master:9000/user/hive/warehouse</value>
-  </property>
-  <property>
-    <name>dfs.block.access.token.enable</name>
-    <value>true</value>
-  </property>
-  <property>
-    <name>dfs.namenode.keytab.file</name>
-    <value>/etc/hadoop/conf/hdfs.keytab</value>
-  </property>
-  <property>
-    <name>dfs.namenode.kerberos.principal</name>
-    <value>hdfs/hadoop-master@LABS.TERADATA.COM</value>
-  </property>
-  <property>
-    <name>dfs.namenode.kerberos.internal.spnego.principal</name>
-    <value>HTTP/hadoop-master@LABS.TERADATA.COM</value>
-  </property>
-  <property>
-    <name>dfs.secondary.namenode.keytab.file</name>
-    <value>/etc/hadoop/conf/hdfs.keytab</value>
-  </property>
-  <property>
-    <name>dfs.secondary.namenode.kerberos.principal</name>
-    <value>hdfs/hadoop-master@LABS.TERADATA.COM</value>
-  </property>
-  <property>
-    <name>dfs.secondary.namenode.kerberos.internal.spnego.principal</name>
-    <value>HTTP/hadoop-master@LABS.TERADATA.COM</value>
-  </property>
-  <property>
-    <name>dfs.datanode.keytab.file</name>
-    <value>/etc/hadoop/conf/hdfs.keytab</value>
-  </property>
-  <property>
-    <name>dfs.datanode.kerberos.principal</name>
-    <value>hdfs/hadoop-master@LABS.TERADATA.COM</value>
-  </property>
-  <property>
-    <name>dfs.webhdfs.enabled</name>
-    <value>true</value>
-  </property>
-  <property>
-    <name>dfs.web.authentication.kerberos.principal</name>
-    <value>HTTP/hadoop-master@LABS.TERADATA.COM</value>
-  </property>
-  <property>
-    <name>dfs.web.authentication.kerberos.keytab</name>
-    <value>/etc/hadoop/conf/HTTP.keytab</value>
-  </property>
-  <property>
-    <name>ignore.secure.ports.for.testing</name>
-    <value>true</value>
-  </property>
-  <property>
-    <name>dfs.http.policy</name>
-    <value>HTTP_ONLY</value>
-  </property>
-  <property>
-    <name>dfs.namenode.acls.enabled</name>
-    <value>true</value>
-  </property>
-  <property>
-    <name>dfs.permissions</name>
-    <value>true</value>
-  </property>
-</configuration>
diff --git a/testing/cdh5.15-hive-kerberized-kms/files/etc/hadoop/conf/taskcontroller.cfg b/testing/cdh5.15-hive-kerberized-kms/files/etc/hadoop/conf/taskcontroller.cfg
deleted file mode 100644
index 2384a21e..00000000
--- a/testing/cdh5.15-hive-kerberized-kms/files/etc/hadoop/conf/taskcontroller.cfg
+++ /dev/null
@@ -1,5 +0,0 @@
-hadoop.log.dir=/var/log/hadoop-mapreduce
-mapreduce.tasktracker.group=mapred
-banned.users=mapred,bin
-min.user.id=0
-allowed.system.users=nobody,hive
diff --git a/testing/cdh5.15-hive-kerberized-kms/files/etc/hive/conf/hive-site.xml b/testing/cdh5.15-hive-kerberized-kms/files/etc/hive/conf/hive-site.xml
deleted file mode 100644
index 7629fb4b..00000000
--- a/testing/cdh5.15-hive-kerberized-kms/files/etc/hive/conf/hive-site.xml
+++ /dev/null
@@ -1,85 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<configuration>
-  <property>
-    <name>javax.jdo.option.ConnectionURL</name>
-    <value>jdbc:mysql://localhost/metastore</value>
-  </property>
-  <property>
-    <name>javax.jdo.option.ConnectionDriverName</name>
-    <value>com.mysql.jdbc.Driver</value>
-  </property>
-  <property>
-    <name>javax.jdo.option.ConnectionUserName</name>
-    <value>root</value>
-  </property>
-  <property>
-    <name>javax.jdo.option.ConnectionPassword</name>
-    <value>root</value>
-  </property>
-  <property>
-    <name>datanucleus.autoCreateSchema</name>
-    <value>false</value>
-  </property>
-  <property>
-    <name>datanucleus.fixedDatastore</name>
-    <value>true</value>
-  </property>
-  <property>
-    <name>datanucleus.autoStartMechanism</name>
-    <value>SchemaTable</value>
-  </property>
-  <property>
-    <name>hive.security.authorization.createtable.owner.grants</name>
-    <value>ALL</value>
-  </property>
-  <property>
-    <name>hive.users.in.admin.role</name>
-    <value>hdfs,hive</value>
-  </property>
-  <property>
-    <name>hive.server2.authentication</name>
-    <value>KERBEROS</value>
-  </property>
-  <property>
-    <name>hive.server2.enable.impersonation</name>
-    <value>false</value>
-  </property>
-  <property>
-    <name>hive.server2.authentication.kerberos.principal</name>
-    <value>hive/hadoop-master@LABS.TERADATA.COM</value>
-  </property>
-  <property>
-    <name>hive.server2.authentication.kerberos.keytab</name>
-    <value>/etc/hive/conf/hive.keytab</value>
-  </property>
-  <property>
-    <name>hive.metastore.sasl.enabled</name>
-    <value>true</value>
-  </property>
-  <property>
-    <name>hive.metastore.kerberos.keytab.file</name>
-    <value>/etc/hive/conf/hive.keytab</value>
-  </property>
-  <property>
-    <name>hive.metastore.kerberos.principal</name>
-    <value>hive/hadoop-master@LABS.TERADATA.COM</value>
-  </property>
-</configuration>
diff --git a/testing/cdh5.15-hive-kerberized-kms/files/etc/hive/conf/hiveserver2-site.xml b/testing/cdh5.15-hive-kerberized-kms/files/etc/hive/conf/hiveserver2-site.xml
deleted file mode 100644
index 520cd41d..00000000
--- a/testing/cdh5.15-hive-kerberized-kms/files/etc/hive/conf/hiveserver2-site.xml
+++ /dev/null
@@ -1,16 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<configuration>
-  <property>
-    <name>hive.metastore.uris</name>
-    <value>thrift://localhost:9083</value>
-  </property>
-  <property>
-    <name>hive.security.authenticator.manager</name>
-    <value>org.apache.hadoop.hive.ql.security.SessionStateUserAuthenticator</value>
-  </property>
-</configuration>
diff --git a/testing/cdh5.15-hive-kerberized-kms/files/etc/supervisord.d/kms.conf b/testing/cdh5.15-hive-kerberized-kms/files/etc/supervisord.d/kms.conf
deleted file mode 100644
index 3000c274..00000000
--- a/testing/cdh5.15-hive-kerberized-kms/files/etc/supervisord.d/kms.conf
+++ /dev/null
@@ -1,10 +0,0 @@
-[program:kms]
-environment=HADOOP_KEYSTORE_PASSWORD="abc1234"
-command=bash -c '. /etc/default/hadoop; . /etc/default/hadoop-kms; . /usr/lib/hadoop-kms/tomcat-deployment.sh; export KMS_SILENT=false KMS_LOG=/var/log/hadoop-kms/; exec /usr/lib/hadoop-kms/sbin/kms.sh run'
-user=kms
-autostart=true
-autorestart=true
-redirect_stderr=true
-##### stdout_logfile=/var/log/hadoop-kms/kms.log
-stdout_logfile=/dev/stdout
-stdout_logfile_maxbytes=0
diff --git a/testing/cdh5.15-hive-kerberized-kms/files/root/setup_kms.sh b/testing/cdh5.15-hive-kerberized-kms/files/root/setup_kms.sh
deleted file mode 100755
index 1ac87839..00000000
--- a/testing/cdh5.15-hive-kerberized-kms/files/root/setup_kms.sh
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/bin/bash
-
-set -euo pipefail
-
-function retry() {
- END=$(($(date +%s) + 600))
-
- while (( $(date +%s) < $END )); do
- set +e
- "$@"
- EXIT_CODE=$?
- set -e
-
- if [[ ${EXIT_CODE} == 0 ]]; then
- break
- fi
- sleep 5
- done
-
- return ${EXIT_CODE}
-}
-
-supervisord -c /etc/supervisord.conf &
-
-retry kinit -kt /etc/hadoop/conf/hdfs.keytab hdfs/hadoop-master@LABS.TERADATA.COM
-retry hdfs dfsadmin -safemode leave
-
-set -x
-hadoop key create key1 -size 256
-hdfs crypto -createZone -keyName key1 -path /user/hive/warehouse
-hdfs crypto -listZones
-
-supervisorctl stop all
-killall supervisord
-wait
diff --git a/testing/cdh5.15-hive-kerberized-kms/files/usr/bin/extract_rpms.sh b/testing/cdh5.15-hive-kerberized-kms/files/usr/bin/extract_rpms.sh
deleted file mode 100755
index 507ff06f..00000000
--- a/testing/cdh5.15-hive-kerberized-kms/files/usr/bin/extract_rpms.sh
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/usr/bin/env bash
-set -xeu
-
-dependencies=(
- gdb-7.2-92.el6.x86_64.rpm
- elfutils-libs-0.164-2.el6.x86_64.rpm
- elfutils-0.164-2.el6.x86_64.rpm
- redhat-rpm-config-9.0.3-51.el6.centos.noarch.rpm
- unzip-6.0-5.el6.x86_64.rpm
- rpm-build-4.8.0-59.el6.x86_64.rpm
-)
-
-for dependency in "${dependencies[@]}"
-do
- echo "Installing required dependency ${dependency}"
- rpm -i -U https://vault.centos.org/6.10/os/x86_64/Packages/${dependency}
-done
-
-rpm -i https://kojipkgs.fedoraproject.org/packages/rpmrebuild/2.11/3.el7/noarch/rpmrebuild-2.11-3.el7.noarch.rpm
-
-mkdir /rpms/
-
-for package in "$@"
-do
- echo "Rebuilding RPM ${package}"
- exact_package=$(rpm -qa | grep "${package}")
- echo "Found RPM ${package} as ${exact_package}"
- rpmrebuild -w "${exact_package}"
- mv "/root/rpmbuild/RPMS/"*"/${exact_package}.rpm" "/rpms/${package}.rpm"
-done
diff --git a/testing/cdh5.15-hive-kerberized/Dockerfile b/testing/cdh5.15-hive-kerberized/Dockerfile
deleted file mode 100644
index 38775dc6..00000000
--- a/testing/cdh5.15-hive-kerberized/Dockerfile
+++ /dev/null
@@ -1,97 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-FROM testing/cdh5.15-hive:unlabelled
-
-ARG ADDPRINC_ARGS="-maxrenewlife \"10 days\" +allow_renewable"
-
-# INSTALL KERBEROS
-RUN yum install -y krb5-libs krb5-server krb5-workstation
-
-# COPY CONFIGURATION
-COPY ./files /
-
-# Apply configuration overrides and remove them so they don't get reapplied
-RUN /usr/local/bin/apply-all-site-xml-overrides /overrides && rm -Rf /overrides
-
-# CREATE KERBEROS DATABASE
-RUN /usr/sbin/kdb5_util create -s -P password
-
-# ADD HADOOP PRINCIPALS
-RUN /usr/sbin/kadmin.local -q "addprinc ${ADDPRINC_ARGS} -randkey hdfs/hadoop-master@LABS.TERADATA.COM" \
- && /usr/sbin/kadmin.local -q "addprinc ${ADDPRINC_ARGS} -randkey mapred/hadoop-master@LABS.TERADATA.COM" \
- && /usr/sbin/kadmin.local -q "addprinc ${ADDPRINC_ARGS} -randkey yarn/hadoop-master@LABS.TERADATA.COM" \
- && /usr/sbin/kadmin.local -q "addprinc ${ADDPRINC_ARGS} -randkey HTTP/hadoop-master@LABS.TERADATA.COM"
-
-# CREATE HADOOP KEYTAB FILES
-RUN /usr/sbin/kadmin.local -q "xst -norandkey -k /etc/hadoop/conf/hdfs.keytab hdfs/hadoop-master HTTP/hadoop-master" \
- && /usr/sbin/kadmin.local -q "xst -norandkey -k /etc/hadoop/conf/mapred.keytab mapred/hadoop-master HTTP/hadoop-master" \
- && /usr/sbin/kadmin.local -q "xst -norandkey -k /etc/hadoop/conf/yarn.keytab yarn/hadoop-master HTTP/hadoop-master" \
- && /usr/sbin/kadmin.local -q "xst -norandkey -k /etc/hadoop/conf/HTTP.keytab HTTP/hadoop-master"
-RUN chown hdfs:hadoop /etc/hadoop/conf/hdfs.keytab \
- && chown mapred:hadoop /etc/hadoop/conf/mapred.keytab \
- && chown yarn:hadoop /etc/hadoop/conf/yarn.keytab \
- && chown hdfs:hadoop /etc/hadoop/conf/HTTP.keytab \
- && chmod 644 /etc/hadoop/conf/*.keytab
-
-# CREATE HIVE PRINCIPAL AND KEYTAB
-RUN /usr/sbin/kadmin.local -q "addprinc ${ADDPRINC_ARGS} -randkey hive/hadoop-master@LABS.TERADATA.COM" \
- && /usr/sbin/kadmin.local -q "xst -norandkey -k /etc/hive/conf/hive.keytab hive/hadoop-master"
-RUN chown hive:hadoop /etc/hive/conf/hive.keytab \
- && chmod 644 /etc/hive/conf/hive.keytab
-
-# YARN SECURITY SETTINGS
-RUN chmod 6050 /etc/hadoop/conf/container-executor.cfg
-
-# Create legacy Presto and updated Trino principals and add them to keytabs
-RUN set -xeu && \
- for hostname in presto-master trino-coordinator presto-worker trino-worker presto-worker-1 trino-worker-1 presto-worker-2 trino-worker-2; do \
- /usr/sbin/kadmin.local -q "addprinc ${ADDPRINC_ARGS} -randkey presto-server/${hostname}.docker.cluster@LABS.TERADATA.COM" \
- && /usr/sbin/kadmin.local -q "addprinc ${ADDPRINC_ARGS} -randkey trino-server/${hostname}.docker.cluster@LABS.TERADATA.COM" \
- && /usr/sbin/kadmin.local -q "addprinc ${ADDPRINC_ARGS} -randkey HTTP/${hostname}.docker.cluster@LABS.TERADATA.COM" \
- && /usr/sbin/kadmin.local -q "addprinc ${ADDPRINC_ARGS} -randkey presto-client/${hostname}.docker.cluster@LABS.TERADATA.COM" \
- && /usr/sbin/kadmin.local -q "addprinc ${ADDPRINC_ARGS} -randkey trino-client/${hostname}.docker.cluster@LABS.TERADATA.COM" \
- && /usr/sbin/kadmin.local -q "addprinc ${ADDPRINC_ARGS} -randkey hive/${hostname}.docker.cluster@LABS.TERADATA.COM" \
- && mkdir -p /etc/trino/conf \
- && /usr/sbin/kadmin.local -q "xst -norandkey -k /etc/trino/conf/presto-server.keytab presto-server/${hostname}.docker.cluster" \
- && /usr/sbin/kadmin.local -q "xst -norandkey -k /etc/trino/conf/trino-server.keytab trino-server/${hostname}.docker.cluster" \
- && /usr/sbin/kadmin.local -q "xst -norandkey -k /etc/trino/conf/presto-server-HTTP.keytab HTTP/${hostname}.docker.cluster" \
- && /usr/sbin/kadmin.local -q "xst -norandkey -k /etc/trino/conf/trino-client.keytab trino-client/${hostname}.docker.cluster" \
- && /usr/sbin/kadmin.local -q "xst -norandkey -k /etc/trino/conf/presto-client.keytab presto-client/${hostname}.docker.cluster" \
- && /usr/sbin/kadmin.local -q "xst -norandkey -k /etc/trino/conf/hive-presto-master.keytab hive/${hostname}.docker.cluster"; \
- done && echo "OK" && \
- chmod 644 /etc/trino/conf/*.keytab
-
-# CREATE SSL KEYSTORE
-RUN keytool -genkeypair \
- -alias presto \
- -keyalg RSA \
- -keystore /etc/trino/conf/keystore.jks \
- -keypass password \
- -storepass password \
- -dname "CN=presto-master, OU=, O=, L=, S=, C=" \
- -validity 100000 && \
- keytool -genkeypair \
- -alias trino \
- -keyalg RSA \
- -keystore /etc/trino/conf/keystore.jks \
- -keypass password \
- -storepass password \
- -dname "CN=trino-coordinator, OU=, O=, L=, S=, C=" \
- -validity 100000
-RUN chmod 644 /etc/trino/conf/keystore.jks
-
-# EXPOSE KERBEROS PORTS
-EXPOSE 88
-EXPOSE 749
-
-CMD supervisord -c /etc/supervisord.conf
diff --git a/testing/cdh5.15-hive-kerberized/files/etc/hadoop/conf/container-executor.cfg b/testing/cdh5.15-hive-kerberized/files/etc/hadoop/conf/container-executor.cfg
deleted file mode 100644
index b790f86a..00000000
--- a/testing/cdh5.15-hive-kerberized/files/etc/hadoop/conf/container-executor.cfg
+++ /dev/null
@@ -1,5 +0,0 @@
-yarn.nodemanager.local-dirs=/var/lib/hadoop-yarn/cache/yarn/nm-local-dir
-yarn.nodemanager.linux-container-executor.group=yarn
-yarn.nodemanager.log-dirs=/var/log/hadoop-yarn/containers
-banned.users=yarn,mapred,bin
-min.user.id=400
diff --git a/testing/cdh5.15-hive-kerberized/files/etc/hadoop/conf/taskcontroller.cfg b/testing/cdh5.15-hive-kerberized/files/etc/hadoop/conf/taskcontroller.cfg
deleted file mode 100644
index 4f09350d..00000000
--- a/testing/cdh5.15-hive-kerberized/files/etc/hadoop/conf/taskcontroller.cfg
+++ /dev/null
@@ -1,4 +0,0 @@
-hadoop.log.dir=/var/log/hadoop-mapreduce
-mapreduce.tasktracker.group=mapred
-banned.users=mapred,bin
-min.user.id=400
diff --git a/testing/cdh5.15-hive-kerberized/files/etc/hive/conf/hiveserver2-site.xml b/testing/cdh5.15-hive-kerberized/files/etc/hive/conf/hiveserver2-site.xml
deleted file mode 100644
index 41280243..00000000
--- a/testing/cdh5.15-hive-kerberized/files/etc/hive/conf/hiveserver2-site.xml
+++ /dev/null
@@ -1,38 +0,0 @@
-<?xml version="1.0"?>
-<configuration>
-  <property>
-    <name>hive.security.authenticator.manager</name>
-    <value>org.apache.hadoop.hive.ql.security.SessionStateUserAuthenticator</value>
-  </property>
-  <property>
-    <name>hive.security.authorization.manager</name>
-    <value>org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory</value>
-    <description>SQL standards based Hive authorization</description>
-  </property>
-  <property>
-    <name>hive.security.authorization.enabled</name>
-    <value>true</value>
-  </property>
-</configuration>
diff --git a/testing/cdh5.15-hive-kerberized/files/etc/krb5.conf b/testing/cdh5.15-hive-kerberized/files/etc/krb5.conf
deleted file mode 100644
index 699ebc6b..00000000
--- a/testing/cdh5.15-hive-kerberized/files/etc/krb5.conf
+++ /dev/null
@@ -1,17 +0,0 @@
-[logging]
- default = FILE:/var/log/krb5libs.log
- kdc = FILE:/var/log/krb5kdc.log
- admin_server = FILE:/var/log/kadmind.log
-
-[libdefaults]
- default_realm = LABS.TERADATA.COM
- dns_lookup_realm = false
- dns_lookup_kdc = false
- forwardable = true
- allow_weak_crypto = true
-
-[realms]
- LABS.TERADATA.COM = {
- kdc = hadoop-master
- admin_server = hadoop-master
- }
diff --git a/testing/cdh5.15-hive-kerberized/files/etc/supervisord.d/kdc.conf b/testing/cdh5.15-hive-kerberized/files/etc/supervisord.d/kdc.conf
deleted file mode 100644
index 93f87dc4..00000000
--- a/testing/cdh5.15-hive-kerberized/files/etc/supervisord.d/kdc.conf
+++ /dev/null
@@ -1,15 +0,0 @@
-[program:krb5kdc]
-command=/bin/bash -c "exec /usr/sbin/krb5kdc -r LABS.TERADATA.COM -P /var/run/krb5kdc.pid -n"
-autostart=true
-autorestart=true
-redirect_stderr=true
-stdout_logfile=/dev/stdout
-stdout_logfile_maxbytes=0
-
-[program:kadmind]
-command=/bin/bash -c "exec /usr/sbin/kadmind -r LABS.TERADATA.COM -P /var/run/kadmind.pid -nofork"
-autostart=true
-autorestart=true
-redirect_stderr=true
-stdout_logfile=/dev/stdout
-stdout_logfile_maxbytes=0
diff --git a/testing/cdh5.15-hive-kerberized/files/overrides/etc/hadoop/conf/core-site.xml b/testing/cdh5.15-hive-kerberized/files/overrides/etc/hadoop/conf/core-site.xml
deleted file mode 100644
index 6b7143be..00000000
--- a/testing/cdh5.15-hive-kerberized/files/overrides/etc/hadoop/conf/core-site.xml
+++ /dev/null
@@ -1,42 +0,0 @@
-<?xml version="1.0"?>
-<configuration>
-  <property>
-    <name>hadoop.proxyuser.presto-server.groups</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.presto-server.hosts</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.security.authentication</name>
-    <value>kerberos</value>
-  </property>
-  <property>
-    <name>hadoop.security.authorization</name>
-    <value>true</value>
-  </property>
-</configuration>
diff --git a/testing/cdh5.15-hive-kerberized/files/overrides/etc/hadoop/conf/hdfs-site.xml b/testing/cdh5.15-hive-kerberized/files/overrides/etc/hadoop/conf/hdfs-site.xml
deleted file mode 100644
index dff383ab..00000000
--- a/testing/cdh5.15-hive-kerberized/files/overrides/etc/hadoop/conf/hdfs-site.xml
+++ /dev/null
@@ -1,94 +0,0 @@
-<?xml version="1.0"?>
-<configuration>
-  <property>
-    <name>dfs.block.access.token.enable</name>
-    <value>true</value>
-  </property>
-  <property>
-    <name>dfs.namenode.keytab.file</name>
-    <value>/etc/hadoop/conf/hdfs.keytab</value>
-  </property>
-  <property>
-    <name>dfs.namenode.kerberos.principal</name>
-    <value>hdfs/hadoop-master@LABS.TERADATA.COM</value>
-  </property>
-  <property>
-    <name>dfs.namenode.kerberos.internal.spnego.principal</name>
-    <value>HTTP/hadoop-master@LABS.TERADATA.COM</value>
-  </property>
-  <property>
-    <name>dfs.secondary.namenode.keytab.file</name>
-    <value>/etc/hadoop/conf/hdfs.keytab</value>
-  </property>
-  <property>
-    <name>dfs.secondary.namenode.kerberos.principal</name>
-    <value>hdfs/hadoop-master@LABS.TERADATA.COM</value>
-  </property>
-  <property>
-    <name>dfs.secondary.namenode.kerberos.internal.spnego.principal</name>
-    <value>HTTP/hadoop-master@LABS.TERADATA.COM</value>
-  </property>
-  <property>
-    <name>dfs.datanode.keytab.file</name>
-    <value>/etc/hadoop/conf/hdfs.keytab</value>
-  </property>
-  <property>
-    <name>dfs.datanode.kerberos.principal</name>
-    <value>hdfs/hadoop-master@LABS.TERADATA.COM</value>
-  </property>
-  <property>
-    <name>dfs.webhdfs.enabled</name>
-    <value>true</value>
-  </property>
-  <property>
-    <name>dfs.web.authentication.kerberos.principal</name>
-    <value>HTTP/hadoop-master@LABS.TERADATA.COM</value>
-  </property>
-  <property>
-    <name>dfs.web.authentication.kerberos.keytab</name>
-    <value>/etc/hadoop/conf/HTTP.keytab</value>
-  </property>
-  <property>
-    <name>ignore.secure.ports.for.testing</name>
-    <value>true</value>
-  </property>
-  <property>
-    <name>dfs.http.policy</name>
-    <value>HTTP_ONLY</value>
-  </property>
-</configuration>
diff --git a/testing/cdh5.15-hive-kerberized/files/overrides/etc/hadoop/conf/mapred-site.xml b/testing/cdh5.15-hive-kerberized/files/overrides/etc/hadoop/conf/mapred-site.xml
deleted file mode 100644
index be1d4224..00000000
--- a/testing/cdh5.15-hive-kerberized/files/overrides/etc/hadoop/conf/mapred-site.xml
+++ /dev/null
@@ -1,63 +0,0 @@
-<?xml version="1.0"?>
-<configuration>
-  <property>
-    <name>mapreduce.jobhistory.keytab</name>
-    <value>/etc/hadoop/conf/mapred.keytab</value>
-  </property>
-  <property>
-    <name>mapreduce.jobhistory.principal</name>
-    <value>mapred/hadoop-master@LABS.TERADATA.COM</value>
-  </property>
-  <property>
-    <name>mapreduce.jobtracker.kerberos.principal</name>
-    <value>mapred/hadoop-master@LABS.TERADATA.COM</value>
-  </property>
-  <property>
-    <name>mapreduce.jobtracker.keytab.file</name>
-    <value>/etc/hadoop/conf/mapred.keytab</value>
-  </property>
-  <property>
-    <name>mapreduce.tasktracker.kerberos.principal</name>
-    <value>mapred/hadoop-master@LABS.TERADATA.COM</value>
-  </property>
-  <property>
-    <name>mapreduce.tasktracker.keytab.file</name>
-    <value>/etc/hadoop/conf/mapred.keytab</value>
-  </property>
-  <property>
-    <name>mapred.task.tracker.task-controller</name>
-    <value>org.apache.hadoop.mapred.LinuxTaskController</value>
-  </property>
-  <property>
-    <name>mapreduce.tasktracker.group</name>
-    <value>mapred</value>
-  </property>
-</configuration>
diff --git a/testing/cdh5.15-hive-kerberized/files/overrides/etc/hadoop/conf/yarn-site.xml b/testing/cdh5.15-hive-kerberized/files/overrides/etc/hadoop/conf/yarn-site.xml
deleted file mode 100644
index 8be503a4..00000000
--- a/testing/cdh5.15-hive-kerberized/files/overrides/etc/hadoop/conf/yarn-site.xml
+++ /dev/null
@@ -1,58 +0,0 @@
-<?xml version="1.0"?>
-<configuration>
-  <property>
-    <name>yarn.resourcemanager.keytab</name>
-    <value>/etc/hadoop/conf/yarn.keytab</value>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.principal</name>
-    <value>yarn/hadoop-master@LABS.TERADATA.COM</value>
-  </property>
-  <property>
-    <name>yarn.nodemanager.keytab</name>
-    <value>/etc/hadoop/conf/yarn.keytab</value>
-  </property>
-  <property>
-    <name>yarn.nodemanager.principal</name>
-    <value>yarn/hadoop-master@LABS.TERADATA.COM</value>
-  </property>
-  <property>
-    <name>yarn.nodemanager.container-executor.class</name>
-    <value>org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor</value>
-  </property>
-  <property>
-    <name>yarn.nodemanager.linux-container-executor.path</name>
-    <value>/usr/lib/hadoop-yarn/bin/container-executor</value>
-  </property>
-  <property>
-    <name>yarn.nodemanager.linux-container-executor.group</name>
-    <value>yarn</value>
-  </property>
-</configuration>
diff --git a/testing/cdh5.15-hive-kerberized/files/overrides/etc/hive/conf/hive-site.xml b/testing/cdh5.15-hive-kerberized/files/overrides/etc/hive/conf/hive-site.xml
deleted file mode 100644
index 5039bc29..00000000
--- a/testing/cdh5.15-hive-kerberized/files/overrides/etc/hive/conf/hive-site.xml
+++ /dev/null
@@ -1,67 +0,0 @@
-<?xml version="1.0"?>
-<configuration>
-  <property>
-    <name>hive.server2.authentication</name>
-    <value>KERBEROS</value>
-  </property>
-  <property>
-    <name>hive.server2.enable.impersonation</name>
-    <value>true</value>
-  </property>
-  <property>
-    <name>hive.server2.authentication.kerberos.principal</name>
-    <value>hive/hadoop-master@LABS.TERADATA.COM</value>
-  </property>
-  <property>
-    <name>hive.server2.authentication.kerberos.keytab</name>
-    <value>/etc/hive/conf/hive.keytab</value>
-  </property>
-  <property>
-    <name>hive.metastore.sasl.enabled</name>
-    <value>true</value>
-  </property>
-  <property>
-    <name>hive.metastore.kerberos.keytab.file</name>
-    <value>/etc/hive/conf/hive.keytab</value>
-  </property>
-  <property>
-    <name>hive.metastore.kerberos.principal</name>
-    <value>hive/hadoop-master@LABS.TERADATA.COM</value>
-  </property>
-  <property>
-    <name>hive.security.authorization.manager</name>
-    <value>org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdConfOnlyAuthorizerFactory</value>
-  </property>
-  <property>
-    <name>hive.security.authorization.task.factory</name>
-    <value>org.apache.hadoop.hive.ql.parse.authorization.HiveAuthorizationTaskFactoryImpl</value>
-  </property>
-</configuration>
diff --git a/testing/cdh5.15-hive-kerberized/files/var/kerberos/krb5kdc/kadm5.acl b/testing/cdh5.15-hive-kerberized/files/var/kerberos/krb5kdc/kadm5.acl
deleted file mode 100644
index 0530526a..00000000
--- a/testing/cdh5.15-hive-kerberized/files/var/kerberos/krb5kdc/kadm5.acl
+++ /dev/null
@@ -1 +0,0 @@
-*/admin@LABS.TERADATA.COM *
diff --git a/testing/cdh5.15-hive-kerberized/files/var/kerberos/krb5kdc/kdc.conf b/testing/cdh5.15-hive-kerberized/files/var/kerberos/krb5kdc/kdc.conf
deleted file mode 100644
index 2df4f49e..00000000
--- a/testing/cdh5.15-hive-kerberized/files/var/kerberos/krb5kdc/kdc.conf
+++ /dev/null
@@ -1,12 +0,0 @@
-[kdcdefaults]
- kdc_ports = 88
- kdc_tcp_ports = 88
-
-[realms]
- LABS.TERADATA.COM = {
- #master_key_type = aes256-cts
- acl_file = /var/kerberos/krb5kdc/kadm5.acl
- dict_file = /usr/share/dict/words
- admin_keytab = /var/kerberos/krb5kdc/kadm5.keytab
- supported_enctypes = aes128-cts:normal des3-hmac-sha1:normal arcfour-hmac:normal des-hmac-sha1:normal des-cbc-md5:normal des-cbc-crc:normal
- }
diff --git a/testing/cdh5.15-hive/Dockerfile b/testing/cdh5.15-hive/Dockerfile
deleted file mode 100644
index d0150a76..00000000
--- a/testing/cdh5.15-hive/Dockerfile
+++ /dev/null
@@ -1,43 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Cloudera removed access to the CDH5 repositories in April 2021.
-# In order to build new image versions we use the last released version as the base.
-# The previous Dockerfile version is archived in archived/cdh5.15-hive.
-FROM ghcr.io/trinodb/testing/cdh5.15-hive:38
-
-COPY ./files /
-
-RUN \
- set -xeu && \
- # Remove inaccessible CDH5 repos so yum is still usable
- rm /etc/yum.repos.d/cloudera-cdh5.repo && \
- # Install Zulu JDK 17.0.4
- rpm -i https://cdn.azul.com/zulu/bin/zulu17.36.13-ca-jdk17.0.4-linux.x86_64.rpm && \
- # Set JDK 17 as a default one
- alternatives --set java /usr/lib/jvm/zulu-17/bin/java && \
- alternatives --set javac /usr/lib/jvm/zulu-17/bin/javac && \
- echo "Done"
-
-# HDFS ports
-EXPOSE 1004 1006 8020 50010 50020 50070 50075 50470
-
-# YARN ports
-EXPOSE 8030 8031 8032 8033 8040 8041 8042 8088 10020 19888
-
-# HIVE port
-EXPOSE 9083 10000
-
-# SOCKS port
-EXPOSE 1180
-
-CMD supervisord -c /etc/supervisord.conf
diff --git a/testing/cdh5.15-hive/files/etc/hadoop/conf/capacity-scheduler.xml b/testing/cdh5.15-hive/files/etc/hadoop/conf/capacity-scheduler.xml
deleted file mode 100644
index 0e15fb6b..00000000
--- a/testing/cdh5.15-hive/files/etc/hadoop/conf/capacity-scheduler.xml
+++ /dev/null
@@ -1,100 +0,0 @@
-<?xml version="1.0"?>
-<configuration>
-  <property>
-    <name>yarn.scheduler.capacity.maximum-applications</name>
-    <value>10000</value>
-    <description>
-      Maximum number of applications that can be pending and running.
-    </description>
-  </property>
-  <property>
-    <name>yarn.scheduler.capacity.maximum-am-resource-percent</name>
-    <value>1</value>
-    <description>
-      Maximum percent of resources in the cluster which can be used to run
-      application masters i.e. controls number of concurrent running
-      applications.
-    </description>
-  </property>
-  <property>
-    <name>yarn.scheduler.capacity.root.queues</name>
-    <value>default</value>
-    <description>
-      The queues at the this level (root is the root queue).
-    </description>
-  </property>
-  <property>
-    <name>yarn.scheduler.capacity.root.default.capacity</name>
-    <value>100</value>
-    <description>Default queue target capacity.</description>
-  </property>
-  <property>
-    <name>yarn.scheduler.capacity.root.default.maximum-capacity</name>
-    <value>100</value>
-    <description>
-      The maximum capacity of the default queue.
-    </description>
-  </property>
-  <property>
-    <name>yarn.scheduler.capacity.root.default.state</name>
-    <value>RUNNING</value>
-    <description>
-      The state of the default queue. State can be one of RUNNING or STOPPED.
-    </description>
-  </property>
-  <property>
-    <name>yarn.scheduler.capacity.root.default.acl_submit_applications</name>
-    <value>*</value>
-    <description>
-      The ACL of who can submit jobs to the default queue.
-    </description>
-  </property>
-  <property>
-    <name>yarn.scheduler.capacity.root.default.user-limit-factor</name>
-    <value>1</value>
-    <description>
-      Default queue user limit a percentage from 0.0 to 1.0.
-    </description>
-  </property>
-  <property>
-    <name>yarn.scheduler.capacity.root.default.acl_administer_queue</name>
-    <value>*</value>
-    <description>
-      The ACL of who can administer jobs on the default queue.
-    </description>
-  </property>
-  <property>
-    <name>yarn.scheduler.capacity.node-locality-delay</name>
-    <value>-1</value>
-    <description>
-      Number of missed scheduling opportunities after which the CapacityScheduler
-      attempts to schedule rack-local containers.
-      Typically this should be set to number of racks in the cluster, this
-      feature is disabled by default, set to -1.
-    </description>
-  </property>
-</configuration>
diff --git a/testing/cdh5.15-hive/files/etc/hadoop/conf/core-site.xml b/testing/cdh5.15-hive/files/etc/hadoop/conf/core-site.xml
deleted file mode 100644
index 4dbdd062..00000000
--- a/testing/cdh5.15-hive/files/etc/hadoop/conf/core-site.xml
+++ /dev/null
@@ -1,97 +0,0 @@
-<?xml version="1.0"?>
-<configuration>
-  <property>
-    <name>fs.defaultFS</name>
-    <value>hdfs://hadoop-master:9000</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.oozie.hosts</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.oozie.groups</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.httpfs.hosts</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.httpfs.groups</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.llama.hosts</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.llama.groups</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.hue.hosts</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.hue.groups</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.mapred.hosts</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.mapred.groups</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.hive.hosts</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.hive.groups</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.hdfs.groups</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.hdfs.hosts</name>
-    <value>*</value>
-  </property>
-</configuration>
diff --git a/testing/cdh5.15-hive/files/etc/hadoop/conf/hadoop-env.sh b/testing/cdh5.15-hive/files/etc/hadoop/conf/hadoop-env.sh
deleted file mode 100644
index b855cc4e..00000000
--- a/testing/cdh5.15-hive/files/etc/hadoop/conf/hadoop-env.sh
+++ /dev/null
@@ -1,27 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Set Hadoop-specific environment variables here.
-# Forcing YARN-based mapreduce implementation.
-# Make sure to comment out if you want to go back to the default or
-# if you want this to be tweakable on a per-user basis
-# export HADOOP_MAPRED_HOME=/usr/lib/hadoop-mapreduce
-
-# The maximum amount of heap to use, in MB. Default is 1000.
-export HADOOP_HEAPSIZE=768
-
-# Extra Java runtime options. Empty by default.
-export HADOOP_NAMENODE_OPTS="$HADOOP_NAMENODE_OPTS -Xmx512m"
-export YARN_OPTS="$YARN_OPTS -Xmx256m"
diff --git a/testing/cdh5.15-hive/files/etc/hadoop/conf/hdfs-site.xml b/testing/cdh5.15-hive/files/etc/hadoop/conf/hdfs-site.xml
deleted file mode 100644
index 9d1e71aa..00000000
--- a/testing/cdh5.15-hive/files/etc/hadoop/conf/hdfs-site.xml
+++ /dev/null
@@ -1,33 +0,0 @@
-<?xml version="1.0"?>
-<configuration>
-  <property>
-    <name>dfs.namenode.name.dir</name>
-    <value>/var/lib/hadoop-hdfs/cache/name/</value>
-  </property>
-  <property>
-    <name>dfs.datanode.data.dir</name>
-    <value>/var/lib/hadoop-hdfs/cache/data/</value>
-  </property>
-  <property>
-    <name>fs.viewfs.mounttable.hadoop-viewfs.link./default</name>
-    <value>hdfs://hadoop-master:9000/user/hive/warehouse</value>
-  </property>
-</configuration>
diff --git a/testing/cdh5.15-hive/files/etc/hadoop/conf/mapred-site.xml b/testing/cdh5.15-hive/files/etc/hadoop/conf/mapred-site.xml
deleted file mode 100644
index b99d8620..00000000
--- a/testing/cdh5.15-hive/files/etc/hadoop/conf/mapred-site.xml
+++ /dev/null
@@ -1,41 +0,0 @@
-<?xml version="1.0"?>
-<configuration>
-  <property>
-    <name>mapred.job.tracker</name>
-    <value>hadoop-master:8021</value>
-  </property>
-  <property>
-    <name>mapreduce.jobhistory.address</name>
-    <value>hadoop-master:10020</value>
-  </property>
-  <property>
-    <name>mapreduce.jobhistory.webapp.address</name>
-    <value>hadoop-master:19888</value>
-  </property>
-  <property>
-    <description>To set the value of tmp directory for map and reduce tasks.</description>
-    <name>mapreduce.task.tmp.dir</name>
-    <value>/var/lib/hadoop-mapreduce/cache/${user.name}/tasks</value>
-  </property>
-</configuration>
diff --git a/testing/cdh5.15-hive/files/etc/hadoop/conf/yarn-site.xml b/testing/cdh5.15-hive/files/etc/hadoop/conf/yarn-site.xml
deleted file mode 100644
index 5f3757c5..00000000
--- a/testing/cdh5.15-hive/files/etc/hadoop/conf/yarn-site.xml
+++ /dev/null
@@ -1,109 +0,0 @@
-<?xml version="1.0"?>
-<configuration>
-  <property>
-    <name>yarn.nodemanager.aux-services</name>
-    <value>mapreduce_shuffle</value>
-  </property>
-  <property>
-    <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
-    <value>org.apache.hadoop.mapred.ShuffleHandler</value>
-  </property>
-  <property>
-    <name>yarn.log-aggregation-enable</name>
-    <value>true</value>
-  </property>
-  <property>
-    <name>yarn.dispatcher.exit-on-error</name>
-    <value>true</value>
-  </property>
-  <property>
-    <description>List of directories to store localized files in.</description>
-    <name>yarn.nodemanager.local-dirs</name>
-    <value>/var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir</value>
-  </property>
-  <property>
-    <description>Where to store container logs.</description>
-    <name>yarn.nodemanager.log-dirs</name>
-    <value>/var/log/hadoop-yarn/containers</value>
-  </property>
-  <property>
-    <description>Where to aggregate logs to.</description>
-    <name>yarn.nodemanager.remote-app-log-dir</name>
-    <value>/var/log/hadoop-yarn/apps</value>
-  </property>
-  <property>
-    <description>Classpath for typical applications.</description>
-    <name>yarn.application.classpath</name>
-    <value>
-      /etc/hadoop/conf,
-      /usr/lib/hadoop/*,
-      /usr/lib/hadoop/lib/*,
-      /usr/lib/hadoop-hdfs/*,
-      /usr/lib/hadoop-hdfs/lib/*,
-      /usr/lib/hadoop-yarn/*,
-      /usr/lib/hadoop-yarn/lib/*,
-      /usr/lib/hadoop-mapreduce/*,
-      /usr/lib/hadoop-mapreduce/lib/*
-    </value>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.hostname</name>
-    <value>hadoop-master</value>
-  </property>
-  <property>
-    <name>yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage</name>
-    <value>100</value>
-  </property>
-  <property>
-    <name>yarn.nodemanager.pmem-check-enabled</name>
-    <value>false</value>
-  </property>
-  <property>
-    <name>yarn.nodemanager.vmem-check-enabled</name>
-    <value>false</value>
-  </property>
-  <property>
-    <name>yarn.nodemanager.resource.memory.enforced</name>
-    <value>false</value>
-  </property>
-  <property>
-    <name>yarn.nodemanager.elastic-memory-control.enabled</name>
-    <value>false</value>
-  </property>
-  <property>
-    <name>yarn.log.server.url</name>
-    <value>http://hadoop-master:19888/jobhistory/logs</value>
-  </property>
-</configuration>
diff --git a/testing/cdh5.15-hive/files/etc/hive/conf/hive-env.sh b/testing/cdh5.15-hive/files/etc/hive/conf/hive-env.sh
deleted file mode 100644
index 034db442..00000000
--- a/testing/cdh5.15-hive/files/etc/hive/conf/hive-env.sh
+++ /dev/null
@@ -1 +0,0 @@
-export HADOOP_OPTS="$HADOOP_OPTS -Dhive.root.logger=INFO,console"
diff --git a/testing/cdh5.15-hive/files/etc/hive/conf/hive-site.xml b/testing/cdh5.15-hive/files/etc/hive/conf/hive-site.xml
deleted file mode 100644
index 4dfaa2a5..00000000
--- a/testing/cdh5.15-hive/files/etc/hive/conf/hive-site.xml
+++ /dev/null
@@ -1,67 +0,0 @@
-<?xml version="1.0"?>
-<configuration>
-  <property>
-    <name>javax.jdo.option.ConnectionURL</name>
-    <value>jdbc:mysql://localhost/metastore</value>
-    <description>the URL of the MySQL database</description>
-  </property>
-  <property>
-    <name>javax.jdo.option.ConnectionDriverName</name>
-    <value>com.mysql.jdbc.Driver</value>
-  </property>
-  <property>
-    <name>javax.jdo.option.ConnectionUserName</name>
-    <value>root</value>
-  </property>
-  <property>
-    <name>javax.jdo.option.ConnectionPassword</name>
-    <value>root</value>
-  </property>
-  <property>
-    <name>datanucleus.autoCreateSchema</name>
-    <value>false</value>
-  </property>
-  <property>
-    <name>datanucleus.fixedDatastore</name>
-    <value>true</value>
-  </property>
-  <property>
-    <name>datanucleus.autoStartMechanism</name>
-    <value>SchemaTable</value>
-  </property>
-  <property>
-    <name>hive.security.authorization.createtable.owner.grants</name>
-    <value>ALL</value>
-    <description>The set of privileges automatically granted to the owner whenever a table gets created.</description>
-  </property>
-  <property>
-    <name>hive.users.in.admin.role</name>
-    <value>hdfs,hive</value>
-  </property>
-</configuration>
diff --git a/testing/cdh5.15-hive/files/etc/supervisord.conf b/testing/cdh5.15-hive/files/etc/supervisord.conf
deleted file mode 100644
index 2ac8dba5..00000000
--- a/testing/cdh5.15-hive/files/etc/supervisord.conf
+++ /dev/null
@@ -1,21 +0,0 @@
-[supervisord]
-logfile = /var/log/supervisord.log
-logfile_maxbytes = 50MB
-logfile_backups=10
-loglevel = info
-pidfile = /var/run/supervisord.pid
-nodaemon = true
-directory = /tmp
-strip_ansi = false
-
-[unix_http_server]
-file = /tmp/supervisor.sock
-
-[rpcinterface:supervisor]
-supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
-
-[supervisorctl]
-serverurl = unix:///tmp/supervisor.sock
-
-[include]
-files = /etc/supervisord.d/*.conf
diff --git a/testing/cdh5.15-hive/files/etc/supervisord.d/hdfs-datanode.conf b/testing/cdh5.15-hive/files/etc/supervisord.d/hdfs-datanode.conf
deleted file mode 100644
index 87fbcbed..00000000
--- a/testing/cdh5.15-hive/files/etc/supervisord.d/hdfs-datanode.conf
+++ /dev/null
@@ -1,8 +0,0 @@
-[program:hdfs-datanode]
-command=hdfs datanode
-startsecs=2
-stopwaitsecs=10
-user=hdfs
-redirect_stderr=true
-stdout_logfile=/var/log/hadoop-hdfs/hadoop-hdfs-datanode.log
-autostart=true
diff --git a/testing/cdh5.15-hive/files/etc/supervisord.d/hdfs-namenode.conf b/testing/cdh5.15-hive/files/etc/supervisord.d/hdfs-namenode.conf
deleted file mode 100644
index 0d8de8db..00000000
--- a/testing/cdh5.15-hive/files/etc/supervisord.d/hdfs-namenode.conf
+++ /dev/null
@@ -1,8 +0,0 @@
-[program:hdfs-namenode]
-command=hdfs namenode
-startsecs=2
-stopwaitsecs=10
-user=hdfs
-redirect_stderr=true
-stdout_logfile=/var/log/hadoop-hdfs/hadoop-hdfs-namenode.log
-autostart=true
diff --git a/testing/cdh5.15-hive/files/etc/supervisord.d/hive-metastore.conf b/testing/cdh5.15-hive/files/etc/supervisord.d/hive-metastore.conf
deleted file mode 100644
index f5aaf8ca..00000000
--- a/testing/cdh5.15-hive/files/etc/supervisord.d/hive-metastore.conf
+++ /dev/null
@@ -1,9 +0,0 @@
-[program:hive-metastore]
-# Add `--debug:port=5006` for debugging
-command=hive --service metastore
-startsecs=2
-stopwaitsecs=10
-user=hive
-redirect_stderr=true
-stdout_logfile=/var/log/hive/hive-metastore.log
-autostart=true
diff --git a/testing/cdh5.15-hive/files/etc/supervisord.d/hive-server2.conf b/testing/cdh5.15-hive/files/etc/supervisord.d/hive-server2.conf
deleted file mode 100644
index d090496f..00000000
--- a/testing/cdh5.15-hive/files/etc/supervisord.d/hive-server2.conf
+++ /dev/null
@@ -1,8 +0,0 @@
-[program:hive-server2]
-command=hive --service hiveserver2
-startsecs=2
-stopwaitsecs=10
-user=hive
-redirect_stderr=true
-stdout_logfile=/var/log/hive/hive-server2.log
-autostart=true
diff --git a/testing/cdh5.15-hive/files/etc/supervisord.d/mysql-metastore.conf b/testing/cdh5.15-hive/files/etc/supervisord.d/mysql-metastore.conf
deleted file mode 100644
index e95544e5..00000000
--- a/testing/cdh5.15-hive/files/etc/supervisord.d/mysql-metastore.conf
+++ /dev/null
@@ -1,8 +0,0 @@
-[program:mysql-metastore]
-command=/usr/bin/pidproxy /var/run/mysqld/mysqld.pid /usr/bin/mysqld_safe
-startsecs=2
-stopwaitsecs=10
-user=mysql
-redirect_stderr=true
-stdout_logfile=/var/log/mysql/mysql.log
-autostart=true
diff --git a/testing/cdh5.15-hive/files/etc/supervisord.d/socks-proxy.conf b/testing/cdh5.15-hive/files/etc/supervisord.d/socks-proxy.conf
deleted file mode 100644
index 43602d26..00000000
--- a/testing/cdh5.15-hive/files/etc/supervisord.d/socks-proxy.conf
+++ /dev/null
@@ -1,9 +0,0 @@
-[program:socks-proxy]
-command=/usr/bin/ssh -o StrictHostKeyChecking=no -v -N -D 0.0.0.0:1180 localhost
-startsecs=2
-stopwaitsecs=10
-startretries=30
-user=root
-redirect_stderr=true
-stdout_logfile=/var/log/socks-proxy
-autostart=true
diff --git a/testing/cdh5.15-hive/files/etc/supervisord.d/sshd.conf b/testing/cdh5.15-hive/files/etc/supervisord.d/sshd.conf
deleted file mode 100644
index 3930b4c0..00000000
--- a/testing/cdh5.15-hive/files/etc/supervisord.d/sshd.conf
+++ /dev/null
@@ -1,9 +0,0 @@
-[program:sshd]
-command=/usr/sbin/sshd -D -e
-startsecs=2
-stopwaitsecs=10
-startretries=30
-user=root
-redirect_stderr=true
-stdout_logfile=/var/log/sshd
-autostart=true
diff --git a/testing/cdh5.15-hive/files/etc/supervisord.d/yarn-nodemanager.conf b/testing/cdh5.15-hive/files/etc/supervisord.d/yarn-nodemanager.conf
deleted file mode 100644
index 8f81e3c7..00000000
--- a/testing/cdh5.15-hive/files/etc/supervisord.d/yarn-nodemanager.conf
+++ /dev/null
@@ -1,8 +0,0 @@
-[program:yarn-nodemanager]
-command=yarn nodemanager
-startsecs=2
-stopwaitsecs=10
-user=yarn
-redirect_stderr=true
-stdout_logfile=/var/log/hadoop-yarn/hadoop-yarn-nodemanager.log
-autostart=true
diff --git a/testing/cdh5.15-hive/files/etc/supervisord.d/yarn-resourcemanager.conf b/testing/cdh5.15-hive/files/etc/supervisord.d/yarn-resourcemanager.conf
deleted file mode 100644
index 2ef16026..00000000
--- a/testing/cdh5.15-hive/files/etc/supervisord.d/yarn-resourcemanager.conf
+++ /dev/null
@@ -1,8 +0,0 @@
-[program:yarn-resourcemanager]
-command=yarn resourcemanager
-startsecs=2
-stopwaitsecs=10
-user=yarn
-redirect_stderr=true
-stdout_logfile=/var/log/hadoop-yarn/hadoop-yarn-resourcemanager.log
-autostart=true
diff --git a/testing/cdh5.15-hive/files/root/setup.sh b/testing/cdh5.15-hive/files/root/setup.sh
deleted file mode 100755
index 9391f99f..00000000
--- a/testing/cdh5.15-hive/files/root/setup.sh
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/bin/bash -ex
-
-# format namenode
-chown hdfs:hdfs /var/lib/hadoop-hdfs/cache/
-
-# workaround for 'could not open session' bug as suggested here:
-# https://github.com/docker/docker/issues/7056#issuecomment-49371610
-rm -f /etc/security/limits.d/hdfs.conf
-su -c "echo 'N' | hdfs namenode -format" hdfs
-
-# start hdfs
-su -c "hdfs datanode 2>&1 > /var/log/hadoop-hdfs/hadoop-hdfs-datanode.log" hdfs&
-su -c "hdfs namenode 2>&1 > /var/log/hadoop-hdfs/hadoop-hdfs-namenode.log" hdfs&
-
-# wait for the processes to start
-sleep 10
-
-# remove a broken symlink created by the CDH installer so that init-hdfs.sh does not blow up on it
-# (hbase-annotations.jar seems not needed in our case)
-rm /usr/lib/hive/lib/hbase-annotations.jar
-
-# exec the Cloudera HDFS init script
-/usr/lib/hadoop/libexec/init-hdfs.sh
-
-# init hive directories
-su -s /bin/bash hdfs -c '/usr/bin/hadoop fs -mkdir /user/hive/warehouse'
-su -s /bin/bash hdfs -c '/usr/bin/hadoop fs -chmod 1777 /user/hive/warehouse'
-su -s /bin/bash hdfs -c '/usr/bin/hadoop fs -chown hive /user/hive/warehouse'
-
-# stop hdfs
-killall java
-
-# setup metastore
-mysql_install_db
-
-/usr/bin/mysqld_safe &
-sleep 10s
-
-cd /usr/lib/hive/scripts/metastore/upgrade/mysql/
-echo "GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' WITH GRANT OPTION; FLUSH PRIVILEGES;" | mysql
-echo "CREATE DATABASE metastore; USE metastore; SOURCE hive-schema-1.1.0.mysql.sql;" | mysql
-/usr/bin/mysqladmin -u root password 'root'
-
-killall mysqld
-sleep 10s
-mkdir /var/log/mysql/
-chown mysql:mysql /var/log/mysql/
diff --git a/testing/hdp2.6-hive-kerberized-2/Dockerfile b/testing/hdp2.6-hive-kerberized-2/Dockerfile
deleted file mode 100644
index f9eec813..00000000
--- a/testing/hdp2.6-hive-kerberized-2/Dockerfile
+++ /dev/null
@@ -1,104 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-FROM testing/hdp2.6-hive:unlabelled
-
-ARG ADDPRINC_ARGS="-maxrenewlife \"10 days\" +allow_renewable"
-
-# INSTALL KERBEROS
-RUN yum install -y krb5-libs krb5-server krb5-workstation \
- && yum -y clean all && rm -rf /tmp/* /var/tmp/*
-
-# COPY CONFIGURATION
-COPY ./files /
-
-# Apply configuration overrides and remove them so they don't get reapplied
-RUN /usr/local/bin/apply-all-site-xml-overrides /overrides && rm -Rf /overrides
-
-# CREATE KERBEROS DATABASE
-RUN /usr/sbin/kdb5_util create -s -P password
-
-# ADD HADOOP PRINCIPALS
-RUN /usr/sbin/kadmin.local -q "addprinc ${ADDPRINC_ARGS} -randkey hdfs/hadoop-master-2@OTHERREALM.COM" \
- && /usr/sbin/kadmin.local -q "addprinc ${ADDPRINC_ARGS} -randkey mapred/hadoop-master-2@OTHERREALM.COM" \
- && /usr/sbin/kadmin.local -q "addprinc ${ADDPRINC_ARGS} -randkey yarn/hadoop-master-2@OTHERREALM.COM" \
- && /usr/sbin/kadmin.local -q "addprinc ${ADDPRINC_ARGS} -randkey HTTP/hadoop-master-2@OTHERREALM.COM"
-
-# CREATE HADOOP KEYTAB FILES
-RUN /usr/sbin/kadmin.local -q "xst -norandkey -k /etc/hadoop/conf/hdfs.keytab hdfs/hadoop-master-2 HTTP/hadoop-master-2" \
- && /usr/sbin/kadmin.local -q "xst -norandkey -k /etc/hadoop/conf/mapred.keytab mapred/hadoop-master-2 HTTP/hadoop-master-2" \
- && /usr/sbin/kadmin.local -q "xst -norandkey -k /etc/hadoop/conf/yarn.keytab yarn/hadoop-master-2 HTTP/hadoop-master-2" \
- && /usr/sbin/kadmin.local -q "xst -norandkey -k /etc/hadoop/conf/HTTP.keytab HTTP/hadoop-master-2"
-RUN chown hdfs:hadoop /etc/hadoop/conf/hdfs.keytab \
- && chown mapred:hadoop /etc/hadoop/conf/mapred.keytab \
- && chown yarn:hadoop /etc/hadoop/conf/yarn.keytab \
- && chown hdfs:hadoop /etc/hadoop/conf/HTTP.keytab \
- && chmod 644 /etc/hadoop/conf/*.keytab
-
-# CREATE HIVE PRINCIPAL AND KEYTAB
-RUN /usr/sbin/kadmin.local -q "addprinc ${ADDPRINC_ARGS} -randkey hive/hadoop-master-2@OTHERREALM.COM" \
- && /usr/sbin/kadmin.local -q "xst -norandkey -k /etc/hive/conf/hive.keytab hive/hadoop-master-2"
-RUN chown hive:hadoop /etc/hive/conf/hive.keytab \
- && chmod 644 /etc/hive/conf/hive.keytab
-
-# Create legacy Presto and updated Trino principals and add them to keytabs
-RUN set -xeu && \
- for hostname in presto-master trino-coordinator presto-worker trino-worker presto-worker-1 trino-worker-1 presto-worker-2 trino-worker-2; do \
- /usr/sbin/kadmin.local -q "addprinc ${ADDPRINC_ARGS} -randkey presto-server/${hostname}.docker.cluster@OTHERREALM.COM" \
- && /usr/sbin/kadmin.local -q "addprinc ${ADDPRINC_ARGS} -randkey trino-server/${hostname}.docker.cluster@OTHERREALM.COM" \
- && /usr/sbin/kadmin.local -q "addprinc ${ADDPRINC_ARGS} -randkey HTTP/${hostname}.docker.cluster@OTHERREALM.COM" \
- && /usr/sbin/kadmin.local -q "addprinc ${ADDPRINC_ARGS} -randkey presto-client/${hostname}.docker.cluster@OTHERREALM.COM" \
- && /usr/sbin/kadmin.local -q "addprinc ${ADDPRINC_ARGS} -randkey trino-client/${hostname}.docker.cluster@OTHERREALM.COM" \
- && /usr/sbin/kadmin.local -q "addprinc ${ADDPRINC_ARGS} -randkey hive/${hostname}.docker.cluster@OTHERREALM.COM" \
- && mkdir -p /etc/trino/conf \
- && /usr/sbin/kadmin.local -q "xst -norandkey -k /etc/trino/conf/presto-server.keytab presto-server/${hostname}.docker.cluster" \
- && /usr/sbin/kadmin.local -q "xst -norandkey -k /etc/trino/conf/trino-server.keytab trino-server/${hostname}.docker.cluster" \
- && /usr/sbin/kadmin.local -q "xst -norandkey -k /etc/trino/conf/presto-server-HTTP.keytab HTTP/${hostname}.docker.cluster" \
- && /usr/sbin/kadmin.local -q "xst -norandkey -k /etc/trino/conf/trino-client.keytab trino-client/${hostname}.docker.cluster" \
- && /usr/sbin/kadmin.local -q "xst -norandkey -k /etc/trino/conf/presto-client.keytab presto-client/${hostname}.docker.cluster" \
- && /usr/sbin/kadmin.local -q "xst -norandkey -k /etc/trino/conf/hive-presto-master.keytab hive/${hostname}.docker.cluster"; \
- done && echo "OK" && \
- chmod 644 /etc/trino/conf/*.keytab
-
-# CREATE SSL KEYSTORE
-RUN keytool -genkeypair \
- -alias presto \
- -keyalg RSA \
- -keystore /etc/trino/conf/keystore.jks \
- -keypass password \
- -storepass password \
- -dname "CN=presto-master, OU=, O=, L=, S=, C=" \
- -validity 100000 && \
- keytool -genkeypair \
- -alias trino \
- -keyalg RSA \
- -keystore /etc/trino/conf/keystore.jks \
- -keypass password \
- -storepass password \
- -dname "CN=trino-coordinator, OU=, O=, L=, S=, C=" \
- -validity 100000
-RUN chmod 644 /etc/trino/conf/keystore.jks
-
-# Provide convenience bash history
-RUN set -xeu; \
- for user in root hive hdfs; do \
- sudo -u "${user}" bash -c ' echo "klist -kt /etc/hive/conf/hive.keytab" >> ~/.bash_history '; \
- sudo -u "${user}" bash -c ' echo "kinit -kt /etc/hive/conf/hive.keytab hive/hadoop-master-2@OTHERREALM.COM" >> ~/.bash_history '; \
- sudo -u "${user}" bash -c ' echo "beeline -u \"jdbc:hive2://hadoop-master-2:10000/default;principal=hive/hadoop-master-2@OTHERREALM.COM\"" >> ~/.bash_history '; \
- done
-
-# EXPOSE KERBEROS PORTS
-EXPOSE 88
-EXPOSE 89
-EXPOSE 749
-
-CMD supervisord -c /etc/supervisord.conf
diff --git a/testing/hdp2.6-hive-kerberized-2/files/etc/hive/conf/hiveserver2-site.xml b/testing/hdp2.6-hive-kerberized-2/files/etc/hive/conf/hiveserver2-site.xml
deleted file mode 100644
index 41280243..00000000
--- a/testing/hdp2.6-hive-kerberized-2/files/etc/hive/conf/hiveserver2-site.xml
+++ /dev/null
@@ -1,38 +0,0 @@
-<?xml version="1.0"?>
-<configuration>
-  <property>
-    <name>hive.security.authenticator.manager</name>
-    <value>org.apache.hadoop.hive.ql.security.SessionStateUserAuthenticator</value>
-  </property>
-  <property>
-    <name>hive.security.authorization.manager</name>
-    <value>org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory</value>
-    <description>SQL standards based Hive authorization</description>
-  </property>
-  <property>
-    <name>hive.security.authorization.enabled</name>
-    <value>true</value>
-  </property>
-</configuration>
diff --git a/testing/hdp2.6-hive-kerberized-2/files/etc/krb5.conf b/testing/hdp2.6-hive-kerberized-2/files/etc/krb5.conf
deleted file mode 100644
index 1d922551..00000000
--- a/testing/hdp2.6-hive-kerberized-2/files/etc/krb5.conf
+++ /dev/null
@@ -1,17 +0,0 @@
-[logging]
- default = FILE:/var/log/krb5libs.log
- kdc = FILE:/var/log/krb5kdc.log
- admin_server = FILE:/var/log/kadmind.log
-
-[libdefaults]
- default_realm = OTHERREALM.COM
- dns_lookup_realm = false
- dns_lookup_kdc = false
- forwardable = true
- allow_weak_crypto = true
-
-[realms]
- OTHERREALM.COM = {
- kdc = hadoop-master-2:88
- admin_server = hadoop-master-2
- }
diff --git a/testing/hdp2.6-hive-kerberized-2/files/etc/supervisord.d/kdc.conf b/testing/hdp2.6-hive-kerberized-2/files/etc/supervisord.d/kdc.conf
deleted file mode 100644
index 4c8146b2..00000000
--- a/testing/hdp2.6-hive-kerberized-2/files/etc/supervisord.d/kdc.conf
+++ /dev/null
@@ -1,15 +0,0 @@
-[program:krb5kdc]
-command=/bin/bash -c "exec /usr/sbin/krb5kdc -P /var/run/krb5kdc.pid -n -r OTHERREALM.COM"
-autostart=true
-autorestart=true
-redirect_stderr=true
-stdout_logfile=/dev/stdout
-stdout_logfile_maxbytes=0
-
-[program:kadmind]
-command=/bin/bash -c "exec /usr/sbin/kadmind -P /var/run/kadmind.pid -nofork -r OTHERREALM.COM"
-autostart=true
-autorestart=true
-redirect_stderr=true
-stdout_logfile=/dev/stdout
-stdout_logfile_maxbytes=0
diff --git a/testing/hdp2.6-hive-kerberized-2/files/overrides/etc/hadoop/conf/core-site.xml b/testing/hdp2.6-hive-kerberized-2/files/overrides/etc/hadoop/conf/core-site.xml
deleted file mode 100644
index 413daa26..00000000
--- a/testing/hdp2.6-hive-kerberized-2/files/overrides/etc/hadoop/conf/core-site.xml
+++ /dev/null
@@ -1,47 +0,0 @@
-<?xml version="1.0"?>
-<configuration>
-  <property>
-    <name>fs.defaultFS</name>
-    <value>hdfs://hadoop-master-2:9000</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.presto-server.groups</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.presto-server.hosts</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.security.authentication</name>
-    <value>kerberos</value>
-  </property>
-  <property>
-    <name>hadoop.security.authorization</name>
-    <value>true</value>
-  </property>
-</configuration>
diff --git a/testing/hdp2.6-hive-kerberized-2/files/overrides/etc/hadoop/conf/hdfs-site.xml b/testing/hdp2.6-hive-kerberized-2/files/overrides/etc/hadoop/conf/hdfs-site.xml
deleted file mode 100644
index 804f4415..00000000
--- a/testing/hdp2.6-hive-kerberized-2/files/overrides/etc/hadoop/conf/hdfs-site.xml
+++ /dev/null
@@ -1,89 +0,0 @@
-<?xml version="1.0"?>
-<configuration>
-  <property>
-    <name>dfs.block.access.token.enable</name>
-    <value>true</value>
-  </property>
-  <property>
-    <name>dfs.namenode.keytab.file</name>
-    <value>/etc/hadoop/conf/hdfs.keytab</value>
-  </property>
-  <property>
-    <name>dfs.namenode.kerberos.principal</name>
-    <value>hdfs/hadoop-master-2@OTHERREALM.COM</value>
-  </property>
-  <property>
-    <name>dfs.namenode.kerberos.internal.spnego.principal</name>
-    <value>HTTP/hadoop-master-2@OTHERREALM.COM</value>
-  </property>
-  <property>
-    <name>dfs.secondary.namenode.keytab.file</name>
-    <value>/etc/hadoop/conf/hdfs.keytab</value>
-  </property>
-  <property>
-    <name>dfs.secondary.namenode.kerberos.principal</name>
-    <value>hdfs/hadoop-master-2@OTHERREALM.COM</value>
-  </property>
-  <property>
-    <name>dfs.secondary.namenode.kerberos.internal.spnego.principal</name>
-    <value>HTTP/hadoop-master-2@OTHERREALM.COM</value>
-  </property>
-  <property>
-    <name>dfs.datanode.keytab.file</name>
-    <value>/etc/hadoop/conf/hdfs.keytab</value>
-  </property>
-  <property>
-    <name>dfs.datanode.kerberos.principal</name>
-    <value>hdfs/hadoop-master-2@OTHERREALM.COM</value>
-  </property>
-  <property>
-    <name>dfs.webhdfs.enabled</name>
-    <value>true</value>
-  </property>
-  <property>
-    <name>dfs.web.authentication.kerberos.principal</name>
-    <value>HTTP/hadoop-master-2@OTHERREALM.COM</value>
-  </property>
-  <property>
-    <name>dfs.web.authentication.kerberos.keytab</name>
-    <value>/etc/hadoop/conf/HTTP.keytab</value>
-  </property>
-  <property>
-    <name>ignore.secure.ports.for.testing</name>
-    <value>true</value>
-  </property>
-  <property>
-    <name>dfs.http.policy</name>
-    <value>HTTP_ONLY</value>
-  </property>
-</configuration>
diff --git a/testing/hdp2.6-hive-kerberized-2/files/overrides/etc/hadoop/conf/mapred-site.xml b/testing/hdp2.6-hive-kerberized-2/files/overrides/etc/hadoop/conf/mapred-site.xml
deleted file mode 100644
index 6270a22f..00000000
--- a/testing/hdp2.6-hive-kerberized-2/files/overrides/etc/hadoop/conf/mapred-site.xml
+++ /dev/null
@@ -1,68 +0,0 @@
-<?xml version="1.0"?>
-<configuration>
-  <property>
-    <name>mapred.job.tracker</name>
-    <value>hadoop-master-2:8021</value>
-  </property>
-  <property>
-    <name>mapreduce.jobhistory.address</name>
-    <value>hadoop-master-2:10020</value>
-  </property>
-  <property>
-    <name>mapreduce.jobhistory.webapp.address</name>
-    <value>hadoop-master-2:19888</value>
-  </property>
-  <property>
-    <name>mapreduce.jobhistory.keytab</name>
-    <value>/etc/hadoop/conf/mapred.keytab</value>
-  </property>
-  <property>
-    <name>mapreduce.jobhistory.principal</name>
-    <value>mapred/hadoop-master-2@OTHERREALM.COM</value>
-  </property>
-  <property>
-    <name>mapreduce.jobtracker.kerberos.principal</name>
-    <value>mapred/hadoop-master-2@OTHERREALM.COM</value>
-  </property>
-  <property>
-    <name>mapreduce.jobtracker.keytab.file</name>
-    <value>/etc/hadoop/conf/mapred.keytab</value>
-  </property>
-  <property>
-    <name>mapreduce.tasktracker.kerberos.principal</name>
-    <value>mapred/hadoop-master-2@OTHERREALM.COM</value>
-  </property>
-  <property>
-    <name>mapreduce.tasktracker.keytab.file</name>
-    <value>/etc/hadoop/conf/mapred.keytab</value>
-  </property>
-</configuration>
diff --git a/testing/hdp2.6-hive-kerberized-2/files/overrides/etc/hadoop/conf/yarn-site.xml b/testing/hdp2.6-hive-kerberized-2/files/overrides/etc/hadoop/conf/yarn-site.xml
deleted file mode 100644
index 7688c9e1..00000000
--- a/testing/hdp2.6-hive-kerberized-2/files/overrides/etc/hadoop/conf/yarn-site.xml
+++ /dev/null
@@ -1,53 +0,0 @@
-<?xml version="1.0"?>
-<configuration>
-  <property>
-    <name>yarn.resourcemanager.hostname</name>
-    <value>hadoop-master-2</value>
-  </property>
-  <property>
-    <name>yarn.log.server.url</name>
-    <value>http://hadoop-master-2:19888/jobhistory/logs</value>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.keytab</name>
-    <value>/etc/hadoop/conf/yarn.keytab</value>
-  </property>
-  <property>
-    <name>yarn.resourcemanager.principal</name>
-    <value>yarn/hadoop-master-2@OTHERREALM.COM</value>
-  </property>
-  <property>
-    <name>yarn.nodemanager.keytab</name>
-    <value>/etc/hadoop/conf/yarn.keytab</value>
-  </property>
-  <property>
-    <name>yarn.nodemanager.principal</name>
-    <value>yarn/hadoop-master-2@OTHERREALM.COM</value>
-  </property>
-</configuration>
diff --git a/testing/hdp2.6-hive-kerberized-2/files/overrides/etc/hive/conf/hive-site.xml b/testing/hdp2.6-hive-kerberized-2/files/overrides/etc/hive/conf/hive-site.xml
deleted file mode 100644
index d61e1542..00000000
--- a/testing/hdp2.6-hive-kerberized-2/files/overrides/etc/hive/conf/hive-site.xml
+++ /dev/null
@@ -1,67 +0,0 @@
-<?xml version="1.0"?>
-<configuration>
-  <property>
-    <name>hive.server2.authentication</name>
-    <value>KERBEROS</value>
-  </property>
-  <property>
-    <name>hive.server2.enable.impersonation</name>
-    <value>true</value>
-  </property>
-  <property>
-    <name>hive.server2.authentication.kerberos.principal</name>
-    <value>hive/hadoop-master-2@OTHERREALM.COM</value>
-  </property>
-  <property>
-    <name>hive.server2.authentication.kerberos.keytab</name>
-    <value>/etc/hive/conf/hive.keytab</value>
-  </property>
-  <property>
-    <name>hive.metastore.sasl.enabled</name>
-    <value>true</value>
-  </property>
-  <property>
-    <name>hive.metastore.kerberos.keytab.file</name>
-    <value>/etc/hive/conf/hive.keytab</value>
-  </property>
-  <property>
-    <name>hive.metastore.kerberos.principal</name>
-    <value>hive/hadoop-master-2@OTHERREALM.COM</value>
-  </property>
-  <property>
-    <name>hive.security.authorization.manager</name>
-    <value>org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdConfOnlyAuthorizerFactory</value>
-  </property>
-  <property>
-    <name>hive.security.authorization.task.factory</name>
-    <value>org.apache.hadoop.hive.ql.parse.authorization.HiveAuthorizationTaskFactoryImpl</value>
-  </property>
-</configuration>
diff --git a/testing/hdp2.6-hive-kerberized-2/files/var/kerberos/krb5kdc/kadm5.acl b/testing/hdp2.6-hive-kerberized-2/files/var/kerberos/krb5kdc/kadm5.acl
deleted file mode 100644
index c508fa67..00000000
--- a/testing/hdp2.6-hive-kerberized-2/files/var/kerberos/krb5kdc/kadm5.acl
+++ /dev/null
@@ -1 +0,0 @@
-*/admin@OTHERREALM.COM *
diff --git a/testing/hdp2.6-hive-kerberized-2/files/var/kerberos/krb5kdc/kdc.conf b/testing/hdp2.6-hive-kerberized-2/files/var/kerberos/krb5kdc/kdc.conf
deleted file mode 100644
index ccd0cad0..00000000
--- a/testing/hdp2.6-hive-kerberized-2/files/var/kerberos/krb5kdc/kdc.conf
+++ /dev/null
@@ -1,11 +0,0 @@
-[kdcdefaults]
- kdc_ports = 88
- kdc_tcp_ports = 88
-
-[realms]
- OTHERREALM.COM = {
- acl_file = /var/kerberos/krb5kdc/kadm5.acl
- dict_file = /usr/share/dict/words
- admin_keytab = /var/kerberos/krb5kdc/kadm5.keytab
- supported_enctypes = aes128-cts:normal des3-hmac-sha1:normal arcfour-hmac:normal des-hmac-sha1:normal des-cbc-md5:normal des-cbc-crc:normal
- }
diff --git a/testing/hdp2.6-hive-kerberized/Dockerfile b/testing/hdp2.6-hive-kerberized/Dockerfile
deleted file mode 100644
index 3c501a88..00000000
--- a/testing/hdp2.6-hive-kerberized/Dockerfile
+++ /dev/null
@@ -1,123 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-FROM testing/hdp2.6-hive:unlabelled
-
-ARG ADDPRINC_ARGS="-maxrenewlife \"10 days\" +allow_renewable"
-
-# INSTALL KERBEROS
-RUN yum install -y krb5-libs krb5-server krb5-workstation \
- && yum -y clean all && rm -rf /tmp/* /var/tmp/*
-
-# COPY CONFIGURATION
-COPY ./files /
-
-# Apply configuration overrides and remove them so they don't get reapplied
-RUN /usr/local/bin/apply-all-site-xml-overrides /overrides && rm -Rf /overrides
-
-# CREATE KERBEROS DATABASE
-RUN /usr/sbin/kdb5_util create -s -P password
-
-# CREATE ANOTHER KERBEROS DATABASE
-RUN /usr/sbin/kdb5_util create -d /var/kerberos/krb5kdc/principal-other -r OTHERLABS.TERADATA.COM -s -P password
-
-# ADD HADOOP PRINCIPALS
-RUN /usr/sbin/kadmin.local -q "addprinc ${ADDPRINC_ARGS} -randkey hdfs/hadoop-master@LABS.TERADATA.COM" \
- && /usr/sbin/kadmin.local -q "addprinc ${ADDPRINC_ARGS} -randkey mapred/hadoop-master@LABS.TERADATA.COM" \
- && /usr/sbin/kadmin.local -q "addprinc ${ADDPRINC_ARGS} -randkey yarn/hadoop-master@LABS.TERADATA.COM" \
- && /usr/sbin/kadmin.local -q "addprinc ${ADDPRINC_ARGS} -randkey HTTP/hadoop-master@LABS.TERADATA.COM"
-
-# CREATE HADOOP KEYTAB FILES
-RUN /usr/sbin/kadmin.local -q "xst -norandkey -k /etc/hadoop/conf/hdfs.keytab hdfs/hadoop-master HTTP/hadoop-master" \
- && /usr/sbin/kadmin.local -q "xst -norandkey -k /etc/hadoop/conf/mapred.keytab mapred/hadoop-master HTTP/hadoop-master" \
- && /usr/sbin/kadmin.local -q "xst -norandkey -k /etc/hadoop/conf/yarn.keytab yarn/hadoop-master HTTP/hadoop-master" \
- && /usr/sbin/kadmin.local -q "xst -norandkey -k /etc/hadoop/conf/HTTP.keytab HTTP/hadoop-master"
-RUN chown hdfs:hadoop /etc/hadoop/conf/hdfs.keytab \
- && chown mapred:hadoop /etc/hadoop/conf/mapred.keytab \
- && chown yarn:hadoop /etc/hadoop/conf/yarn.keytab \
- && chown hdfs:hadoop /etc/hadoop/conf/HTTP.keytab \
- && chmod 644 /etc/hadoop/conf/*.keytab
-
-# CREATE HIVE PRINCIPAL AND KEYTAB
-RUN /usr/sbin/kadmin.local -q "addprinc ${ADDPRINC_ARGS} -randkey hive/hadoop-master@LABS.TERADATA.COM" \
- && /usr/sbin/kadmin.local -q "xst -norandkey -k /etc/hive/conf/hive.keytab hive/hadoop-master"
-RUN chown hive:hadoop /etc/hive/conf/hive.keytab \
- && chmod 644 /etc/hive/conf/hive.keytab
-
-# CREATE HIVE PRINCIPAL IN THE OTHER REALM
-RUN /usr/sbin/kadmin.local -r OTHERLABS.TERADATA.COM -d /var/kerberos/krb5kdc/principal-other -q "addprinc ${ADDPRINC_ARGS} -randkey hive/hadoop-master@OTHERLABS.TERADATA.COM" \
- && /usr/sbin/kadmin.local -r OTHERLABS.TERADATA.COM -d /var/kerberos/krb5kdc/principal-other -q "xst -norandkey -k /etc/hive/conf/hive-other.keytab hive/hadoop-master"
-RUN chown hive:hadoop /etc/hive/conf/hive-other.keytab \
- && chmod 644 /etc/hive/conf/hive-other.keytab
-
-# CREATE HDFS PRINCIPAL IN OTHER REALM
-RUN /usr/sbin/kadmin.local -r OTHERLABS.TERADATA.COM -d /var/kerberos/krb5kdc/principal-other -q "addprinc ${ADDPRINC_ARGS} -randkey hdfs/hadoop-master@OTHERLABS.TERADATA.COM" \
- && /usr/sbin/kadmin.local -r OTHERLABS.TERADATA.COM -d /var/kerberos/krb5kdc/principal-other -q "xst -norandkey -k /etc/hadoop/conf/hdfs-other.keytab hdfs/hadoop-master"
-RUN chown hdfs:hadoop /etc/hadoop/conf/hdfs-other.keytab \
- && chmod 644 /etc/hadoop/conf/hdfs-other.keytab
-
-# MAKE 'LABS.TERADATA.COM' TRUST 'OTHERLABS.TERADATA.COM'
-RUN /usr/sbin/kadmin.local -q "addprinc ${ADDPRINC_ARGS} -pw 123456 krbtgt/LABS.TERADATA.COM@OTHERLABS.TERADATA.COM"
-RUN /usr/sbin/kadmin.local -r OTHERLABS.TERADATA.COM -d /var/kerberos/krb5kdc/principal-other -q "addprinc ${ADDPRINC_ARGS} -pw 123456 krbtgt/LABS.TERADATA.COM"
-
-# Create legacy Presto and Trino principals and add them to keytabs
-RUN set -xeu && \
- for hostname in presto-master trino-coordinator presto-worker trino-worker presto-worker-1 trino-worker-1 presto-worker-2 trino-worker-2; do \
- /usr/sbin/kadmin.local -q "addprinc ${ADDPRINC_ARGS} -randkey presto-server/${hostname}.docker.cluster@LABS.TERADATA.COM" \
- && /usr/sbin/kadmin.local -q "addprinc ${ADDPRINC_ARGS} -randkey trino-server/${hostname}.docker.cluster@LABS.TERADATA.COM" \
- && /usr/sbin/kadmin.local -q "addprinc ${ADDPRINC_ARGS} -randkey HTTP/${hostname}.docker.cluster@LABS.TERADATA.COM" \
- && /usr/sbin/kadmin.local -q "addprinc ${ADDPRINC_ARGS} -randkey presto-client/${hostname}.docker.cluster@LABS.TERADATA.COM" \
- && /usr/sbin/kadmin.local -q "addprinc ${ADDPRINC_ARGS} -randkey trino-client/${hostname}.docker.cluster@LABS.TERADATA.COM" \
- && /usr/sbin/kadmin.local -q "addprinc ${ADDPRINC_ARGS} -randkey hive/${hostname}.docker.cluster@LABS.TERADATA.COM" \
- && mkdir -p /etc/trino/conf \
- && /usr/sbin/kadmin.local -q "xst -norandkey -k /etc/trino/conf/presto-server.keytab presto-server/${hostname}.docker.cluster" \
- && /usr/sbin/kadmin.local -q "xst -norandkey -k /etc/trino/conf/trino-server.keytab trino-server/${hostname}.docker.cluster" \
- && /usr/sbin/kadmin.local -q "xst -norandkey -k /etc/trino/conf/presto-server-HTTP.keytab HTTP/${hostname}.docker.cluster" \
- && /usr/sbin/kadmin.local -q "xst -norandkey -k /etc/trino/conf/trino-client.keytab trino-client/${hostname}.docker.cluster" \
- && /usr/sbin/kadmin.local -q "xst -norandkey -k /etc/trino/conf/presto-client.keytab presto-client/${hostname}.docker.cluster" \
- && /usr/sbin/kadmin.local -q "xst -norandkey -k /etc/trino/conf/hive-presto-master.keytab hive/${hostname}.docker.cluster"; \
- done && echo "OK" && \
- chmod 644 /etc/trino/conf/*.keytab
-
-# CREATE SSL KEYSTORE
-RUN keytool -genkeypair \
- -alias presto \
- -keyalg RSA \
- -keystore /etc/trino/conf/keystore.jks \
- -keypass password \
- -storepass password \
- -dname "CN=presto-master, OU=, O=, L=, S=, C=" \
- -validity 100000 && \
- keytool -genkeypair \
- -alias trino \
- -keyalg RSA \
- -keystore /etc/trino/conf/keystore.jks \
- -keypass password \
- -storepass password \
- -dname "CN=trino-coordinator, OU=, O=, L=, S=, C=" \
- -validity 100000
-RUN chmod 644 /etc/trino/conf/keystore.jks
-
-# Provide convenience bash history
-RUN set -xeu; \
- for user in root hive hdfs; do \
- sudo -u "${user}" bash -c ' echo "klist -kt /etc/hive/conf/hive.keytab" >> ~/.bash_history '; \
- sudo -u "${user}" bash -c ' echo "kinit -kt /etc/hive/conf/hive.keytab hive/hadoop-master@LABS.TERADATA.COM" >> ~/.bash_history '; \
- sudo -u "${user}" bash -c ' echo "beeline -u \"jdbc:hive2://hadoop-master:10000/default;principal=hive/hadoop-master@LABS.TERADATA.COM\"" >> ~/.bash_history '; \
- done
-
-# EXPOSE KERBEROS PORTS
-EXPOSE 88
-EXPOSE 89
-EXPOSE 749
-
-CMD supervisord -c /etc/supervisord.conf
diff --git a/testing/hdp2.6-hive-kerberized/files/etc/hive/conf/hiveserver2-site.xml b/testing/hdp2.6-hive-kerberized/files/etc/hive/conf/hiveserver2-site.xml
deleted file mode 100644
index 41280243..00000000
--- a/testing/hdp2.6-hive-kerberized/files/etc/hive/conf/hiveserver2-site.xml
+++ /dev/null
@@ -1,38 +0,0 @@
-<?xml version="1.0"?>
-<configuration>
-  <property>
-    <name>hive.security.authenticator.manager</name>
-    <value>org.apache.hadoop.hive.ql.security.SessionStateUserAuthenticator</value>
-  </property>
-  <property>
-    <name>hive.security.authorization.manager</name>
-    <value>org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory</value>
-    <description>SQL standards based Hive authorization</description>
-  </property>
-  <property>
-    <name>hive.security.authorization.enabled</name>
-    <value>true</value>
-  </property>
-</configuration>
diff --git a/testing/hdp2.6-hive-kerberized/files/etc/krb5.conf b/testing/hdp2.6-hive-kerberized/files/etc/krb5.conf
deleted file mode 100644
index ed0b5502..00000000
--- a/testing/hdp2.6-hive-kerberized/files/etc/krb5.conf
+++ /dev/null
@@ -1,21 +0,0 @@
-[logging]
- default = FILE:/var/log/krb5libs.log
- kdc = FILE:/var/log/krb5kdc.log
- admin_server = FILE:/var/log/kadmind.log
-
-[libdefaults]
- default_realm = LABS.TERADATA.COM
- dns_lookup_realm = false
- dns_lookup_kdc = false
- forwardable = true
- allow_weak_crypto = true
-
-[realms]
- LABS.TERADATA.COM = {
- kdc = hadoop-master:88
- admin_server = hadoop-master
- }
- OTHERLABS.TERADATA.COM = {
- kdc = hadoop-master:89
- admin_server = hadoop-master
- }
diff --git a/testing/hdp2.6-hive-kerberized/files/etc/supervisord.d/kdc.conf b/testing/hdp2.6-hive-kerberized/files/etc/supervisord.d/kdc.conf
deleted file mode 100644
index 8cf4e35f..00000000
--- a/testing/hdp2.6-hive-kerberized/files/etc/supervisord.d/kdc.conf
+++ /dev/null
@@ -1,15 +0,0 @@
-[program:krb5kdc]
-command=/bin/bash -c "exec /usr/sbin/krb5kdc -P /var/run/krb5kdc.pid -n -r LABS.TERADATA.COM -n -d /var/kerberos/krb5kdc/principal-other -r OTHERLABS.TERADATA.COM"
-autostart=true
-autorestart=true
-redirect_stderr=true
-stdout_logfile=/dev/stdout
-stdout_logfile_maxbytes=0
-
-[program:kadmind]
-command=/bin/bash -c "exec /usr/sbin/kadmind -P /var/run/kadmind.pid -nofork -r LABS.TERADATA.COM"
-autostart=true
-autorestart=true
-redirect_stderr=true
-stdout_logfile=/dev/stdout
-stdout_logfile_maxbytes=0
diff --git a/testing/hdp2.6-hive-kerberized/files/overrides/etc/hadoop/conf/core-site.xml b/testing/hdp2.6-hive-kerberized/files/overrides/etc/hadoop/conf/core-site.xml
deleted file mode 100644
index 8e66fd50..00000000
--- a/testing/hdp2.6-hive-kerberized/files/overrides/etc/hadoop/conf/core-site.xml
+++ /dev/null
@@ -1,50 +0,0 @@
-<?xml version="1.0"?>
-<configuration>
-  <property>
-    <name>hadoop.proxyuser.presto-server.groups</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.proxyuser.presto-server.hosts</name>
-    <value>*</value>
-  </property>
-  <property>
-    <name>hadoop.security.authentication</name>
-    <value>kerberos</value>
-  </property>
-  <property>
-    <name>hadoop.security.authorization</name>
-    <value>true</value>
-  </property>
-  <property>
-    <name>hadoop.security.auth_to_local</name>
-    <value>
-      RULE:[2:$1@$0](.*@OTHERLABS.TERADATA.COM)s/@.*//
-      DEFAULT
-    </value>
-  </property>
-</configuration>
diff --git a/testing/hdp2.6-hive-kerberized/files/overrides/etc/hadoop/conf/hdfs-site.xml b/testing/hdp2.6-hive-kerberized/files/overrides/etc/hadoop/conf/hdfs-site.xml
deleted file mode 100644
index 048712b5..00000000
--- a/testing/hdp2.6-hive-kerberized/files/overrides/etc/hadoop/conf/hdfs-site.xml
+++ /dev/null
@@ -1,89 +0,0 @@
-<?xml version="1.0"?>
-<configuration>
-  <property>
-    <name>dfs.block.access.token.enable</name>
-    <value>true</value>
-  </property>
-  <property>
-    <name>dfs.namenode.keytab.file</name>
-    <value>/etc/hadoop/conf/hdfs.keytab</value>
-  </property>
-  <property>
-    <name>dfs.namenode.kerberos.principal</name>
-    <value>hdfs/hadoop-master@LABS.TERADATA.COM</value>
-  </property>
-  <property>
-    <name>dfs.namenode.kerberos.internal.spnego.principal</name>
-    <value>HTTP/hadoop-master@LABS.TERADATA.COM</value>
-  </property>
-  <property>
-    <name>dfs.secondary.namenode.keytab.file</name>
-    <value>/etc/hadoop/conf/hdfs.keytab</value>
-  </property>
-  <property>
-    <name>dfs.secondary.namenode.kerberos.principal</name>
-    <value>hdfs/hadoop-master@LABS.TERADATA.COM</value>
-  </property>
-  <property>
-    <name>dfs.secondary.namenode.kerberos.internal.spnego.principal</name>
-    <value>HTTP/hadoop-master@LABS.TERADATA.COM</value>
-  </property>
-  <property>
-    <name>dfs.datanode.keytab.file</name>
-    <value>/etc/hadoop/conf/hdfs.keytab</value>
-  </property>
-  <property>
-    <name>dfs.datanode.kerberos.principal</name>
-    <value>hdfs/hadoop-master@LABS.TERADATA.COM</value>
-  </property>
-  <property>
-    <name>dfs.webhdfs.enabled</name>
-    <value>true</value>
-  </property>
-  <property>
-    <name>dfs.web.authentication.kerberos.principal</name>
-    <value>HTTP/hadoop-master@LABS.TERADATA.COM</value>
-  </property>
-  <property>
-    <name>dfs.web.authentication.kerberos.keytab</name>
-    <value>/etc/hadoop/conf/HTTP.keytab</value>
-  </property>
-  <property>
-    <name>ignore.secure.ports.for.testing</name>
-    <value>true</value>
-  </property>
-  <property>
-    <name>dfs.http.policy</name>
-    <value>HTTP_ONLY</value>
-  </property>
-</configuration>
diff --git a/testing/hdp2.6-hive-kerberized/files/overrides/etc/hadoop/conf/mapred-site.xml b/testing/hdp2.6-hive-kerberized/files/overrides/etc/hadoop/conf/mapred-site.xml
deleted file mode 100644
index 63e213a7..00000000
--- a/testing/hdp2.6-hive-kerberized/files/overrides/etc/hadoop/conf/mapred-site.xml
+++ /dev/null
@@ -1,53 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<configuration>
-
-    <property>
-        <name>mapreduce.jobhistory.keytab</name>
-        <value>/etc/hadoop/conf/mapred.keytab</value>
-    </property>
-
-    <property>
-        <name>mapreduce.jobhistory.principal</name>
-        <value>mapred/hadoop-master@LABS.TERADATA.COM</value>
-    </property>
-
-    <property>
-        <name>mapreduce.jobtracker.kerberos.principal</name>
-        <value>mapred/hadoop-master@LABS.TERADATA.COM</value>
-    </property>
-
-    <property>
-        <name>mapreduce.jobtracker.keytab.file</name>
-        <value>/etc/hadoop/conf/mapred.keytab</value>
-    </property>
-
-    <property>
-        <name>mapreduce.tasktracker.kerberos.principal</name>
-        <value>mapred/hadoop-master@LABS.TERADATA.COM</value>
-    </property>
-
-    <property>
-        <name>mapreduce.tasktracker.keytab.file</name>
-        <value>/etc/hadoop/conf/mapred.keytab</value>
-    </property>
-
-</configuration>
diff --git a/testing/hdp2.6-hive-kerberized/files/overrides/etc/hadoop/conf/yarn-site.xml b/testing/hdp2.6-hive-kerberized/files/overrides/etc/hadoop/conf/yarn-site.xml
deleted file mode 100644
index 5940f105..00000000
--- a/testing/hdp2.6-hive-kerberized/files/overrides/etc/hadoop/conf/yarn-site.xml
+++ /dev/null
@@ -1,43 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<configuration>
-
-    <property>
-        <name>yarn.resourcemanager.keytab</name>
-        <value>/etc/hadoop/conf/yarn.keytab</value>
-    </property>
-
-    <property>
-        <name>yarn.resourcemanager.principal</name>
-        <value>yarn/hadoop-master@LABS.TERADATA.COM</value>
-    </property>
-
-    <property>
-        <name>yarn.nodemanager.keytab</name>
-        <value>/etc/hadoop/conf/yarn.keytab</value>
-    </property>
-
-    <property>
-        <name>yarn.nodemanager.principal</name>
-        <value>yarn/hadoop-master@LABS.TERADATA.COM</value>
-    </property>
-
-</configuration>
diff --git a/testing/hdp2.6-hive-kerberized/files/overrides/etc/hive/conf/hive-site.xml b/testing/hdp2.6-hive-kerberized/files/overrides/etc/hive/conf/hive-site.xml
deleted file mode 100644
index 5039bc29..00000000
--- a/testing/hdp2.6-hive-kerberized/files/overrides/etc/hive/conf/hive-site.xml
+++ /dev/null
@@ -1,67 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<configuration>
-
-    <property>
-        <name>hive.server2.authentication</name>
-        <value>KERBEROS</value>
-    </property>
-
-    <property>
-        <name>hive.server2.enable.impersonation</name>
-        <value>true</value>
-    </property>
-
-    <property>
-        <name>hive.server2.authentication.kerberos.principal</name>
-        <value>hive/hadoop-master@LABS.TERADATA.COM</value>
-    </property>
-
-    <property>
-        <name>hive.server2.authentication.kerberos.keytab</name>
-        <value>/etc/hive/conf/hive.keytab</value>
-    </property>
-
-    <property>
-        <name>hive.metastore.sasl.enabled</name>
-        <value>true</value>
-    </property>
-
-    <property>
-        <name>hive.metastore.kerberos.keytab.file</name>
-        <value>/etc/hive/conf/hive.keytab</value>
-    </property>
-
-    <property>
-        <name>hive.metastore.kerberos.principal</name>
-        <value>hive/hadoop-master@LABS.TERADATA.COM</value>
-    </property>
-
-    <property>
-        <name>hive.security.authorization.manager</name>
-        <value>org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdConfOnlyAuthorizerFactory</value>
-    </property>
-
-    <property>
-        <name>hive.security.authorization.task.factory</name>
-        <value>org.apache.hadoop.hive.ql.parse.authorization.HiveAuthorizationTaskFactoryImpl</value>
-    </property>
-
-</configuration>
diff --git a/testing/hdp2.6-hive-kerberized/files/var/kerberos/krb5kdc/kadm5-other.acl b/testing/hdp2.6-hive-kerberized/files/var/kerberos/krb5kdc/kadm5-other.acl
deleted file mode 100644
index e115bde2..00000000
--- a/testing/hdp2.6-hive-kerberized/files/var/kerberos/krb5kdc/kadm5-other.acl
+++ /dev/null
@@ -1 +0,0 @@
-*/admin@OTHERLABS.TERADATA.COM *
diff --git a/testing/hdp2.6-hive-kerberized/files/var/kerberos/krb5kdc/kadm5.acl b/testing/hdp2.6-hive-kerberized/files/var/kerberos/krb5kdc/kadm5.acl
deleted file mode 100644
index 0530526a..00000000
--- a/testing/hdp2.6-hive-kerberized/files/var/kerberos/krb5kdc/kadm5.acl
+++ /dev/null
@@ -1 +0,0 @@
-*/admin@LABS.TERADATA.COM *
diff --git a/testing/hdp2.6-hive-kerberized/files/var/kerberos/krb5kdc/kdc.conf b/testing/hdp2.6-hive-kerberized/files/var/kerberos/krb5kdc/kdc.conf
deleted file mode 100644
index 03c5b468..00000000
--- a/testing/hdp2.6-hive-kerberized/files/var/kerberos/krb5kdc/kdc.conf
+++ /dev/null
@@ -1,23 +0,0 @@
-[kdcdefaults]
- kdc_ports = 88
- kdc_tcp_ports = 88
-
-[realms]
- LABS.TERADATA.COM = {
- acl_file = /var/kerberos/krb5kdc/kadm5.acl
- dict_file = /usr/share/dict/words
- admin_keytab = /var/kerberos/krb5kdc/kadm5.keytab
- supported_enctypes = aes128-cts:normal des3-hmac-sha1:normal arcfour-hmac:normal des-hmac-sha1:normal des-cbc-md5:normal des-cbc-crc:normal
- }
-
- OTHERLABS.TERADATA.COM = {
- acl_file = /var/kerberos/krb5kdc/kadm5-other.acl
- dict_file = /usr/share/dict/words
- admin_keytab = /var/kerberos/krb5kdc/kadm5-other.keytab
- supported_enctypes = aes128-cts:normal des3-hmac-sha1:normal arcfour-hmac:normal des-hmac-sha1:normal des-cbc-md5:normal des-cbc-crc:normal
- kdc_listen = 89
- kdc_tcp_listen = 89
- kdc_ports = 89
- kdc_tcp_ports = 89
- }
-
diff --git a/testing/hdp2.6-hive/Dockerfile b/testing/hdp2.6-hive/Dockerfile
deleted file mode 100644
index e63f0013..00000000
--- a/testing/hdp2.6-hive/Dockerfile
+++ /dev/null
@@ -1,43 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Cloudera removed an access to HDP repositories in April 2021.
-# In order to build new image version we use last released version as base.
-# Previous Dockerfile version is archived in archived/hdp2.6-hive.
-FROM ghcr.io/trinodb/testing/hdp2.6-hive:38
-
-COPY ./files /
-
-RUN \
- set -xeu && \
- # Remove unaccessible HDP2 repos so yum is still usable
- rm /etc/yum.repos.d/hdp*.repo && \
- # Install Zulu JDK 17.0.4
- rpm -i https://cdn.azul.com/zulu/bin/zulu17.36.13-ca-jdk17.0.4-linux.x86_64.rpm && \
- # Set JDK 17 as a default one
- alternatives --set java /usr/lib/jvm/zulu-17/bin/java && \
- alternatives --set javac /usr/lib/jvm/zulu-17/bin/javac && \
- echo "Done"
-
-# HDFS ports
-EXPOSE 1004 1006 8020 50010 50020 50070 50075 50470
-
-# YARN ports
-EXPOSE 8030 8031 8032 8033 8040 8041 8042 8088 10020 19888
-
-# HIVE ports
-EXPOSE 9083 10000
-
-# SOCKS port
-EXPOSE 1180
-
-CMD supervisord -c /etc/supervisord.conf
diff --git a/testing/hdp2.6-hive/files/etc/hadoop/conf/capacity-scheduler.xml b/testing/hdp2.6-hive/files/etc/hadoop/conf/capacity-scheduler.xml
deleted file mode 100644
index 0e15fb6b..00000000
--- a/testing/hdp2.6-hive/files/etc/hadoop/conf/capacity-scheduler.xml
+++ /dev/null
@@ -1,100 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<configuration>
-
-    <property>
-        <name>yarn.scheduler.capacity.maximum-applications</name>
-        <value>10000</value>
-        <description>
-            Maximum number of applications that can be pending and running.
-        </description>
-    </property>
-
-    <property>
-        <name>yarn.scheduler.capacity.maximum-am-resource-percent</name>
-        <value>1</value>
-        <description>
-            Maximum percent of resources in the cluster which can be used to run
-            application masters i.e. controls number of concurrent running
-            applications.
-        </description>
-    </property>
-
-    <property>
-        <name>yarn.scheduler.capacity.root.queues</name>
-        <value>default</value>
-        <description>
-            The queues at the this level (root is the root queue).
-        </description>
-    </property>
-
-    <property>
-        <name>yarn.scheduler.capacity.root.default.capacity</name>
-        <value>100</value>
-        <description>Default queue target capacity.</description>
-    </property>
-
-    <property>
-        <name>yarn.scheduler.capacity.root.default.maximum-capacity</name>
-        <value>100</value>
-        <description>
-            The maximum capacity of the default queue.
-        </description>
-    </property>
-
-    <property>
-        <name>yarn.scheduler.capacity.root.default.state</name>
-        <value>RUNNING</value>
-        <description>
-            The state of the default queue. State can be one of RUNNING or STOPPED.
-        </description>
-    </property>
-
-    <property>
-        <name>yarn.scheduler.capacity.root.default.acl_submit_applications</name>
-        <value>*</value>
-        <description>
-            The ACL of who can submit jobs to the default queue.
-        </description>
-    </property>
-
-    <property>
-        <name>yarn.scheduler.capacity.root.default.user-limit-factor</name>
-        <value>1</value>
-        <description>
-            Default queue user limit a percentage from 0.0 to 1.0.
-        </description>
-    </property>
-
-    <property>
-        <name>yarn.scheduler.capacity.root.default.acl_administer_queue</name>
-        <value>*</value>
-        <description>
-            The ACL of who can administer jobs on the default queue.
-        </description>
-    </property>
-
-    <property>
-        <name>yarn.scheduler.capacity.node-locality-delay</name>
-        <value>-1</value>
-        <description>
-            Number of missed scheduling opportunities after which the CapacityScheduler
-            attempts to schedule rack-local containers.
-            Typically this should be set to number of racks in the cluster, this
-            feature is disabled by default, set to -1.
-        </description>
-    </property>
-
-</configuration>
diff --git a/testing/hdp2.6-hive/files/etc/hadoop/conf/core-site.xml b/testing/hdp2.6-hive/files/etc/hadoop/conf/core-site.xml
deleted file mode 100644
index 4dbdd062..00000000
--- a/testing/hdp2.6-hive/files/etc/hadoop/conf/core-site.xml
+++ /dev/null
@@ -1,97 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<configuration>
-
-    <property>
-        <name>fs.defaultFS</name>
-        <value>hdfs://hadoop-master:9000</value>
-    </property>
-
-    <property>
-        <name>hadoop.proxyuser.oozie.hosts</name>
-        <value>*</value>
-    </property>
-    <property>
-        <name>hadoop.proxyuser.oozie.groups</name>
-        <value>*</value>
-    </property>
-
-    <property>
-        <name>hadoop.proxyuser.httpfs.hosts</name>
-        <value>*</value>
-    </property>
-    <property>
-        <name>hadoop.proxyuser.httpfs.groups</name>
-        <value>*</value>
-    </property>
-
-    <property>
-        <name>hadoop.proxyuser.llama.hosts</name>
-        <value>*</value>
-    </property>
-    <property>
-        <name>hadoop.proxyuser.llama.groups</name>
-        <value>*</value>
-    </property>
-
-    <property>
-        <name>hadoop.proxyuser.hue.hosts</name>
-        <value>*</value>
-    </property>
-    <property>
-        <name>hadoop.proxyuser.hue.groups</name>
-        <value>*</value>
-    </property>
-
-    <property>
-        <name>hadoop.proxyuser.mapred.hosts</name>
-        <value>*</value>
-    </property>
-    <property>
-        <name>hadoop.proxyuser.mapred.groups</name>
-        <value>*</value>
-    </property>
-
-    <property>
-        <name>hadoop.proxyuser.hive.hosts</name>
-        <value>*</value>
-    </property>
-
-    <property>
-        <name>hadoop.proxyuser.hive.groups</name>
-        <value>*</value>
-    </property>
-
-    <property>
-        <name>hadoop.proxyuser.hdfs.groups</name>
-        <value>*</value>
-    </property>
-
-    <property>
-        <name>hadoop.proxyuser.hdfs.hosts</name>
-        <value>*</value>
-    </property>
-
-</configuration>
diff --git a/testing/hdp2.6-hive/files/etc/hadoop/conf/hadoop-env.sh b/testing/hdp2.6-hive/files/etc/hadoop/conf/hadoop-env.sh
deleted file mode 100644
index cb995f10..00000000
--- a/testing/hdp2.6-hive/files/etc/hadoop/conf/hadoop-env.sh
+++ /dev/null
@@ -1,27 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Set Hadoop-specific environment variables here.
-# Forcing YARN-based mapreduce implementaion.
-# Make sure to comment out if you want to go back to the default or
-# if you want this to be tweakable on a per-user basis
-# export HADOOP_MAPRED_HOME=/usr/lib/hadoop-mapreduce
-
-# The maximum amount of heap to use, in MB. Default is 1000.
-export HADOOP_HEAPSIZE=256
-
-# Extra Java runtime options. Empty by default.
-export HADOOP_NAMENODE_OPTS="$HADOOP_NAMENODE_OPTS -Xmx512m"
-export YARN_OPTS="$YARN_OPTS -Xmx256m"
diff --git a/testing/hdp2.6-hive/files/etc/hadoop/conf/hdfs-site.xml b/testing/hdp2.6-hive/files/etc/hadoop/conf/hdfs-site.xml
deleted file mode 100644
index 9d1e71aa..00000000
--- a/testing/hdp2.6-hive/files/etc/hadoop/conf/hdfs-site.xml
+++ /dev/null
@@ -1,33 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<configuration>
-
-    <property>
-        <name>dfs.namenode.name.dir</name>
-        <value>/var/lib/hadoop-hdfs/cache/name/</value>
-    </property>
-
-    <property>
-        <name>dfs.datanode.data.dir</name>
-        <value>/var/lib/hadoop-hdfs/cache/data/</value>
-    </property>
-
-    <property>
-        <name>fs.viewfs.mounttable.hadoop-viewfs.link./default</name>
-        <value>hdfs://hadoop-master:9000/user/hive/warehouse</value>
-    </property>
-
-</configuration>
diff --git a/testing/hdp2.6-hive/files/etc/hadoop/conf/mapred-site.xml b/testing/hdp2.6-hive/files/etc/hadoop/conf/mapred-site.xml
deleted file mode 100644
index aff356cf..00000000
--- a/testing/hdp2.6-hive/files/etc/hadoop/conf/mapred-site.xml
+++ /dev/null
@@ -1,46 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<configuration>
-
-    <property>
-        <name>mapred.job.tracker</name>
-        <value>hadoop-master:8021</value>
-    </property>
-
-    <property>
-        <name>mapreduce.framework.name</name>
-        <value>yarn</value>
-    </property>
-
-    <property>
-        <name>mapreduce.jobhistory.address</name>
-        <value>hadoop-master:10020</value>
-    </property>
-
-    <property>
-        <name>mapreduce.jobhistory.webapp.address</name>
-        <value>hadoop-master:19888</value>
-    </property>
-
-    <property>
-        <description>To set the value of tmp directory for map and reduce tasks.</description>
-        <name>mapreduce.task.tmp.dir</name>
-        <value>/var/lib/hadoop-mapreduce/cache/${user.name}/tasks</value>
-    </property>
-
-</configuration>
diff --git a/testing/hdp2.6-hive/files/etc/hadoop/conf/yarn-site.xml b/testing/hdp2.6-hive/files/etc/hadoop/conf/yarn-site.xml
deleted file mode 100644
index e7b04a6a..00000000
--- a/testing/hdp2.6-hive/files/etc/hadoop/conf/yarn-site.xml
+++ /dev/null
@@ -1,110 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<configuration>
-
-    <property>
-        <name>yarn.nodemanager.aux-services</name>
-        <value>mapreduce_shuffle</value>
-    </property>
-
-    <property>
-        <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
-        <value>org.apache.hadoop.mapred.ShuffleHandler</value>
-    </property>
-
-    <property>
-        <name>yarn.log-aggregation-enable</name>
-        <value>true</value>
-    </property>
-
-    <property>
-        <name>yarn.dispatcher.exit-on-error</name>
-        <value>true</value>
-    </property>
-
-    <property>
-        <description>List of directories to store localized files in.</description>
-        <name>yarn.nodemanager.local-dirs</name>
-        <value>/var/lib/hadoop-yarn/cache/${user.name}/nm-local-dir</value>
-    </property>
-
-    <property>
-        <description>Where to store container logs.</description>
-        <name>yarn.nodemanager.log-dirs</name>
-        <value>/var/log/hadoop-yarn/containers</value>
-    </property>
-
-    <property>
-        <description>Where to aggregate logs to.</description>
-        <name>yarn.nodemanager.remote-app-log-dir</name>
-        <value>/var/log/hadoop-yarn/apps</value>
-    </property>
-
-    <property>
-        <description>Classpath for typical applications.</description>
-        <name>yarn.application.classpath</name>
-        <value>
-            /etc/hadoop/conf,
-            /usr/hdp/current/hadoop-client/*,
-            /usr/hdp/current/hadoop-client/lib/*,
-            /usr/hdp/current/hadoop-hdfs-client/*,
-            /usr/hdp/current/hadoop-hdfs-client/lib/*,
-            /usr/hdp/current/hadoop-yarn-client/*,
-            /usr/hdp/current/hadoop-yarn-client/lib/*,
-            /usr/hdp/current/hadoop-mapreduce-client/*,
-            /usr/hdp/current/hadoop-mapreduce-client/lib/*
-        </value>
-    </property>
-
-    <property>
-        <name>yarn.resourcemanager.hostname</name>
-        <value>hadoop-master</value>
-    </property>
-
-    <property>
-        <name>yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage</name>
-        <value>100</value>
-    </property>
-
-    <property>
-        <name>yarn.nodemanager.pmem-check-enabled</name>
-        <value>false</value>
-    </property>
-
-    <property>
-        <name>yarn.nodemanager.vmem-check-enabled</name>
-        <value>false</value>
-    </property>
-
-    <property>
-        <name>yarn.nodemanager.resource.memory.enforced</name>
-        <value>false</value>
-    </property>
-
-    <property>
-        <name>yarn.nodemanager.elastic-memory-control.enabled</name>
-        <value>false</value>
-    </property>
-
-    <property>
-        <name>yarn.log.server.url</name>
-        <value>http://hadoop-master:19888/jobhistory/logs</value>
-    </property>
-
-</configuration>
diff --git a/testing/hdp2.6-hive/files/etc/hive/conf/hive-site.xml b/testing/hdp2.6-hive/files/etc/hive/conf/hive-site.xml
deleted file mode 100644
index 4dfaa2a5..00000000
--- a/testing/hdp2.6-hive/files/etc/hive/conf/hive-site.xml
+++ /dev/null
@@ -1,67 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<configuration>
-
-    <property>
-        <name>javax.jdo.option.ConnectionURL</name>
-        <value>jdbc:mysql://localhost/metastore</value>
-        <description>the URL of the MySQL database</description>
-    </property>
-
-    <property>
-        <name>javax.jdo.option.ConnectionDriverName</name>
-        <value>com.mysql.jdbc.Driver</value>
-    </property>
-
-    <property>
-        <name>javax.jdo.option.ConnectionUserName</name>
-        <value>root</value>
-    </property>
-
-    <property>
-        <name>javax.jdo.option.ConnectionPassword</name>
-        <value>root</value>
-    </property>
-
-    <property>
-        <name>datanucleus.autoCreateSchema</name>
-        <value>false</value>
-    </property>
-
-    <property>
-        <name>datanucleus.fixedDatastore</name>
-        <value>true</value>
-    </property>
-
-    <property>
-        <name>datanucleus.autoStartMechanism</name>
-        <value>SchemaTable</value>
-    </property>
-
-    <property>
-        <name>hive.security.authorization.createtable.owner.grants</name>
-        <value>ALL</value>
-        <description>The set of privileges automatically granted to the owner whenever a table gets created.</description>
-    </property>
-
-    <property>
-        <name>hive.users.in.admin.role</name>
-        <value>hdfs,hive</value>
-    </property>
-
-</configuration>
diff --git a/testing/hdp2.6-hive/files/etc/supervisord.conf b/testing/hdp2.6-hive/files/etc/supervisord.conf
deleted file mode 100644
index 2ac8dba5..00000000
--- a/testing/hdp2.6-hive/files/etc/supervisord.conf
+++ /dev/null
@@ -1,21 +0,0 @@
-[supervisord]
-logfile = /var/log/supervisord.log
-logfile_maxbytes = 50MB
-logfile_backups=10
-loglevel = info
-pidfile = /var/run/supervisord.pid
-nodaemon = true
-directory = /tmp
-strip_ansi = false
-
-[unix_http_server]
-file = /tmp/supervisor.sock
-
-[rpcinterface:supervisor]
-supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
-
-[supervisorctl]
-serverurl = unix:///tmp/supervisor.sock
-
-[include]
-files = /etc/supervisord.d/*.conf
diff --git a/testing/hdp2.6-hive/files/etc/supervisord.d/hdfs-datanode.conf b/testing/hdp2.6-hive/files/etc/supervisord.d/hdfs-datanode.conf
deleted file mode 100644
index 87fbcbed..00000000
--- a/testing/hdp2.6-hive/files/etc/supervisord.d/hdfs-datanode.conf
+++ /dev/null
@@ -1,8 +0,0 @@
-[program:hdfs-datanode]
-command=hdfs datanode
-startsecs=2
-stopwaitsecs=10
-user=hdfs
-redirect_stderr=true
-stdout_logfile=/var/log/hadoop-hdfs/hadoop-hdfs-datanode.log
-autostart=true
diff --git a/testing/hdp2.6-hive/files/etc/supervisord.d/hdfs-namenode.conf b/testing/hdp2.6-hive/files/etc/supervisord.d/hdfs-namenode.conf
deleted file mode 100644
index 0d8de8db..00000000
--- a/testing/hdp2.6-hive/files/etc/supervisord.d/hdfs-namenode.conf
+++ /dev/null
@@ -1,8 +0,0 @@
-[program:hdfs-namenode]
-command=hdfs namenode
-startsecs=2
-stopwaitsecs=10
-user=hdfs
-redirect_stderr=true
-stdout_logfile=/var/log/hadoop-hdfs/hadoop-hdfs-namenode.log
-autostart=true
diff --git a/testing/hdp2.6-hive/files/etc/supervisord.d/hive-metastore.conf b/testing/hdp2.6-hive/files/etc/supervisord.d/hive-metastore.conf
deleted file mode 100644
index f5aaf8ca..00000000
--- a/testing/hdp2.6-hive/files/etc/supervisord.d/hive-metastore.conf
+++ /dev/null
@@ -1,9 +0,0 @@
-[program:hive-metastore]
-# Add `--debug:port=5006` for debugging
-command=hive --service metastore
-startsecs=2
-stopwaitsecs=10
-user=hive
-redirect_stderr=true
-stdout_logfile=/var/log/hive/hive-metastore.log
-autostart=true
diff --git a/testing/hdp2.6-hive/files/etc/supervisord.d/hive-server2.conf b/testing/hdp2.6-hive/files/etc/supervisord.d/hive-server2.conf
deleted file mode 100644
index d090496f..00000000
--- a/testing/hdp2.6-hive/files/etc/supervisord.d/hive-server2.conf
+++ /dev/null
@@ -1,8 +0,0 @@
-[program:hive-server2]
-command=hive --service hiveserver2
-startsecs=2
-stopwaitsecs=10
-user=hive
-redirect_stderr=true
-stdout_logfile=/var/log/hive/hive-server2.log
-autostart=true
diff --git a/testing/hdp2.6-hive/files/etc/supervisord.d/mysql-metastore.conf b/testing/hdp2.6-hive/files/etc/supervisord.d/mysql-metastore.conf
deleted file mode 100644
index e95544e5..00000000
--- a/testing/hdp2.6-hive/files/etc/supervisord.d/mysql-metastore.conf
+++ /dev/null
@@ -1,8 +0,0 @@
-[program:mysql-metastore]
-command=/usr/bin/pidproxy /var/run/mysqld/mysqld.pid /usr/bin/mysqld_safe
-startsecs=2
-stopwaitsecs=10
-user=mysql
-redirect_stderr=true
-stdout_logfile=/var/log/mysql/mysql.log
-autostart=true
diff --git a/testing/hdp2.6-hive/files/etc/supervisord.d/socks-proxy.conf b/testing/hdp2.6-hive/files/etc/supervisord.d/socks-proxy.conf
deleted file mode 100644
index 43602d26..00000000
--- a/testing/hdp2.6-hive/files/etc/supervisord.d/socks-proxy.conf
+++ /dev/null
@@ -1,9 +0,0 @@
-[program:socks-proxy]
-command=/usr/bin/ssh -o StrictHostKeyChecking=no -v -N -D 0.0.0.0:1180 localhost
-startsecs=2
-stopwaitsecs=10
-startretries=30
-user=root
-redirect_stderr=true
-stdout_logfile=/var/log/socks-proxy
-autostart=true
diff --git a/testing/hdp2.6-hive/files/etc/supervisord.d/sshd.conf b/testing/hdp2.6-hive/files/etc/supervisord.d/sshd.conf
deleted file mode 100644
index 3930b4c0..00000000
--- a/testing/hdp2.6-hive/files/etc/supervisord.d/sshd.conf
+++ /dev/null
@@ -1,9 +0,0 @@
-[program:sshd]
-command=/usr/sbin/sshd -D -e
-startsecs=2
-stopwaitsecs=10
-startretries=30
-user=root
-redirect_stderr=true
-stdout_logfile=/var/log/sshd
-autostart=true
diff --git a/testing/hdp2.6-hive/files/etc/supervisord.d/yarn-nodemanager.conf b/testing/hdp2.6-hive/files/etc/supervisord.d/yarn-nodemanager.conf
deleted file mode 100644
index 8f81e3c7..00000000
--- a/testing/hdp2.6-hive/files/etc/supervisord.d/yarn-nodemanager.conf
+++ /dev/null
@@ -1,8 +0,0 @@
-[program:yarn-nodemanager]
-command=yarn nodemanager
-startsecs=2
-stopwaitsecs=10
-user=yarn
-redirect_stderr=true
-stdout_logfile=/var/log/hadoop-yarn/hadoop-yarn-nodemanager.log
-autostart=true
diff --git a/testing/hdp2.6-hive/files/etc/supervisord.d/yarn-resourcemanager.conf b/testing/hdp2.6-hive/files/etc/supervisord.d/yarn-resourcemanager.conf
deleted file mode 100644
index 2ef16026..00000000
--- a/testing/hdp2.6-hive/files/etc/supervisord.d/yarn-resourcemanager.conf
+++ /dev/null
@@ -1,8 +0,0 @@
-[program:yarn-resourcemanager]
-command=yarn resourcemanager
-startsecs=2
-stopwaitsecs=10
-user=yarn
-redirect_stderr=true
-stdout_logfile=/var/log/hadoop-yarn/hadoop-yarn-resourcemanager.log
-autostart=true
diff --git a/testing/hdp2.6-hive/files/root/setup.sh b/testing/hdp2.6-hive/files/root/setup.sh
deleted file mode 100755
index 30ca16a7..00000000
--- a/testing/hdp2.6-hive/files/root/setup.sh
+++ /dev/null
@@ -1,51 +0,0 @@
-#!/bin/bash -ex
-
-# format namenode
-chown hdfs:hdfs /var/lib/hadoop-hdfs/cache/
-
-# workaround for 'could not open session' bug as suggested here:
-# https://github.com/docker/docker/issues/7056#issuecomment-49371610
-rm -f /etc/security/limits.d/hdfs.conf
-su -c "echo 'N' | hdfs namenode -format" hdfs
-
-# start hdfs
-su -c "hdfs namenode 2>&1 > /var/log/hadoop-hdfs/hadoop-hdfs-namenode.log" hdfs&
-
-# wait for process starting
-sleep 15
-
-# init basic hdfs directories
-/usr/hdp/current/hadoop-client/libexec/init-hdfs.sh
-
-# 4.1 Create an hdfs home directory for the yarn user. For some reason, init-hdfs doesn't do so.
-su -s /bin/bash hdfs -c '/usr/bin/hadoop fs -mkdir /user/yarn && /usr/bin/hadoop fs -chown yarn:yarn /user/yarn'
-su -s /bin/bash hdfs -c '/usr/bin/hadoop fs -chmod -R 1777 /tmp/hadoop-yarn'
-su -s /bin/bash hdfs -c '/usr/bin/hadoop fs -mkdir /tmp/hadoop-yarn/staging && /usr/bin/hadoop fs -chown mapred:mapred /tmp/hadoop-yarn/staging && /usr/bin/hadoop fs -chmod -R 1777 /tmp/hadoop-yarn/staging'
-su -s /bin/bash hdfs -c '/usr/bin/hadoop fs -mkdir /tmp/hadoop-yarn/staging/history && /usr/bin/hadoop fs -chown mapred:mapred /tmp/hadoop-yarn/staging/history && /usr/bin/hadoop fs -chmod -R 1777 /tmp/hadoop-yarn/staging/history'
-
-# init hive directories
-su -s /bin/bash hdfs -c '/usr/bin/hadoop fs -mkdir /user/hive/warehouse'
-su -s /bin/bash hdfs -c '/usr/bin/hadoop fs -chmod 1777 /user/hive/warehouse'
-su -s /bin/bash hdfs -c '/usr/bin/hadoop fs -chown hive /user/hive/warehouse'
-
-# stop hdfs
-killall java
-
-# setup metastore
-mysql_install_db
-
-/usr/bin/mysqld_safe &
-sleep 10s
-
-cd /usr/hdp/current/hive-metastore/scripts/metastore/upgrade/mysql/
-echo "GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' WITH GRANT OPTION; FLUSH PRIVILEGES;" | mysql
-echo "CREATE DATABASE metastore; USE metastore; SOURCE hive-schema-1.2.1000.mysql.sql;" | mysql
-/usr/bin/mysqladmin -u root password 'root'
-
-killall mysqld
-sleep 10s
-mkdir /var/log/mysql/
-chown mysql:mysql /var/log/mysql/
-
-# Additional libs
-cp -av /usr/hdp/current/hadoop-client/lib/native/Linux-amd64-64/* /usr/lib64/