diff --git a/docker/test/upgrade/run.sh b/docker/test/upgrade/run.sh
index 8fd514eaa937..82a88272df9b 100644
--- a/docker/test/upgrade/run.sh
+++ b/docker/test/upgrade/run.sh
@@ -76,7 +76,8 @@ sudo mv /etc/clickhouse-server/config.d/keeper_port.xml.tmp /etc/clickhouse-serv
# But we still need default disk because some tables loaded only into it
sudo cat /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml \
| sed "s|s3|s3default|" \
- > /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml.tmp mv /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml.tmp /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml
+ > /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml.tmp
+mv /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml.tmp /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml
sudo chown clickhouse /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml
sudo chgrp clickhouse /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml
diff --git a/docs/en/engines/table-engines/integrations/odbc.md b/docs/en/engines/table-engines/integrations/odbc.md
index e29e56c10b24..71085feb6260 100644
--- a/docs/en/engines/table-engines/integrations/odbc.md
+++ b/docs/en/engines/table-engines/integrations/odbc.md
@@ -54,7 +54,7 @@ $ sudo mysql
``` sql
mysql> CREATE USER 'clickhouse'@'localhost' IDENTIFIED BY 'clickhouse';
-mysql> GRANT ALL PRIVILEGES ON *.* TO 'clickhouse'@'clickhouse' WITH GRANT OPTION;
+mysql> GRANT ALL PRIVILEGES ON *.* TO 'clickhouse'@'localhost' WITH GRANT OPTION;
```
Then configure the connection in `/etc/odbc.ini`.
@@ -66,7 +66,7 @@ DRIVER = /usr/local/lib/libmyodbc5w.so
SERVER = 127.0.0.1
PORT = 3306
DATABASE = test
-USERNAME = clickhouse
+USER = clickhouse
PASSWORD = clickhouse
```
@@ -83,6 +83,9 @@ $ isql -v mysqlconn
Table in MySQL:
``` text
+mysql> CREATE DATABASE test;
+Query OK, 1 row affected (0,01 sec)
+
mysql> CREATE TABLE `test`.`test` (
-> `int_id` INT NOT NULL AUTO_INCREMENT,
-> `int_nullable` INT NULL DEFAULT NULL,
@@ -91,10 +94,10 @@ mysql> CREATE TABLE `test`.`test` (
-> PRIMARY KEY (`int_id`));
Query OK, 0 rows affected (0,09 sec)
-mysql> insert into test (`int_id`, `float`) VALUES (1,2);
+mysql> insert into test.test (`int_id`, `float`) VALUES (1,2);
Query OK, 1 row affected (0,00 sec)
-mysql> select * from test;
+mysql> select * from test.test;
+--------+--------------+-------+----------------+
| int_id | int_nullable | float | float_nullable |
+--------+--------------+-------+----------------+
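The corrected snippets above feed into the engine usage shown later on the same page, where the table is queried through the ODBC engine. A minimal sketch of that step, assuming the `mysqlconn` DSN and the `test`.`test` table defined above (the engine signature is `ODBC(connection_settings, external_database, external_table)`):

```sql
-- Sketch only: relies on the mysqlconn DSN from /etc/odbc.ini above.
CREATE TABLE odbc_t
(
    `int_id` Int32,
    `float_nullable` Nullable(Float32)
)
ENGINE = ODBC('DSN=mysqlconn', 'test', 'test');

SELECT * FROM odbc_t;
```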
diff --git a/programs/local/LocalServer.cpp b/programs/local/LocalServer.cpp
index caca7cfb50d2..2afcd48dafb1 100644
--- a/programs/local/LocalServer.cpp
+++ b/programs/local/LocalServer.cpp
@@ -8,7 +8,9 @@
#include
#include
#include
+#include <Databases/DatabaseFilesystem.h>
#include
+#include <Databases/DatabasesOverlay.h>
#include
#include
#include
@@ -50,6 +52,8 @@
#include
#include
+#include "config.h"
+
#if defined(FUZZING_MODE)
#include
#endif
@@ -170,6 +174,13 @@ static DatabasePtr createMemoryDatabaseIfNotExists(ContextPtr context, const Str
return system_database;
}
+static DatabasePtr createClickHouseLocalDatabaseOverlay(const String & name_, ContextPtr context_)
+{
+ auto databaseCombiner = std::make_shared<DatabasesOverlay>(name_, context_);
+ databaseCombiner->registerNextDatabase(std::make_shared<DatabaseFilesystem>(name_, "", context_));
+ databaseCombiner->registerNextDatabase(std::make_shared<DatabaseMemory>(name_, context_));
+ return databaseCombiner;
+}
/// If path is specified and not empty, will try to setup server environment and load existing metadata
void LocalServer::tryInitPath()
@@ -669,7 +680,7 @@ void LocalServer::processConfig()
* if such tables will not be dropped, clickhouse-server will not be able to load them due to security reasons.
*/
std::string default_database = config().getString("default_database", "_local");
- DatabaseCatalog::instance().attachDatabase(default_database, std::make_shared<DatabaseMemory>(default_database, global_context));
+ DatabaseCatalog::instance().attachDatabase(default_database, createClickHouseLocalDatabaseOverlay(default_database, global_context));
global_context->setCurrentDatabase(default_database);
applyCmdOptions(global_context);
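With the overlay attached as the default database, clickhouse-local resolves an unknown table name through `DatabaseFilesystem` first and falls back to `DatabaseMemory` for everything else. A hedged sketch of the resulting behavior (the file name is hypothetical):

```sql
-- In clickhouse-local after this change:
SELECT * FROM "data.csv";                  -- served by DatabaseFilesystem via the file() table function
CREATE TABLE t (x Int32) ENGINE = Memory;  -- lands in DatabaseMemory, the first non-read-only database
```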
diff --git a/programs/server/dashboard.html b/programs/server/dashboard.html
index 951b7db3aa38..ea818e05e31c 100644
--- a/programs/server/dashboard.html
+++ b/programs/server/dashboard.html
@@ -12,7 +12,8 @@
--chart-background: white;
--shadow-color: rgba(0, 0, 0, 0.25);
--input-shadow-color: rgba(0, 255, 0, 1);
- --error-color: white;
+ --error-color: red;
+ --auth-error-color: white;
--legend-background: rgba(255, 255, 255, 0.75);
--title-color: #666;
--text-color: black;
@@ -258,7 +259,7 @@
width: 60%;
padding: .5rem;
- color: var(--error-color);
+ color: var(--auth-error-color);
display: flex;
flex-flow: row nowrap;
@@ -906,9 +907,9 @@
if (error) {
const errorMatch = errorMessages.find(({ regex }) => error.match(regex))
- if (errorMatch) {
- const match = error.match(errorMatch.regex)
- const message = errorMatch.messageFunc(match)
+ const match = error.match(errorMatch.regex)
+ const message = errorMatch.messageFunc(match)
+ if (message) {
const authError = new Error(message)
throw authError
}
@@ -930,7 +931,7 @@
let title_div = chart.querySelector('.title');
if (error) {
error_div.firstChild.data = error;
- title_div.style.display = 'none';
+ title_div.style.display = 'none';
error_div.style.display = 'block';
return false;
} else {
@@ -1019,13 +1020,15 @@
firstLoad = false;
} else {
enableReloadButton();
+ enableRunButton();
}
- if (!results.includes(false)) {
+ if (results.includes(true)) {
const element = document.querySelector('.inputs');
element.classList.remove('unconnected');
const add = document.querySelector('#add');
add.style.display = 'block';
- } else {
+ }
+ else {
const charts = document.querySelector('#charts')
charts.style.height = '0px';
}
@@ -1050,6 +1053,13 @@
reloadButton.classList.add('disabled')
}
+function disableRunButton() {
+ const runButton = document.getElementById('run')
+ runButton.value = 'Reloading...'
+ runButton.disabled = true
+ runButton.classList.add('disabled')
+}
+
function enableReloadButton() {
const reloadButton = document.getElementById('reload')
reloadButton.value = 'Reload'
@@ -1057,11 +1067,19 @@
reloadButton.classList.remove('disabled')
}
+function enableRunButton() {
+ const runButton = document.getElementById('run')
+ runButton.value = 'Ok'
+ runButton.disabled = false
+ runButton.classList.remove('disabled')
+}
+
function reloadAll() {
updateParams();
drawAll();
saveState();
- disableReloadButton()
+ disableReloadButton();
+ disableRunButton();
}
document.getElementById('params').onsubmit = function(event) {
diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp
index cf1c2ed87793..9befe7342984 100644
--- a/src/Client/ClientBase.cpp
+++ b/src/Client/ClientBase.cpp
@@ -2297,7 +2297,9 @@ void ClientBase::runInteractive()
catch (const ErrnoException & e)
{
if (e.getErrno() != EEXIST)
- throw;
+ {
+ std::cerr << getCurrentExceptionMessage(false) << '\n';
+ }
}
}
diff --git a/src/Client/ConnectionParameters.cpp b/src/Client/ConnectionParameters.cpp
index c47d217d4328..f6630a06939b 100644
--- a/src/Client/ConnectionParameters.cpp
+++ b/src/Client/ConnectionParameters.cpp
@@ -60,7 +60,15 @@ ConnectionParameters::ConnectionParameters(const Poco::Util::AbstractConfigurati
quota_key = config.getString("quota_key", "");
/// By default compression is disabled if address looks like localhost.
- compression = config.getBool("compression", !isLocalAddress(DNSResolver::instance().resolveHost(host)))
+
+ /// Avoid DNS request if the host is "localhost".
+ /// If ClickHouse is run under QEMU-user with a binary for a different architecture,
+ /// and all the shared libraries listed as startup dependencies are available, but the runtime dependencies of glibc are not,
+ /// glibc cannot open its "plugins" for DNS resolution, and name resolution does not work.
+ /// At the same time, I want clickhouse-local to always work, regardless.
+ /// TODO: get rid of glibc, or replace getaddrinfo with c-ares.
+
+ compression = config.getBool("compression", host != "localhost" && !isLocalAddress(DNSResolver::instance().resolveHost(host)))
? Protocol::Compression::Enable : Protocol::Compression::Disable;
timeouts = ConnectionTimeouts(
diff --git a/src/Common/OptimizedRegularExpression.cpp b/src/Common/OptimizedRegularExpression.cpp
index 5df9ce760984..c542945c78db 100644
--- a/src/Common/OptimizedRegularExpression.cpp
+++ b/src/Common/OptimizedRegularExpression.cpp
@@ -540,7 +540,7 @@ bool OptimizedRegularExpressionImpl<thread_safe>::match(const char * subject, si
}
}
- return re2->Match(StringPieceType(subject, subject_size), 0, subject_size, RegexType::UNANCHORED, nullptr, 0);
+ return re2->Match({subject, subject_size}, 0, subject_size, RegexType::UNANCHORED, nullptr, 0);
}
}
@@ -585,9 +585,9 @@ bool OptimizedRegularExpressionImpl<thread_safe>::match(const char * subject, si
return false;
}
- StringPieceType piece;
+ std::string_view piece;
- if (!RegexType::PartialMatch(StringPieceType(subject, subject_size), *re2, &piece))
+ if (!RegexType::PartialMatch({subject, subject_size}, *re2, &piece))
return false;
else
{
@@ -652,10 +652,10 @@ unsigned OptimizedRegularExpressionImpl<thread_safe>::match(const char * subject
return 0;
}
- DB::PODArrayWithStackMemory<StringPieceType, 128> pieces(limit);
+ DB::PODArrayWithStackMemory<std::string_view, 128> pieces(limit);
if (!re2->Match(
- StringPieceType(subject, subject_size),
+ {subject, subject_size},
0,
subject_size,
RegexType::UNANCHORED,
diff --git a/src/Common/OptimizedRegularExpression.h b/src/Common/OptimizedRegularExpression.h
index f6b59f0a4656..51f1bc200e4e 100644
--- a/src/Common/OptimizedRegularExpression.h
+++ b/src/Common/OptimizedRegularExpression.h
@@ -52,7 +52,6 @@ class OptimizedRegularExpressionImpl
using MatchVec = std::vector<Match>;
using RegexType = std::conditional_t<thread_safe, re2::RE2, re2_st::RE2>;
- using StringPieceType = std::conditional_t<thread_safe, re2::StringPiece, re2_st::StringPiece>;
OptimizedRegularExpressionImpl(const std::string & regexp_, int options = 0); /// NOLINT
/// StringSearcher store pointers to required_substring, it must be updated on move.
diff --git a/src/Common/SensitiveDataMasker.cpp b/src/Common/SensitiveDataMasker.cpp
index 34db78d00fb9..b59a47588225 100644
--- a/src/Common/SensitiveDataMasker.cpp
+++ b/src/Common/SensitiveDataMasker.cpp
@@ -5,7 +5,6 @@
#include
#include
-#include <re2/stringpiece.h>
#include
@@ -44,7 +43,7 @@ class SensitiveDataMasker::MaskingRule
const std::string regexp_string;
const RE2 regexp;
- const re2::StringPiece replacement;
+ const std::string_view replacement;
#ifndef NDEBUG
mutable std::atomic<std::uint64_t> matches_count = 0;
diff --git a/src/Common/checkStackSize.cpp b/src/Common/checkStackSize.cpp
index 67d163938b44..8847d37df3a9 100644
--- a/src/Common/checkStackSize.cpp
+++ b/src/Common/checkStackSize.cpp
@@ -27,7 +27,7 @@ static thread_local size_t max_stack_size = 0;
* @param out_address - if not nullptr, here the address of the stack will be written.
* @return stack size
*/
-size_t getStackSize(void ** out_address)
+static size_t getStackSize(void ** out_address)
{
using namespace DB;
@@ -54,7 +54,15 @@ size_t getStackSize(void ** out_address)
throwFromErrno("Cannot pthread_attr_get_np", ErrorCodes::CANNOT_PTHREAD_ATTR);
# else
if (0 != pthread_getattr_np(pthread_self(), &attr))
- throwFromErrno("Cannot pthread_getattr_np", ErrorCodes::CANNOT_PTHREAD_ATTR);
+ {
+ if (errno == ENOENT)
+ {
+ /// Most likely procfs is not mounted.
+ return 0;
+ }
+ else
+ throwFromErrno("Cannot pthread_getattr_np", ErrorCodes::CANNOT_PTHREAD_ATTR);
+ }
# endif
SCOPE_EXIT({ pthread_attr_destroy(&attr); });
@@ -83,6 +91,10 @@ __attribute__((__weak__)) void checkStackSize()
if (!stack_address)
max_stack_size = getStackSize(&stack_address);
+ /// The check is impossible.
+ if (!max_stack_size)
+ return;
+
const void * frame_address = __builtin_frame_address(0);
uintptr_t int_frame_address = reinterpret_cast<uintptr_t>(frame_address);
uintptr_t int_stack_address = reinterpret_cast<uintptr_t>(stack_address);
diff --git a/src/Common/parseGlobs.cpp b/src/Common/parseGlobs.cpp
index 07cce38afff5..33747f6eece1 100644
--- a/src/Common/parseGlobs.cpp
+++ b/src/Common/parseGlobs.cpp
@@ -3,7 +3,6 @@
#include
#include
#include
-#include <re2/stringpiece.h>
#include
#include
#include
@@ -33,14 +32,14 @@ std::string makeRegexpPatternFromGlobs(const std::string & initial_str_with_glob
std::string escaped_with_globs = buf_for_escaping.str();
static const re2::RE2 enum_or_range(R"({([\d]+\.\.[\d]+|[^{}*,]+,[^{}*]*[^{}*,])})"); /// regexp for {expr1,expr2,expr3} or {M..N}, where M and N - non-negative integers, expr's should be without "{", "}", "*" and ","
- re2::StringPiece input(escaped_with_globs);
- re2::StringPiece matched;
+ std::string_view input(escaped_with_globs);
+ std::string_view matched;
std::ostringstream oss_for_replacing; // STYLE_CHECK_ALLOW_STD_STRING_STREAM
oss_for_replacing.exceptions(std::ios::failbit);
size_t current_index = 0;
while (RE2::FindAndConsume(&input, enum_or_range, &matched))
{
- std::string buffer{matched};
+ std::string buffer(matched);
oss_for_replacing << escaped_with_globs.substr(current_index, matched.data() - escaped_with_globs.data() - current_index - 1) << '(';
if (buffer.find(',') == std::string::npos)
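The `enum_or_range` regexp above rewrites brace expansions inside glob paths into alternation groups, which is what makes globbed paths work in file-like table functions. An illustrative sketch (file names are hypothetical; `{1..3}` becomes `(1|2|3)` and `{a,b}` becomes `(a|b)` in the generated pattern):

```sql
SELECT * FROM file('part_{1..3}.csv');  -- matches part_1.csv, part_2.csv, part_3.csv
SELECT * FROM file('data_{a,b}.tsv');   -- matches data_a.tsv and data_b.tsv
```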
diff --git a/src/Databases/DatabaseFactory.cpp b/src/Databases/DatabaseFactory.cpp
index e1c8afa52c0a..9d90c61bb41b 100644
--- a/src/Databases/DatabaseFactory.cpp
+++ b/src/Databases/DatabaseFactory.cpp
@@ -3,6 +3,7 @@
#include
#include
#include
+#include <Databases/DatabaseFilesystem.h>
#include
#include
#include
@@ -47,6 +48,14 @@
#include
#endif
+#if USE_AWS_S3
+#include <Databases/DatabaseS3.h>
+#endif
+
+#if USE_HDFS
+#include <Databases/DatabaseHDFS.h>
+#endif
+
namespace fs = std::filesystem;
namespace DB
@@ -131,13 +140,13 @@ DatabasePtr DatabaseFactory::getImpl(const ASTCreateQuery & create, const String
static const std::unordered_set<std::string_view> database_engines{"Ordinary", "Atomic", "Memory",
"Dictionary", "Lazy", "Replicated", "MySQL", "MaterializeMySQL", "MaterializedMySQL",
- "PostgreSQL", "MaterializedPostgreSQL", "SQLite"};
+ "PostgreSQL", "MaterializedPostgreSQL", "SQLite", "Filesystem", "S3", "HDFS"};
if (!database_engines.contains(engine_name))
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Database engine name `{}` does not exist", engine_name);
static const std::unordered_set<std::string_view> engines_with_arguments{"MySQL", "MaterializeMySQL", "MaterializedMySQL",
- "Lazy", "Replicated", "PostgreSQL", "MaterializedPostgreSQL", "SQLite"};
+ "Lazy", "Replicated", "PostgreSQL", "MaterializedPostgreSQL", "SQLite", "Filesystem", "S3", "HDFS"};
static const std::unordered_set<std::string_view> engines_with_table_overrides{"MaterializeMySQL", "MaterializedMySQL", "MaterializedPostgreSQL"};
bool engine_may_have_arguments = engines_with_arguments.contains(engine_name);
@@ -432,6 +441,63 @@ DatabasePtr DatabaseFactory::getImpl(const ASTCreateQuery & create, const String
}
#endif
+ else if (engine_name == "Filesystem")
+ {
+ const ASTFunction * engine = engine_define->engine;
+
+ /// If init_path is empty, then the current path will be used
+ std::string init_path;
+
+ if (engine->arguments && !engine->arguments->children.empty())
+ {
+ if (engine->arguments->children.size() != 1)
+ throw Exception(ErrorCodes::BAD_ARGUMENTS, "Filesystem database requires at most 1 argument: filesystem_path");
+
+ const auto & arguments = engine->arguments->children;
+ init_path = safeGetLiteralValue(arguments[0], engine_name);
+ }
+
+ return std::make_shared<DatabaseFilesystem>(database_name, init_path, context);
+ }
+
+#if USE_AWS_S3
+ else if (engine_name == "S3")
+ {
+ const ASTFunction * engine = engine_define->engine;
+
+ DatabaseS3::Configuration config;
+
+ if (engine->arguments && !engine->arguments->children.empty())
+ {
+ ASTs & engine_args = engine->arguments->children;
+ config = DatabaseS3::parseArguments(engine_args, context);
+ }
+
+ return std::make_shared<DatabaseS3>(database_name, config, context);
+ }
+#endif
+
+#if USE_HDFS
+ else if (engine_name == "HDFS")
+ {
+ const ASTFunction * engine = engine_define->engine;
+
+ /// If source_url is empty, then table name must contain full url
+ std::string source_url;
+
+ if (engine->arguments && !engine->arguments->children.empty())
+ {
+ if (engine->arguments->children.size() != 1)
+ throw Exception(ErrorCodes::BAD_ARGUMENTS, "HDFS database requires at most 1 argument: source_url");
+
+ const auto & arguments = engine->arguments->children;
+ source_url = safeGetLiteralValue(arguments[0], engine_name);
+ }
+
+ return std::make_shared<DatabaseHDFS>(database_name, source_url, context);
+ }
+#endif
+
throw Exception(ErrorCodes::UNKNOWN_DATABASE_ENGINE, "Unknown database engine: {}", engine_name);
}
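The three branches above make the new engines reachable from DDL. A sketch of the accepted forms (paths, URLs, and credentials are placeholders):

```sql
CREATE DATABASE fs      ENGINE = Filesystem('/var/lib/clickhouse/user_files');
CREATE DATABASE s3_db   ENGINE = S3('http://localhost:11111/test', 'access_key', 'secret_key');
CREATE DATABASE hdfs_db ENGINE = HDFS('hdfs://namenode:9000');
```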
diff --git a/src/Databases/DatabaseFilesystem.cpp b/src/Databases/DatabaseFilesystem.cpp
new file mode 100644
index 000000000000..7eaf474eea03
--- /dev/null
+++ b/src/Databases/DatabaseFilesystem.cpp
@@ -0,0 +1,245 @@
+#include <Databases/DatabaseFilesystem.h>
+
+#include <IO/Operators.h>
+#include <IO/WriteBufferFromString.h>
+#include <Interpreters/Context.h>
+#include <Parsers/ASTCreateQuery.h>
+#include <Parsers/ASTFunction.h>
+#include <Parsers/ASTLiteral.h>
+#include <Parsers/ParserCreateQuery.h>
+#include <Parsers/parseQuery.h>
+#include <Storages/IStorage.h>
+#include <TableFunctions/TableFunctionFactory.h>
+#include <Common/filesystemHelpers.h>
+
+#include <filesystem>
+
+namespace fs = std::filesystem;
+
+namespace DB
+{
+
+namespace ErrorCodes
+{
+ extern const int LOGICAL_ERROR;
+ extern const int UNKNOWN_TABLE;
+ extern const int PATH_ACCESS_DENIED;
+ extern const int BAD_ARGUMENTS;
+ extern const int FILE_DOESNT_EXIST;
+}
+
+DatabaseFilesystem::DatabaseFilesystem(const String & name_, const String & path_, ContextPtr context_)
+ : IDatabase(name_), WithContext(context_->getGlobalContext()), path(path_), log(&Poco::Logger::get("DatabaseFileSystem(" + name_ + ")"))
+{
+ bool is_local = context_->getApplicationType() == Context::ApplicationType::LOCAL;
+ fs::path user_files_path = is_local ? "" : fs::canonical(getContext()->getUserFilesPath());
+
+ if (fs::path(path).is_relative())
+ {
+ path = user_files_path / path;
+ }
+ else if (!is_local && !pathStartsWith(fs::path(path), user_files_path))
+ {
+ throw Exception(ErrorCodes::BAD_ARGUMENTS,
+ "Path must be inside user-files path: {}", user_files_path.string());
+ }
+
+ path = fs::absolute(path).lexically_normal();
+ if (!fs::exists(path))
+ throw Exception(ErrorCodes::BAD_ARGUMENTS, "Path does not exist: {}", path);
+}
+
+std::string DatabaseFilesystem::getTablePath(const std::string & table_name) const
+{
+ fs::path table_path = fs::path(path) / table_name;
+ return table_path.lexically_normal().string();
+}
+
+void DatabaseFilesystem::addTable(const std::string & table_name, StoragePtr table_storage) const
+{
+ std::lock_guard lock(mutex);
+ auto [_, inserted] = loaded_tables.emplace(table_name, table_storage);
+ if (!inserted)
+ throw Exception(
+ ErrorCodes::LOGICAL_ERROR,
+ "Table with name `{}` already exists in database `{}` (engine {})",
+ table_name, getDatabaseName(), getEngineName());
+}
+
+bool DatabaseFilesystem::checkTableFilePath(const std::string & table_path, ContextPtr context_, bool throw_on_error) const
+{
+ /// If run in Local mode, no need for path checking.
+ bool check_path = context_->getApplicationType() != Context::ApplicationType::LOCAL;
+ const auto & user_files_path = context_->getUserFilesPath();
+
+ /// Check access for file before checking its existence.
+ if (check_path && !fileOrSymlinkPathStartsWith(table_path, user_files_path))
+ {
+ if (throw_on_error)
+ throw Exception(ErrorCodes::PATH_ACCESS_DENIED, "File is not inside {}", user_files_path);
+ else
+ return false;
+ }
+
+ /// Check if the corresponding file exists.
+ if (!fs::exists(table_path))
+ {
+ if (throw_on_error)
+ throw Exception(ErrorCodes::FILE_DOESNT_EXIST, "File does not exist: {}", table_path);
+ else
+ return false;
+ }
+
+ if (!fs::is_regular_file(table_path))
+ {
+ if (throw_on_error)
+ throw Exception(ErrorCodes::FILE_DOESNT_EXIST,
+ "File is directory, but expected a file: {}", table_path);
+ else
+ return false;
+ }
+
+ return true;
+}
+
+StoragePtr DatabaseFilesystem::tryGetTableFromCache(const std::string & name) const
+{
+ StoragePtr table = nullptr;
+ {
+ std::lock_guard lock(mutex);
+ auto it = loaded_tables.find(name);
+ if (it != loaded_tables.end())
+ table = it->second;
+ }
+
+ /// Invalidate cache if file no longer exists.
+ if (table && !fs::exists(getTablePath(name)))
+ {
+ std::lock_guard lock(mutex);
+ loaded_tables.erase(name);
+ return nullptr;
+ }
+
+ return table;
+}
+
+bool DatabaseFilesystem::isTableExist(const String & name, ContextPtr context_) const
+{
+ if (tryGetTableFromCache(name))
+ return true;
+
+ return checkTableFilePath(getTablePath(name), context_, /* throw_on_error */false);
+}
+
+StoragePtr DatabaseFilesystem::getTableImpl(const String & name, ContextPtr context_) const
+{
+ /// Check if table exists in loaded tables map.
+ if (auto table = tryGetTableFromCache(name))
+ return table;
+
+ auto table_path = getTablePath(name);
+ checkTableFilePath(table_path, context_, /* throw_on_error */true);
+
+ /// If the file exists, create a new table using TableFunctionFile and return it.
+ auto args = makeASTFunction("file", std::make_shared(table_path));
+
+ auto table_function = TableFunctionFactory::instance().get(args, context_);
+ if (!table_function)
+ return nullptr;
+
+ /// TableFunctionFile throws exceptions if the table cannot be created.
+ auto table_storage = table_function->execute(args, context_, name);
+ if (table_storage)
+ addTable(name, table_storage);
+
+ return table_storage;
+}
+
+StoragePtr DatabaseFilesystem::getTable(const String & name, ContextPtr context_) const
+{
+ /// getTableImpl can throw exceptions, do not catch them to show correct error to user.
+ if (auto storage = getTableImpl(name, context_))
+ return storage;
+
+ throw Exception(ErrorCodes::UNKNOWN_TABLE, "Table {}.{} doesn't exist",
+ backQuoteIfNeed(getDatabaseName()), backQuoteIfNeed(name));
+}
+
+StoragePtr DatabaseFilesystem::tryGetTable(const String & name, ContextPtr context_) const
+{
+ try
+ {
+ return getTableImpl(name, context_);
+ }
+ catch (const Exception & e)
+ {
+ /// Ignore exceptions thrown by TableFunctionFile, which indicate that there is no table
+ /// see tests/02722_database_filesystem.sh for more details.
+ if (e.code() == ErrorCodes::FILE_DOESNT_EXIST)
+ {
+ return nullptr;
+ }
+ throw;
+ }
+}
+
+bool DatabaseFilesystem::empty() const
+{
+ std::lock_guard lock(mutex);
+ return loaded_tables.empty();
+}
+
+ASTPtr DatabaseFilesystem::getCreateDatabaseQuery() const
+{
+ const auto & settings = getContext()->getSettingsRef();
+ const String query = fmt::format("CREATE DATABASE {} ENGINE = Filesystem('{}')", backQuoteIfNeed(getDatabaseName()), path);
+
+ ParserCreateQuery parser;
+ ASTPtr ast = parseQuery(parser, query.data(), query.data() + query.size(), "", 0, settings.max_parser_depth);
+
+ if (const auto database_comment = getDatabaseComment(); !database_comment.empty())
+ {
+ auto & ast_create_query = ast->as();
+ ast_create_query.set(ast_create_query.comment, std::make_shared(database_comment));
+ }
+
+ return ast;
+}
+
+void DatabaseFilesystem::shutdown()
+{
+ Tables tables_snapshot;
+ {
+ std::lock_guard lock(mutex);
+ tables_snapshot = loaded_tables;
+ }
+
+ for (const auto & kv : tables_snapshot)
+ {
+ auto table_id = kv.second->getStorageID();
+ kv.second->flushAndShutdown();
+ }
+
+ std::lock_guard lock(mutex);
+ loaded_tables.clear();
+}
+
+/**
+ * Returns an empty vector because the database is read-only and no tables can be backed up
+ */
+std::vector<std::pair<ASTPtr, StoragePtr>> DatabaseFilesystem::getTablesForBackup(const FilterByNameFunction &, const ContextPtr &) const
+{
+ return {};
+}
+
+/**
+ * Returns an empty iterator because the database does not have its own tables,
+ * but only caches them for quick access.
+ */
+DatabaseTablesIteratorPtr DatabaseFilesystem::getTablesIterator(ContextPtr, const FilterByNameFunction &) const
+{
+ return std::make_unique<DatabaseTablesSnapshotIterator>(Tables{}, getDatabaseName());
+}
+
+}
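To illustrate the semantics implemented above: tables are materialized lazily from files via `TableFunctionFile`, cached in `loaded_tables`, and evicted by `tryGetTableFromCache` when the backing file disappears. A hedged usage sketch (path and file name are hypothetical; see tests/02722_database_filesystem.sh referenced above for the authoritative cases):

```sql
CREATE DATABASE fs ENGINE = Filesystem('/var/lib/clickhouse/user_files');
SELECT count() FROM fs."sample.csv";  -- implicitly creates and caches a file() table
-- Deleting sample.csv afterwards invalidates the cache entry, and the table is gone.
```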
diff --git a/src/Databases/DatabaseFilesystem.h b/src/Databases/DatabaseFilesystem.h
new file mode 100644
index 000000000000..7fe620401dc6
--- /dev/null
+++ b/src/Databases/DatabaseFilesystem.h
@@ -0,0 +1,67 @@
+#pragma once
+
+#include <mutex>
+#include <Databases/IDatabase.h>
+#include <Parsers/IAST_fwd.h>
+#include <Storages/IStorage_fwd.h>
+#include <base/types.h>
+
+namespace DB
+{
+
+class Context;
+
+/**
+ * DatabaseFilesystem allows interacting with files stored on the local filesystem.
+ * It uses TableFunctionFile to implicitly load a file when a user requests a table,
+ * and provides read-only access to the data in the file.
+ * Tables are cached inside the database for quick access.
+ *
+ * Used in clickhouse-local to access local files.
+ * For clickhouse-server, access is allowed only to files inside the user_files directory.
+ */
+class DatabaseFilesystem : public IDatabase, protected WithContext
+{
+public:
+ DatabaseFilesystem(const String & name, const String & path, ContextPtr context);
+
+ String getEngineName() const override { return "Filesystem"; }
+
+ bool isTableExist(const String & name, ContextPtr context) const override;
+
+ StoragePtr getTable(const String & name, ContextPtr context) const override;
+
+ StoragePtr tryGetTable(const String & name, ContextPtr context) const override;
+
+ bool shouldBeEmptyOnDetach() const override { return false; } /// Contains only temporary tables.
+
+ bool empty() const override;
+
+ bool isReadOnly() const override { return true; }
+
+ ASTPtr getCreateDatabaseQuery() const override;
+
+ void shutdown() override;
+
+ std::vector> getTablesForBackup(const FilterByNameFunction &, const ContextPtr &) const override;
+
+ DatabaseTablesIteratorPtr getTablesIterator(ContextPtr, const FilterByNameFunction &) const override;
+
+protected:
+ StoragePtr getTableImpl(const String & name, ContextPtr context) const;
+
+ StoragePtr tryGetTableFromCache(const std::string & name) const;
+
+ std::string getTablePath(const std::string & table_name) const;
+
+ void addTable(const std::string & table_name, StoragePtr table_storage) const;
+
+ bool checkTableFilePath(const std::string & table_path, ContextPtr context_, bool throw_on_error) const;
+
+private:
+ String path;
+ mutable Tables loaded_tables TSA_GUARDED_BY(mutex);
+ Poco::Logger * log;
+};
+
+}
diff --git a/src/Databases/DatabaseHDFS.cpp b/src/Databases/DatabaseHDFS.cpp
new file mode 100644
index 000000000000..1a0145b90152
--- /dev/null
+++ b/src/Databases/DatabaseHDFS.cpp
@@ -0,0 +1,234 @@
+#include "config.h"
+
+#if USE_HDFS
+
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+
+#include
+
+namespace fs = std::filesystem;
+
+namespace DB
+{
+
+namespace ErrorCodes
+{
+ extern const int LOGICAL_ERROR;
+ extern const int UNKNOWN_TABLE;
+ extern const int BAD_ARGUMENTS;
+ extern const int FILE_DOESNT_EXIST;
+ extern const int UNACCEPTABLE_URL;
+ extern const int ACCESS_DENIED;
+ extern const int DATABASE_ACCESS_DENIED;
+ extern const int HDFS_ERROR;
+ extern const int CANNOT_EXTRACT_TABLE_STRUCTURE;
+}
+
+static constexpr std::string_view HDFS_HOST_REGEXP = "^hdfs://[^/]*";
+
+
+DatabaseHDFS::DatabaseHDFS(const String & name_, const String & source_url, ContextPtr context_)
+ : IDatabase(name_)
+ , WithContext(context_->getGlobalContext())
+ , source(source_url)
+ , log(&Poco::Logger::get("DatabaseHDFS(" + name_ + ")"))
+{
+ if (!source.empty())
+ {
+ if (!re2::RE2::FullMatch(source, std::string(HDFS_HOST_REGEXP)))
+ throw Exception(ErrorCodes::BAD_ARGUMENTS, "Bad hdfs host: {}. "
+ "It should have structure 'hdfs://:'", source);
+
+ context_->getGlobalContext()->getRemoteHostFilter().checkURL(Poco::URI(source));
+ }
+}
+
+void DatabaseHDFS::addTable(const std::string & table_name, StoragePtr table_storage) const
+{
+ std::lock_guard lock(mutex);
+ auto [_, inserted] = loaded_tables.emplace(table_name, table_storage);
+ if (!inserted)
+ throw Exception(
+ ErrorCodes::LOGICAL_ERROR,
+ "Table with name `{}` already exists in database `{}` (engine {})",
+ table_name, getDatabaseName(), getEngineName());
+}
+
+std::string DatabaseHDFS::getTablePath(const std::string & table_name) const
+{
+ if (table_name.starts_with("hdfs://"))
+ return table_name;
+
+ if (source.empty())
+ throw Exception(ErrorCodes::BAD_ARGUMENTS, "Bad hdfs url: {}. "
+ "It should have structure 'hdfs://:/path'", table_name);
+
+ return fs::path(source) / table_name;
+}
+
+bool DatabaseHDFS::checkUrl(const std::string & url, ContextPtr context_, bool throw_on_error) const
+{
+ try
+ {
+ checkHDFSURL(url);
+ context_->getGlobalContext()->getRemoteHostFilter().checkURL(Poco::URI(url));
+ }
+ catch (...)
+ {
+ if (throw_on_error)
+ throw;
+ return false;
+ }
+
+ return true;
+}
+
+bool DatabaseHDFS::isTableExist(const String & name, ContextPtr context_) const
+{
+ std::lock_guard lock(mutex);
+ if (loaded_tables.find(name) != loaded_tables.end())
+ return true;
+
+ return checkUrl(name, context_, false);
+}
+
+StoragePtr DatabaseHDFS::getTableImpl(const String & name, ContextPtr context_) const
+{
+ /// Check if the table exists in the loaded tables map.
+ {
+ std::lock_guard lock(mutex);
+ auto it = loaded_tables.find(name);
+ if (it != loaded_tables.end())
+ return it->second;
+ }
+
+ auto url = getTablePath(name);
+
+ checkUrl(url, context_, true);
+
+ auto args = makeASTFunction("hdfs", std::make_shared(url));
+
+ auto table_function = TableFunctionFactory::instance().get(args, context_);
+ if (!table_function)
+ return nullptr;
+
+ /// TableFunctionHDFS throws exceptions if the table cannot be created.
+ auto table_storage = table_function->execute(args, context_, name);
+ if (table_storage)
+ addTable(name, table_storage);
+
+ return table_storage;
+}
+
+StoragePtr DatabaseHDFS::getTable(const String & name, ContextPtr context_) const
+{
+ /// Rethrow all exceptions from TableFunctionHDFS to show correct error to user.
+ if (auto storage = getTableImpl(name, context_))
+ return storage;
+
+ throw Exception(ErrorCodes::UNKNOWN_TABLE, "Table {}.{} doesn't exist",
+ backQuoteIfNeed(getDatabaseName()), backQuoteIfNeed(name));
+}
+
+StoragePtr DatabaseHDFS::tryGetTable(const String & name, ContextPtr context_) const
+{
+ try
+ {
+ return getTableImpl(name, context_);
+ }
+ catch (const Exception & e)
+ {
+ // Ignore exceptions thrown by TableFunctionHDFS, which indicate that there is no table
+ if (e.code() == ErrorCodes::BAD_ARGUMENTS
+ || e.code() == ErrorCodes::ACCESS_DENIED
+ || e.code() == ErrorCodes::DATABASE_ACCESS_DENIED
+ || e.code() == ErrorCodes::FILE_DOESNT_EXIST
+ || e.code() == ErrorCodes::UNACCEPTABLE_URL
+ || e.code() == ErrorCodes::HDFS_ERROR
+ || e.code() == ErrorCodes::CANNOT_EXTRACT_TABLE_STRUCTURE)
+ {
+ return nullptr;
+ }
+ throw;
+ }
+ catch (const Poco::URISyntaxException &)
+ {
+ return nullptr;
+ }
+}
+
+bool DatabaseHDFS::empty() const
+{
+ std::lock_guard lock(mutex);
+ return loaded_tables.empty();
+}
+
+ASTPtr DatabaseHDFS::getCreateDatabaseQuery() const
+{
+ const auto & settings = getContext()->getSettingsRef();
+ ParserCreateQuery parser;
+
+ const String query = fmt::format("CREATE DATABASE {} ENGINE = HDFS('{}')", backQuoteIfNeed(getDatabaseName()), source);
+ ASTPtr ast = parseQuery(parser, query.data(), query.data() + query.size(), "", 0, settings.max_parser_depth);
+
+ if (const auto database_comment = getDatabaseComment(); !database_comment.empty())
+ {
+ auto & ast_create_query = ast->as();
+ ast_create_query.set(ast_create_query.comment, std::make_shared(database_comment));
+ }
+
+ return ast;
+}
+
+void DatabaseHDFS::shutdown()
+{
+ Tables tables_snapshot;
+ {
+ std::lock_guard lock(mutex);
+ tables_snapshot = loaded_tables;
+ }
+
+ for (const auto & kv : tables_snapshot)
+ {
+ auto table_id = kv.second->getStorageID();
+ kv.second->flushAndShutdown();
+ }
+
+ std::lock_guard lock(mutex);
+ loaded_tables.clear();
+}
+
+/**
+ * Returns an empty vector because the database is read-only and no tables can be backed up
+ */
+std::vector<std::pair<ASTPtr, StoragePtr>> DatabaseHDFS::getTablesForBackup(const FilterByNameFunction &, const ContextPtr &) const
+{
+ return {};
+}
+
+/**
+ * Returns an empty iterator because the database does not have its own tables,
+ * but only caches them for quick access.
+ */
+DatabaseTablesIteratorPtr DatabaseHDFS::getTablesIterator(ContextPtr, const FilterByNameFunction &) const
+{
+ return std::make_unique<DatabaseTablesSnapshotIterator>(Tables{}, getDatabaseName());
+}
+
+} // DB
+
+#endif
diff --git a/src/Databases/DatabaseHDFS.h b/src/Databases/DatabaseHDFS.h
new file mode 100644
index 000000000000..957b2080135c
--- /dev/null
+++ b/src/Databases/DatabaseHDFS.h
@@ -0,0 +1,68 @@
+#pragma once
+
+#include "config.h"
+
+#if USE_HDFS
+
+#include <mutex>
+#include <Databases/IDatabase.h>
+#include <Parsers/IAST_fwd.h>
+#include <Storages/IStorage_fwd.h>
+#include <base/types.h>
+
+namespace DB
+{
+
+class Context;
+
+/**
+ * DatabaseHDFS allows interacting with files stored in HDFS.
+ * It uses TableFunctionHDFS to implicitly load a file when a user requests a table,
+ * and provides read-only access to the data in the file.
+ * Tables are cached inside the database for quick access.
+ */
+class DatabaseHDFS : public IDatabase, protected WithContext
+{
+public:
+ DatabaseHDFS(const String & name, const String & source_url, ContextPtr context);
+
+ String getEngineName() const override { return "S3"; }
+
+ bool isTableExist(const String & name, ContextPtr context) const override;
+
+ StoragePtr getTable(const String & name, ContextPtr context) const override;
+
+ StoragePtr tryGetTable(const String & name, ContextPtr context) const override;
+
+ bool shouldBeEmptyOnDetach() const override { return false; } /// Contains only temporary tables.
+
+ bool empty() const override;
+
+ bool isReadOnly() const override { return true; }
+
+ ASTPtr getCreateDatabaseQuery() const override;
+
+ void shutdown() override;
+
+ std::vector<std::pair<ASTPtr, StoragePtr>> getTablesForBackup(const FilterByNameFunction &, const ContextPtr &) const override;
+ DatabaseTablesIteratorPtr getTablesIterator(ContextPtr, const FilterByNameFunction &) const override;
+
+protected:
+ StoragePtr getTableImpl(const String & name, ContextPtr context) const;
+
+ void addTable(const std::string & table_name, StoragePtr table_storage) const;
+
+ bool checkUrl(const std::string & url, ContextPtr context_, bool throw_on_error) const;
+
+ std::string getTablePath(const std::string & table_name) const;
+
+private:
+ const String source;
+
+ mutable Tables loaded_tables TSA_GUARDED_BY(mutex);
+ Poco::Logger * log;
+};
+
+}
+
+#endif
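As with the Filesystem engine, table names are resolved lazily: `getTablePath` treats a name either as a path relative to `source` or as a full `hdfs://` URL. A hedged sketch (host, port, and file names are placeholders):

```sql
CREATE DATABASE hdfs_db ENGINE = HDFS('hdfs://namenode:9000');
SELECT * FROM hdfs_db."some_dir/file.tsv";                -- resolved as hdfs://namenode:9000/some_dir/file.tsv
SELECT * FROM hdfs_db."hdfs://namenode:9000/other.tsv";   -- a full URL is also accepted
```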
diff --git a/src/Databases/DatabaseS3.cpp b/src/Databases/DatabaseS3.cpp
new file mode 100644
index 000000000000..11655f5f100f
--- /dev/null
+++ b/src/Databases/DatabaseS3.cpp
@@ -0,0 +1,312 @@
+#include "config.h"
+
+#if USE_AWS_S3
+
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+
+namespace fs = std::filesystem;
+
+namespace DB
+{
+
+static const std::unordered_set<std::string_view> optional_configuration_keys = {
+ "url",
+ "access_key_id",
+ "secret_access_key",
+ "no_sign_request"
+};
+
+namespace ErrorCodes
+{
+ extern const int LOGICAL_ERROR;
+ extern const int UNKNOWN_TABLE;
+ extern const int BAD_ARGUMENTS;
+ extern const int FILE_DOESNT_EXIST;
+ extern const int UNACCEPTABLE_URL;
+ extern const int S3_ERROR;
+
+ extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
+}
+
+DatabaseS3::DatabaseS3(const String & name_, const Configuration & config_, ContextPtr context_)
+ : IDatabase(name_)
+ , WithContext(context_->getGlobalContext())
+ , config(config_)
+ , log(&Poco::Logger::get("DatabaseS3(" + name_ + ")"))
+{
+}
+
+void DatabaseS3::addTable(const std::string & table_name, StoragePtr table_storage) const
+{
+ std::lock_guard lock(mutex);
+ auto [_, inserted] = loaded_tables.emplace(table_name, table_storage);
+ if (!inserted)
+ throw Exception(
+ ErrorCodes::LOGICAL_ERROR,
+ "Table with name `{}` already exists in database `{}` (engine {})",
+ table_name, getDatabaseName(), getEngineName());
+}
+
+std::string DatabaseS3::getFullUrl(const std::string & name) const
+{
+ if (!config.url_prefix.empty())
+ return fs::path(config.url_prefix) / name;
+
+ return name;
+}
+
+bool DatabaseS3::checkUrl(const std::string & url, ContextPtr context_, bool throw_on_error) const
+{
+ try
+ {
+ S3::URI uri(url);
+ context_->getGlobalContext()->getRemoteHostFilter().checkURL(uri.uri);
+ }
+ catch (...)
+ {
+ if (throw_on_error)
+ throw;
+ return false;
+ }
+ return true;
+}
+
+bool DatabaseS3::isTableExist(const String & name, ContextPtr context_) const
+{
+ std::lock_guard lock(mutex);
+ if (loaded_tables.find(name) != loaded_tables.end())
+ return true;
+
+ return checkUrl(getFullUrl(name), context_, false);
+}
+
+StoragePtr DatabaseS3::getTableImpl(const String & name, ContextPtr context_) const
+{
+ /// Check if the table exists in the loaded tables map.
+ {
+ std::lock_guard lock(mutex);
+ auto it = loaded_tables.find(name);
+ if (it != loaded_tables.end())
+ return it->second;
+ }
+
+ auto url = getFullUrl(name);
+ checkUrl(url, context_, /* throw_on_error */true);
+
+ auto function = std::make_shared<ASTFunction>();
+ function->name = "s3";
+ function->arguments = std::make_shared<ASTExpressionList>();
+ function->children.push_back(function->arguments);
+
+ function->arguments->children.push_back(std::make_shared<ASTLiteral>(url));
+ if (config.no_sign_request)
+ {
+ function->arguments->children.push_back(std::make_shared("NOSIGN"));
+ }
+ else if (config.access_key_id.has_value() && config.secret_access_key.has_value())
+ {
+ function->arguments->children.push_back(std::make_shared<ASTLiteral>(config.access_key_id.value()));
+ function->arguments->children.push_back(std::make_shared<ASTLiteral>(config.secret_access_key.value()));
+ }
+
+ auto table_function = TableFunctionFactory::instance().get(function, context_);
+ if (!table_function)
+ return nullptr;
+
+ /// TableFunctionS3 throws exceptions if the table cannot be created.
+ auto table_storage = table_function->execute(function, context_, name);
+ if (table_storage)
+ addTable(name, table_storage);
+
+ return table_storage;
+}
+
+StoragePtr DatabaseS3::getTable(const String & name, ContextPtr context_) const
+{
+ /// Rethrow all exceptions from TableFunctionS3 to show correct error to user.
+ if (auto storage = getTableImpl(name, context_))
+ return storage;
+
+ throw Exception(ErrorCodes::UNKNOWN_TABLE, "Table {}.{} doesn't exist",
+ backQuoteIfNeed(getDatabaseName()), backQuoteIfNeed(name));
+}
+
+StoragePtr DatabaseS3::tryGetTable(const String & name, ContextPtr context_) const
+{
+ try
+ {
+ return getTableImpl(name, context_);
+ }
+ catch (const Exception & e)
+ {
+ /// Ignore exceptions thrown by TableFunctionS3, which indicate that there is no table.
+ if (e.code() == ErrorCodes::BAD_ARGUMENTS
+ || e.code() == ErrorCodes::S3_ERROR
+ || e.code() == ErrorCodes::FILE_DOESNT_EXIST
+ || e.code() == ErrorCodes::UNACCEPTABLE_URL)
+ {
+ return nullptr;
+ }
+ throw;
+ }
+ catch (const Poco::URISyntaxException &)
+ {
+ return nullptr;
+ }
+}
+
+bool DatabaseS3::empty() const
+{
+ std::lock_guard lock(mutex);
+ return loaded_tables.empty();
+}
+
+ASTPtr DatabaseS3::getCreateDatabaseQuery() const
+{
+ const auto & settings = getContext()->getSettingsRef();
+ ParserCreateQuery parser;
+
+ std::string creation_args;
+ creation_args += fmt::format("'{}'", config.url_prefix);
+ if (config.no_sign_request)
+ creation_args += ", 'NOSIGN'";
+ else if (config.access_key_id.has_value() && config.secret_access_key.has_value())
+ creation_args += fmt::format(", '{}', '{}'", config.access_key_id.value(), config.secret_access_key.value());
+
+ const String query = fmt::format("CREATE DATABASE {} ENGINE = S3({})", backQuoteIfNeed(getDatabaseName()), creation_args);
+ ASTPtr ast = parseQuery(parser, query.data(), query.data() + query.size(), "", 0, settings.max_parser_depth);
+
+ if (const auto database_comment = getDatabaseComment(); !database_comment.empty())
+ {
+ auto & ast_create_query = ast->as();
+ ast_create_query.set(ast_create_query.comment, std::make_shared(database_comment));
+ }
+
+ return ast;
+}
+
+void DatabaseS3::shutdown()
+{
+ Tables tables_snapshot;
+ {
+ std::lock_guard lock(mutex);
+ tables_snapshot = loaded_tables;
+ }
+
+ for (const auto & kv : tables_snapshot)
+ {
+ auto table_id = kv.second->getStorageID();
+ kv.second->flushAndShutdown();
+ }
+
+ std::lock_guard lock(mutex);
+ loaded_tables.clear();
+}
+
+DatabaseS3::Configuration DatabaseS3::parseArguments(ASTs engine_args, ContextPtr context_)
+{
+ Configuration result;
+
+ if (auto named_collection = tryGetNamedCollectionWithOverrides(engine_args, context_))
+ {
+ auto & collection = *named_collection;
+
+ validateNamedCollection(collection, {}, optional_configuration_keys);
+
+ result.url_prefix = collection.getOrDefault("url", "");
+ result.no_sign_request = collection.getOrDefault("no_sign_request", false);
+
+ auto key_id = collection.getOrDefault("access_key_id", "");
+ auto secret_key = collection.getOrDefault("secret_access_key", "");
+
+ if (!key_id.empty())
+ result.access_key_id = key_id;
+
+ if (!secret_key.empty())
+ result.secret_access_key = secret_key;
+ }
+ else
+ {
+ const std::string supported_signature =
+ " - S3()\n"
+ " - S3('url')\n"
+ " - S3('url', 'NOSIGN')\n"
+ " - S3('url', 'access_key_id', 'secret_access_key')\n";
+ const auto error_message =
+ fmt::format("Engine DatabaseS3 must have the following arguments signature\n{}", supported_signature);
+
+ for (auto & arg : engine_args)
+ arg = evaluateConstantExpressionOrIdentifierAsLiteral(arg, context_);
+
+ if (engine_args.size() > 3)
+ throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, error_message.c_str());
+
+ if (engine_args.empty())
+ return result;
+
+ result.url_prefix = checkAndGetLiteralArgument<String>(engine_args[0], "url");
+
+ // url, NOSIGN
+ if (engine_args.size() == 2)
+ {
+ auto second_arg = checkAndGetLiteralArgument<String>(engine_args[1], "NOSIGN");
+ if (boost::iequals(second_arg, "NOSIGN"))
+ result.no_sign_request = true;
+ else
+ throw Exception(ErrorCodes::BAD_ARGUMENTS, error_message.c_str());
+ }
+
+ // url, access_key_id, secret_access_key
+ if (engine_args.size() == 3)
+ {
+ auto key_id = checkAndGetLiteralArgument<String>(engine_args[1], "access_key_id");
+ auto secret_key = checkAndGetLiteralArgument<String>(engine_args[2], "secret_access_key");
+
+ if (key_id.empty() || secret_key.empty() || boost::iequals(key_id, "NOSIGN"))
+ throw Exception(ErrorCodes::BAD_ARGUMENTS, error_message.c_str());
+
+ result.access_key_id = key_id;
+ result.secret_access_key = secret_key;
+ }
+ }
+
+ return result;
+}
+
+/**
+ * Returns an empty vector because the database is read-only and no tables can be backed up
+ */
+std::vector<std::pair<ASTPtr, StoragePtr>> DatabaseS3::getTablesForBackup(const FilterByNameFunction &, const ContextPtr &) const
+{
+ return {};
+}
+
+/**
+ * Returns an empty iterator because the database does not have its own tables,
+ * but only caches them for quick access.
+ */
+DatabaseTablesIteratorPtr DatabaseS3::getTablesIterator(ContextPtr, const FilterByNameFunction &) const
+{
+ return std::make_unique<DatabaseTablesSnapshotIterator>(Tables{}, getDatabaseName());
+}
+
+}
+
+#endif
diff --git a/src/Databases/DatabaseS3.h b/src/Databases/DatabaseS3.h
new file mode 100644
index 000000000000..8297ae4e02d6
--- /dev/null
+++ b/src/Databases/DatabaseS3.h
@@ -0,0 +1,81 @@
+#pragma once
+
+#include "config.h"
+
+#if USE_AWS_S3
+
+#include <mutex>
+#include <Databases/IDatabase.h>
+#include <Parsers/IAST_fwd.h>
+#include <Storages/IStorage_fwd.h>
+#include <base/types.h>
+
+namespace DB
+{
+
+class Context;
+
+/**
+ * DatabaseS3 provides access to data stored in S3.
+ * It uses TableFunctionS3 to implicitly load a file when a user requests a table,
+ * and provides read-only access to the data in the file.
+ * Tables are cached inside the database for quick access.
+ */
+class DatabaseS3 : public IDatabase, protected WithContext
+{
+public:
+ struct Configuration
+ {
+ std::string url_prefix;
+
+ bool no_sign_request = false;
+
+ std::optional<std::string> access_key_id;
+ std::optional<std::string> secret_access_key;
+ };
+
+ DatabaseS3(const String & name, const Configuration& config, ContextPtr context);
+
+ String getEngineName() const override { return "S3"; }
+
+ bool isTableExist(const String & name, ContextPtr context) const override;
+
+ StoragePtr getTable(const String & name, ContextPtr context) const override;
+
+ StoragePtr tryGetTable(const String & name, ContextPtr context) const override;
+
+ // Contains only temporary tables
+ bool shouldBeEmptyOnDetach() const override { return false; }
+
+ bool empty() const override;
+
+ bool isReadOnly() const override { return true; }
+
+ ASTPtr getCreateDatabaseQuery() const override;
+
+ void shutdown() override;
+
+ std::vector<std::pair<ASTPtr, StoragePtr>> getTablesForBackup(const FilterByNameFunction &, const ContextPtr &) const override;
+ DatabaseTablesIteratorPtr getTablesIterator(ContextPtr, const FilterByNameFunction &) const override;
+
+ static Configuration parseArguments(ASTs engine_args, ContextPtr context);
+
+protected:
+ StoragePtr getTableImpl(const String & name, ContextPtr context) const;
+
+ void addTable(const std::string & table_name, StoragePtr table_storage) const;
+
+ bool checkUrl(const std::string & url, ContextPtr context_, bool throw_on_error) const;
+
+ std::string getFullUrl(const std::string & name) const;
+
+private:
+ const Configuration config;
+
+ mutable Tables loaded_tables TSA_GUARDED_BY(mutex);
+ Poco::Logger * log;
+};
+
+}
+
+#endif
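`DatabaseS3::parseArguments` above accepts exactly the signatures listed in its error message. A sketch of each form (endpoints and credentials are placeholders):

```sql
CREATE DATABASE s3_a ENGINE = S3;                                                      -- empty prefix: table names must be full URLs
CREATE DATABASE s3_b ENGINE = S3('http://localhost:11111/test');
CREATE DATABASE s3_c ENGINE = S3('http://localhost:11111/test', 'NOSIGN');
CREATE DATABASE s3_d ENGINE = S3('http://localhost:11111/test', 'key_id', 'secret_key');
```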
diff --git a/src/Databases/DatabasesOverlay.cpp b/src/Databases/DatabasesOverlay.cpp
new file mode 100644
index 000000000000..b44a97980725
--- /dev/null
+++ b/src/Databases/DatabasesOverlay.cpp
@@ -0,0 +1,266 @@
+#include <Databases/DatabasesOverlay.h>
+
+#include <Common/typeid_cast.h>
+#include <Interpreters/Context.h>
+#include <Interpreters/InterpreterCreateQuery.h>
+#include <Parsers/ASTCreateQuery.h>
+
+#include <Storages/IStorage.h>
+
+namespace DB
+{
+
+namespace ErrorCodes
+{
+ extern const int LOGICAL_ERROR;
+ extern const int CANNOT_GET_CREATE_TABLE_QUERY;
+}
+
+DatabasesOverlay::DatabasesOverlay(const String & name_, ContextPtr context_)
+ : IDatabase(name_), WithContext(context_->getGlobalContext()), log(&Poco::Logger::get("DatabaseOverlay(" + name_ + ")"))
+{
+}
+
+DatabasesOverlay & DatabasesOverlay::registerNextDatabase(DatabasePtr database)
+{
+ databases.push_back(std::move(database));
+ return *this;
+}
+
+bool DatabasesOverlay::isTableExist(const String & table_name, ContextPtr context_) const
+{
+ for (const auto & db : databases)
+ {
+ if (db->isTableExist(table_name, context_))
+ return true;
+ }
+ return false;
+}
+
+StoragePtr DatabasesOverlay::tryGetTable(const String & table_name, ContextPtr context_) const
+{
+ StoragePtr result = nullptr;
+ for (const auto & db : databases)
+ {
+ result = db->tryGetTable(table_name, context_);
+ if (result)
+ break;
+ }
+ return result;
+}
+
+void DatabasesOverlay::createTable(ContextPtr context_, const String & table_name, const StoragePtr & table, const ASTPtr & query)
+{
+ for (auto & db : databases)
+ {
+ if (!db->isReadOnly())
+ {
+ db->createTable(context_, table_name, table, query);
+ return;
+ }
+ }
+ throw Exception(
+ ErrorCodes::LOGICAL_ERROR,
+ "There is no databases for CREATE TABLE `{}` query in database `{}` (engine {})",
+ table_name,
+ getDatabaseName(),
+ getEngineName());
+}
+
+void DatabasesOverlay::dropTable(ContextPtr context_, const String & table_name, bool sync)
+{
+ for (auto & db : databases)
+ {
+ if (db->isTableExist(table_name, context_))
+ {
+ db->dropTable(context_, table_name, sync);
+ return;
+ }
+ }
+ throw Exception(
+ ErrorCodes::LOGICAL_ERROR,
+ "There is no databases for DROP TABLE `{}` query in database `{}` (engine {})",
+ table_name,
+ getDatabaseName(),
+ getEngineName());
+}
+
+void DatabasesOverlay::attachTable(
+ ContextPtr context_, const String & table_name, const StoragePtr & table, const String & relative_table_path)
+{
+ for (auto & db : databases)
+ {
+ try
+ {
+ db->attachTable(context_, table_name, table, relative_table_path);
+ return;
+ }
+ catch (...)
+ {
+ continue;
+ }
+ }
+ throw Exception(
+ ErrorCodes::LOGICAL_ERROR,
+ "There is no databases for ATTACH TABLE `{}` query in database `{}` (engine {})",
+ table_name,
+ getDatabaseName(),
+ getEngineName());
+}
+
+StoragePtr DatabasesOverlay::detachTable(ContextPtr context_, const String & table_name)
+{
+ for (auto & db : databases)
+ {
+ if (db->isTableExist(table_name, context_))
+ return db->detachTable(context_, table_name);
+ }
+ throw Exception(
+ ErrorCodes::LOGICAL_ERROR,
+ "There is no databases for DETACH TABLE `{}` query in database `{}` (engine {})",
+ table_name,
+ getDatabaseName(),
+ getEngineName());
+}
+
+ASTPtr DatabasesOverlay::getCreateTableQueryImpl(const String & name, ContextPtr context_, bool throw_on_error) const
+{
+ ASTPtr result = nullptr;
+ for (const auto & db : databases)
+ {
+ result = db->tryGetCreateTableQuery(name, context_);
+ if (result)
+ break;
+ }
+ if (!result && throw_on_error)
+ throw Exception(
+ ErrorCodes::CANNOT_GET_CREATE_TABLE_QUERY,
+ "There is no metadata of table `{}` in database `{}` (engine {})",
+ name,
+ getDatabaseName(),
+ getEngineName());
+ return result;
+}
+
+/*
+ * DatabasesOverlay cannot be constructed by a "CREATE DATABASE" query, as it is not a traditional ClickHouse database.
+ * To use DatabasesOverlay, it must be constructed programmatically in code.
+ */
+ASTPtr DatabasesOverlay::getCreateDatabaseQuery() const
+{
+ return std::make_shared<ASTCreateQuery>();
+}
+
+String DatabasesOverlay::getTableDataPath(const String & table_name) const
+{
+ String result;
+ for (const auto & db : databases)
+ {
+ result = db->getTableDataPath(table_name);
+ if (!result.empty())
+ break;
+ }
+ return result;
+}
+
+String DatabasesOverlay::getTableDataPath(const ASTCreateQuery & query) const
+{
+ String result;
+ for (const auto & db : databases)
+ {
+ result = db->getTableDataPath(query);
+ if (!result.empty())
+ break;
+ }
+ return result;
+}
+
+UUID DatabasesOverlay::tryGetTableUUID(const String & table_name) const
+{
+ UUID result = UUIDHelpers::Nil;
+ for (const auto & db : databases)
+ {
+ result = db->tryGetTableUUID(table_name);
+ if (result != UUIDHelpers::Nil)
+ break;
+ }
+ return result;
+}
+
+void DatabasesOverlay::drop(ContextPtr context_)
+{
+ for (auto & db : databases)
+ db->drop(context_);
+}
+
+void DatabasesOverlay::alterTable(ContextPtr local_context, const StorageID & table_id, const StorageInMemoryMetadata & metadata)
+{
+ for (auto & db : databases)
+ {
+ if (!db->isReadOnly() && db->isTableExist(table_id.table_name, local_context))
+ {
+ db->alterTable(local_context, table_id, metadata);
+ return;
+ }
+ }
+ throw Exception(
+ ErrorCodes::LOGICAL_ERROR,
+ "There is no databases for ALTER TABLE `{}` query in database `{}` (engine {})",
+ table_id.table_name,
+ getDatabaseName(),
+ getEngineName());
+}
+
+std::vector<std::pair<ASTPtr, StoragePtr>>
+DatabasesOverlay::getTablesForBackup(const FilterByNameFunction & filter, const ContextPtr & local_context) const
+{
+ std::vector<std::pair<ASTPtr, StoragePtr>> result;
+ for (const auto & db : databases)
+ {
+ auto db_backup = db->getTablesForBackup(filter, local_context);
+ result.insert(result.end(), std::make_move_iterator(db_backup.begin()), std::make_move_iterator(db_backup.end()));
+ }
+ return result;
+}
+
+void DatabasesOverlay::createTableRestoredFromBackup(
+ const ASTPtr & create_table_query,
+ ContextMutablePtr local_context,
+ std::shared_ptr<IRestoreCoordination> /*restore_coordination*/,
+ UInt64 /*timeout_ms*/)
+{
+ /// Creates a table by executing a "CREATE TABLE" query.
+ InterpreterCreateQuery interpreter{create_table_query, local_context};
+ interpreter.setInternal(true);
+ interpreter.execute();
+}
+
+bool DatabasesOverlay::empty() const
+{
+ for (const auto & db : databases)
+ {
+ if (!db->empty())
+ return false;
+ }
+ return true;
+}
+
+void DatabasesOverlay::shutdown()
+{
+ for (auto & db : databases)
+ db->shutdown();
+}
+
+DatabaseTablesIteratorPtr DatabasesOverlay::getTablesIterator(ContextPtr context_, const FilterByNameFunction & filter_by_table_name) const
+{
+ Tables tables;
+ for (const auto & db : databases)
+ {
+ for (auto table_it = db->getTablesIterator(context_, filter_by_table_name); table_it->isValid(); table_it->next())
+ tables.insert({table_it->name(), table_it->table()});
+ }
+ return std::make_unique<DatabaseTablesSnapshotIterator>(std::move(tables), getDatabaseName());
+}
+
+}
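Because `tryGetTable` walks `databases` in registration order, the first database that answers wins, and `createTable` picks the first non-read-only one. A hedged sketch of the consequence in clickhouse-local, where `DatabaseFilesystem` is registered before `DatabaseMemory` (names are hypothetical):

```sql
CREATE TABLE "data.csv" (x Int32) ENGINE = Memory;  -- created in DatabaseMemory, the first writable database
SELECT * FROM "data.csv";  -- if a real file data.csv exists, DatabaseFilesystem answers first and shadows the Memory table
```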
diff --git a/src/Databases/DatabasesOverlay.h b/src/Databases/DatabasesOverlay.h
new file mode 100644
index 000000000000..0f31bbd6a47f
--- /dev/null
+++ b/src/Databases/DatabasesOverlay.h
@@ -0,0 +1,66 @@
+#pragma once
+
+#include <Storages/IStorage_fwd.h>
+#include <Databases/IDatabase.h>
+
+namespace DB
+{
+
+/**
+ * Implements the IDatabase interface and combines multiple other databases.
+ * It searches for tables in each database in order until one is found, and delegates operations to the appropriate database.
+ * Useful for combining databases.
+ *
+ * Used in clickhouse-local to combine DatabaseFilesystem and DatabaseMemory.
+ */
+class DatabasesOverlay : public IDatabase, protected WithContext
+{
+public:
+ DatabasesOverlay(const String & name_, ContextPtr context_);
+
+ /// Not thread-safe. Use only as factory to initialize database
+ DatabasesOverlay & registerNextDatabase(DatabasePtr database);
+
+ String getEngineName() const override { return "Overlay"; }
+
+public:
+ bool isTableExist(const String & table_name, ContextPtr context) const override;
+
+ StoragePtr tryGetTable(const String & table_name, ContextPtr context) const override;
+
+ void createTable(ContextPtr context, const String & table_name, const StoragePtr & table, const ASTPtr & query) override;
+
+ void dropTable(ContextPtr context, const String & table_name, bool sync) override;
+
+ void attachTable(ContextPtr context, const String & table_name, const StoragePtr & table, const String & relative_table_path) override;
+
+ StoragePtr detachTable(ContextPtr context, const String & table_name) override;
+
+ ASTPtr getCreateTableQueryImpl(const String & name, ContextPtr context, bool throw_on_error) const override;
+ ASTPtr getCreateDatabaseQuery() const override;
+
+ String getTableDataPath(const String & table_name) const override;
+ String getTableDataPath(const ASTCreateQuery & query) const override;
+
+ UUID tryGetTableUUID(const String & table_name) const override;
+
+ void drop(ContextPtr context) override;
+
+ void alterTable(ContextPtr local_context, const StorageID & table_id, const StorageInMemoryMetadata & metadata) override;
+
+ std::vector<std::pair<ASTPtr, StoragePtr>> getTablesForBackup(const FilterByNameFunction & filter, const ContextPtr & local_context) const override;
+
+ void createTableRestoredFromBackup(const ASTPtr & create_table_query, ContextMutablePtr local_context, std::shared_ptr<IRestoreCoordination> restore_coordination, UInt64 timeout_ms) override;
+
+ DatabaseTablesIteratorPtr getTablesIterator(ContextPtr context, const FilterByNameFunction & filter_by_table_name) const override;
+
+ bool empty() const override;
+
+ void shutdown() override;
+
+protected:
+ std::vector<DatabasePtr> databases;
+ Poco::Logger * log;
+};
+
+}
diff --git a/src/Databases/IDatabase.h b/src/Databases/IDatabase.h
index aadae3e2491a..a9577dfc84a0 100644
--- a/src/Databases/IDatabase.h
+++ b/src/Databases/IDatabase.h
@@ -170,7 +170,7 @@ class IDatabase : public std::enable_shared_from_this<IDatabase>
/// Get the table for work. Return nullptr if there is no table.
virtual StoragePtr tryGetTable(const String & name, ContextPtr context) const = 0;
- StoragePtr getTable(const String & name, ContextPtr context) const;
+ virtual StoragePtr getTable(const String & name, ContextPtr context) const;
virtual UUID tryGetTableUUID(const String & /*table_name*/) const { return UUIDHelpers::Nil; }
@@ -183,6 +183,8 @@ class IDatabase : public std::enable_shared_from_this
/// Is the database empty.
virtual bool empty() const = 0;
+ virtual bool isReadOnly() const { return false; }
+
/// Add the table to the database. Record its presence in the metadata.
virtual void createTable(
ContextPtr /*context*/,
diff --git a/src/Dictionaries/RegExpTreeDictionary.cpp b/src/Dictionaries/RegExpTreeDictionary.cpp
index 074b179c48e7..a9846dc06e93 100644
--- a/src/Dictionaries/RegExpTreeDictionary.cpp
+++ b/src/Dictionaries/RegExpTreeDictionary.cpp
@@ -30,8 +30,6 @@
#include
#include
-#include <re2_st/stringpiece.h>
-
#include "config.h"
#if USE_VECTORSCAN
@@ -469,10 +467,9 @@ class RegExpTreeDictionary::AttributeCollector : public std::unordered_map
 std::pair<String, bool> processBackRefs(const String & data, const re2_st::RE2 & searcher, const std::vector<StringPiece> & pieces)
{
- re2_st::StringPiece haystack(data.data(), data.size());
- re2_st::StringPiece matches[10];
+ std::string_view matches[10];
String result;
- searcher.Match(haystack, 0, data.size(), re2_st::RE2::Anchor::UNANCHORED, matches, 10);
+ searcher.Match({data.data(), data.size()}, 0, data.size(), re2_st::RE2::Anchor::UNANCHORED, matches, 10);
/// if the pattern is a single '$1' but fails to match, we would use the default value.
if (pieces.size() == 1 && pieces[0].ref_num >= 0 && pieces[0].ref_num < 10 && matches[pieces[0].ref_num].empty())
return std::make_pair(result, true);
diff --git a/src/Functions/ReplaceRegexpImpl.h b/src/Functions/ReplaceRegexpImpl.h
index 7e3af1e62d93..9395489dac3e 100644
--- a/src/Functions/ReplaceRegexpImpl.h
+++ b/src/Functions/ReplaceRegexpImpl.h
@@ -99,8 +99,8 @@ struct ReplaceRegexpImpl
int num_captures,
const Instructions & instructions)
{
- re2_st::StringPiece haystack(haystack_data, haystack_length);
- re2_st::StringPiece matches[max_captures];
+ std::string_view haystack(haystack_data, haystack_length);
+ std::string_view matches[max_captures];
size_t copy_pos = 0;
size_t match_pos = 0;
diff --git a/src/Functions/checkHyperscanRegexp.cpp b/src/Functions/checkHyperscanRegexp.cpp
index 441e35cc5db7..0dd4c5740c3e 100644
--- a/src/Functions/checkHyperscanRegexp.cpp
+++ b/src/Functions/checkHyperscanRegexp.cpp
@@ -45,8 +45,8 @@ bool isLargerThanFifty(std::string_view str)
/// Check for sub-patterns of the form x{n} or x{n,} can be expensive. Ignore spaces before/after n and m.
bool SlowWithHyperscanChecker::isSlowOneRepeat(std::string_view regexp)
{
- re2_st::StringPiece haystack(regexp.data(), regexp.size());
- re2_st::StringPiece matches[2];
+ std::string_view haystack(regexp.data(), regexp.size());
+ std::string_view matches[2];
size_t start_pos = 0;
while (start_pos < haystack.size())
{
@@ -67,8 +67,8 @@ bool SlowWithHyperscanChecker::isSlowOneRepeat(std::string_view regexp)
/// Check if sub-patterns of the form x{n,m} can be expensive. Ignore spaces before/after n and m.
bool SlowWithHyperscanChecker::isSlowTwoRepeats(std::string_view regexp)
{
- re2_st::StringPiece haystack(regexp.data(), regexp.size());
- re2_st::StringPiece matches[3];
+ std::string_view haystack(regexp.data(), regexp.size());
+ std::string_view matches[3];
size_t start_pos = 0;
while (start_pos < haystack.size())
{
diff --git a/src/Functions/extractAllGroups.h b/src/Functions/extractAllGroups.h
index faee25aa0ab3..3a7987be93e5 100644
--- a/src/Functions/extractAllGroups.h
+++ b/src/Functions/extractAllGroups.h
@@ -94,7 +94,6 @@ class FunctionExtractAllGroups : public IFunction
if (needle.empty())
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Length of 'needle' argument must be greater than 0.");
- using StringPiece = typename Regexps::Regexp::StringPieceType;
const Regexps::Regexp holder = Regexps::createRegexp(needle);
const auto & regexp = holder.getRE2();
@@ -111,7 +110,7 @@ class FunctionExtractAllGroups : public IFunction
groups_count, std::to_string(MAX_GROUPS_COUNT - 1));
// Including 0-group, which is the whole regexp.
- PODArrayWithStackMemory<StringPiece, 128> matched_groups(groups_count + 1);
+ PODArrayWithStackMemory<std::string_view, 128> matched_groups(groups_count + 1);
ColumnArray::ColumnOffsets::MutablePtr root_offsets_col = ColumnArray::ColumnOffsets::create();
ColumnArray::ColumnOffsets::MutablePtr nested_offsets_col = ColumnArray::ColumnOffsets::create();
@@ -160,7 +159,7 @@ class FunctionExtractAllGroups : public IFunction
/// Additional limit to fail fast on supposedly incorrect usage.
const auto max_matches_per_row = context->getSettingsRef().regexp_max_matches_per_row;
- PODArray<StringPiece> all_matches;
+ PODArray<std::string_view> all_matches;
/// Number of times RE matched on each row of haystack column.
PODArray<size_t> number_of_matches_per_row;
diff --git a/src/Functions/extractGroups.cpp b/src/Functions/extractGroups.cpp
index 6744edda9229..21b8a68fc105 100644
--- a/src/Functions/extractGroups.cpp
+++ b/src/Functions/extractGroups.cpp
@@ -75,7 +75,7 @@ class FunctionExtractGroups : public IFunction
throw Exception(ErrorCodes::BAD_ARGUMENTS, "There are no groups in regexp: {}", needle);
// Including 0-group, which is the whole regexp.
- PODArrayWithStackMemory<re2_st::StringPiece, 128> matched_groups(groups_count + 1);
+ PODArrayWithStackMemory<std::string_view, 128> matched_groups(groups_count + 1);
ColumnArray::ColumnOffsets::MutablePtr offsets_col = ColumnArray::ColumnOffsets::create();
ColumnString::MutablePtr data_col = ColumnString::create();
@@ -89,7 +89,7 @@ class FunctionExtractGroups : public IFunction
{
std::string_view current_row = column_haystack->getDataAt(i).toView();
- if (re2->Match(re2_st::StringPiece(current_row.data(), current_row.size()),
+ if (re2->Match({current_row.data(), current_row.size()},
0, current_row.size(), re2_st::RE2::UNANCHORED, matched_groups.data(),
static_cast<int>(matched_groups.size())))
{
diff --git a/src/Interpreters/DatabaseCatalog.cpp b/src/Interpreters/DatabaseCatalog.cpp
index 4cb2f6e3b3d8..e0b6348ed3c7 100644
--- a/src/Interpreters/DatabaseCatalog.cpp
+++ b/src/Interpreters/DatabaseCatalog.cpp
@@ -356,7 +356,8 @@ DatabaseAndTable DatabaseCatalog::getTableImpl(
auto table = database->tryGetTable(table_id.table_name, context_);
if (!table && exception)
- exception->emplace(Exception(ErrorCodes::UNKNOWN_TABLE, "Table {} doesn't exist", table_id.getNameForLogs()));
+ exception->emplace(Exception(ErrorCodes::UNKNOWN_TABLE, "Table {} doesn't exist", table_id.getNameForLogs()));
+
if (!table)
database = nullptr;
diff --git a/src/Processors/Formats/Impl/RegexpRowInputFormat.h b/src/Processors/Formats/Impl/RegexpRowInputFormat.h
index d6696ffe7519..2469774aaf9f 100644
--- a/src/Processors/Formats/Impl/RegexpRowInputFormat.h
+++ b/src/Processors/Formats/Impl/RegexpRowInputFormat.h
@@ -1,7 +1,6 @@
#pragma once
#include
-#include
#include
#include
#include
@@ -28,14 +27,14 @@ class RegexpFieldExtractor
/// Return true if row was successfully parsed and row fields were extracted.
bool parseRow(PeekableReadBuffer & buf);
- re2_st::StringPiece getField(size_t index) { return matched_fields[index]; }
+ std::string_view getField(size_t index) { return matched_fields[index]; }
size_t getMatchedFieldsSize() const { return matched_fields.size(); }
size_t getNumberOfGroups() const { return regexp.NumberOfCapturingGroups(); }
private:
const re2_st::RE2 regexp;
// The vector of fields extracted from line using regexp.
- std::vector<re2_st::StringPiece> matched_fields;
+ std::vector<std::string_view> matched_fields;
// These two vectors are needed to use RE2::FullMatchN (function for extracting fields).
std::vector<re2_st::RE2::Arg> re2_arguments;
std::vector<re2_st::RE2::Arg *> re2_arguments_ptrs;
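The surviving members hint at the extraction mechanics: each RE2::Arg points at one string_view field, and the array of Arg pointers is handed to RE2::FullMatchN. A hedged sketch with illustrative names (same string_view-based re2 assumption as above; this is not the actual parseRow code):

``` cpp
#include <re2/re2.h>
#include <string_view>
#include <vector>

bool extractFields(std::string_view line, const re2::RE2 & regexp,
                   std::vector<std::string_view> & fields)
{
    int groups = regexp.NumberOfCapturingGroups();
    fields.assign(groups, {});
    std::vector<re2::RE2::Arg> args;
    std::vector<const re2::RE2::Arg *> arg_ptrs;
    args.reserve(groups);                 // keep &args[i] stable while filling
    for (int i = 0; i < groups; ++i)
    {
        args.emplace_back(&fields[i]);    // each Arg parses into one field
        arg_ptrs.push_back(&args[i]);
    }
    return re2::RE2::FullMatchN(line, regexp, arg_ptrs.data(), groups);
}
```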
diff --git a/src/Server/HTTPHandler.cpp b/src/Server/HTTPHandler.cpp
index fe98ae5f69e6..42459340c57e 100644
--- a/src/Server/HTTPHandler.cpp
+++ b/src/Server/HTTPHandler.cpp
@@ -44,6 +44,8 @@
#include
#include
+#include
+
#include
#include
@@ -1163,8 +1165,8 @@ void PredefinedQueryHandler::customizeContext(HTTPServerRequest & request, ContextMutablePtr context)
{
int num_captures = compiled_regex->NumberOfCapturingGroups() + 1;
- re2::StringPiece matches[num_captures];
- re2::StringPiece input(begin, end - begin);
+ std::string_view matches[num_captures];
+ std::string_view input(begin, end - begin);
if (compiled_regex->Match(input, 0, end - begin, re2::RE2::Anchor::ANCHOR_BOTH, matches, num_captures))
{
for (const auto & [capturing_name, capturing_index] : compiled_regex->NamedCapturingGroups())
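NamedCapturingGroups() maps each group name to its submatch index, which is what lets the handler turn named URL captures into parameters. A standalone sketch of the same flow (illustrative pattern and bound, same string_view assumption as above):

``` cpp
#include <re2/re2.h>
#include <iostream>
#include <string_view>

int main()
{
    re2::RE2 re("/users/(?P<id>\\d+)/(?P<action>\\w+)");
    std::string_view url = "/users/7/rename";
    int num_captures = re.NumberOfCapturingGroups() + 1;
    std::string_view matches[16];   // fixed bound for the sketch; slot 0 = whole match
    if (re.Match(url, 0, url.size(), re2::RE2::ANCHOR_BOTH, matches, num_captures))
        for (const auto & [name, index] : re.NamedCapturingGroups())
            std::cout << name << " = " << matches[index] << "\n";
}
```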
diff --git a/src/Server/HTTPHandlerRequestFilter.h b/src/Server/HTTPHandlerRequestFilter.h
index c6bcdb211e11..25cbb9508719 100644
--- a/src/Server/HTTPHandlerRequestFilter.h
+++ b/src/Server/HTTPHandlerRequestFilter.h
@@ -6,7 +6,6 @@
#include
#include
-#include
#include
#include
@@ -26,9 +25,8 @@ static inline bool checkRegexExpression(std::string_view match_str, const CompiledRegexPtr & compiled_regex)
{
int num_captures = compiled_regex->NumberOfCapturingGroups() + 1;
- re2::StringPiece matches[num_captures];
- re2::StringPiece match_input(match_str.data(), match_str.size());
- return compiled_regex->Match(match_input, 0, match_str.size(), re2::RE2::Anchor::ANCHOR_BOTH, matches, num_captures);
+ std::string_view matches[num_captures];
+ return compiled_regex->Match({match_str.data(), match_str.size()}, 0, match_str.size(), re2::RE2::Anchor::ANCHOR_BOTH, matches, num_captures);
}
static inline bool checkExpression(std::string_view match_str, const std::pair<String, CompiledRegexPtr> & expression)
diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp
index 6bbf80944a7f..fa9bfd38a234 100644
--- a/src/Storages/MergeTree/MergeTreeData.cpp
+++ b/src/Storages/MergeTree/MergeTreeData.cpp
@@ -7196,7 +7196,10 @@ QueryProcessingStage::Enum MergeTreeData::getQueryProcessingStage(
if (query_context->canUseParallelReplicasOnInitiator() && to_stage >= QueryProcessingStage::WithMergeableState)
{
if (!canUseParallelReplicasBasedOnPKAnalysis(query_context, storage_snapshot, query_info))
+ {
+ query_info.parallel_replicas_disabled = true;
return QueryProcessingStage::Enum::FetchColumns;
+ }
/// ReplicatedMergeTree
if (supportsReplication())
diff --git a/src/Storages/SelectQueryInfo.h b/src/Storages/SelectQueryInfo.h
index 8fbc64b7a24a..13d6909fd525 100644
--- a/src/Storages/SelectQueryInfo.h
+++ b/src/Storages/SelectQueryInfo.h
@@ -255,6 +255,8 @@ struct SelectQueryInfo
Block minmax_count_projection_block;
MergeTreeDataSelectAnalysisResultPtr merge_tree_select_result_ptr;
+ bool parallel_replicas_disabled = false;
+
bool is_parameterized_view = false;
NameToNameMap parameterized_view_values;
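Together with the MergeTreeData hunk above, this flag is a one-way veto: the initiator's PK analysis can disable parallel replicas for the query, and the storage read path must honor that decision. Condensed from the two hunks (not literal code):

``` cpp
// Initiator side: PK analysis says parallel replicas will not help.
query_info.parallel_replicas_disabled = true;          // record the veto
// ...
// Read path: the veto short-circuits the feature checks.
const bool enable_parallel_reading =
    !query_info.parallel_replicas_disabled
    && local_context->canUseParallelReplicasOnFollower()
    && local_context->getSettingsRef().parallel_replicas_for_non_replicated_merge_tree;
```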
diff --git a/src/Storages/StorageMergeTree.cpp b/src/Storages/StorageMergeTree.cpp
index 4c0c0c8e3fa8..4653d81594fb 100644
--- a/src/Storages/StorageMergeTree.cpp
+++ b/src/Storages/StorageMergeTree.cpp
@@ -209,7 +209,9 @@ void StorageMergeTree::read(
size_t max_block_size,
size_t num_streams)
{
- if (local_context->canUseParallelReplicasOnInitiator() && local_context->getSettingsRef().parallel_replicas_for_non_replicated_merge_tree)
+ if (!query_info.parallel_replicas_disabled &&
+ local_context->canUseParallelReplicasOnInitiator() &&
+ local_context->getSettingsRef().parallel_replicas_for_non_replicated_merge_tree)
{
auto table_id = getStorageID();
@@ -240,7 +242,10 @@ void StorageMergeTree::read(
}
else
{
- const bool enable_parallel_reading = local_context->canUseParallelReplicasOnFollower() && local_context->getSettingsRef().parallel_replicas_for_non_replicated_merge_tree;
+ const bool enable_parallel_reading =
+ !query_info.parallel_replicas_disabled &&
+ local_context->canUseParallelReplicasOnFollower() &&
+ local_context->getSettingsRef().parallel_replicas_for_non_replicated_merge_tree;
if (auto plan = reader.read(
column_names, storage_snapshot, query_info,
@@ -929,44 +934,70 @@ MergeMutateSelectedEntryPtr StorageMergeTree::selectPartsToMerge(
SelectPartsDecision select_decision = SelectPartsDecision::CANNOT_SELECT;
- if (!canEnqueueBackgroundTask())
+ auto is_background_memory_usage_ok = [](String * disable_reason) -> bool
{
- if (out_disable_reason)
- *out_disable_reason = fmt::format("Current background tasks memory usage ({}) is more than the limit ({})",
+ if (canEnqueueBackgroundTask())
+ return true;
+ if (disable_reason)
+ *disable_reason = fmt::format("Current background tasks memory usage ({}) is more than the limit ({})",
formatReadableSizeWithBinarySuffix(background_memory_tracker.get()),
formatReadableSizeWithBinarySuffix(background_memory_tracker.getSoftLimit()));
- }
- else if (partition_id.empty())
- {
- UInt64 max_source_parts_size = merger_mutator.getMaxSourcePartsSizeForMerge();
- bool merge_with_ttl_allowed = getTotalMergesWithTTLInMergeList() < data_settings->max_number_of_merges_with_ttl_in_pool;
+ return false;
+ };
- /// TTL requirements is much more strict than for regular merge, so
- /// if regular not possible, than merge with ttl is not also not
- /// possible.
- if (max_source_parts_size > 0)
+ if (partition_id.empty())
+ {
+ if (is_background_memory_usage_ok(out_disable_reason))
{
- select_decision = merger_mutator.selectPartsToMerge(
- future_part,
- aggressive,
- max_source_parts_size,
- can_merge,
- merge_with_ttl_allowed,
- txn,
- out_disable_reason);
+ UInt64 max_source_parts_size = merger_mutator.getMaxSourcePartsSizeForMerge();
+ bool merge_with_ttl_allowed = getTotalMergesWithTTLInMergeList() < data_settings->max_number_of_merges_with_ttl_in_pool;
+
+ /// TTL requirements are much stricter than for a regular merge, so
+ /// if a regular merge is not possible, then a merge with TTL is not
+ /// possible either.
+ if (max_source_parts_size > 0)
+ {
+ select_decision = merger_mutator.selectPartsToMerge(
+ future_part,
+ aggressive,
+ max_source_parts_size,
+ can_merge,
+ merge_with_ttl_allowed,
+ txn,
+ out_disable_reason);
+ }
+ else if (out_disable_reason)
+ *out_disable_reason = "Current value of max_source_parts_size is zero";
}
- else if (out_disable_reason)
- *out_disable_reason = "Current value of max_source_parts_size is zero";
}
else
{
while (true)
{
- select_decision = merger_mutator.selectAllPartsToMergeWithinPartition(
- future_part, can_merge, partition_id, final, metadata_snapshot, txn, out_disable_reason, optimize_skip_merged_partitions);
auto timeout_ms = getSettings()->lock_acquire_timeout_for_background_operations.totalMilliseconds();
auto timeout = std::chrono::milliseconds(timeout_ms);
+ if (!is_background_memory_usage_ok(out_disable_reason))
+ {
+ constexpr auto poll_interval = std::chrono::seconds(1);
+ Int64 attempts = timeout / poll_interval;
+ bool ok = false;
+ for (Int64 i = 0; i < attempts; ++i)
+ {
+ std::this_thread::sleep_for(poll_interval);
+ if (is_background_memory_usage_ok(out_disable_reason))
+ {
+ ok = true;
+ break;
+ }
+ }
+ if (!ok)
+ break;
+ }
+
+ select_decision = merger_mutator.selectAllPartsToMergeWithinPartition(
+ future_part, can_merge, partition_id, final, metadata_snapshot, txn, out_disable_reason, optimize_skip_merged_partitions);
+
/// If final - we will wait for currently processing merges to finish and continue.
if (final
&& select_decision != SelectPartsDecision::SELECTED
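The new loop polls once per second until background memory usage drops back under the soft limit, giving up after lock_acquire_timeout_for_background_operations. The same wait-or-bail shape in isolation, with a generic predicate (an illustrative helper, not part of the patch):

``` cpp
#include <chrono>
#include <functional>
#include <thread>

// Poll `ok` once per `interval` until it succeeds or `timeout` elapses.
// Mirrors the attempts = timeout / poll_interval loop above.
bool waitUntil(const std::function<bool()> & ok,
               std::chrono::milliseconds timeout,
               std::chrono::milliseconds interval = std::chrono::seconds(1))
{
    auto attempts = timeout / interval;   // integral count, possibly zero
    for (auto i = decltype(attempts){0}; i < attempts; ++i)
    {
        if (ok())
            return true;
        std::this_thread::sleep_for(interval);
    }
    return ok();   // final check at (or past) the deadline
}
```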
diff --git a/tests/ci/stress_tests.lib b/tests/ci/stress_tests.lib
index 2b8ac77b9523..190f3f39f9ea 100644
--- a/tests/ci/stress_tests.lib
+++ b/tests/ci/stress_tests.lib
@@ -243,7 +243,7 @@ function check_logs_for_critical_errors()
# Remove file fatal_messages.txt if it's empty
[ -s /test_output/fatal_messages.txt ] || rm /test_output/fatal_messages.txt
- rg -Fa "########################################" /test_output/* > /dev/null \
+ rg -Faz "########################################" /test_output/* > /dev/null \
&& echo -e "Killed by signal (output files)$FAIL" >> /test_output/test_results.tsv
function get_gdb_log_context()
diff --git a/tests/config/config.d/named_collection.xml b/tests/config/config.d/named_collection.xml
index 2e49c0c596f8..5b716a7b8da5 100644
--- a/tests/config/config.d/named_collection.xml
+++ b/tests/config/config.d/named_collection.xml
@@ -32,5 +32,10 @@
testtest
auto
+ <s3_conn>
+ <url>http://localhost:11111/test/</url>
+ <access_key_id>test</access_key_id>
+ <secret_access_key>testtest</secret_access_key>
+ </s3_conn>
diff --git a/tests/integration/test_storage_kerberized_hdfs/hdfs_configs/bootstrap.sh b/tests/integration/test_storage_kerberized_hdfs/hdfs_configs/bootstrap.sh
index 687ddd8fb462..db6921bc1c80 100755
--- a/tests/integration/test_storage_kerberized_hdfs/hdfs_configs/bootstrap.sh
+++ b/tests/integration/test_storage_kerberized_hdfs/hdfs_configs/bootstrap.sh
@@ -111,6 +111,23 @@ cat > /usr/local/hadoop/etc/hadoop/hdfs-site.xml << EOF
<name>dfs.datanode.http.address</name>
<value>0.0.0.0:1006</value>
+ </property>
+ <property>
+ <name>dfs.datanode.ipc.address</name>
+ <value>0.0.0.0:0</value>
+ </property>
+ <property>
+ <name>dfs.namenode.secondary.http-address</name>
+ <value>0.0.0.0:0</value>
+ </property>
+ <property>
+ <name>dfs.namenode.backup.address</name>
+ <value>0.0.0.0:0</value>
+ </property>
+ <property>
+ <name>dfs.namenode.backup.http-address</name>
+ <value>0.0.0.0:0</value>
+ </property>