diff --git a/.gitmodules b/.gitmodules index e4d63a341183..ed61ddb96ba1 100644 --- a/.gitmodules +++ b/.gitmodules @@ -253,9 +253,6 @@ [submodule "contrib/qpl"] path = contrib/qpl url = https://github.com/intel/qpl -[submodule "contrib/idxd-config"] - path = contrib/idxd-config - url = https://github.com/intel/idxd-config [submodule "contrib/wyhash"] path = contrib/wyhash url = https://github.com/wangyi-fudan/wyhash @@ -296,6 +293,9 @@ [submodule "contrib/libdivide"] path = contrib/libdivide url = https://github.com/ridiculousfish/libdivide +[submodule "contrib/libbcrypt"] + path = contrib/libbcrypt + url = https://github.com/rg3/libbcrypt.git [submodule "contrib/ulid-c"] path = contrib/ulid-c url = https://github.com/ClickHouse/ulid-c.git diff --git a/CHANGELOG.md b/CHANGELOG.md index e2505856d0c8..1ccd4f9846d9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,7 +7,7 @@ # 2023 Changelog -### ClickHouse release 23.4 LTS, 2023-04-26 +### ClickHouse release 23.4, 2023-04-26 #### Backward Incompatible Change * Formatter '%M' in function formatDateTime() now prints the month name instead of the minutes. This makes the behavior consistent with MySQL. The previous behavior can be restored using setting "formatdatetime_parsedatetime_m_is_month_name = 0". [#47246](https://github.com/ClickHouse/ClickHouse/pull/47246) ([Robert Schulze](https://github.com/rschu1ze)). diff --git a/CMakeLists.txt b/CMakeLists.txt index 0554403cce51..263b202049b3 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -170,12 +170,6 @@ else () set(NO_WHOLE_ARCHIVE --no-whole-archive) endif () -option(ENABLE_CURL_BUILD "Enable curl, azure, sentry build on by default except MacOS." ON) -if (OS_DARWIN) - # Disable the curl, azure, senry build on MacOS - set (ENABLE_CURL_BUILD OFF) -endif () - if (NOT CMAKE_BUILD_TYPE_UC STREQUAL "RELEASE") # Can be lld or ld-lld or lld-13 or /path/to/lld. 
if (LINKER_NAME MATCHES "lld") @@ -393,9 +387,9 @@ else() endif () option (ENABLE_GWP_ASAN "Enable Gwp-Asan" ON) -# We use mmap for allocations more heavily in debug builds, -# but GWP-ASan also wants to use mmap frequently, -# and due to a large number of memory mappings, +# We use mmap for allocations more heavily in debug builds, +# but GWP-ASan also wants to use mmap frequently, +# and due to a large number of memory mappings, # it does not work together well. if ((NOT OS_LINUX AND NOT OS_ANDROID) OR (CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG")) set(ENABLE_GWP_ASAN OFF) diff --git a/SECURITY.md b/SECURITY.md index 44a122956b45..75c1a9d7d6aa 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -22,13 +22,7 @@ The following versions of ClickHouse server are currently being supported with s | 22.10 | ❌ | | 22.9 | ❌ | | 22.8 | ✔️ | -| 22.7 | ❌ | -| 22.6 | ❌ | -| 22.5 | ❌ | -| 22.4 | ❌ | -| 22.3 | ❌ | -| 22.2 | ❌ | -| 22.1 | ❌ | +| 22.* | ❌ | | 21.* | ❌ | | 20.* | ❌ | | 19.* | ❌ | diff --git a/base/harmful/harmful.c b/base/harmful/harmful.c index 6112f9a339c0..78796ca0c054 100644 --- a/base/harmful/harmful.c +++ b/base/harmful/harmful.c @@ -31,7 +31,8 @@ TRAP(argp_state_help) TRAP(argp_usage) TRAP(asctime) TRAP(clearenv) -TRAP(crypt) +// Redefined at contrib/libbcrypt/crypt_blowfish/wrapper.c:186 +// TRAP(crypt) TRAP(ctime) TRAP(cuserid) TRAP(drand48) diff --git a/cmake/fuzzer.cmake b/cmake/fuzzer.cmake index 578a97572701..52f301ab8ad4 100644 --- a/cmake/fuzzer.cmake +++ b/cmake/fuzzer.cmake @@ -7,10 +7,6 @@ if (FUZZER) set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SAN_FLAGS} -fsanitize=fuzzer-no-link") set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SAN_FLAGS} -fsanitize=fuzzer-no-link") - if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU") - set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fsanitize=fuzzer-no-link") - endif() - # NOTE: oss-fuzz can change LIB_FUZZING_ENGINE variable if (NOT LIB_FUZZING_ENGINE) set (LIB_FUZZING_ENGINE "-fsanitize=fuzzer") diff --git a/cmake/sanitize.cmake 
b/cmake/sanitize.cmake index fc9793d8f356..bf5eddf09f5d 100644 --- a/cmake/sanitize.cmake +++ b/cmake/sanitize.cmake @@ -16,49 +16,24 @@ if (SANITIZE) set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SAN_FLAGS} ${ASAN_FLAGS}") set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SAN_FLAGS} ${ASAN_FLAGS}") - if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU") - set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${ASAN_FLAGS}") - endif() - if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU") - set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -static-libasan") - endif () - elseif (SANITIZE STREQUAL "memory") # MemorySanitizer flags are set according to the official documentation: # https://clang.llvm.org/docs/MemorySanitizer.html#usage - # - # For now, it compiles with `cmake -DSANITIZE=memory -DCMAKE_BUILD_TYPE=RelWithDebInfo -DCMAKE_CXX_FLAGS_ADD="-O1" -DCMAKE_C_FLAGS_ADD="-O1"` - # Compiling with -DCMAKE_BUILD_TYPE=Debug leads to ld.lld failures because - # of large files (was not tested with ld.gold). This is why we compile with - # RelWithDebInfo, and downgrade optimizations to -O1 but not to -Og, to - # keep the binary size down. - # TODO: try compiling with -Og and with ld.gold. + + # Linking can fail due to relocation overflows (see #49145), caused by too big object files / libraries. + # Work around this with position-independent builds (-fPIC and -fpie), this is slightly slower than non-PIC/PIE but that's okay. 
set (MSAN_FLAGS "-fsanitize=memory -fsanitize-memory-use-after-dtor -fsanitize-memory-track-origins -fno-optimize-sibling-calls -fPIC -fpie -fsanitize-blacklist=${CMAKE_SOURCE_DIR}/tests/msan_suppressions.txt") set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SAN_FLAGS} ${MSAN_FLAGS}") set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SAN_FLAGS} ${MSAN_FLAGS}") - if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU") - set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fsanitize=memory") - endif() - if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU") - set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -static-libmsan") - endif () - elseif (SANITIZE STREQUAL "thread") set (TSAN_FLAGS "-fsanitize=thread") if (COMPILER_CLANG) set (TSAN_FLAGS "${TSAN_FLAGS} -fsanitize-blacklist=${CMAKE_SOURCE_DIR}/tests/tsan_suppressions.txt") endif() - set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SAN_FLAGS} ${TSAN_FLAGS}") set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SAN_FLAGS} ${TSAN_FLAGS}") - if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU") - set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fsanitize=thread") - endif() - if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU") - set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -static-libtsan") - endif () elseif (SANITIZE STREQUAL "undefined") set (UBSAN_FLAGS "-fsanitize=undefined -fno-sanitize-recover=all -fno-sanitize=float-divide-by-zero") @@ -77,12 +52,6 @@ if (SANITIZE) set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SAN_FLAGS} ${UBSAN_FLAGS}") set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SAN_FLAGS} ${UBSAN_FLAGS}") - if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU") - set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fsanitize=undefined") - endif() - if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU") - set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -static-libubsan") - endif () # llvm-tblgen, that is used during LLVM build, doesn't work with UBSan. 
set (ENABLE_EMBEDDED_COMPILER 0 CACHE BOOL "") diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index 0ff8b550a982..0c92ff17f115 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -141,20 +141,19 @@ add_contrib (libuv-cmake libuv) add_contrib (liburing-cmake liburing) add_contrib (amqpcpp-cmake AMQP-CPP) # requires: libuv add_contrib (cassandra-cmake cassandra) # requires: libuv - -if (ENABLE_CURL_BUILD) +if (NOT OS_DARWIN) add_contrib (curl-cmake curl) add_contrib (azure-cmake azure) add_contrib (sentry-native-cmake sentry-native) # requires: curl endif() - add_contrib (fmtlib-cmake fmtlib) add_contrib (krb5-cmake krb5) add_contrib (cyrus-sasl-cmake cyrus-sasl) # for krb5 add_contrib (libgsasl-cmake libgsasl) # requires krb5 add_contrib (librdkafka-cmake librdkafka) # requires: libgsasl add_contrib (nats-io-cmake nats-io) -add_contrib (libhdfs3-cmake libhdfs3) # requires: protobuf, krb5 +add_contrib (isa-l-cmake isa-l) +add_contrib (libhdfs3-cmake libhdfs3) # requires: protobuf, krb5, isa-l add_contrib (hive-metastore-cmake hive-metastore) # requires: thrift/avro/arrow/libhdfs3 add_contrib (cppkafka-cmake cppkafka) add_contrib (libpqxx-cmake libpqxx) @@ -178,21 +177,17 @@ add_contrib (s2geometry-cmake s2geometry) add_contrib (c-ares-cmake c-ares) add_contrib (qpl-cmake qpl) add_contrib (morton-nd-cmake morton-nd) - if (ARCH_S390X) add_contrib(crc32-s390x-cmake crc32-s390x) endif() - add_contrib (annoy-cmake annoy) - add_contrib (xxHash-cmake xxHash) -add_contrib (google-benchmark-cmake google-benchmark) +add_contrib (libbcrypt-cmake libbcrypt) +add_contrib (google-benchmark-cmake google-benchmark) add_contrib (ulid-c-cmake ulid-c) -add_contrib (isa-l-cmake isa-l) - # Put all targets defined here and in subdirectories under "contrib/" folders in GUI-based IDEs. # Some of third-party projects may override CMAKE_FOLDER or FOLDER property of their targets, so they would not appear # in "contrib/..." 
as originally planned, so we workaround this by fixing FOLDER properties of all targets manually, diff --git a/contrib/curl b/contrib/curl index c12fb3ddaf48..b0edf0b7dae4 160000 --- a/contrib/curl +++ b/contrib/curl @@ -1 +1 @@ -Subproject commit c12fb3ddaf48e709a7a4deaa55ec485e4df163ee +Subproject commit b0edf0b7dae44d9e66f270a257cf654b35d5263d diff --git a/contrib/curl-cmake/CMakeLists.txt b/contrib/curl-cmake/CMakeLists.txt index 8a570bd267c7..70d9c2816dc1 100644 --- a/contrib/curl-cmake/CMakeLists.txt +++ b/contrib/curl-cmake/CMakeLists.txt @@ -12,6 +12,9 @@ set (SRCS "${LIBRARY_DIR}/lib/noproxy.c" "${LIBRARY_DIR}/lib/idn.c" "${LIBRARY_DIR}/lib/cfilters.c" + "${LIBRARY_DIR}/lib/cf-socket.c" + "${LIBRARY_DIR}/lib/cf-haproxy.c" + "${LIBRARY_DIR}/lib/cf-https-connect.c" "${LIBRARY_DIR}/lib/file.c" "${LIBRARY_DIR}/lib/timeval.c" "${LIBRARY_DIR}/lib/base64.c" @@ -37,8 +40,8 @@ set (SRCS "${LIBRARY_DIR}/lib/strcase.c" "${LIBRARY_DIR}/lib/easy.c" "${LIBRARY_DIR}/lib/curl_fnmatch.c" + "${LIBRARY_DIR}/lib/curl_log.c" "${LIBRARY_DIR}/lib/fileinfo.c" - "${LIBRARY_DIR}/lib/wildcard.c" "${LIBRARY_DIR}/lib/krb5.c" "${LIBRARY_DIR}/lib/memdebug.c" "${LIBRARY_DIR}/lib/http_chunks.c" @@ -96,6 +99,7 @@ set (SRCS "${LIBRARY_DIR}/lib/rand.c" "${LIBRARY_DIR}/lib/curl_multibyte.c" "${LIBRARY_DIR}/lib/conncache.c" + "${LIBRARY_DIR}/lib/cf-h1-proxy.c" "${LIBRARY_DIR}/lib/http2.c" "${LIBRARY_DIR}/lib/smb.c" "${LIBRARY_DIR}/lib/curl_endian.c" @@ -113,12 +117,13 @@ set (SRCS "${LIBRARY_DIR}/lib/altsvc.c" "${LIBRARY_DIR}/lib/socketpair.c" "${LIBRARY_DIR}/lib/bufref.c" + "${LIBRARY_DIR}/lib/bufq.c" "${LIBRARY_DIR}/lib/dynbuf.c" + "${LIBRARY_DIR}/lib/dynhds.c" "${LIBRARY_DIR}/lib/hsts.c" "${LIBRARY_DIR}/lib/http_aws_sigv4.c" "${LIBRARY_DIR}/lib/mqtt.c" "${LIBRARY_DIR}/lib/rename.c" - "${LIBRARY_DIR}/lib/h2h3.c" "${LIBRARY_DIR}/lib/headers.c" "${LIBRARY_DIR}/lib/timediff.c" "${LIBRARY_DIR}/lib/vauth/vauth.c" @@ -133,6 +138,7 @@ set (SRCS "${LIBRARY_DIR}/lib/vauth/oauth2.c" 
"${LIBRARY_DIR}/lib/vauth/spnego_gssapi.c" "${LIBRARY_DIR}/lib/vauth/spnego_sspi.c" + "${LIBRARY_DIR}/lib/vquic/vquic.c" "${LIBRARY_DIR}/lib/vtls/openssl.c" "${LIBRARY_DIR}/lib/vtls/gtls.c" "${LIBRARY_DIR}/lib/vtls/vtls.c" @@ -147,9 +153,6 @@ set (SRCS "${LIBRARY_DIR}/lib/vtls/keylog.c" "${LIBRARY_DIR}/lib/vtls/x509asn1.c" "${LIBRARY_DIR}/lib/vtls/hostcheck.c" - "${LIBRARY_DIR}/lib/vquic/ngtcp2.c" - "${LIBRARY_DIR}/lib/vquic/quiche.c" - "${LIBRARY_DIR}/lib/vquic/msh3.c" "${LIBRARY_DIR}/lib/vssh/libssh2.c" "${LIBRARY_DIR}/lib/vssh/libssh.c" ) diff --git a/contrib/idxd-config b/contrib/idxd-config deleted file mode 160000 index f6605c41a735..000000000000 --- a/contrib/idxd-config +++ /dev/null @@ -1 +0,0 @@ -Subproject commit f6605c41a735e3fdfef2d2d18655a33af6490b99 diff --git a/contrib/isa-l-cmake/CMakeLists.txt b/contrib/isa-l-cmake/CMakeLists.txt index fd0218a7b801..d4d6d648268b 100644 --- a/contrib/isa-l-cmake/CMakeLists.txt +++ b/contrib/isa-l-cmake/CMakeLists.txt @@ -1,6 +1,23 @@ +option(ENABLE_ISAL_LIBRARY "Enable ISA-L library" ${ENABLE_LIBRARIES}) +if (ARCH_AARCH64) + # Disable ISA-L library on aarch64. + set (ENABLE_ISAL_LIBRARY OFF) +endif () + +if (NOT ENABLE_ISAL_LIBRARY) + message(STATUS "Not using isa-l") + return() +endif() + set(ISAL_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/isa-l") -# check nasm compiler +# The YASM and NASM assemblers are somewhat mutually compatible. ISAL specifically needs NASM. If only YASM is installed, then check_language(ASM_NASM) +# below happily finds YASM, leading to weird errors at build time. Therefore, do an explicit check for NASM here. 
+find_program(NASM_PATH NAMES nasm) +if (NOT NASM_PATH) + message(FATAL_ERROR "Please install NASM from 'https://www.nasm.us/' because NASM compiler can not be found!") +endif () + include(CheckLanguage) check_language(ASM_NASM) if(NOT CMAKE_ASM_NASM_COMPILER) diff --git a/contrib/libbcrypt b/contrib/libbcrypt new file mode 160000 index 000000000000..8aa32ad94ebe --- /dev/null +++ b/contrib/libbcrypt @@ -0,0 +1 @@ +Subproject commit 8aa32ad94ebe06b76853b0767c910c9fbf7ccef4 diff --git a/contrib/libbcrypt-cmake/CMakeLists.txt b/contrib/libbcrypt-cmake/CMakeLists.txt new file mode 100644 index 000000000000..d40d7f9195ee --- /dev/null +++ b/contrib/libbcrypt-cmake/CMakeLists.txt @@ -0,0 +1,19 @@ +option(ENABLE_BCRYPT "Enable bcrypt" ${ENABLE_LIBRARIES}) + +if (NOT ENABLE_BCRYPT) + message(STATUS "Not using bcrypt") + return() +endif() + +set (LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/libbcrypt") + +set(SRCS + "${LIBRARY_DIR}/bcrypt.c" + "${LIBRARY_DIR}/crypt_blowfish/crypt_blowfish.c" + "${LIBRARY_DIR}/crypt_blowfish/crypt_gensalt.c" + "${LIBRARY_DIR}/crypt_blowfish/wrapper.c" +) + +add_library(_bcrypt ${SRCS}) +target_include_directories(_bcrypt SYSTEM PUBLIC "${LIBRARY_DIR}") +add_library(ch_contrib::bcrypt ALIAS _bcrypt) diff --git a/contrib/libhdfs3-cmake/CMakeLists.txt b/contrib/libhdfs3-cmake/CMakeLists.txt index d9f7009c1bd4..fd9ed7dc182c 100644 --- a/contrib/libhdfs3-cmake/CMakeLists.txt +++ b/contrib/libhdfs3-cmake/CMakeLists.txt @@ -172,8 +172,10 @@ if (TARGET OpenSSL::SSL) target_link_libraries(_hdfs3 PRIVATE OpenSSL::Crypto OpenSSL::SSL) endif() -target_link_libraries(_hdfs3 PRIVATE ch_contrib::isal) -add_definitions(-DHADOOP_ISAL_LIBRARY) +if (TARGET ch_contrib::isal) + target_link_libraries(_hdfs3 PRIVATE ch_contrib::isal) + add_definitions(-DHADOOP_ISAL_LIBRARY) +endif() add_library(ch_contrib::hdfs ALIAS _hdfs3) diff --git a/docker/images.json b/docker/images.json index 9150abe1f1cf..b4f3e755bd1f 100644 --- a/docker/images.json +++ 
b/docker/images.json @@ -123,7 +123,8 @@ "docker/test/stateless", "docker/test/integration/base", "docker/test/fuzzer", - "docker/test/keeper-jepsen" + "docker/test/keeper-jepsen", + "docker/test/server-jepsen" ] }, "docker/test/integration/kerberized_hadoop": { @@ -139,6 +140,10 @@ "name": "clickhouse/keeper-jepsen-test", "dependent": [] }, + "docker/test/server-jepsen": { + "name": "clickhouse/server-jepsen-test", + "dependent": [] + }, "docker/test/install/deb": { "name": "clickhouse/install-deb-test", "dependent": [] diff --git a/docker/keeper/Dockerfile b/docker/keeper/Dockerfile index 59e8d2ed3d87..73da4515ff4b 100644 --- a/docker/keeper/Dockerfile +++ b/docker/keeper/Dockerfile @@ -32,7 +32,7 @@ RUN arch=${TARGETARCH:-amd64} \ esac ARG REPOSITORY="https://s3.amazonaws.com/clickhouse-builds/22.4/31c367d3cd3aefd316778601ff6565119fe36682/package_release" -ARG VERSION="23.4.1.1943" +ARG VERSION="23.4.2.11" ARG PACKAGES="clickhouse-keeper" # user/group precreated explicitly with fixed uid/gid on purpose. diff --git a/docker/server/Dockerfile.alpine b/docker/server/Dockerfile.alpine index d59a08c28052..1a5d2071f6b7 100644 --- a/docker/server/Dockerfile.alpine +++ b/docker/server/Dockerfile.alpine @@ -33,7 +33,7 @@ RUN arch=${TARGETARCH:-amd64} \ # lts / testing / prestable / etc ARG REPO_CHANNEL="stable" ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}" -ARG VERSION="23.4.1.1943" +ARG VERSION="23.4.2.11" ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static" # user/group precreated explicitly with fixed uid/gid on purpose. 
diff --git a/docker/server/Dockerfile.ubuntu b/docker/server/Dockerfile.ubuntu index 390f347d549c..8792d419a165 100644 --- a/docker/server/Dockerfile.ubuntu +++ b/docker/server/Dockerfile.ubuntu @@ -22,7 +22,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list ARG REPO_CHANNEL="stable" ARG REPOSITORY="deb https://packages.clickhouse.com/deb ${REPO_CHANNEL} main" -ARG VERSION="23.4.1.1943" +ARG VERSION="23.4.2.11" ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static" # set non-empty deb_location_url url to create a docker image diff --git a/docker/test/server-jepsen/Dockerfile b/docker/test/server-jepsen/Dockerfile index 958dbfa066af..125b187aa5b9 100644 --- a/docker/test/server-jepsen/Dockerfile +++ b/docker/test/server-jepsen/Dockerfile @@ -16,6 +16,8 @@ ENV TESTS_TO_RUN="8" ENV TIME_LIMIT="30" ENV KEEPER_NODE="" +ENV NEMESIS="" +ENV WORKLOAD="" # volumes diff --git a/docker/test/server-jepsen/run.sh b/docker/test/server-jepsen/run.sh index 4a966d50f74b..4e90a74e7055 100644 --- a/docker/test/server-jepsen/run.sh +++ b/docker/test/server-jepsen/run.sh @@ -15,8 +15,38 @@ if [ -z "$CLICKHOUSE_REPO_PATH" ]; then ls -lath ||: fi +clickhouse_source="--clickhouse-source \'$CLICKHOUSE_PACKAGE\'" +if [ -n "$WITH_LOCAL_BINARY" ]; then + clickhouse_source="--clickhouse-source /clickhouse" +fi + +tests_count="--test-count \"$TESTS_TO_RUN\"" +tests_to_run="test-all" +workload="" +if [ -n "$WORKLOAD" ]; then + tests_to_run="test" + workload="--workload $WORKLOAD" + tests_count="" +fi + +nemesis="" +if [ -n "$NEMESIS" ]; then + nemesis="--nemesis $NEMESIS" +fi + +rate="" +if [ -n "$RATE" ]; then + rate="--rate $RATE" +fi + +concurrency="" +if [ -n "$CONCURRENCY" ]; then + concurrency="--concurrency $CONCURRENCY" +fi + + cd "$CLICKHOUSE_REPO_PATH/tests/jepsen.clickhouse" -(lein run server test-all --keeper "$KEEPER_NODE" --nodes-file "$NODES_FILE_PATH" --username "$NODES_USERNAME" --logging-json --password "$NODES_PASSWORD" 
--time-limit "$TIME_LIMIT" --concurrency 50 -r 50 --clickhouse-source "$CLICKHOUSE_PACKAGE" --test-count "$TESTS_TO_RUN" || true) | tee "$TEST_OUTPUT/jepsen_run_all_tests.log" +(lein run server $tests_to_run $workload --keeper "$KEEPER_NODE" $concurrency $nemesis $rate --nodes-file "$NODES_FILE_PATH" --username "$NODES_USERNAME" --logging-json --password "$NODES_PASSWORD" --time-limit "$TIME_LIMIT" --concurrency 50 $clickhouse_source $tests_count --reuse-binary || true) | tee "$TEST_OUTPUT/jepsen_run_all_tests.log" mv store "$TEST_OUTPUT/" diff --git a/docker/test/util/process_functional_tests_result.py b/docker/test/util/process_functional_tests_result.py index 3c1c6e2a795a..470eb61b3fad 100755 --- a/docker/test/util/process_functional_tests_result.py +++ b/docker/test/util/process_functional_tests_result.py @@ -80,11 +80,9 @@ def process_test_log(log_path, broken_tests): test_results.append( ( test_name, - "FAIL", + "SKIPPED", test_time, - [ - "Test is expected to fail! Please, update broken_tests.txt!\n" - ], + ["This test passed. Update broken_tests.txt.\n"], ) ) else: diff --git a/docs/changelogs/v23.4.2.11-stable.md b/docs/changelogs/v23.4.2.11-stable.md new file mode 100644 index 000000000000..3c572b9c1cb2 --- /dev/null +++ b/docs/changelogs/v23.4.2.11-stable.md @@ -0,0 +1,20 @@ +--- +sidebar_position: 1 +sidebar_label: 2023 +--- + +# 2023 Changelog + +### ClickHouse release v23.4.2.11-stable (b6442320f9d) FIXME as compared to v23.4.1.1943-stable (3920eb987f7) + +#### Bug Fix (user-visible misbehavior in an official stable release) + +* Revert "Fix GCS native copy ([#48981](https://github.com/ClickHouse/ClickHouse/issues/48981))" [#49194](https://github.com/ClickHouse/ClickHouse/pull/49194) ([Raúl Marín](https://github.com/Algunenano)). +* Fix race on Outdated parts loading [#49223](https://github.com/ClickHouse/ClickHouse/pull/49223) ([Alexander Tokmakov](https://github.com/tavplubix)). 
+ +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* Implement status comment [#48468](https://github.com/ClickHouse/ClickHouse/pull/48468) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). +* Update curl to 8.0.1 (for CVEs) [#48765](https://github.com/ClickHouse/ClickHouse/pull/48765) ([Boris Kuschel](https://github.com/bkuschel)). +* Fallback auth gh api [#49314](https://github.com/ClickHouse/ClickHouse/pull/49314) ([Mikhail f. Shiryaev](https://github.com/Felixoid)). + diff --git a/docs/en/development/build.md b/docs/en/development/build.md index e3a63da6a3e1..a55d44bdf939 100644 --- a/docs/en/development/build.md +++ b/docs/en/development/build.md @@ -22,7 +22,7 @@ The minimum recommended Ubuntu version for development is 22.04 LTS. ### Install Prerequisites {#install-prerequisites} ``` bash -sudo apt-get install git cmake ccache python3 ninja-build yasm gawk +sudo apt-get install git cmake ccache python3 ninja-build nasm yasm gawk ``` ### Install and Use the Clang compiler @@ -72,7 +72,7 @@ cmake -S . -B build cmake --build build # or: `cd build; ninja` ``` -To create an executable, run `cmake --build --target clickhouse` (or: `cd build; ninja clickhouse`). +To create an executable, run `cmake --build build --target clickhouse` (or: `cd build; ninja clickhouse`). This will create executable `build/programs/clickhouse` which can be used with `client` or `server` arguments. ## Building on Any Linux {#how-to-build-clickhouse-on-any-linux} @@ -92,7 +92,7 @@ If all the components are installed, you may build in the same way as the steps Example for OpenSUSE Tumbleweed: ``` bash -sudo zypper install git cmake ninja clang-c++ python lld yasm gawk +sudo zypper install git cmake ninja clang-c++ python lld nasm yasm gawk git clone --recursive https://github.com/ClickHouse/ClickHouse.git mkdir build cmake -S . 
-B build @@ -103,7 +103,7 @@ Example for Fedora Rawhide: ``` bash sudo yum update -sudo yum --nogpg install git cmake make clang python3 ccache yasm gawk +sudo yum --nogpg install git cmake make clang python3 ccache nasm yasm gawk git clone --recursive https://github.com/ClickHouse/ClickHouse.git mkdir build cmake -S . -B build diff --git a/docs/en/engines/table-engines/mergetree-family/mergetree.md b/docs/en/engines/table-engines/mergetree-family/mergetree.md index d5189d4b9d99..7780dee41360 100644 --- a/docs/en/engines/table-engines/mergetree-family/mergetree.md +++ b/docs/en/engines/table-engines/mergetree-family/mergetree.md @@ -439,6 +439,50 @@ Syntax: `ngrambf_v1(n, size_of_bloom_filter_in_bytes, number_of_hash_functions, - `number_of_hash_functions` — The number of hash functions used in the Bloom filter. - `random_seed` — The seed for Bloom filter hash functions. +Users can create [UDF](/docs/en/sql-reference/statements/create/function.md) to estimate the parameters set of `ngrambf_v1`. 
Query statements are as follows: + +```sql +CREATE FUNCTION bfEstimateFunctions [ON CLUSTER cluster] +AS +(total_number_of_all_grams, size_of_bloom_filter_in_bits) -> round((size_of_bloom_filter_in_bits / total_number_of_all_grams) * log(2)); + +CREATE FUNCTION bfEstimateBmSize [ON CLUSTER cluster] +AS +(total_number_of_all_grams, probability_of_false_positives) -> ceil((total_number_of_all_grams * log(probability_of_false_positives)) / log(1 / pow(2, log(2)))); + +CREATE FUNCTION bfEstimateFalsePositive [ON CLUSTER cluster] +AS +(total_number_of_all_grams, number_of_hash_functions, size_of_bloom_filter_in_bytes) -> pow(1 - exp(-number_of_hash_functions/ (size_of_bloom_filter_in_bytes / total_number_of_all_grams)), number_of_hash_functions); + +CREATE FUNCTION bfEstimateGramNumber [ON CLUSTER cluster] +AS +(number_of_hash_functions, probability_of_false_positives, size_of_bloom_filter_in_bytes) -> ceil(size_of_bloom_filter_in_bytes / (-number_of_hash_functions / log(1 - exp(log(probability_of_false_positives) / number_of_hash_functions)))) + +``` +To use those functions, we need to specify at least two parameters. +For example, if there are 4300 ngrams in the granule and we expect false positives to be less than 0.0001. The other parameters can be estimated by executing the following queries: + + +```sql +--- estimate number of bits in the filter +SELECT bfEstimateBmSize(4300, 0.0001) / 8 as size_of_bloom_filter_in_bytes; + +┌─size_of_bloom_filter_in_bytes─┐ +│ 10304 │ +└───────────────────────────────┘ + +--- estimate number of hash functions +SELECT bfEstimateFunctions(4300, bfEstimateBmSize(4300, 0.0001)) as number_of_hash_functions + +┌─number_of_hash_functions─┐ +│ 13 │ +└──────────────────────────┘ + +``` +Of course, you can also use those functions to estimate parameters by other conditions. +The functions refer to the content [here](https://hur.st/bloomfilter). + + #### Token Bloom Filter The same as `ngrambf_v1`, but stores tokens instead of ngrams. 
Tokens are sequences separated by non-alphanumeric characters. @@ -731,7 +775,13 @@ The names given to the described entities can be found in the system tables, [sy ### Configuration {#table_engine-mergetree-multiple-volumes_configure} -Disks, volumes and storage policies should be declared inside the `` tag either in the main file `config.xml` or in a distinct file in the `config.d` directory. +Disks, volumes and storage policies should be declared inside the `` tag in a file in the `config.d` directory. + +:::tip +Disks can also be declared in the `SETTINGS` section of a query. This is useful +for ad hoc analysis to temporarily attach a disk that is, for example, hosted at a URL. +See [dynamic storage](#dynamic-storage) for more details. +::: Configuration structure: @@ -876,6 +926,87 @@ You could change storage policy after table creation with [ALTER TABLE ... MODIF The number of threads performing background moves of data parts can be changed by [background_move_pool_size](/docs/en/operations/server-configuration-parameters/settings.md/#background_move_pool_size) setting. +### Dynamic Storage + +This example query shows how to attach a table stored at a URL and configure the +remote storage within the query. The web storage is not configured in the ClickHouse +configuration files; all the settings are in the CREATE/ATTACH query. + +:::note +The example uses `type=web`, but any disk type can be configured as dynamic, even Local disk. Local disks require a path argument to be inside the server config parameter `custom_local_disks_base_directory`, which has no default, so set that also when using local disk. 
+::: + +```sql +ATTACH TABLE uk_price_paid UUID 'cf712b4f-2ca8-435c-ac23-c4393efe52f7' +( + price UInt32, + date Date, + postcode1 LowCardinality(String), + postcode2 LowCardinality(String), + type Enum8('other' = 0, 'terraced' = 1, 'semi-detached' = 2, 'detached' = 3, 'flat' = 4), + is_new UInt8, + duration Enum8('unknown' = 0, 'freehold' = 1, 'leasehold' = 2), + addr1 String, + addr2 String, + street LowCardinality(String), + locality LowCardinality(String), + town LowCardinality(String), + district LowCardinality(String), + county LowCardinality(String) +) +ENGINE = MergeTree +ORDER BY (postcode1, postcode2, addr1, addr2) + # highlight-start + SETTINGS disk = disk( + type=web, + endpoint='https://raw.githubusercontent.com/ClickHouse/web-tables-demo/main/web/' + ); + # highlight-end +``` + +### Nested Dynamic Storage + +This example query builds on the above dynamic disk configuration and shows how to +use a local disk to cache data from a table stored at a URL. Neither the cache disk +nor the web storage is configured in the ClickHouse configuration files; both are +configured in the CREATE/ATTACH query settings. + +In the settings highlighted below notice that the disk of `type=web` is nested within +the disk of `type=cache`. 
+ +```sql +ATTACH TABLE uk_price_paid UUID 'cf712b4f-2ca8-435c-ac23-c4393efe52f7' +( + price UInt32, + date Date, + postcode1 LowCardinality(String), + postcode2 LowCardinality(String), + type Enum8('other' = 0, 'terraced' = 1, 'semi-detached' = 2, 'detached' = 3, 'flat' = 4), + is_new UInt8, + duration Enum8('unknown' = 0, 'freehold' = 1, 'leasehold' = 2), + addr1 String, + addr2 String, + street LowCardinality(String), + locality LowCardinality(String), + town LowCardinality(String), + district LowCardinality(String), + county LowCardinality(String) +) +ENGINE = MergeTree +ORDER BY (postcode1, postcode2, addr1, addr2) + # highlight-start + SETTINGS disk = disk( + type=cache, + max_size='1Gi', + path='/var/lib/clickhouse/custom_disk_cache/', + disk=disk( + type=web, + endpoint='https://raw.githubusercontent.com/ClickHouse/web-tables-demo/main/web/' + ) + ); + # highlight-end +``` + ### Details {#details} In the case of `MergeTree` tables, data is getting to disk in different ways: diff --git a/docs/en/operations/server-configuration-parameters/settings.md b/docs/en/operations/server-configuration-parameters/settings.md index 02145a2fb6c9..113e42499fe7 100644 --- a/docs/en/operations/server-configuration-parameters/settings.md +++ b/docs/en/operations/server-configuration-parameters/settings.md @@ -1324,7 +1324,7 @@ The trailing slash is mandatory. /var/lib/clickhouse/ ``` -## prometheus {#server_configuration_parameters-prometheus} +## Prometheus {#server_configuration_parameters-prometheus} Exposing metrics data for scraping from [Prometheus](https://prometheus.io). 
@@ -1339,13 +1339,25 @@ Settings: **Example** ``` xml - - /metrics - 9363 - true - true - true - + + 0.0.0.0 + 8123 + 9000 + + + /metrics + 9363 + true + true + true + + + +``` + +Check (replace `127.0.0.1` with the IP addr or hostname of your ClickHouse server): +```bash +curl 127.0.0.1:9363/metrics ``` ## query_log {#server_configuration_parameters-query-log} diff --git a/docs/en/operations/system-tables/clusters.md b/docs/en/operations/system-tables/clusters.md index 4b1e75c25a1c..deb9a0aaeb37 100644 --- a/docs/en/operations/system-tables/clusters.md +++ b/docs/en/operations/system-tables/clusters.md @@ -20,6 +20,9 @@ Columns: - `errors_count` ([UInt32](../../sql-reference/data-types/int-uint.md)) — The number of times this host failed to reach replica. - `slowdowns_count` ([UInt32](../../sql-reference/data-types/int-uint.md)) — The number of slowdowns that led to changing replica when establishing a connection with hedged requests. - `estimated_recovery_time` ([UInt32](../../sql-reference/data-types/int-uint.md)) — Seconds remaining until the replica error count is zeroed and it is considered to be back to normal. +- `database_shard_name` ([String](../../sql-reference/data-types/string.md)) — The name of the `Replicated` database shard (for clusters that belong to a `Replicated` database). +- `database_replica_name` ([String](../../sql-reference/data-types/string.md)) — The name of the `Replicated` database replica (for clusters that belong to a `Replicated` database). +- `is_active` ([Nullable(UInt8)](../../sql-reference/data-types/int-uint.md)) — The status of the `Replicated` database replica (for clusters that belong to a `Replicated` database): 1 means "replica is online", 0 means "replica is offline", `NULL` means "unknown". 
**Example** @@ -47,6 +50,9 @@ default_database: errors_count: 0 slowdowns_count: 0 estimated_recovery_time: 0 +database_shard_name: +database_replica_name: +is_active: NULL Row 2: ────── @@ -63,6 +69,9 @@ default_database: errors_count: 0 slowdowns_count: 0 estimated_recovery_time: 0 +database_shard_name: +database_replica_name: +is_active: NULL ``` **See Also** diff --git a/docs/en/operations/system-tables/users.md b/docs/en/operations/system-tables/users.md index a90fa01a45db..58cdb82d31f8 100644 --- a/docs/en/operations/system-tables/users.md +++ b/docs/en/operations/system-tables/users.md @@ -12,7 +12,7 @@ Columns: - `storage` ([String](../../sql-reference/data-types/string.md)) — Path to the storage of users. Configured in the `access_control_path` parameter. -- `auth_type` ([Enum8](../../sql-reference/data-types/enum.md)('no_password' = 0,'plaintext_password' = 1, 'sha256_password' = 2, 'double_sha1_password' = 3, 'ldap' = 4, 'kerberos' = 5, 'ssl_certificate' = 6)) — Shows the authentication type. There are multiple ways of user identification: with no password, with plain text password, with [SHA256](https://ru.wikipedia.org/wiki/SHA-2)-encoded password or with [double SHA-1](https://ru.wikipedia.org/wiki/SHA-1)-encoded password. +- `auth_type` ([Enum8](../../sql-reference/data-types/enum.md)('no_password' = 0, 'plaintext_password' = 1, 'sha256_password' = 2, 'double_sha1_password' = 3, 'ldap' = 4, 'kerberos' = 5, 'ssl_certificate' = 6, 'bcrypt_password' = 7)) — Shows the authentication type. There are multiple ways of user identification: with no password, with plain text password, with [SHA256](https://en.wikipedia.org/wiki/SHA-2)-encoded password, with [double SHA-1](https://en.wikipedia.org/wiki/SHA-1)-encoded password or with [bcrypt](https://en.wikipedia.org/wiki/Bcrypt)-encoded password. - `auth_params` ([String](../../sql-reference/data-types/string.md)) — Authentication parameters in the JSON format depending on the `auth_type`. 
diff --git a/docs/en/sql-reference/aggregate-functions/reference/kolmogorovsmirnovtest.md b/docs/en/sql-reference/aggregate-functions/reference/kolmogorovsmirnovtest.md new file mode 100644 index 000000000000..3da9645181ee --- /dev/null +++ b/docs/en/sql-reference/aggregate-functions/reference/kolmogorovsmirnovtest.md @@ -0,0 +1,118 @@ +--- +slug: /en/sql-reference/aggregate-functions/reference/kolmogorovsmirnovtest +sidebar_position: 300 +sidebar_label: kolmogorovSmirnovTest +--- + +# kolmogorovSmirnovTest + +Applies Kolmogorov-Smirnov's test to samples from two populations. + +**Syntax** + +``` sql +kolmogorovSmirnovTest([alternative, computation_method])(sample_data, sample_index) +``` + +Values of both samples are in the `sample_data` column. If `sample_index` equals to 0 then the value in that row belongs to the sample from the first population. Otherwise it belongs to the sample from the second population. +Samples must belong to continuous, one-dimensional probability distributions. + +**Arguments** + +- `sample_data` — Sample data. [Integer](../../../sql-reference/data-types/int-uint.md), [Float](../../../sql-reference/data-types/float.md) or [Decimal](../../../sql-reference/data-types/decimal.md). +- `sample_index` — Sample index. [Integer](../../../sql-reference/data-types/int-uint.md). + +**Parameters** + +- `alternative` — alternative hypothesis. (Optional, default: `'two-sided'`.) [String](../../../sql-reference/data-types/string.md). + Let F(x) and G(x) be the CDFs of the first and second distributions respectively. + - `'two-sided'` + The null hypothesis is that samples come from the same distribution, e.g. F(x) = G(x) for all x. + And the alternative is that the distributions are not identical. + - `'greater'` + The null hypothesis is that values in the first sample are *stochastically smaller* than those in the second one, + e.g. the CDF of first distribution lies above and hence to the left of that for the second one. 
+ Which in fact means that F(x) >= G(x) for all x. And the alternative in this case is that F(x) < G(x) for at least one x. + - `'less'`. + The null hypothesis is that values in the first sample are *stochastically greater* than those in the second one, + e.g. the CDF of first distribution lies below and hence to the right of that for the second one. + Which in fact means that F(x) <= G(x) for all x. And the alternative in this case is that F(x) > G(x) for at least one x. +- `computation_method` — the method used to compute p-value. (Optional, default: `'auto'`.) [String](../../../sql-reference/data-types/string.md). + - `'exact'` - calculation is performed using precise probability distribution of the test statistics. Compute intensive and wasteful except for small samples. + - `'asymp'` (`'asymptotic'`) - calculation is performed using an approximation. For large sample sizes, the exact and asymptotic p-values are very similar. + - `'auto'` - the `'exact'` method is used when a maximum number of samples is less than 10'000. + + +**Returned values** + +[Tuple](../../../sql-reference/data-types/tuple.md) with two elements: + +- calculated statistic. [Float64](../../../sql-reference/data-types/float.md). + +- calculated p-value. [Float64](../../../sql-reference/data-types/float.md). + + +**Example** + +Query: + +``` sql +SELECT kolmogorovSmirnovTest('less', 'exact')(value, num) +FROM +( + SELECT + randNormal(0, 10) AS value, + 0 AS num + FROM numbers(10000) + UNION ALL + SELECT + randNormal(0, 10) AS value, + 1 AS num + FROM numbers(10000) +) +``` + +Result: + +``` text +┌─kolmogorovSmirnovTest('less', 'exact')(value, num)─┐ +│ (0.009899999999999996,0.37528595205132287) │ +└────────────────────────────────────────────────────┘ +``` + +Note: +P-value is bigger than 0.05 (for confidence level of 95%), so null hypothesis is not rejected. 
+ + +Query: + +``` sql +SELECT kolmogorovSmirnovTest('two-sided', 'exact')(value, num) +FROM +( + SELECT + randStudentT(10) AS value, + 0 AS num + FROM numbers(100) + UNION ALL + SELECT + randNormal(0, 10) AS value, + 1 AS num + FROM numbers(100) +) +``` + +Result: + +``` text +┌─kolmogorovSmirnovTest('two-sided', 'exact')(value, num)─┐ +│ (0.4100000000000002,6.61735760482795e-8) │ +└─────────────────────────────────────────────────────────┘ +``` + +Note: +P-value is less than 0.05 (for confidence level of 95%), so null hypothesis is rejected. + + +**See Also** + +- [Kolmogorov-Smirnov test](https://en.wikipedia.org/wiki/Kolmogorov%E2%80%93Smirnov_test) diff --git a/docs/en/sql-reference/data-types/index.md b/docs/en/sql-reference/data-types/index.md index c61a3069db66..2ad8ac4bb239 100644 --- a/docs/en/sql-reference/data-types/index.md +++ b/docs/en/sql-reference/data-types/index.md @@ -27,7 +27,7 @@ ClickHouse data types include: - **Aggregation function types**: use [`SimpleAggregateFunction`](./simpleaggregatefunction.md) and [`AggregateFunction`](./aggregatefunction.md) for storing the intermediate status of aggregate function results - **Nested data structures**: A [`Nested` data structure](./nested-data-structures/index.md) is like a table inside a cell - **Tuples**: A [`Tuple` of elements](./tuple.md), each having an individual type. 
-- **Nullable**: [`Nullbale`](./nullable.md) allows you to store a value as `NULL` when a value is "missing" (instead of the column gettings its default value for the data type) +- **Nullable**: [`Nullable`](./nullable.md) allows you to store a value as `NULL` when a value is "missing" (instead of the column getting its default value for the data type) - **IP addresses**: use [`IPv4`](./domains/ipv4.md) and [`IPv6`](./domains/ipv6.md) to efficiently store IP addresses - **Geo types**: for[ geographical data](./geo.md), including `Point`, `Ring`, `Polygon` and `MultiPolygon` - **Special data types**: including [`Expression`](./special-data-types/expression.md), [`Set`](./special-data-types/set.md), [`Nothing`](./special-data-types/nothing.md) and [`Interval`](./special-data-types/interval.md) \ No newline at end of file diff --git a/docs/en/sql-reference/data-types/nullable.md b/docs/en/sql-reference/data-types/nullable.md index 230b4af7960b..28180f7f9919 100644 --- a/docs/en/sql-reference/data-types/nullable.md +++ b/docs/en/sql-reference/data-types/nullable.md @@ -8,7 +8,7 @@ sidebar_label: Nullable Allows to store special marker ([NULL](../../sql-reference/syntax.md)) that denotes “missing value” alongside normal values allowed by `TypeName`. For example, a `Nullable(Int8)` type column can store `Int8` type values, and the rows that do not have a value will store `NULL`. -For a `TypeName`, you can’t use composite data types [Array](../../sql-reference/data-types/array.md) and [Tuple](../../sql-reference/data-types/tuple.md). Composite data types can contain `Nullable` type values, such as `Array(Nullable(Int8))`. +For a `TypeName`, you can’t use composite data types [Array](../../sql-reference/data-types/array.md), [Map](../../sql-reference/data-types/map.md) and [Tuple](../../sql-reference/data-types/tuple.md). Composite data types can contain `Nullable` type values, such as `Array(Nullable(Int8))`. A `Nullable` type field can’t be included in table indexes. 
diff --git a/docs/en/sql-reference/dictionaries/index.md b/docs/en/sql-reference/dictionaries/index.md index 48a8ce45d332..189673cdae75 100644 --- a/docs/en/sql-reference/dictionaries/index.md +++ b/docs/en/sql-reference/dictionaries/index.md @@ -1658,6 +1658,7 @@ Example of settings: test dictionary_source + ssl=true ``` @@ -1672,6 +1673,7 @@ SOURCE(MONGODB( password '' db 'test' collection 'dictionary_source' + options 'ssl=true' )) ``` @@ -1683,6 +1685,8 @@ Setting fields: - `password` – Password of the MongoDB user. - `db` – Name of the database. - `collection` – Name of the collection. +- `options` - MongoDB connection string options (optional parameter). + ### Redis diff --git a/docs/en/sql-reference/statements/create/user.md b/docs/en/sql-reference/statements/create/user.md index 3548ef7cc071..d168be63c36c 100644 --- a/docs/en/sql-reference/statements/create/user.md +++ b/docs/en/sql-reference/statements/create/user.md @@ -32,9 +32,12 @@ There are multiple ways of user identification: - `IDENTIFIED WITH sha256_hash BY 'hash'` or `IDENTIFIED WITH sha256_hash BY 'hash' SALT 'salt'` - `IDENTIFIED WITH double_sha1_password BY 'qwerty'` - `IDENTIFIED WITH double_sha1_hash BY 'hash'` +- `IDENTIFIED WITH bcrypt_password BY 'qwerty'` +- `IDENTIFIED WITH bcrypt_hash BY 'hash'` - `IDENTIFIED WITH ldap SERVER 'server_name'` - `IDENTIFIED WITH kerberos` or `IDENTIFIED WITH kerberos REALM 'realm'` - `IDENTIFIED WITH ssl_certificate CN 'mysite.com:user'` +- `IDENTIFIED BY 'qwerty'` ## Examples @@ -54,21 +57,12 @@ There are multiple ways of user identification: The password is stored in a SQL text file in `/var/lib/clickhouse/access`, so it's not a good idea to use `plaintext_password`. Try `sha256_password` instead, as demonstrated next... ::: -3. The best option is to use a password that is hashed using SHA-256. ClickHouse will hash the password for you when you specify `IDENTIFIED WITH sha256_password`. For example: +3. 
The most common option is to use a password that is hashed using SHA-256. ClickHouse will hash the password for you when you specify `IDENTIFIED WITH sha256_password`. For example: ```sql CREATE USER name3 IDENTIFIED WITH sha256_password BY 'my_password' ``` - Notice ClickHouse generates and runs the following command for you: - - ```response - CREATE USER name3 - IDENTIFIED WITH sha256_hash - BY '8B3404953FCAA509540617F082DB13B3E0734F90FF6365C19300CC6A6EA818D6' - SALT 'D6489D8B5692D82FF944EA6415785A8A8A1AF33825456AFC554487725A74A609' - ``` - The `name3` user can now login using `my_password`, but the password is stored as the hashed value above. THe following SQL file was created in `/var/lib/clickhouse/access` and gets executed at server startup: ```bash @@ -92,6 +86,34 @@ There are multiple ways of user identification: CREATE USER name4 IDENTIFIED WITH double_sha1_hash BY 'CCD3A959D6A004B9C3807B728BC2E55B67E10518' ``` +5. The `bcrypt_password` is the most secure option for storing passwords. It uses the [bcrypt](https://en.wikipedia.org/wiki/Bcrypt) algorithm, which is resilient against brute force attacks even if the password hash is compromised. + + ```sql + CREATE USER name5 IDENTIFIED WITH bcrypt_password BY 'my_password' + ``` + + The length of the password is limited to 72 characters with this method. The bcrypt work factor parameter, which defines the amount of computations and time needed to compute the hash and verify the password, can be modified in the server configuration: + + ```xml + 12 + ``` + + The work factor must be between 4 and 31, with a default value of 12. + +6. The type of the password can also be omitted: + + ```sql + CREATE USER name6 IDENTIFIED BY 'my_password' + ``` + + In this case, ClickHouse will use the default password type specified in the server configuration: + + ```xml + sha256_password + ``` + + The available password types are: `plaintext_password`, `sha256_password`, `double_sha1_password`. 
+ ## User Host User host is a host from which a connection to ClickHouse server could be established. The host can be specified in the `HOST` query section in the following ways: diff --git a/docs/en/sql-reference/statements/system.md b/docs/en/sql-reference/statements/system.md index 5a5a771f2393..c5596b7ba5f6 100644 --- a/docs/en/sql-reference/statements/system.md +++ b/docs/en/sql-reference/statements/system.md @@ -76,7 +76,7 @@ Resets the mark cache. ## DROP REPLICA -Dead replicas can be dropped using following syntax: +Dead replicas of `ReplicatedMergeTree` tables can be dropped using following syntax: ``` sql SYSTEM DROP REPLICA 'replica_name' FROM TABLE database.table; @@ -85,13 +85,25 @@ SYSTEM DROP REPLICA 'replica_name'; SYSTEM DROP REPLICA 'replica_name' FROM ZKPATH '/path/to/table/in/zk'; ``` -Queries will remove the replica path in ZooKeeper. It is useful when the replica is dead and its metadata cannot be removed from ZooKeeper by `DROP TABLE` because there is no such table anymore. It will only drop the inactive/stale replica, and it cannot drop local replica, please use `DROP TABLE` for that. `DROP REPLICA` does not drop any tables and does not remove any data or metadata from disk. +Queries will remove the `ReplicatedMergeTree` replica path in ZooKeeper. It is useful when the replica is dead and its metadata cannot be removed from ZooKeeper by `DROP TABLE` because there is no such table anymore. It will only drop the inactive/stale replica, and it cannot drop local replica, please use `DROP TABLE` for that. `DROP REPLICA` does not drop any tables and does not remove any data or metadata from disk. The first one removes metadata of `'replica_name'` replica of `database.table` table. The second one does the same for all replicated tables in the database. The third one does the same for all replicated tables on the local server. The fourth one is useful to remove metadata of dead replica when all other replicas of a table were dropped. 
It requires the table path to be specified explicitly. It must be the same path as was passed to the first argument of `ReplicatedMergeTree` engine on table creation. +## DROP DATABASE REPLICA + +Dead replicas of `Replicated` databases can be dropped using following syntax: + +``` sql +SYSTEM DROP DATABASE REPLICA 'replica_name' [FROM SHARD 'shard_name'] FROM DATABASE database; +SYSTEM DROP DATABASE REPLICA 'replica_name' [FROM SHARD 'shard_name']; +SYSTEM DROP DATABASE REPLICA 'replica_name' [FROM SHARD 'shard_name'] FROM ZKPATH '/path/to/table/in/zk'; +``` + +Similar to `SYSTEM DROP REPLICA`, but removes the `Replicated` database replica path from ZooKeeper when there's no database to run `DROP DATABASE`. Please note that it does not remove `ReplicatedMergeTree` replicas (so you may need `SYSTEM DROP REPLICA` as well). Shard and replica names are the names that were specified in `Replicated` engine arguments when creating the database. Also, these names can be obtained from `database_shard_name` and `database_replica_name` columns in `system.clusters`. If the `FROM SHARD` clause is missing, then `replica_name` must be a full replica name in `shard_name|replica_name` format. + ## DROP UNCOMPRESSED CACHE Reset the uncompressed data cache. diff --git a/docs/ru/sql-reference/aggregate-functions/reference/kolmogorovsmirnovtest.md b/docs/ru/sql-reference/aggregate-functions/reference/kolmogorovsmirnovtest.md new file mode 100644 index 000000000000..2f8c6bb6760c --- /dev/null +++ b/docs/ru/sql-reference/aggregate-functions/reference/kolmogorovsmirnovtest.md @@ -0,0 +1,117 @@ +--- +slug: /ru/sql-reference/aggregate-functions/reference/kolmogorovsmirnovtest +sidebar_position: 300 +sidebar_label: kolmogorovSmirnovTest +--- + +# kolmogorovSmirnovTest {#kolmogorovSmirnovTest} + +Проводит статистический тест Колмогорова-Смирнова для двух независимых выборок. 
+ +**Синтаксис** + +``` sql +kolmogorovSmirnovTest([alternative, computation_method])(sample_data, sample_index) +``` + +Значения выборок берутся из столбца `sample_data`. Если `sample_index` равно 0, то значение из этой строки принадлежит первой выборке. Во всех остальных случаях значение принадлежит второй выборке. +Выборки должны принадлежать непрерывным одномерным распределениям. + +**Аргументы** + +- `sample_data` — данные выборок. [Integer](../../../sql-reference/data-types/int-uint.md), [Float](../../../sql-reference/data-types/float.md) or [Decimal](../../../sql-reference/data-types/decimal.md). +- `sample_index` — индексы выборок. [Integer](../../../sql-reference/data-types/int-uint.md). + +**Параметры** + +- `alternative` — альтернативная гипотеза (Необязательный параметр, по умолчанию: `'two-sided'`.) [String](../../../sql-reference/data-types/string.md). + Пусть F(x) и G(x) - функции распределения первой и второй выборки соответственно. + - `'two-sided'` + Нулевая гипотеза состоит в том, что выборки происходят из одного и того же распределения, то есть F(x) = G(x) для любого x. + Альтернатива - выборки принадлежат разным распределениям. + - `'greater'` + Нулевая гипотеза состоит в том, что элементы первой выборки в асимптотически почти наверное меньше элементов из второй выборки, + то есть функция распределения первой выборки лежит выше и соответственно левее, чем функция распределения второй выборки. + Таким образом это означает, что F(x) >= G(x) для любого x, а альтернатива в этом случае состоит в том, что F(x) < G(x) хотя бы для одного x. + - `'less'`. + Нулевая гипотеза состоит в том, что элементы первой выборки в асимптотически почти наверное больше элементов из второй выборки, + то есть функция распределения первой выборки лежит ниже и соответственно правее, чем функция распределения второй выборки. + Таким образом это означает, что F(x) <= G(x) для любого x, а альтернатива в этом случае состоит в том, что F(x) > G(x) хотя бы для одного x. 
+- `computation_method` — метод, используемый для вычисления p-value. (Необязательный параметр, по умолчанию: `'auto'`.) [String](../../../sql-reference/data-types/string.md). + - `'exact'` - вычисление производится с помощью вычисления точного распределения статистики. Требует большого количества вычислительных ресурсов и расточительно для больших выборок. + - `'asymp'`(`'asymptotic'`) - используется приближенное вычисление. Для больших выборок приближенный результат и точный почти идентичны. + - `'auto'` - значение вычисляется точно (с помощью метода `'exact'`), если максимальный размер двух выборок не превышает 10'000. + +**Возвращаемые значения** + +[Кортеж](../../../sql-reference/data-types/tuple.md) с двумя элементами: + +- вычисленное статистики. [Float64](../../../sql-reference/data-types/float.md). +- вычисленное p-value. [Float64](../../../sql-reference/data-types/float.md). + + +**Пример** + +Запрос: + +``` sql +SELECT kolmogorovSmirnovTest('less', 'exact')(value, num) +FROM +( + SELECT + randNormal(0, 10) AS value, + 0 AS num + FROM numbers(10000) + UNION ALL + SELECT + randNormal(0, 10) AS value, + 1 AS num + FROM numbers(10000) +) +``` + +Результат: + +``` text +┌─kolmogorovSmirnovTest('less', 'exact')(value, num)─┐ +│ (0.009899999999999996,0.37528595205132287) │ +└────────────────────────────────────────────────────┘ +``` + +Заметки: +P-value больше чем 0.05 (для уровня значимости 95%), то есть нулевая гипотеза не отвергается. 
+ + +Запрос: + +``` sql +SELECT kolmogorovSmirnovTest('two-sided', 'exact')(value, num) +FROM +( + SELECT + randStudentT(10) AS value, + 0 AS num + FROM numbers(100) + UNION ALL + SELECT + randNormal(0, 10) AS value, + 1 AS num + FROM numbers(100) +) +``` + +Результат: + +``` text +┌─kolmogorovSmirnovTest('two-sided', 'exact')(value, num)─┐ +│ (0.4100000000000002,6.61735760482795e-8) │ +└─────────────────────────────────────────────────────────┘ +``` + +Заметки: +P-value меньше чем 0.05 (для уровня значимости 95%), то есть нулевая гипотеза отвергается. + + +**Смотрите также** + +- [Критерий согласия Колмогорова-Смирнова](https://ru.wikipedia.org/wiki/%D0%9A%D1%80%D0%B8%D1%82%D0%B5%D1%80%D0%B8%D0%B9_%D1%81%D0%BE%D0%B3%D0%BB%D0%B0%D1%81%D0%B8%D1%8F_%D0%9A%D0%BE%D0%BB%D0%BC%D0%BE%D0%B3%D0%BE%D1%80%D0%BE%D0%B2%D0%B0) diff --git a/programs/local/LocalServer.cpp b/programs/local/LocalServer.cpp index 5768e744f94e..8925f50fe973 100644 --- a/programs/local/LocalServer.cpp +++ b/programs/local/LocalServer.cpp @@ -26,12 +26,13 @@ #include #include #include +#include #include #include #include #include #include -#include +#include #include #include #include @@ -133,6 +134,11 @@ void LocalServer::initialize(Poco::Util::Application & self) config().getUInt("max_io_thread_pool_size", 100), config().getUInt("max_io_thread_pool_free_size", 0), config().getUInt("io_thread_pool_queue_size", 10000)); + + OutdatedPartsLoadingThreadPool::initialize( + config().getUInt("max_outdated_parts_loading_thread_pool_size", 16), + 0, // We don't need any threads once all the parts will be loaded + config().getUInt("outdated_part_loading_thread_pool_queue_size", 10000)); + } diff --git a/programs/server/Server.cpp b/programs/server/Server.cpp index 8c0d50bae55c..bbd536d93004 100644 --- a/programs/server/Server.cpp +++ b/programs/server/Server.cpp @@ -41,10 +41,9 @@ #include #include #include -#include #include #include -#include +#include #include #include #include @@ -778,6 +777,11 @@ try 
 server_settings.max_backups_io_thread_pool_free_size, server_settings.backups_io_thread_pool_queue_size); + OutdatedPartsLoadingThreadPool::initialize( + server_settings.max_outdated_parts_loading_thread_pool_size, + 0, // We don't need any threads once all the parts will be loaded + server_settings.outdated_part_loading_thread_pool_queue_size); + /// Initialize global local cache for remote filesystem. if (config().has("local_cache_for_remote_fs")) { @@ -1852,7 +1856,7 @@ try LOG_INFO(log, "Closed all listening sockets."); /// Killing remaining queries. - if (server_settings.shutdown_wait_unfinished_queries) + if (!server_settings.shutdown_wait_unfinished_queries) global_context->getProcessList().killAllQueries(); if (current_connections) diff --git a/programs/server/config.xml b/programs/server/config.xml index 1aeda624db2d..51aa04ba0e5d 100644 --- a/programs/server/config.xml +++ b/programs/server/config.xml @@ -476,6 +476,14 @@ 1 1 + + sha256_password + + + 12 + 
Check nameDescriptionStatus
{desc.name}{desc.description}{beauty_state(get_worst_state(gs))}
" + return "".join([comment_body, *table_rows, comment_footer]) + + +def get_worst_state(statuses: CommitStatuses) -> str: + worst_status = None + states = {"error": 0, "failure": 1, "pending": 2, "success": 3} + for status in statuses: + if worst_status is None: + worst_status = status + continue + if states[status.state] < states[worst_status.state]: + worst_status = status + if worst_status.state == "error": + break + + if worst_status is None: + return "" + return worst_status.state + + +def create_ci_report(pr_info: PRInfo, statuses: CommitStatuses) -> str: + """The function converst the statuses to TestResults and uploads the report + to S3 tests bucket. Then it returns the URL""" + test_results = [] # type: TestResults + for status in statuses: + log_urls = None + if status.target_url is not None: + log_urls = [status.target_url] + test_results.append(TestResult(status.context, status.state, log_urls=log_urls)) + return upload_results( + S3Helper(), pr_info.number, pr_info.sha, test_results, [], CI_STATUS_NAME + ) def post_commit_status_to_file( @@ -90,8 +290,16 @@ def get_commit_filtered_statuses(commit: Commit) -> CommitStatuses: return list(filtered.values()) +def get_repo(gh: Github) -> Repository: + global GH_REPO + if GH_REPO is not None: + return GH_REPO + GH_REPO = gh.get_repo(GITHUB_REPOSITORY) + return GH_REPO + + def remove_labels(gh: Github, pr_info: PRInfo, labels_names: List[str]) -> None: - repo = gh.get_repo(GITHUB_REPOSITORY) + repo = get_repo(gh) pull_request = repo.get_pull(pr_info.number) for label in labels_names: pull_request.remove_from_labels(label) @@ -99,7 +307,7 @@ def remove_labels(gh: Github, pr_info: PRInfo, labels_names: List[str]) -> None: def post_labels(gh: Github, pr_info: PRInfo, labels_names: List[str]) -> None: - repo = gh.get_repo(GITHUB_REPOSITORY) + repo = get_repo(gh) pull_request = repo.get_pull(pr_info.number) for label in labels_names: pull_request.add_to_labels(label) diff --git a/tests/ci/compatibility_check.py 
b/tests/ci/compatibility_check.py index 432e9ec7c019..04203617dca7 100644 --- a/tests/ci/compatibility_check.py +++ b/tests/ci/compatibility_check.py @@ -16,13 +16,12 @@ mark_flaky_tests, prepare_tests_results_for_clickhouse, ) -from commit_status_helper import post_commit_status +from commit_status_helper import RerunHelper, get_commit, post_commit_status from docker_pull_helper import get_images_with_versions from env_helper import TEMP_PATH, REPORTS_PATH from get_robot_token import get_best_robot_token from pr_info import PRInfo from report import TestResults, TestResult -from rerun_helper import RerunHelper from s3_helper import S3Helper from stopwatch import Stopwatch from upload_result_helper import upload_results @@ -150,8 +149,9 @@ def main(): pr_info = PRInfo() gh = Github(get_best_robot_token(), per_page=100) + commit = get_commit(gh, pr_info.sha) - rerun_helper = RerunHelper(gh, pr_info, args.check_name) + rerun_helper = RerunHelper(commit, args.check_name) if rerun_helper.is_already_finished_by_status(): logging.info("Check is already finished according to github status, exiting") sys.exit(0) @@ -242,7 +242,7 @@ def url_filter(url): args.check_name, ) print(f"::notice ::Report url: {report_url}") - post_commit_status(gh, pr_info.sha, args.check_name, description, state, report_url) + post_commit_status(commit, state, report_url, description, args.check_name, pr_info) prepared_events = prepare_tests_results_for_clickhouse( pr_info, diff --git a/tests/ci/docker_images_check.py b/tests/ci/docker_images_check.py index f2b1105b3b0f..16a58a90dcf6 100644 --- a/tests/ci/docker_images_check.py +++ b/tests/ci/docker_images_check.py @@ -14,7 +14,7 @@ from github import Github from clickhouse_helper import ClickHouseHelper, prepare_tests_results_for_clickhouse -from commit_status_helper import format_description, post_commit_status +from commit_status_helper import format_description, get_commit, post_commit_status from env_helper import GITHUB_WORKSPACE, 
RUNNER_TEMP, GITHUB_RUN_URL from get_robot_token import get_best_robot_token, get_parameter_from_ssm from pr_info import PRInfo @@ -474,7 +474,8 @@ def main(): return gh = Github(get_best_robot_token(), per_page=100) - post_commit_status(gh, pr_info.sha, NAME, description, status, url) + commit = get_commit(gh, pr_info.sha) + post_commit_status(commit, status, url, description, NAME, pr_info) prepared_events = prepare_tests_results_for_clickhouse( pr_info, diff --git a/tests/ci/docker_manifests_merge.py b/tests/ci/docker_manifests_merge.py index 0484ea8f6416..d89708b9277e 100644 --- a/tests/ci/docker_manifests_merge.py +++ b/tests/ci/docker_manifests_merge.py @@ -10,7 +10,7 @@ from github import Github from clickhouse_helper import ClickHouseHelper, prepare_tests_results_for_clickhouse -from commit_status_helper import format_description, post_commit_status +from commit_status_helper import format_description, get_commit, post_commit_status from env_helper import RUNNER_TEMP from get_robot_token import get_best_robot_token, get_parameter_from_ssm from pr_info import PRInfo @@ -221,7 +221,8 @@ def main(): description = format_description(description) gh = Github(get_best_robot_token(), per_page=100) - post_commit_status(gh, pr_info.sha, NAME, description, status, url) + commit = get_commit(gh, pr_info.sha) + post_commit_status(commit, status, url, description, NAME, pr_info) prepared_events = prepare_tests_results_for_clickhouse( pr_info, diff --git a/tests/ci/docker_server.py b/tests/ci/docker_server.py index c6854c5aa788..a434d3cc8411 100644 --- a/tests/ci/docker_server.py +++ b/tests/ci/docker_server.py @@ -15,7 +15,7 @@ from build_check import get_release_or_pr from clickhouse_helper import ClickHouseHelper, prepare_tests_results_for_clickhouse -from commit_status_helper import format_description, post_commit_status +from commit_status_helper import format_description, get_commit, post_commit_status from docker_images_check import DockerImage from env_helper 
import CI, GITHUB_RUN_URL, RUNNER_TEMP, S3_BUILDS_BUCKET, S3_DOWNLOAD from get_robot_token import get_best_robot_token, get_parameter_from_ssm @@ -372,7 +372,8 @@ def main(): description = format_description(description) gh = Github(get_best_robot_token(), per_page=100) - post_commit_status(gh, pr_info.sha, NAME, description, status, url) + commit = get_commit(gh, pr_info.sha) + post_commit_status(commit, status, url, description, NAME, pr_info) prepared_events = prepare_tests_results_for_clickhouse( pr_info, diff --git a/tests/ci/docs_check.py b/tests/ci/docs_check.py index ed2743ca9655..e3930a20bd98 100644 --- a/tests/ci/docs_check.py +++ b/tests/ci/docs_check.py @@ -9,13 +9,18 @@ from github import Github from clickhouse_helper import ClickHouseHelper, prepare_tests_results_for_clickhouse -from commit_status_helper import post_commit_status, get_commit, update_mergeable_check +from commit_status_helper import ( + NotSet, + RerunHelper, + get_commit, + post_commit_status, + update_mergeable_check, +) from docker_pull_helper import get_image_with_version from env_helper import TEMP_PATH, REPO_COPY from get_robot_token import get_best_robot_token from pr_info import PRInfo from report import TestResults, TestResult -from rerun_helper import RerunHelper from s3_helper import S3Helper from stopwatch import Stopwatch from tee_popen import TeePopen @@ -52,8 +57,9 @@ def main(): pr_info = PRInfo(need_changed_files=True) gh = Github(get_best_robot_token(), per_page=100) + commit = get_commit(gh, pr_info.sha) - rerun_helper = RerunHelper(gh, pr_info, NAME) + rerun_helper = RerunHelper(commit, NAME) if rerun_helper.is_already_finished_by_status(): logging.info("Check is already finished according to github status, exiting") sys.exit(0) @@ -61,9 +67,8 @@ def main(): if not pr_info.has_changes_in_documentation() and not args.force: logging.info("No changes in documentation") - commit = get_commit(gh, pr_info.sha) - commit.create_status( - context=NAME, description="No 
changes in docs", state="success" + post_commit_status( + commit, "success", NotSet, "No changes in docs", NAME, pr_info ) sys.exit(0) @@ -132,7 +137,7 @@ def main(): s3_helper, pr_info.number, pr_info.sha, test_results, additional_files, NAME ) print("::notice ::Report url: {report_url}") - post_commit_status(gh, pr_info.sha, NAME, description, status, report_url) + post_commit_status(commit, status, report_url, description, NAME, pr_info) prepared_events = prepare_tests_results_for_clickhouse( pr_info, diff --git a/tests/ci/env_helper.py b/tests/ci/env_helper.py index a5a4913be0b4..5c2139ae0bc2 100644 --- a/tests/ci/env_helper.py +++ b/tests/ci/env_helper.py @@ -1,7 +1,7 @@ import os from os import path as p -from build_download_helper import get_with_retries +from build_download_helper import get_gh_api module_dir = p.abspath(p.dirname(__file__)) git_root = p.abspath(p.join(module_dir, "..", "..")) @@ -46,7 +46,7 @@ def GITHUB_JOB_ID() -> str: jobs = [] page = 1 while not _GITHUB_JOB_ID: - response = get_with_retries( + response = get_gh_api( f"https://api.github.com/repos/{GITHUB_REPOSITORY}/" f"actions/runs/{GITHUB_RUN_ID}/jobs?per_page=100&page={page}" ) diff --git a/tests/ci/fast_test_check.py b/tests/ci/fast_test_check.py index f13b40996572..89066ade2cb3 100644 --- a/tests/ci/fast_test_check.py +++ b/tests/ci/fast_test_check.py @@ -17,6 +17,8 @@ prepare_tests_results_for_clickhouse, ) from commit_status_helper import ( + RerunHelper, + get_commit, post_commit_status, update_mergeable_check, ) @@ -25,7 +27,6 @@ from get_robot_token import get_best_robot_token from pr_info import FORCE_TESTS_LABEL, PRInfo from report import TestResults, read_test_results -from rerun_helper import RerunHelper from s3_helper import S3Helper from stopwatch import Stopwatch from tee_popen import TeePopen @@ -106,10 +107,11 @@ def main(): pr_info = PRInfo() gh = Github(get_best_robot_token(), per_page=100) + commit = get_commit(gh, pr_info.sha) 
atexit.register(update_mergeable_check, gh, pr_info, NAME) - rerun_helper = RerunHelper(gh, pr_info, NAME) + rerun_helper = RerunHelper(commit, NAME) if rerun_helper.is_already_finished_by_status(): logging.info("Check is already finished according to github status, exiting") status = rerun_helper.get_finished_status() @@ -197,7 +199,7 @@ def main(): NAME, ) print(f"::notice ::Report url: {report_url}") - post_commit_status(gh, pr_info.sha, NAME, description, state, report_url) + post_commit_status(commit, state, report_url, description, NAME, pr_info) prepared_events = prepare_tests_results_for_clickhouse( pr_info, diff --git a/tests/ci/finish_check.py b/tests/ci/finish_check.py index ea2f5eb3136b..aa8a0cf9553f 100644 --- a/tests/ci/finish_check.py +++ b/tests/ci/finish_check.py @@ -2,32 +2,42 @@ import logging from github import Github -from env_helper import GITHUB_RUN_URL -from pr_info import PRInfo +from commit_status_helper import ( + CI_STATUS_NAME, + NotSet, + get_commit, + get_commit_filtered_statuses, + post_commit_status, +) from get_robot_token import get_best_robot_token -from commit_status_helper import get_commit, get_commit_filtered_statuses - -NAME = "Run Check" +from pr_info import PRInfo -if __name__ == "__main__": +def main(): logging.basicConfig(level=logging.INFO) pr_info = PRInfo(need_orgs=True) gh = Github(get_best_robot_token(), per_page=100) commit = get_commit(gh, pr_info.sha) - url = GITHUB_RUN_URL - statuses = get_commit_filtered_statuses(commit) - pending_status = any( # find NAME status in pending state - True - for status in statuses - if status.context == NAME and status.state == "pending" - ) - if pending_status: - commit.create_status( - context=NAME, - description="All checks finished", - state="success", - target_url=url, + statuses = [ + status + for status in get_commit_filtered_statuses(commit) + if status.context == CI_STATUS_NAME + ] + if not statuses: + return + status = statuses[0] + if status.state == "pending": + 
post_commit_status( + commit, + "success", + status.target_url or NotSet, + "All checks finished", + CI_STATUS_NAME, + pr_info, ) + + +if __name__ == "__main__": + main() diff --git a/tests/ci/functional_test_check.py b/tests/ci/functional_test_check.py index 813386bc0db6..037bb13f1f87 100644 --- a/tests/ci/functional_test_check.py +++ b/tests/ci/functional_test_check.py @@ -20,9 +20,11 @@ prepare_tests_results_for_clickhouse, ) from commit_status_helper import ( - post_commit_status, + NotSet, + RerunHelper, get_commit, override_status, + post_commit_status, post_commit_status_to_file, update_mergeable_check, ) @@ -32,7 +34,6 @@ from get_robot_token import get_best_robot_token from pr_info import FORCE_TESTS_LABEL, PRInfo from report import TestResults, read_test_results -from rerun_helper import RerunHelper from s3_helper import S3Helper from stopwatch import Stopwatch from tee_popen import TeePopen @@ -247,6 +248,7 @@ def main(): need_changed_files=run_changed_tests, pr_event_from_api=validate_bugfix_check ) + commit = get_commit(gh, pr_info.sha) atexit.register(update_mergeable_check, gh, pr_info, check_name) if not os.path.exists(temp_path): @@ -274,7 +276,7 @@ def main(): run_by_hash_total = 0 check_name_with_group = check_name - rerun_helper = RerunHelper(gh, pr_info, check_name_with_group) + rerun_helper = RerunHelper(commit, check_name_with_group) if rerun_helper.is_already_finished_by_status(): logging.info("Check is already finished according to github status, exiting") sys.exit(0) @@ -283,13 +285,15 @@ def main(): if run_changed_tests: tests_to_run = get_tests_to_run(pr_info) if not tests_to_run: - commit = get_commit(gh, pr_info.sha) state = override_status("success", check_name, validate_bugfix_check) if args.post_commit_status == "commit_status": - commit.create_status( - context=check_name_with_group, - description=NO_CHANGES_MSG, - state=state, + post_commit_status( + commit, + state, + NotSet, + NO_CHANGES_MSG, + check_name_with_group, + pr_info, 
) elif args.post_commit_status == "file": post_commit_status_to_file( @@ -376,16 +380,16 @@ def main(): if args.post_commit_status == "commit_status": if "parallelreplicas" in check_name.lower(): post_commit_status( - gh, - pr_info.sha, - check_name_with_group, - description, + commit, "success", report_url, + description, + check_name_with_group, + pr_info, ) else: post_commit_status( - gh, pr_info.sha, check_name_with_group, description, state, report_url + commit, state, report_url, description, check_name_with_group, pr_info ) elif args.post_commit_status == "file": if "parallelreplicas" in check_name.lower(): diff --git a/tests/ci/get_robot_token.py b/tests/ci/get_robot_token.py index 6ecaf468ed15..b41eba49cc32 100644 --- a/tests/ci/get_robot_token.py +++ b/tests/ci/get_robot_token.py @@ -1,6 +1,7 @@ #!/usr/bin/env python3 import logging from dataclasses import dataclass +from typing import Optional import boto3 # type: ignore from github import Github @@ -20,7 +21,13 @@ def get_parameter_from_ssm(name, decrypt=True, client=None): return client.get_parameter(Name=name, WithDecryption=decrypt)["Parameter"]["Value"] +ROBOT_TOKEN = None # type: Optional[Token] + + def get_best_robot_token(token_prefix_env_name="github_robot_token_"): + global ROBOT_TOKEN + if ROBOT_TOKEN is not None: + return ROBOT_TOKEN.value client = boto3.client("ssm", region_name="us-east-1") parameters = client.describe_parameters( ParameterFilters=[ @@ -28,7 +35,6 @@ def get_best_robot_token(token_prefix_env_name="github_robot_token_"): ] )["Parameters"] assert parameters - token = None for token_name in [p["Name"] for p in parameters]: value = get_parameter_from_ssm(token_name, True, client) @@ -38,15 +44,17 @@ def get_best_robot_token(token_prefix_env_name="github_robot_token_"): user = gh.get_user() rest, _ = gh.rate_limiting logging.info("Get token with %s remaining requests", rest) - if token is None: - token = Token(user, value, rest) + if ROBOT_TOKEN is None: + ROBOT_TOKEN = 
Token(user, value, rest) continue - if token.rest < rest: - token.user, token.value, token.rest = user, value, rest + if ROBOT_TOKEN.rest < rest: + ROBOT_TOKEN.user, ROBOT_TOKEN.value, ROBOT_TOKEN.rest = user, value, rest - assert token + assert ROBOT_TOKEN logging.info( - "User %s with %s remaining requests is used", token.user.login, token.rest + "User %s with %s remaining requests is used", + ROBOT_TOKEN.user.login, + ROBOT_TOKEN.rest, ) - return token.value + return ROBOT_TOKEN.value diff --git a/tests/ci/install_check.py b/tests/ci/install_check.py index 54245670b26c..d619ce96ceec 100644 --- a/tests/ci/install_check.py +++ b/tests/ci/install_check.py @@ -19,7 +19,9 @@ prepare_tests_results_for_clickhouse, ) from commit_status_helper import ( + RerunHelper, format_description, + get_commit, post_commit_status, update_mergeable_check, ) @@ -29,7 +31,6 @@ from get_robot_token import get_best_robot_token from pr_info import PRInfo from report import TestResults, TestResult -from rerun_helper import RerunHelper from s3_helper import S3Helper from stopwatch import Stopwatch from tee_popen import TeePopen @@ -268,9 +269,10 @@ def main(): if CI: gh = Github(get_best_robot_token(), per_page=100) + commit = get_commit(gh, pr_info.sha) atexit.register(update_mergeable_check, gh, pr_info, args.check_name) - rerun_helper = RerunHelper(gh, pr_info, args.check_name) + rerun_helper = RerunHelper(commit, args.check_name) if rerun_helper.is_already_finished_by_status(): logging.info( "Check is already finished according to github status, exiting" @@ -347,7 +349,7 @@ def filter_artifacts(path: str) -> bool: description = format_description(description) - post_commit_status(gh, pr_info.sha, args.check_name, description, state, report_url) + post_commit_status(commit, state, report_url, description, args.check_name, pr_info) prepared_events = prepare_tests_results_for_clickhouse( pr_info, diff --git a/tests/ci/integration_test_check.py b/tests/ci/integration_test_check.py index 
f864751e8304..8ef6244a1c55 100644 --- a/tests/ci/integration_test_check.py +++ b/tests/ci/integration_test_check.py @@ -19,8 +19,10 @@ prepare_tests_results_for_clickhouse, ) from commit_status_helper import ( - post_commit_status, + RerunHelper, + get_commit, override_status, + post_commit_status, post_commit_status_to_file, ) from docker_pull_helper import get_images_with_versions @@ -29,7 +31,6 @@ from get_robot_token import get_best_robot_token from pr_info import PRInfo from report import TestResults, read_test_results -from rerun_helper import RerunHelper from s3_helper import S3Helper from stopwatch import Stopwatch from tee_popen import TeePopen @@ -198,8 +199,9 @@ def main(): sys.exit(0) gh = Github(get_best_robot_token(), per_page=100) + commit = get_commit(gh, pr_info.sha) - rerun_helper = RerunHelper(gh, pr_info, check_name_with_group) + rerun_helper = RerunHelper(commit, check_name_with_group) if rerun_helper.is_already_finished_by_status(): logging.info("Check is already finished according to github status, exiting") sys.exit(0) @@ -284,15 +286,10 @@ def main(): print(f"::notice:: {check_name} Report url: {report_url}") if args.post_commit_status == "commit_status": post_commit_status( - gh, pr_info.sha, check_name_with_group, description, state, report_url + commit, state, report_url, description, check_name_with_group, pr_info ) elif args.post_commit_status == "file": - post_commit_status_to_file( - post_commit_path, - description, - state, - report_url, - ) + post_commit_status_to_file(post_commit_path, description, state, report_url) else: raise Exception( f'Unknown post_commit_status option "{args.post_commit_status}"' diff --git a/tests/ci/jepsen_check.py b/tests/ci/jepsen_check.py index ffa9e45373f4..9d35d2d6e352 100644 --- a/tests/ci/jepsen_check.py +++ b/tests/ci/jepsen_check.py @@ -13,13 +13,12 @@ from build_download_helper import get_build_name_for_check from clickhouse_helper import ClickHouseHelper, prepare_tests_results_for_clickhouse 
-from commit_status_helper import post_commit_status +from commit_status_helper import RerunHelper, get_commit, post_commit_status from compress_files import compress_fast from env_helper import REPO_COPY, TEMP_PATH, S3_BUILDS_BUCKET, S3_DOWNLOAD from get_robot_token import get_best_robot_token, get_parameter_from_ssm from pr_info import PRInfo from report import TestResults, TestResult -from rerun_helper import RerunHelper from s3_helper import S3Helper from ssh import SSHKey from stopwatch import Stopwatch @@ -181,10 +180,11 @@ def get_run_command( sys.exit(0) gh = Github(get_best_robot_token(), per_page=100) + commit = get_commit(gh, pr_info.sha) check_name = KEEPER_CHECK_NAME if args.program == "keeper" else SERVER_CHECK_NAME - rerun_helper = RerunHelper(gh, pr_info, check_name) + rerun_helper = RerunHelper(commit, check_name) if rerun_helper.is_already_finished_by_status(): logging.info("Check is already finished according to github status, exiting") sys.exit(0) @@ -293,7 +293,7 @@ def get_run_command( ) print(f"::notice ::Report url: {report_url}") - post_commit_status(gh, pr_info.sha, check_name, description, status, report_url) + post_commit_status(commit, status, report_url, description, check_name, pr_info) ch_helper = ClickHouseHelper() prepared_events = prepare_tests_results_for_clickhouse( diff --git a/tests/ci/mark_release_ready.py b/tests/ci/mark_release_ready.py index b103dd053bbc..4501d40e4d32 100755 --- a/tests/ci/mark_release_ready.py +++ b/tests/ci/mark_release_ready.py @@ -4,7 +4,7 @@ import logging import os -from commit_status_helper import get_commit +from commit_status_helper import NotSet, get_commit, post_commit_status from env_helper import GITHUB_JOB_URL from get_robot_token import get_best_robot_token from github_helper import GitHub @@ -34,6 +34,7 @@ def main(): args = parser.parse_args() url = "" description = "the release can be created from the commit, manually set" + pr_info = None if not args.commit: pr_info = PRInfo() if 
pr_info.event == pr_info.default_event: @@ -45,14 +46,10 @@ def main(): gh = GitHub(args.token, create_cache_dir=False) # Get the rate limits for a quick fail - gh.get_rate_limit() commit = get_commit(gh, args.commit) - - commit.create_status( - context=RELEASE_READY_STATUS, - description=description, - state="success", - target_url=url, + gh.get_rate_limit() + post_commit_status( + commit, "success", url or NotSet, description, RELEASE_READY_STATUS, pr_info ) diff --git a/tests/ci/performance_comparison_check.py b/tests/ci/performance_comparison_check.py index 0da41e0ae82f..bf5704f31bd2 100644 --- a/tests/ci/performance_comparison_check.py +++ b/tests/ci/performance_comparison_check.py @@ -12,13 +12,12 @@ from github import Github -from commit_status_helper import get_commit, post_commit_status +from commit_status_helper import RerunHelper, get_commit, post_commit_status from ci_config import CI_CONFIG from docker_pull_helper import get_image_with_version from env_helper import GITHUB_EVENT_PATH, GITHUB_RUN_URL, S3_BUILDS_BUCKET, S3_DOWNLOAD from get_robot_token import get_best_robot_token, get_parameter_from_ssm from pr_info import PRInfo -from rerun_helper import RerunHelper from s3_helper import S3Helper from tee_popen import TeePopen @@ -118,7 +117,7 @@ def __exit__(self, exc_type, exc_val, exc_tb): message = "Skipped, not labeled with 'pr-performance'" report_url = GITHUB_RUN_URL post_commit_status( - gh, pr_info.sha, check_name_with_group, message, status, report_url + commit, status, report_url, message, check_name_with_group, pr_info ) sys.exit(0) @@ -131,7 +130,7 @@ def __exit__(self, exc_type, exc_val, exc_tb): "Fill fliter our performance tests by grep -v %s", test_grep_exclude_filter ) - rerun_helper = RerunHelper(gh, pr_info, check_name_with_group) + rerun_helper = RerunHelper(commit, check_name_with_group) if rerun_helper.is_already_finished_by_status(): logging.info("Check is already finished according to github status, exiting") sys.exit(0) @@ 
-267,7 +266,7 @@ def __exit__(self, exc_type, exc_val, exc_tb): report_url = uploaded["report.html"] post_commit_status( - gh, pr_info.sha, check_name_with_group, message, status, report_url + commit, status, report_url, message, check_name_with_group, pr_info ) if status == "error": diff --git a/tests/ci/pr_info.py b/tests/ci/pr_info.py index ddeb070b2b94..86d4985c6b27 100644 --- a/tests/ci/pr_info.py +++ b/tests/ci/pr_info.py @@ -6,7 +6,7 @@ from unidiff import PatchSet # type: ignore -from build_download_helper import get_with_retries +from build_download_helper import get_gh_api from env_helper import ( GITHUB_REPOSITORY, GITHUB_SERVER_URL, @@ -45,7 +45,7 @@ def get_pr_for_commit(sha, ref): f"https://api.github.com/repos/{GITHUB_REPOSITORY}/commits/{sha}/pulls" ) try: - response = get_with_retries(try_get_pr_url, sleep=RETRY_SLEEP) + response = get_gh_api(try_get_pr_url, sleep=RETRY_SLEEP) data = response.json() our_prs = [] # type: List[Dict] if len(data) > 1: @@ -105,7 +105,7 @@ def __init__( # workflow completed event, used for PRs only if "action" in github_event and github_event["action"] == "completed": self.sha = github_event["workflow_run"]["head_sha"] - prs_for_sha = get_with_retries( + prs_for_sha = get_gh_api( f"https://api.github.com/repos/{GITHUB_REPOSITORY}/commits/{self.sha}" "/pulls", sleep=RETRY_SLEEP, @@ -117,7 +117,7 @@ def __init__( self.number = github_event["pull_request"]["number"] if pr_event_from_api: try: - response = get_with_retries( + response = get_gh_api( f"https://api.github.com/repos/{GITHUB_REPOSITORY}" f"/pulls/{self.number}", sleep=RETRY_SLEEP, @@ -159,7 +159,7 @@ def __init__( self.user_login = github_event["pull_request"]["user"]["login"] self.user_orgs = set([]) if need_orgs: - user_orgs_response = get_with_retries( + user_orgs_response = get_gh_api( github_event["pull_request"]["user"]["organizations_url"], sleep=RETRY_SLEEP, ) @@ -255,7 +255,7 @@ def fetch_changed_files(self): raise TypeError("The event does not have 
diff URLs") for diff_url in self.diff_urls: - response = get_with_retries( + response = get_gh_api( diff_url, sleep=RETRY_SLEEP, ) diff --git a/tests/ci/report.py b/tests/ci/report.py index a40eb559792d..cdef8409e7ea 100644 --- a/tests/ci/report.py +++ b/tests/ci/report.py @@ -370,6 +370,7 @@ def create_test_html_report( colspan += 1 if test_result.log_urls is not None: + has_log_urls = True test_logs_html = "
".join( [_get_html_url(url) for url in test_result.log_urls] ) diff --git a/tests/ci/rerun_helper.py b/tests/ci/rerun_helper.py deleted file mode 100644 index fa73256d759f..000000000000 --- a/tests/ci/rerun_helper.py +++ /dev/null @@ -1,36 +0,0 @@ -#!/usr/bin/env python3 -from typing import Optional - -from commit_status_helper import get_commit, get_commit_filtered_statuses -from github import Github -from github.CommitStatus import CommitStatus -from pr_info import PRInfo - - -# TODO: move it to commit_status_helper -class RerunHelper: - def __init__(self, gh: Github, pr_info: PRInfo, check_name: str): - self.gh = gh - self.pr_info = pr_info - self.check_name = check_name - commit = get_commit(gh, self.pr_info.sha) - if commit is None: - raise ValueError(f"unable to receive commit for {pr_info.sha}") - self.pygh_commit = commit - self.statuses = get_commit_filtered_statuses(commit) - - def is_already_finished_by_status(self) -> bool: - # currently we agree even for failed statuses - for status in self.statuses: - if self.check_name in status.context and status.state in ( - "success", - "failure", - ): - return True - return False - - def get_finished_status(self) -> Optional[CommitStatus]: - for status in self.statuses: - if self.check_name in status.context: - return status - return None diff --git a/tests/ci/run_check.py b/tests/ci/run_check.py index 44e1e4132c82..351e740bd3cc 100644 --- a/tests/ci/run_check.py +++ b/tests/ci/run_check.py @@ -7,20 +7,22 @@ from github import Github from commit_status_helper import ( + CI_STATUS_NAME, + NotSet, + create_ci_report, format_description, get_commit, + post_commit_status, post_labels, remove_labels, set_mergeable_check, ) from docs_check import NAME as DOCS_NAME -from env_helper import GITHUB_RUN_URL, GITHUB_REPOSITORY, GITHUB_SERVER_URL +from env_helper import GITHUB_REPOSITORY, GITHUB_SERVER_URL from get_robot_token import get_best_robot_token from pr_info import FORCE_TESTS_LABEL, PRInfo from 
workflow_approve_rerun_lambda.app import TRUSTED_CONTRIBUTORS -NAME = "Run Check" - TRUSTED_ORG_IDS = { 54801242, # clickhouse } @@ -89,7 +91,7 @@ def pr_is_by_trusted_user(pr_user_login, pr_user_orgs): # Returns whether we should look into individual checks for this PR. If not, it # can be skipped entirely. # Returns can_run, description, labels_state -def should_run_checks_for_pr(pr_info: PRInfo) -> Tuple[bool, str, str]: +def should_run_ci_for_pr(pr_info: PRInfo) -> Tuple[bool, str, str]: # Consider the labels and whether the user is trusted. print("Got labels", pr_info.labels) if FORCE_TESTS_LABEL in pr_info.labels: @@ -203,7 +205,7 @@ def check_pr_description(pr_info: PRInfo) -> Tuple[str, str]: return description_error, category -if __name__ == "__main__": +def main(): logging.basicConfig(level=logging.INFO) pr_info = PRInfo(need_orgs=True, pr_event_from_api=True, need_changed_files=True) @@ -213,7 +215,7 @@ def check_pr_description(pr_info: PRInfo) -> Tuple[str, str]: print("::notice ::Cannot run, no PR exists for the commit") sys.exit(1) - can_run, description, labels_state = should_run_checks_for_pr(pr_info) + can_run, description, labels_state = should_run_ci_for_pr(pr_info) if can_run and OK_SKIP_LABELS.intersection(pr_info.labels): print("::notice :: Early finish the check, running in a special PR") sys.exit(0) @@ -253,10 +255,12 @@ def check_pr_description(pr_info: PRInfo) -> Tuple[str, str]: if FEATURE_LABEL in pr_info.labels: print(f"The '{FEATURE_LABEL}' in the labels, expect the 'Docs Check' status") - commit.create_status( - context=DOCS_NAME, - description=f"expect adding docs for {FEATURE_LABEL}", - state="pending", + post_commit_status( # do not pass pr_info here intentionally + commit, + "pending", + NotSet, + f"expect adding docs for {FEATURE_LABEL}", + DOCS_NAME, ) else: set_mergeable_check(commit, "skipped") @@ -267,7 +271,7 @@ def check_pr_description(pr_info: PRInfo) -> Tuple[str, str]: f"{description_error}" ) logging.info( - "PR body 
doesn't match the template: (start)\n%s\n(end)\n" "Reason: %s", + "PR body doesn't match the template: (start)\n%s\n(end)\nReason: %s", pr_info.body, description_error, ) @@ -275,23 +279,29 @@ def check_pr_description(pr_info: PRInfo) -> Tuple[str, str]: f"{GITHUB_SERVER_URL}/{GITHUB_REPOSITORY}/" "blob/master/.github/PULL_REQUEST_TEMPLATE.md?plain=1" ) - commit.create_status( - context=NAME, - description=format_description(description_error), - state="failure", - target_url=url, + post_commit_status( + commit, + "failure", + url, + format_description(description_error), + CI_STATUS_NAME, + pr_info, ) sys.exit(1) - url = GITHUB_RUN_URL + ci_report_url = create_ci_report(pr_info, []) if not can_run: print("::notice ::Cannot run") - commit.create_status( - context=NAME, description=description, state=labels_state, target_url=url + post_commit_status( + commit, labels_state, ci_report_url, description, CI_STATUS_NAME, pr_info ) sys.exit(1) else: print("::notice ::Can run") - commit.create_status( - context=NAME, description=description, state="pending", target_url=url + post_commit_status( + commit, "pending", ci_report_url, description, CI_STATUS_NAME, pr_info ) + + +if __name__ == "__main__": + main() diff --git a/tests/ci/s3_helper.py b/tests/ci/s3_helper.py index fbe9f33b49bb..2af02d572c89 100644 --- a/tests/ci/s3_helper.py +++ b/tests/ci/s3_helper.py @@ -40,11 +40,11 @@ def _flatten_list(lst): class S3Helper: - def __init__(self, host=S3_URL, download_host=S3_DOWNLOAD): + def __init__(self): self.session = boto3.session.Session(region_name="us-east-1") - self.client = self.session.client("s3", endpoint_url=host) - self.host = host - self.download_host = download_host + self.client = self.session.client("s3", endpoint_url=S3_URL) + self.host = S3_URL + self.download_host = S3_DOWNLOAD def _upload_file_to_s3(self, bucket_name: str, file_path: str, s3_path: str) -> str: logging.debug( diff --git a/tests/ci/sqlancer_check.py b/tests/ci/sqlancer_check.py index 
1a6c4d146162..144dea54133f 100644 --- a/tests/ci/sqlancer_check.py +++ b/tests/ci/sqlancer_check.py @@ -10,10 +10,14 @@ from build_download_helper import get_build_name_for_check, read_build_urls from clickhouse_helper import ClickHouseHelper, prepare_tests_results_for_clickhouse -from commit_status_helper import format_description, post_commit_status +from commit_status_helper import ( + RerunHelper, + format_description, + get_commit, + post_commit_status, +) from docker_pull_helper import get_image_with_version from env_helper import ( - GITHUB_REPOSITORY, GITHUB_RUN_URL, REPORTS_PATH, TEMP_PATH, @@ -21,7 +25,6 @@ from get_robot_token import get_best_robot_token from pr_info import PRInfo from report import TestResults, TestResult -from rerun_helper import RerunHelper from s3_helper import S3Helper from stopwatch import Stopwatch from upload_result_helper import upload_results @@ -46,12 +49,6 @@ def get_run_command(download_url, workspace_path, image): ) -def get_commit(gh, commit_sha): - repo = gh.get_repo(GITHUB_REPOSITORY) - commit = repo.get_commit(commit_sha) - return commit - - def main(): logging.basicConfig(level=logging.INFO) @@ -68,8 +65,9 @@ def main(): pr_info = PRInfo() gh = Github(get_best_robot_token(), per_page=100) + commit = get_commit(gh, pr_info.sha) - rerun_helper = RerunHelper(gh, pr_info, check_name) + rerun_helper = RerunHelper(commit, check_name) if rerun_helper.is_already_finished_by_status(): logging.info("Check is already finished according to github status, exiting") sys.exit(0) @@ -187,12 +185,10 @@ def main(): check_name, ) - post_commit_status(gh, pr_info.sha, check_name, description, status, report_url) - + post_commit_status(commit, status, report_url, description, check_name, pr_info) print(f"::notice:: {check_name} Report url: {report_url}") ch_helper = ClickHouseHelper() - prepared_events = prepare_tests_results_for_clickhouse( pr_info, test_results, @@ -202,12 +198,8 @@ def main(): report_url, check_name, ) - 
ch_helper.insert_events_into(db="default", table="checks", events=prepared_events) - print(f"::notice Result: '{status}', '{description}', '{report_url}'") - post_commit_status(gh, pr_info.sha, check_name, description, status, report_url) - if __name__ == "__main__": main() diff --git a/tests/ci/sqllogic_test.py b/tests/ci/sqllogic_test.py index 9b41ff4680f6..942c9c60ee8c 100755 --- a/tests/ci/sqllogic_test.py +++ b/tests/ci/sqllogic_test.py @@ -17,11 +17,15 @@ from build_download_helper import download_all_deb_packages from upload_result_helper import upload_results from docker_pull_helper import get_image_with_version -from commit_status_helper import override_status, post_commit_status +from commit_status_helper import ( + RerunHelper, + get_commit, + override_status, + post_commit_status, +) from report import TestResults, read_test_results from stopwatch import Stopwatch -from rerun_helper import RerunHelper from tee_popen import TeePopen @@ -103,8 +107,9 @@ def parse_args(): pr_info = PRInfo() gh = Github(get_best_robot_token(), per_page=100) + commit = get_commit(gh, pr_info.sha) - rerun_helper = RerunHelper(gh, pr_info, check_name) + rerun_helper = RerunHelper(commit, check_name) if rerun_helper.is_already_finished_by_status(): logging.info("Check is already finished according to github status, exiting") sys.exit(0) @@ -203,7 +208,7 @@ def parse_args(): # Until it pass all tests, do not block CI, report "success" assert description is not None - post_commit_status(gh, pr_info.sha, check_name, description, "success", report_url) + post_commit_status(commit, "success", report_url, description, check_name, pr_info) if status != "success": if FORCE_TESTS_LABEL in pr_info.labels: diff --git a/tests/ci/stress_check.py b/tests/ci/stress_check.py index 7596a81ebc9c..ac280916a2f7 100644 --- a/tests/ci/stress_check.py +++ b/tests/ci/stress_check.py @@ -16,13 +16,12 @@ mark_flaky_tests, prepare_tests_results_for_clickhouse, ) -from commit_status_helper import 
post_commit_status +from commit_status_helper import RerunHelper, get_commit, post_commit_status from docker_pull_helper import get_image_with_version from env_helper import TEMP_PATH, REPO_COPY, REPORTS_PATH from get_robot_token import get_best_robot_token from pr_info import PRInfo from report import TestResults, read_test_results -from rerun_helper import RerunHelper from s3_helper import S3Helper from stopwatch import Stopwatch from tee_popen import TeePopen @@ -125,8 +124,9 @@ def run_stress_test(docker_image_name): pr_info = PRInfo() gh = Github(get_best_robot_token(), per_page=100) + commit = get_commit(gh, pr_info.sha) - rerun_helper = RerunHelper(gh, pr_info, check_name) + rerun_helper = RerunHelper(commit, check_name) if rerun_helper.is_already_finished_by_status(): logging.info("Check is already finished according to github status, exiting") sys.exit(0) @@ -180,7 +180,7 @@ def run_stress_test(docker_image_name): ) print(f"::notice ::Report url: {report_url}") - post_commit_status(gh, pr_info.sha, check_name, description, state, report_url) + post_commit_status(commit, state, report_url, description, check_name, pr_info) prepared_events = prepare_tests_results_for_clickhouse( pr_info, diff --git a/tests/ci/style_check.py b/tests/ci/style_check.py index 89878990c2c3..33a5cd21f39b 100644 --- a/tests/ci/style_check.py +++ b/tests/ci/style_check.py @@ -15,7 +15,12 @@ mark_flaky_tests, prepare_tests_results_for_clickhouse, ) -from commit_status_helper import post_commit_status, update_mergeable_check +from commit_status_helper import ( + RerunHelper, + get_commit, + post_commit_status, + update_mergeable_check, +) from docker_pull_helper import get_image_with_version from env_helper import GITHUB_WORKSPACE, RUNNER_TEMP from get_robot_token import get_best_robot_token @@ -23,7 +28,6 @@ from git_helper import git_runner from pr_info import PRInfo from report import TestResults, read_test_results -from rerun_helper import RerunHelper from s3_helper import 
S3Helper from ssh import SSHKey from stopwatch import Stopwatch @@ -149,10 +153,11 @@ def main(): checkout_head(pr_info) gh = GitHub(get_best_robot_token(), create_cache_dir=False) + commit = get_commit(gh, pr_info.sha) atexit.register(update_mergeable_check, gh, pr_info, NAME) - rerun_helper = RerunHelper(gh, pr_info, NAME) + rerun_helper = RerunHelper(commit, NAME) if rerun_helper.is_already_finished_by_status(): logging.info("Check is already finished according to github status, exiting") # Finish with the same code as previous @@ -190,7 +195,7 @@ def main(): s3_helper, pr_info.number, pr_info.sha, test_results, additional_files, NAME ) print(f"::notice ::Report url: {report_url}") - post_commit_status(gh, pr_info.sha, NAME, description, state, report_url) + post_commit_status(commit, state, report_url, description, NAME, pr_info) prepared_events = prepare_tests_results_for_clickhouse( pr_info, diff --git a/tests/ci/unit_tests_check.py b/tests/ci/unit_tests_check.py index edc096908f41..5279ccde492e 100644 --- a/tests/ci/unit_tests_check.py +++ b/tests/ci/unit_tests_check.py @@ -15,13 +15,17 @@ mark_flaky_tests, prepare_tests_results_for_clickhouse, ) -from commit_status_helper import post_commit_status, update_mergeable_check +from commit_status_helper import ( + RerunHelper, + get_commit, + post_commit_status, + update_mergeable_check, +) from docker_pull_helper import get_image_with_version from env_helper import TEMP_PATH, REPORTS_PATH from get_robot_token import get_best_robot_token from pr_info import PRInfo from report import TestResults, TestResult -from rerun_helper import RerunHelper from s3_helper import S3Helper from stopwatch import Stopwatch from tee_popen import TeePopen @@ -116,10 +120,11 @@ def main(): pr_info = PRInfo() gh = Github(get_best_robot_token(), per_page=100) + commit = get_commit(gh, pr_info.sha) atexit.register(update_mergeable_check, gh, pr_info, check_name) - rerun_helper = RerunHelper(gh, pr_info, check_name) + rerun_helper = 
RerunHelper(commit, check_name) if rerun_helper.is_already_finished_by_status(): logging.info("Check is already finished according to github status, exiting") sys.exit(0) @@ -165,7 +170,7 @@ def main(): check_name, ) print(f"::notice ::Report url: {report_url}") - post_commit_status(gh, pr_info.sha, check_name, description, state, report_url) + post_commit_status(commit, state, report_url, description, check_name, pr_info) prepared_events = prepare_tests_results_for_clickhouse( pr_info, diff --git a/tests/clickhouse-test b/tests/clickhouse-test index 7c492a9b4673..acc8688cc4a1 100755 --- a/tests/clickhouse-test +++ b/tests/clickhouse-test @@ -11,6 +11,7 @@ import shutil import sys import os import os.path +import platform import signal import re import copy @@ -542,7 +543,10 @@ class SettingsRandomizer: 0.2, 0.5, 1, 10 * 1024 * 1024 * 1024 ), "local_filesystem_read_method": lambda: random.choice( + # Allow to use uring only when running on Linux ["read", "pread", "mmap", "pread_threadpool", "io_uring"] + if platform.system().lower() == "linux" + else ["read", "pread", "mmap", "pread_threadpool"] ), "remote_filesystem_read_method": lambda: random.choice(["read", "threadpool"]), "local_filesystem_read_prefetch": lambda: random.randint(0, 1), @@ -2113,7 +2117,14 @@ def reportLogStats(args): 'Column ''{}'' already exists', 'No macro {} in config', 'Invalid origin H3 index: {}', 'Invalid session timeout: ''{}''', 'Tuple cannot be empty', 'Database name is empty', 'Table {} is not a Dictionary', 'Expected function, got: {}', 'Unknown identifier: ''{}''', - 'Failed to {} input ''{}''', '{}.{} is not a VIEW', 'Cannot convert NULL to {}', 'Dictionary {} doesn''t exist' + 'Failed to {} input ''{}''', '{}.{} is not a VIEW', 'Cannot convert NULL to {}', 'Dictionary {} doesn''t exist', + 'Write file: {}', 'Unable to parse JSONPath', 'Host is empty in S3 URI.', 'Expected end of line', + 'inflate failed: {}{}', 'Center is not valid', 'Column ''{}'' is ambiguous', 'Cannot parse 
object', 'Invalid date: {}', + 'There is no cache by name: {}', 'No part {} in table', '`{}` should be a String', 'There are duplicate id {}', + 'Invalid replica name: {}', 'Unexpected value {} in enum', 'Unknown BSON type: {}', 'Point is not valid', + 'Invalid qualified name: {}', 'INTO OUTFILE is not allowed', 'Arguments must not be NaN', 'Cell is not valid', + 'brotli decode error{}', 'Invalid H3 index: {}', 'Too large node state size', 'No additional keys found.', + 'Attempt to read after EOF.', 'Replication was stopped', '{} building file infos', 'Cannot parse uuid {}' ) AS known_short_messages SELECT count() AS c, message_format_string, substr(any(message), 1, 120) FROM system.text_log @@ -2252,7 +2263,7 @@ def main(args): "\nFound hung queries in processlist:", args, "red", attrs=["bold"] ) ) - print(json.dumps(processlist, indent=4)) + print(processlist) print(get_transactions_list(args)) print_stacktraces() diff --git a/tests/config/config.d/merge_tree_old_dirs_cleanup.xml b/tests/config/config.d/merge_tree_old_dirs_cleanup.xml index 41932cb6d61d..2b8ea63b63de 100644 --- a/tests/config/config.d/merge_tree_old_dirs_cleanup.xml +++ b/tests/config/config.d/merge_tree_old_dirs_cleanup.xml @@ -3,6 +3,6 @@ 1 - 10 + 5 diff --git a/tests/config/config.d/storage_conf.xml b/tests/config/config.d/storage_conf.xml index bc9269e6ec1c..cb5a75f96989 100644 --- a/tests/config/config.d/storage_conf.xml +++ b/tests/config/config.d/storage_conf.xml @@ -55,52 +55,58 @@ cache s3_disk s3_cache/ - 2147483648 + 128Mi 1 0 + 100 cache s3_disk_2 s3_cache_2/ - 2Gi + 128Mi 0 100Mi + 100 cache s3_disk_3 s3_disk_3_cache/ - 22548578304 + 128Mi 22548578304 1 1 0 + 100 cache s3_disk_4 s3_cache_4/ - 22548578304 + 128Mi 1 1 0 + 100 cache s3_disk_5 s3_cache_5/ - 22548578304 + 128Mi 0 + 100 cache s3_disk_6 s3_cache_6/ - 22548578304 + 128Mi 0 1 100 + 100 cache @@ -108,15 +114,17 @@ s3_cache_small/ 1000 1 + 100 cache s3_disk_6 s3_cache_small_segment_size/ - 22548578304 + 128Mi 10Ki 0 1 + 100 @@ 
-139,6 +147,7 @@ 22548578304 1 0 + 100 cache @@ -146,6 +155,7 @@ local_cache_2/ 22548578304 0 + 100 cache @@ -155,6 +165,7 @@ 1 1 0 + 100 @@ -163,6 +174,7 @@ s3_cache_multi/ 22548578304 0 + 100 cache @@ -170,6 +182,7 @@ s3_cache_multi_2/ 22548578304 0 + 100 diff --git a/tests/integration/helpers/external_sources.py b/tests/integration/helpers/external_sources.py index fd086fc45266..afb91083d57b 100644 --- a/tests/integration/helpers/external_sources.py +++ b/tests/integration/helpers/external_sources.py @@ -161,6 +161,29 @@ def load_data(self, data, table_name): class SourceMongo(ExternalSource): + def __init__( + self, + name, + internal_hostname, + internal_port, + docker_hostname, + docker_port, + user, + password, + secure=False, + ): + ExternalSource.__init__( + self, + name, + internal_hostname, + internal_port, + docker_hostname, + docker_port, + user, + password, + ) + self.secure = secure + def get_source_str(self, table_name): return """ @@ -170,6 +193,7 @@ def get_source_str(self, table_name): {password} test {tbl} + {options} """.format( host=self.docker_hostname, @@ -177,6 +201,7 @@ def get_source_str(self, table_name): user=self.user, password=self.password, tbl=table_name, + options="ssl=true" if self.secure else "", ) def prepare(self, structure, table_name, cluster): @@ -186,6 +211,8 @@ def prepare(self, structure, table_name, cluster): user=self.user, password=self.password, ) + if self.secure: + connection_str += "/?tls=true&tlsAllowInvalidCertificates=true" self.connection = pymongo.MongoClient(connection_str) self.converters = {} for field in structure.get_all_fields(): @@ -228,7 +255,7 @@ def compatible_with_layout(self, layout): def get_source_str(self, table_name): return """ - mongodb://{user}:{password}@{host}:{port}/test + mongodb://{user}:{password}@{host}:{port}/test{options} {tbl} """.format( @@ -237,6 +264,7 @@ def get_source_str(self, table_name): user=self.user, password=self.password, tbl=table_name, + options="?ssl=true" if 
self.secure else "", ) diff --git a/tests/integration/test_dictionaries_all_layouts_separate_sources/configs/ssl_verification.xml b/tests/integration/test_dictionaries_all_layouts_separate_sources/configs/ssl_verification.xml new file mode 100644 index 000000000000..3efe98e70450 --- /dev/null +++ b/tests/integration/test_dictionaries_all_layouts_separate_sources/configs/ssl_verification.xml @@ -0,0 +1,8 @@ + + + + + none + + + diff --git a/tests/integration/test_dictionaries_all_layouts_separate_sources/test_mongo.py b/tests/integration/test_dictionaries_all_layouts_separate_sources/test_mongo.py index 55639877ba0b..973dbfc04299 100644 --- a/tests/integration/test_dictionaries_all_layouts_separate_sources/test_mongo.py +++ b/tests/integration/test_dictionaries_all_layouts_separate_sources/test_mongo.py @@ -17,15 +17,19 @@ test_name = "mongo" -def setup_module(module): - global cluster - global node - global simple_tester - global complex_tester - global ranged_tester - - cluster = ClickHouseCluster(__file__) - SOURCE = SourceMongo( +@pytest.fixture(scope="module") +def secure_connection(request): + return request.param + + +@pytest.fixture(scope="module") +def cluster(secure_connection): + return ClickHouseCluster(__file__) + + +@pytest.fixture(scope="module") +def source(secure_connection, cluster): + return SourceMongo( "MongoDB", "localhost", cluster.mongo_port, @@ -33,35 +37,71 @@ def setup_module(module): "27017", "root", "clickhouse", + secure=secure_connection, ) - simple_tester = SimpleLayoutTester(test_name) - simple_tester.cleanup() - simple_tester.create_dictionaries(SOURCE) - complex_tester = ComplexLayoutTester(test_name) - complex_tester.create_dictionaries(SOURCE) +@pytest.fixture(scope="module") +def simple_tester(source): + tester = SimpleLayoutTester(test_name) + tester.cleanup() + tester.create_dictionaries(source) + return tester - ranged_tester = RangedLayoutTester(test_name) - ranged_tester.create_dictionaries(SOURCE) - # Since that all .xml 
configs were created - main_configs = [] - main_configs.append(os.path.join("configs", "disable_ssl_verification.xml")) +@pytest.fixture(scope="module") +def complex_tester(source): + tester = ComplexLayoutTester(test_name) + tester.create_dictionaries(source) + return tester - dictionaries = simple_tester.list_dictionaries() - node = cluster.add_instance( - "node", main_configs=main_configs, dictionaries=dictionaries, with_mongo=True - ) +@pytest.fixture(scope="module") +def ranged_tester(source): + tester = RangedLayoutTester(test_name) + tester.create_dictionaries(source) + return tester -def teardown_module(module): - simple_tester.cleanup() +@pytest.fixture(scope="module") +def main_config(secure_connection): + main_config = [] + if secure_connection: + main_config.append(os.path.join("configs", "disable_ssl_verification.xml")) + else: + main_config.append(os.path.join("configs", "ssl_verification.xml")) + return main_config @pytest.fixture(scope="module") -def started_cluster(): +def started_cluster( + secure_connection, + cluster, + main_config, + simple_tester, + ranged_tester, + complex_tester, +): + SOURCE = SourceMongo( + "MongoDB", + "localhost", + cluster.mongo_port, + cluster.mongo_host, + "27017", + "root", + "clickhouse", + secure=secure_connection, + ) + dictionaries = simple_tester.list_dictionaries() + + node = cluster.add_instance( + "node", + main_configs=main_config, + dictionaries=dictionaries, + with_mongo=True, + with_mongo_secure=secure_connection, + ) + try: cluster.start() @@ -75,16 +115,25 @@ def started_cluster(): cluster.shutdown() +@pytest.mark.parametrize("secure_connection", [False], indirect=["secure_connection"]) @pytest.mark.parametrize("layout_name", sorted(LAYOUTS_SIMPLE)) -def test_simple(started_cluster, layout_name): - simple_tester.execute(layout_name, node) +def test_simple(secure_connection, started_cluster, layout_name, simple_tester): + simple_tester.execute(layout_name, started_cluster.instances["node"]) 
+@pytest.mark.parametrize("secure_connection", [False], indirect=["secure_connection"]) @pytest.mark.parametrize("layout_name", sorted(LAYOUTS_COMPLEX)) -def test_complex(started_cluster, layout_name): - complex_tester.execute(layout_name, node) +def test_complex(secure_connection, started_cluster, layout_name, complex_tester): + complex_tester.execute(layout_name, started_cluster.instances["node"]) +@pytest.mark.parametrize("secure_connection", [False], indirect=["secure_connection"]) @pytest.mark.parametrize("layout_name", sorted(LAYOUTS_RANGED)) -def test_ranged(started_cluster, layout_name): - ranged_tester.execute(layout_name, node) +def test_ranged(secure_connection, started_cluster, layout_name, ranged_tester): + ranged_tester.execute(layout_name, started_cluster.instances["node"]) + + +@pytest.mark.parametrize("secure_connection", [True], indirect=["secure_connection"]) +@pytest.mark.parametrize("layout_name", sorted(LAYOUTS_SIMPLE)) +def test_simple_ssl(secure_connection, started_cluster, layout_name, simple_tester): + simple_tester.execute(layout_name, started_cluster.instances["node"]) diff --git a/tests/integration/test_dictionaries_all_layouts_separate_sources/test_mongo_uri.py b/tests/integration/test_dictionaries_all_layouts_separate_sources/test_mongo_uri.py index 84c547b7a6b3..225414322591 100644 --- a/tests/integration/test_dictionaries_all_layouts_separate_sources/test_mongo_uri.py +++ b/tests/integration/test_dictionaries_all_layouts_separate_sources/test_mongo_uri.py @@ -8,25 +8,22 @@ from helpers.dictionary import Field, Row, Dictionary, DictionaryStructure, Layout from helpers.external_sources import SourceMongoURI -SOURCE = None -cluster = None -node = None -simple_tester = None -complex_tester = None -ranged_tester = None test_name = "mongo_uri" -def setup_module(module): - global cluster - global node - global simple_tester - global complex_tester - global ranged_tester +@pytest.fixture(scope="module") +def secure_connection(request): + 
return request.param + - cluster = ClickHouseCluster(__file__) +@pytest.fixture(scope="module") +def cluster(secure_connection): + return ClickHouseCluster(__file__) - SOURCE = SourceMongoURI( + +@pytest.fixture(scope="module") +def source(secure_connection, cluster): + return SourceMongoURI( "MongoDB", "localhost", cluster.mongo_port, @@ -34,52 +31,55 @@ def setup_module(module): "27017", "root", "clickhouse", + secure=secure_connection, ) - simple_tester = SimpleLayoutTester(test_name) - simple_tester.cleanup() - simple_tester.create_dictionaries(SOURCE) - complex_tester = ComplexLayoutTester(test_name) - complex_tester.create_dictionaries(SOURCE) +@pytest.fixture(scope="module") +def simple_tester(source): + tester = SimpleLayoutTester(test_name) + tester.cleanup() + tester.create_dictionaries(source) + return tester + - ranged_tester = RangedLayoutTester(test_name) - ranged_tester.create_dictionaries(SOURCE) - # Since that all .xml configs were created +@pytest.fixture(scope="module") +def main_config(secure_connection): + main_config = [] + if secure_connection: + main_config.append(os.path.join("configs", "disable_ssl_verification.xml")) + else: + main_config.append(os.path.join("configs", "ssl_verification.xml")) + return main_config - main_configs = [] - main_configs.append(os.path.join("configs", "disable_ssl_verification.xml")) +@pytest.fixture(scope="module") +def started_cluster(secure_connection, cluster, main_config, simple_tester): dictionaries = simple_tester.list_dictionaries() node = cluster.add_instance( "uri_node", - main_configs=main_configs, + main_configs=main_config, dictionaries=dictionaries, with_mongo=True, + with_mongo_secure=secure_connection, ) - - -def teardown_module(module): - simple_tester.cleanup() - - -@pytest.fixture(scope="module") -def started_cluster(): try: cluster.start() - simple_tester.prepare(cluster) - complex_tester.prepare(cluster) - ranged_tester.prepare(cluster) - yield cluster - finally: cluster.shutdown() # See 
comment in SourceMongoURI +@pytest.mark.parametrize("secure_connection", [False], indirect=["secure_connection"]) +@pytest.mark.parametrize("layout_name", ["flat"]) +def test_simple(secure_connection, started_cluster, simple_tester, layout_name): + simple_tester.execute(layout_name, started_cluster.instances["uri_node"]) + + +@pytest.mark.parametrize("secure_connection", [True], indirect=["secure_connection"]) @pytest.mark.parametrize("layout_name", ["flat"]) -def test_simple(started_cluster, layout_name): - simple_tester.execute(layout_name, node) +def test_simple_ssl(secure_connection, started_cluster, simple_tester, layout_name): + simple_tester.execute(layout_name, started_cluster.instances["uri_node"]) diff --git a/tests/integration/test_mask_sensitive_info/test.py b/tests/integration/test_mask_sensitive_info/test.py index 92232f7e6a8b..69144c0eb074 100644 --- a/tests/integration/test_mask_sensitive_info/test.py +++ b/tests/integration/test_mask_sensitive_info/test.py @@ -95,14 +95,14 @@ def test_create_alter_user(): check_logs( must_contain=[ - "CREATE USER u1 IDENTIFIED WITH sha256_password", - "ALTER USER u1 IDENTIFIED WITH sha256_password", + "CREATE USER u1 IDENTIFIED", + "ALTER USER u1 IDENTIFIED", "CREATE USER u2 IDENTIFIED WITH plaintext_password", ], must_not_contain=[ password, - "IDENTIFIED WITH sha256_password BY", - "IDENTIFIED WITH sha256_hash BY", + "IDENTIFIED BY", + "IDENTIFIED BY", "IDENTIFIED WITH plaintext_password BY", ], ) diff --git a/tests/integration/test_max_rows_to_read_leaf_with_view/__init__.py b/tests/integration/test_max_rows_to_read_leaf_with_view/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tests/integration/test_max_rows_to_read_leaf_with_view/configs/remote_servers.xml b/tests/integration/test_max_rows_to_read_leaf_with_view/configs/remote_servers.xml new file mode 100644 index 000000000000..9ce90edb727e --- /dev/null +++ 
b/tests/integration/test_max_rows_to_read_leaf_with_view/configs/remote_servers.xml @@ -0,0 +1,18 @@ + + + + + + node1 + 9000 + + + + + node2 + 9000 + + + + + diff --git a/tests/integration/test_max_rows_to_read_leaf_with_view/test.py b/tests/integration/test_max_rows_to_read_leaf_with_view/test.py new file mode 100755 index 000000000000..6957534ce0d2 --- /dev/null +++ b/tests/integration/test_max_rows_to_read_leaf_with_view/test.py @@ -0,0 +1,76 @@ +from contextlib import contextmanager + +import pytest +from helpers.cluster import ClickHouseCluster +from helpers.client import QueryRuntimeException + +cluster = ClickHouseCluster(__file__) + +node1 = cluster.add_instance( + "node1", + main_configs=["configs/remote_servers.xml"], + with_zookeeper=True, +) + +node2 = cluster.add_instance( + "node2", + main_configs=["configs/remote_servers.xml"], + with_zookeeper=True, +) + + +@pytest.fixture(scope="module") +def started_cluster(): + try: + cluster.start() + + for node in (node1, node2): + node.query( + f""" + CREATE TABLE local_table(id UInt32, d DateTime) ENGINE = ReplicatedMergeTree('/clickhouse/tables/0/max_rows_read_leaf', '{node}') PARTITION BY toYYYYMM(d) ORDER BY d; + + CREATE TABLE distributed_table(id UInt32, d DateTime) ENGINE = Distributed(two_shards, default, local_table); + + CREATE OR REPLACE VIEW test_view AS select id from distributed_table; +""" + ) + node1.query( + "INSERT INTO local_table (id) select * from system.numbers limit 200" + ) + node2.query( + "INSERT INTO local_table (id) select * from system.numbers limit 200" + ) + + yield cluster + + finally: + cluster.shutdown() + + +def test_max_rows_to_read_leaf_via_view(started_cluster): + """ + Asserts the expected behaviour that we should be able to select + the total amount of rows (400 - 200 from each shard) from a + view that selects from a distributed table. 
+ """ + assert ( + node1.query( + "SELECT count() from test_view SETTINGS max_rows_to_read_leaf=200" + ).rstrip() + == "400" + ) + with pytest.raises( + QueryRuntimeException, match="controlled by 'max_rows_to_read_leaf'" + ): + # insert some more data and ensure we get a legitimate failure + node2.query( + "INSERT INTO local_table (id) select * from system.numbers limit 10" + ) + node2.query("SELECT count() from test_view SETTINGS max_rows_to_read_leaf=200") + + +if __name__ == "__main__": + with contextmanager(started_cluster)() as cluster: + for name, instance in list(cluster.instances.items()): + print(name, instance.ip_address) + input("Cluster created, press any key to destroy...") diff --git a/tests/integration/test_merge_tree_hdfs/test.py b/tests/integration/test_merge_tree_hdfs/test.py index 782237539fa6..3057e48c7530 100644 --- a/tests/integration/test_merge_tree_hdfs/test.py +++ b/tests/integration/test_merge_tree_hdfs/test.py @@ -224,14 +224,22 @@ def test_attach_detach_partition(cluster): wait_for_delete_empty_parts(node, "hdfs_test") wait_for_delete_inactive_parts(node, "hdfs_test") wait_for_delete_hdfs_objects( - cluster, FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2 + cluster, + FILES_OVERHEAD + + FILES_OVERHEAD_PER_PART_WIDE * 2 + - FILES_OVERHEAD_METADATA_VERSION, ) node.query("ALTER TABLE hdfs_test ATTACH PARTITION '2020-01-03'") assert node.query("SELECT count(*) FROM hdfs_test FORMAT Values") == "(8192)" hdfs_objects = fs.listdir("/clickhouse") - assert len(hdfs_objects) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2 + assert ( + len(hdfs_objects) + == FILES_OVERHEAD + + FILES_OVERHEAD_PER_PART_WIDE * 2 + - FILES_OVERHEAD_METADATA_VERSION + ) node.query("ALTER TABLE hdfs_test DROP PARTITION '2020-01-03'") assert node.query("SELECT count(*) FROM hdfs_test FORMAT Values") == "(4096)" @@ -355,7 +363,14 @@ def test_move_replace_partition_to_another_table(cluster): # Number of objects in HDFS should be unchanged. 
hdfs_objects = fs.listdir("/clickhouse") - assert len(hdfs_objects) == FILES_OVERHEAD * 2 + FILES_OVERHEAD_PER_PART_WIDE * 4 + for obj in hdfs_objects: + print("Object in HDFS after move", obj) + wait_for_delete_hdfs_objects( + cluster, + FILES_OVERHEAD * 2 + + FILES_OVERHEAD_PER_PART_WIDE * 4 + - FILES_OVERHEAD_METADATA_VERSION * 2, + ) # Add new partitions to source table, but with different values and replace them from copied table. node.query( @@ -370,7 +385,15 @@ def test_move_replace_partition_to_another_table(cluster): assert node.query("SELECT count(*) FROM hdfs_test FORMAT Values") == "(16384)" hdfs_objects = fs.listdir("/clickhouse") - assert len(hdfs_objects) == FILES_OVERHEAD * 2 + FILES_OVERHEAD_PER_PART_WIDE * 6 + for obj in hdfs_objects: + print("Object in HDFS after insert", obj) + + wait_for_delete_hdfs_objects( + cluster, + FILES_OVERHEAD * 2 + + FILES_OVERHEAD_PER_PART_WIDE * 6 + - FILES_OVERHEAD_METADATA_VERSION * 2, + ) node.query("ALTER TABLE hdfs_test REPLACE PARTITION '2020-01-03' FROM hdfs_clone") node.query("ALTER TABLE hdfs_test REPLACE PARTITION '2020-01-05' FROM hdfs_clone") @@ -381,7 +404,10 @@ def test_move_replace_partition_to_another_table(cluster): # Wait for outdated partitions deletion. 
wait_for_delete_hdfs_objects( - cluster, FILES_OVERHEAD * 2 + FILES_OVERHEAD_PER_PART_WIDE * 4 + cluster, + FILES_OVERHEAD * 2 + + FILES_OVERHEAD_PER_PART_WIDE * 4 + - FILES_OVERHEAD_METADATA_VERSION * 2, ) node.query("DROP TABLE hdfs_clone NO DELAY") @@ -390,4 +416,13 @@ def test_move_replace_partition_to_another_table(cluster): # Data should remain in hdfs hdfs_objects = fs.listdir("/clickhouse") - assert len(hdfs_objects) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 4 + + for obj in hdfs_objects: + print("Object in HDFS after drop", obj) + + wait_for_delete_hdfs_objects( + cluster, + FILES_OVERHEAD + + FILES_OVERHEAD_PER_PART_WIDE * 4 + - FILES_OVERHEAD_METADATA_VERSION * 2, + ) diff --git a/tests/integration/test_merge_tree_load_parts/test.py b/tests/integration/test_merge_tree_load_parts/test.py index dfbe00c8e28c..049dd516647c 100644 --- a/tests/integration/test_merge_tree_load_parts/test.py +++ b/tests/integration/test_merge_tree_load_parts/test.py @@ -6,12 +6,14 @@ cluster = helpers.cluster.ClickHouseCluster(__file__) + node1 = cluster.add_instance( "node1", main_configs=["configs/fast_background_pool.xml"], with_zookeeper=True, stay_alive=True, ) + node2 = cluster.add_instance( "node2", main_configs=["configs/fast_background_pool.xml"], @@ -19,6 +21,12 @@ stay_alive=True, ) +node3 = cluster.add_instance( + "node3", + with_zookeeper=True, + stay_alive=True, +) + @pytest.fixture(scope="module") def started_cluster(): @@ -194,3 +202,54 @@ def check_parts_loading(node, partition, loaded, failed, skipped): ) == "111\t1\n222\t1\n333\t1\n" ) + + +def test_merge_tree_load_parts_filesystem_error(started_cluster): + if node3.is_built_with_sanitizer() or node3.is_debug_build(): + pytest.skip( + "Skip with debug build and sanitizers. 
\ + This test intentionally triggers LOGICAL_ERROR which leads to crash with those builds" + ) + + node3.query( + """ + CREATE TABLE mt_load_parts (id UInt32) + ENGINE = MergeTree ORDER BY id + SETTINGS index_granularity_bytes = 0""" + ) + + node3.query("SYSTEM STOP MERGES mt_load_parts") + + for i in range(2): + node3.query(f"INSERT INTO mt_load_parts VALUES ({i})") + + # We want to somehow check that exception thrown on part creation is handled during part loading. + # It can be a filesystem exception triggered at initialization of part storage but it hard + # to trigger it because it should be an exception on stat/listDirectory. + # The most easy way to trigger such exception is to use chmod but clickhouse server + # is run with root user in integration test and this won't work. So let's do some + # some stupid things: create a table without adaptive granularity and change mark + # extensions of data files in part to make clickhouse think that it's a compact part which + # cannot be created in such table. This will trigger a LOGICAL_ERROR on part creation. 
+ + def corrupt_part(table, part_name): + part_path = node3.query( + "SELECT path FROM system.parts WHERE table = '{}' and name = '{}'".format( + table, part_name + ) + ).strip() + + node3.exec_in_container( + ["bash", "-c", f"mv {part_path}id.mrk {part_path}id.mrk3"], privileged=True + ) + + corrupt_part("mt_load_parts", "all_1_1_0") + node3.restart_clickhouse(kill=True) + + assert node3.query("SELECT * FROM mt_load_parts") == "1\n" + assert ( + node3.query( + "SELECT name FROM system.detached_parts WHERE table = 'mt_load_parts'" + ) + == "broken-on-start_all_1_1_0\n" + ) diff --git a/tests/integration/test_merge_tree_s3/test.py b/tests/integration/test_merge_tree_s3/test.py index c2e00dc0cb86..9e9903c36c73 100644 --- a/tests/integration/test_merge_tree_s3/test.py +++ b/tests/integration/test_merge_tree_s3/test.py @@ -101,44 +101,45 @@ def run_s3_mocks(cluster): ) -def list_objects(cluster, path="data/"): +def list_objects(cluster, path="data/", hint="list_objects"): minio = cluster.minio_client objects = list(minio.list_objects(cluster.minio_bucket, path, recursive=True)) - logging.info(f"list_objects ({len(objects)}): {[x.object_name for x in objects]}") + logging.info(f"{hint} ({len(objects)}): {[x.object_name for x in objects]}") return objects def wait_for_delete_s3_objects(cluster, expected, timeout=30): - minio = cluster.minio_client while timeout > 0: - if ( - len(list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True))) - == expected - ): + if len(list_objects(cluster, "data/")) == expected: return timeout -= 1 time.sleep(1) - assert ( - len(list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True))) - == expected - ) + assert len(list_objects(cluster, "data/")) == expected -@pytest.fixture(autouse=True) -@pytest.mark.parametrize("node_name", ["node"]) -def drop_table(cluster, node_name): - yield - node = cluster.instances[node_name] +def remove_all_s3_objects(cluster): minio = cluster.minio_client + for obj in 
list_objects(cluster, "data/"): + minio.remove_object(cluster.minio_bucket, obj.object_name) - node.query("DROP TABLE IF EXISTS s3_test NO DELAY") +@pytest.fixture(autouse=True, scope="function") +def clear_minio(cluster): try: - wait_for_delete_s3_objects(cluster, 0) - finally: + # CH do some writes to the S3 at start. For example, file data/clickhouse_access_check_{server_uuid}. + # Set the timeout there as 10 sec in order to resolve the race with that file exists. + wait_for_delete_s3_objects(cluster, 0, timeout=10) + except: # Remove extra objects to prevent tests cascade failing - for obj in list_objects(cluster, "data/"): - minio.remove_object(cluster.minio_bucket, obj.object_name) + remove_all_s3_objects(cluster) + + yield + + +def check_no_objects_after_drop(cluster, table_name="s3_test", node_name="node"): + node = cluster.instances[node_name] + node.query(f"DROP TABLE IF EXISTS {table_name} NO DELAY") + wait_for_delete_s3_objects(cluster, 0, timeout=0) @pytest.mark.parametrize( @@ -158,10 +159,7 @@ def test_simple_insert_select( values1 = generate_values("2020-01-03", 4096) node.query("INSERT INTO s3_test VALUES {}".format(values1)) assert node.query("SELECT * FROM s3_test order by dt, id FORMAT Values") == values1 - assert ( - len(list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True))) - == FILES_OVERHEAD + files_per_part - ) + assert len(list_objects(cluster, "data/")) == FILES_OVERHEAD + files_per_part values2 = generate_values("2020-01-04", 4096) node.query("INSERT INTO s3_test VALUES {}".format(values2)) @@ -169,15 +167,14 @@ def test_simple_insert_select( node.query("SELECT * FROM s3_test ORDER BY dt, id FORMAT Values") == values1 + "," + values2 ) - assert ( - len(list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True))) - == FILES_OVERHEAD + files_per_part * 2 - ) + assert len(list_objects(cluster, "data/")) == FILES_OVERHEAD + files_per_part * 2 assert ( node.query("SELECT count(*) FROM s3_test where id = 1 FORMAT 
Values") == "(2)" ) + check_no_objects_after_drop(cluster) + @pytest.mark.parametrize("merge_vertical,node_name", [(True, "node"), (False, "node")]) def test_insert_same_partition_and_merge(cluster, merge_vertical, node_name): @@ -188,7 +185,6 @@ def test_insert_same_partition_and_merge(cluster, merge_vertical, node_name): node = cluster.instances[node_name] create_table(node, "s3_test", **settings) - minio = cluster.minio_client node.query("SYSTEM STOP MERGES s3_test") node.query( @@ -214,7 +210,7 @@ def test_insert_same_partition_and_merge(cluster, merge_vertical, node_name): node.query("SELECT count(distinct(id)) FROM s3_test FORMAT Values") == "(8192)" ) assert ( - len(list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True))) + len(list_objects(cluster, "data/")) == FILES_OVERHEAD_PER_PART_WIDE * 6 + FILES_OVERHEAD ) @@ -242,6 +238,8 @@ def test_insert_same_partition_and_merge(cluster, merge_vertical, node_name): cluster, FILES_OVERHEAD_PER_PART_WIDE + FILES_OVERHEAD, timeout=45 ) + check_no_objects_after_drop(cluster) + @pytest.mark.parametrize("node_name", ["node"]) def test_alter_table_columns(cluster, node_name): @@ -287,12 +285,13 @@ def test_alter_table_columns(cluster, node_name): cluster, FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE + 2 ) + check_no_objects_after_drop(cluster) + @pytest.mark.parametrize("node_name", ["node"]) def test_attach_detach_partition(cluster, node_name): node = cluster.instances[node_name] create_table(node, "s3_test") - minio = cluster.minio_client node.query( "INSERT INTO s3_test VALUES {}".format(generate_values("2020-01-03", 4096)) @@ -312,14 +311,18 @@ def test_attach_detach_partition(cluster, node_name): assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(4096)" assert ( len(list_objects(cluster, "data/")) - == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2 + == FILES_OVERHEAD + + FILES_OVERHEAD_PER_PART_WIDE * 2 + - FILES_OVERHEAD_METADATA_VERSION ) node.query("ALTER TABLE s3_test 
ATTACH PARTITION '2020-01-03'") assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(8192)" assert ( len(list_objects(cluster, "data/")) - == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2 + == FILES_OVERHEAD + + FILES_OVERHEAD_PER_PART_WIDE * 2 + - FILES_OVERHEAD_METADATA_VERSION ) node.query("ALTER TABLE s3_test DROP PARTITION '2020-01-03'") @@ -337,7 +340,9 @@ def test_attach_detach_partition(cluster, node_name): assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(0)" assert ( len(list_objects(cluster, "data/")) - == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 1 + == FILES_OVERHEAD + + FILES_OVERHEAD_PER_PART_WIDE * 1 + - FILES_OVERHEAD_METADATA_VERSION ) node.query( "ALTER TABLE s3_test DROP DETACHED PARTITION '2020-01-04'", @@ -349,12 +354,13 @@ def test_attach_detach_partition(cluster, node_name): == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 0 ) + check_no_objects_after_drop(cluster) + @pytest.mark.parametrize("node_name", ["node"]) def test_move_partition_to_another_disk(cluster, node_name): node = cluster.instances[node_name] create_table(node, "s3_test") - minio = cluster.minio_client node.query( "INSERT INTO s3_test VALUES {}".format(generate_values("2020-01-03", 4096)) @@ -364,30 +370,31 @@ def test_move_partition_to_another_disk(cluster, node_name): ) assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(8192)" assert ( - len(list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True))) + len(list_objects(cluster, "data/")) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2 ) node.query("ALTER TABLE s3_test MOVE PARTITION '2020-01-04' TO DISK 'hdd'") assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(8192)" assert ( - len(list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True))) + len(list_objects(cluster, "data/")) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE ) node.query("ALTER TABLE s3_test MOVE PARTITION '2020-01-04' TO DISK 's3'") assert 
node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(8192)" assert ( - len(list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True))) + len(list_objects(cluster, "data/")) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2 ) + check_no_objects_after_drop(cluster) + @pytest.mark.parametrize("node_name", ["node"]) def test_table_manipulations(cluster, node_name): node = cluster.instances[node_name] create_table(node, "s3_test") - minio = cluster.minio_client node.query( "INSERT INTO s3_test VALUES {}".format(generate_values("2020-01-03", 4096)) @@ -399,9 +406,10 @@ def test_table_manipulations(cluster, node_name): node.query("RENAME TABLE s3_test TO s3_renamed") assert node.query("SELECT count(*) FROM s3_renamed FORMAT Values") == "(8192)" assert ( - len(list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True))) + len(list_objects(cluster, "data/")) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2 ) + node.query("RENAME TABLE s3_renamed TO s3_test") assert node.query("CHECK TABLE s3_test FORMAT Values") == "(1)" @@ -410,7 +418,7 @@ def test_table_manipulations(cluster, node_name): node.query("ATTACH TABLE s3_test") assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(8192)" assert ( - len(list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True))) + len(list_objects(cluster, "data/")) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2 ) @@ -418,17 +426,15 @@ def test_table_manipulations(cluster, node_name): wait_for_delete_empty_parts(node, "s3_test") wait_for_delete_inactive_parts(node, "s3_test") assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(0)" - assert ( - len(list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True))) - == FILES_OVERHEAD - ) + assert len(list_objects(cluster, "data/")) == FILES_OVERHEAD + + check_no_objects_after_drop(cluster) @pytest.mark.parametrize("node_name", ["node"]) def test_move_replace_partition_to_another_table(cluster, 
node_name): node = cluster.instances[node_name] create_table(node, "s3_test") - minio = cluster.minio_client node.query( "INSERT INTO s3_test VALUES {}".format(generate_values("2020-01-03", 4096)) @@ -444,11 +450,11 @@ def test_move_replace_partition_to_another_table(cluster, node_name): ) assert node.query("SELECT sum(id) FROM s3_test FORMAT Values") == "(0)" assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(16384)" + assert ( - len(list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True))) + len(list_objects(cluster, "data/", "Objects at start")) == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 4 ) - create_table(node, "s3_clone") node.query("ALTER TABLE s3_test MOVE PARTITION '2020-01-03' TO TABLE s3_clone") @@ -457,10 +463,14 @@ def test_move_replace_partition_to_another_table(cluster, node_name): assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(8192)" assert node.query("SELECT sum(id) FROM s3_clone FORMAT Values") == "(0)" assert node.query("SELECT count(*) FROM s3_clone FORMAT Values") == "(8192)" + + list_objects(cluster, "data/", "Object after move partition") # Number of objects in S3 should be unchanged. - assert ( - len(list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True))) - == FILES_OVERHEAD * 2 + FILES_OVERHEAD_PER_PART_WIDE * 4 + wait_for_delete_s3_objects( + cluster, + FILES_OVERHEAD * 2 + + FILES_OVERHEAD_PER_PART_WIDE * 4 + - FILES_OVERHEAD_METADATA_VERSION * 2, ) # Add new partitions to source table, but with different values and replace them from copied table. 
@@ -472,9 +482,13 @@ def test_move_replace_partition_to_another_table(cluster, node_name): ) assert node.query("SELECT sum(id) FROM s3_test FORMAT Values") == "(0)" assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(16384)" - assert ( - len(list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True))) - == FILES_OVERHEAD * 2 + FILES_OVERHEAD_PER_PART_WIDE * 6 + + list_objects(cluster, "data/", "Object after insert") + wait_for_delete_s3_objects( + cluster, + FILES_OVERHEAD * 2 + + FILES_OVERHEAD_PER_PART_WIDE * 6 + - FILES_OVERHEAD_METADATA_VERSION * 2, ) node.query("ALTER TABLE s3_test REPLACE PARTITION '2020-01-03' FROM s3_clone") @@ -486,39 +500,48 @@ def test_move_replace_partition_to_another_table(cluster, node_name): # Wait for outdated partitions deletion. wait_for_delete_s3_objects( - cluster, FILES_OVERHEAD * 2 + FILES_OVERHEAD_PER_PART_WIDE * 4 + cluster, + FILES_OVERHEAD * 2 + + FILES_OVERHEAD_PER_PART_WIDE * 4 + - FILES_OVERHEAD_METADATA_VERSION * 2, ) node.query("DROP TABLE s3_clone NO DELAY") assert node.query("SELECT sum(id) FROM s3_test FORMAT Values") == "(0)" assert node.query("SELECT count(*) FROM s3_test FORMAT Values") == "(16384)" - # Data should remain in S3 - assert ( - len(list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True))) - == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 4 + + list_objects(cluster, "data/", "Object after drop") + wait_for_delete_s3_objects( + cluster, + FILES_OVERHEAD + + FILES_OVERHEAD_PER_PART_WIDE * 4 + - FILES_OVERHEAD_METADATA_VERSION * 2, ) node.query("ALTER TABLE s3_test FREEZE") # Number S3 objects should be unchanged. 
- assert ( - len(list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True))) - == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 4 + list_objects(cluster, "data/", "Object after freeze") + wait_for_delete_s3_objects( + cluster, + FILES_OVERHEAD + + FILES_OVERHEAD_PER_PART_WIDE * 4 + - FILES_OVERHEAD_METADATA_VERSION * 2, ) node.query("DROP TABLE s3_test NO DELAY") # Backup data should remain in S3. - wait_for_delete_s3_objects(cluster, FILES_OVERHEAD_PER_PART_WIDE * 4) + wait_for_delete_s3_objects( + cluster, FILES_OVERHEAD_PER_PART_WIDE * 4 - FILES_OVERHEAD_METADATA_VERSION * 4 + ) - for obj in list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True)): - minio.remove_object(cluster.minio_bucket, obj.object_name) + remove_all_s3_objects(cluster) @pytest.mark.parametrize("node_name", ["node"]) def test_freeze_unfreeze(cluster, node_name): node = cluster.instances[node_name] create_table(node, "s3_test") - minio = cluster.minio_client node.query( "INSERT INTO s3_test VALUES {}".format(generate_values("2020-01-03", 4096)) @@ -533,8 +556,9 @@ def test_freeze_unfreeze(cluster, node_name): wait_for_delete_empty_parts(node, "s3_test") wait_for_delete_inactive_parts(node, "s3_test") assert ( - len(list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True))) - == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2 + len(list_objects(cluster, "data/")) + == FILES_OVERHEAD + + (FILES_OVERHEAD_PER_PART_WIDE - FILES_OVERHEAD_METADATA_VERSION) * 2 ) # Unfreeze single partition from backup1. @@ -544,13 +568,10 @@ def test_freeze_unfreeze(cluster, node_name): # Unfreeze all partitions from backup2. node.query("ALTER TABLE s3_test UNFREEZE WITH NAME 'backup2'") + # Data should be removed from S3. wait_for_delete_s3_objects(cluster, FILES_OVERHEAD) - # Data should be removed from S3. 
- assert ( - len(list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True))) - == FILES_OVERHEAD - ) + check_no_objects_after_drop(cluster) @pytest.mark.parametrize("node_name", ["node"]) @@ -558,7 +579,6 @@ def test_freeze_system_unfreeze(cluster, node_name): node = cluster.instances[node_name] create_table(node, "s3_test") create_table(node, "s3_test_removed") - minio = cluster.minio_client node.query( "INSERT INTO s3_test VALUES {}".format(generate_values("2020-01-04", 4096)) @@ -574,20 +594,18 @@ def test_freeze_system_unfreeze(cluster, node_name): wait_for_delete_inactive_parts(node, "s3_test") node.query("DROP TABLE s3_test_removed NO DELAY") assert ( - len(list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True))) - == FILES_OVERHEAD + FILES_OVERHEAD_PER_PART_WIDE * 2 + len(list_objects(cluster, "data/")) + == FILES_OVERHEAD + + (FILES_OVERHEAD_PER_PART_WIDE - FILES_OVERHEAD_METADATA_VERSION) * 2 ) # Unfreeze all data from backup3. node.query("SYSTEM UNFREEZE WITH NAME 'backup3'") + # Data should be removed from S3. wait_for_delete_s3_objects(cluster, FILES_OVERHEAD) - # Data should be removed from S3. - assert ( - len(list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True))) - == FILES_OVERHEAD - ) + check_no_objects_after_drop(cluster) @pytest.mark.parametrize("node_name", ["node"]) @@ -633,6 +651,8 @@ def get_s3_requests(): # There should be 3 times more S3 requests because multi-part upload mode uses 3 requests to upload object. 
assert get_s3_requests() - s3_requests_before == s3_requests_to_write_partition * 3 + check_no_objects_after_drop(cluster) + @pytest.mark.parametrize("node_name", ["node"]) def test_s3_no_delete_objects(cluster, node_name): @@ -641,6 +661,7 @@ def test_s3_no_delete_objects(cluster, node_name): node, "s3_test_no_delete_objects", storage_policy="no_delete_objects_s3" ) node.query("DROP TABLE s3_test_no_delete_objects SYNC") + remove_all_s3_objects(cluster) @pytest.mark.parametrize("node_name", ["node"]) @@ -655,6 +676,7 @@ def test_s3_disk_reads_on_unstable_connection(cluster, node_name): assert node.query("SELECT sum(id) FROM s3_test").splitlines() == [ "40499995500000" ] + check_no_objects_after_drop(cluster) @pytest.mark.parametrize("node_name", ["node"]) @@ -664,14 +686,13 @@ def test_lazy_seek_optimization_for_async_read(cluster, node_name): node.query( "CREATE TABLE s3_test (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='s3';" ) + node.query("SYSTEM STOP MERGES s3_test") node.query( "INSERT INTO s3_test SELECT * FROM generateRandom('key UInt32, value String') LIMIT 10000000" ) node.query("SELECT * FROM s3_test WHERE value LIKE '%abc%' ORDER BY value LIMIT 10") - node.query("DROP TABLE IF EXISTS s3_test NO DELAY") - minio = cluster.minio_client - for obj in list(minio.list_objects(cluster.minio_bucket, "data/", recursive=True)): - minio.remove_object(cluster.minio_bucket, obj.object_name) + + check_no_objects_after_drop(cluster) @pytest.mark.parametrize("node_name", ["node_with_limited_disk"]) @@ -681,6 +702,7 @@ def test_cache_with_full_disk_space(cluster, node_name): node.query( "CREATE TABLE s3_test (key UInt32, value String) Engine=MergeTree() ORDER BY value SETTINGS storage_policy='s3_with_cache_and_jbod';" ) + node.query("SYSTEM STOP MERGES s3_test") node.query( "INSERT INTO s3_test SELECT number, toString(number) FROM numbers(100000000)" ) @@ -699,7 +721,7 @@ def test_cache_with_full_disk_space(cluster, node_name): 
assert node.contains_in_log( "Insert into cache is skipped due to insufficient disk space" ) - node.query("DROP TABLE IF EXISTS s3_test NO DELAY") + check_no_objects_after_drop(cluster, node_name=node_name) @pytest.mark.parametrize("node_name", ["node"]) @@ -724,6 +746,7 @@ def test_store_cleanup_disk_s3(cluster, node_name): "CREATE TABLE s3_test UUID '00000000-1000-4000-8000-000000000001' (n UInt64) Engine=MergeTree() ORDER BY n SETTINGS storage_policy='s3';" ) node.query("INSERT INTO s3_test SELECT 1") + check_no_objects_after_drop(cluster) @pytest.mark.parametrize("node_name", ["node"]) @@ -800,3 +823,5 @@ def test_cache_setting_compatibility(cluster, node_name): node.query("SELECT * FROM s3_test FORMAT Null") assert not node.contains_in_log("No such file or directory: Cache info:") + + check_no_objects_after_drop(cluster) diff --git a/tests/integration/test_merge_tree_s3_failover/configs/config.d/storage_conf.xml b/tests/integration/test_merge_tree_s3_failover/configs/config.d/storage_conf.xml index 976933b2d217..74af657c7830 100644 --- a/tests/integration/test_merge_tree_s3_failover/configs/config.d/storage_conf.xml +++ b/tests/integration/test_merge_tree_s3_failover/configs/config.d/storage_conf.xml @@ -11,6 +11,7 @@ true 0 + 20000 s3 @@ -20,6 +21,7 @@ minio123 true + 20000 s3 @@ -32,6 +34,7 @@ 1 1 + 20000 diff --git a/tests/integration/test_partition/test.py b/tests/integration/test_partition/test.py index 5a972b58f999..93f03f4420ea 100644 --- a/tests/integration/test_partition/test.py +++ b/tests/integration/test_partition/test.py @@ -70,7 +70,7 @@ def partition_complex_assert_columns_txt(): ) -def partition_complex_assert_checksums(): +def partition_complex_assert_checksums(after_detach=False): # Do not check increment.txt - it can be changed by other tests with FREEZE cmd = [ "bash", @@ -80,36 +80,67 @@ def partition_complex_assert_checksums(): " | sed 's shadow/[0-9]*/data/[a-z0-9_-]*/ shadow/1/data/test/ g' | sort | uniq", ] - checksums = ( - 
"082814b5aa5109160d5c0c5aff10d4df\tshadow/1/data/test/partition_complex/19700102_2_2_0/k.bin\n" - "082814b5aa5109160d5c0c5aff10d4df\tshadow/1/data/test/partition_complex/19700201_1_1_0/v1.bin\n" - "13cae8e658e0ca4f75c56b1fc424e150\tshadow/1/data/test/partition_complex/19700102_2_2_0/minmax_p.idx\n" - "25daad3d9e60b45043a70c4ab7d3b1c6\tshadow/1/data/test/partition_complex/19700102_2_2_0/partition.dat\n" - "3726312af62aec86b64a7708d5751787\tshadow/1/data/test/partition_complex/19700201_1_1_0/partition.dat\n" - "37855b06a39b79a67ea4e86e4a3299aa\tshadow/1/data/test/partition_complex/19700102_2_2_0/checksums.txt\n" - "38e62ff37e1e5064e9a3f605dfe09d13\tshadow/1/data/test/partition_complex/19700102_2_2_0/v1.bin\n" - "4ae71336e44bf9bf79d2752e234818a5\tshadow/1/data/test/partition_complex/19700102_2_2_0/k.mrk\n" - "4ae71336e44bf9bf79d2752e234818a5\tshadow/1/data/test/partition_complex/19700102_2_2_0/p.mrk\n" - "4ae71336e44bf9bf79d2752e234818a5\tshadow/1/data/test/partition_complex/19700102_2_2_0/v1.mrk\n" - "4ae71336e44bf9bf79d2752e234818a5\tshadow/1/data/test/partition_complex/19700201_1_1_0/k.mrk\n" - "4ae71336e44bf9bf79d2752e234818a5\tshadow/1/data/test/partition_complex/19700201_1_1_0/p.mrk\n" - "4ae71336e44bf9bf79d2752e234818a5\tshadow/1/data/test/partition_complex/19700201_1_1_0/v1.mrk\n" - "55a54008ad1ba589aa210d2629c1df41\tshadow/1/data/test/partition_complex/19700201_1_1_0/primary.idx\n" - "5f087cb3e7071bf9407e095821e2af8f\tshadow/1/data/test/partition_complex/19700201_1_1_0/checksums.txt\n" - "77d5af402ada101574f4da114f242e02\tshadow/1/data/test/partition_complex/19700102_2_2_0/columns.txt\n" - "77d5af402ada101574f4da114f242e02\tshadow/1/data/test/partition_complex/19700201_1_1_0/columns.txt\n" - "88cdc31ded355e7572d68d8cde525d3a\tshadow/1/data/test/partition_complex/19700201_1_1_0/p.bin\n" - "9e688c58a5487b8eaf69c9e1005ad0bf\tshadow/1/data/test/partition_complex/19700102_2_2_0/primary.idx\n" - 
"c0904274faa8f3f06f35666cc9c5bd2f\tshadow/1/data/test/partition_complex/19700102_2_2_0/default_compression_codec.txt\n" - "c0904274faa8f3f06f35666cc9c5bd2f\tshadow/1/data/test/partition_complex/19700201_1_1_0/default_compression_codec.txt\n" - "c4ca4238a0b923820dcc509a6f75849b\tshadow/1/data/test/partition_complex/19700102_2_2_0/count.txt\n" - "c4ca4238a0b923820dcc509a6f75849b\tshadow/1/data/test/partition_complex/19700201_1_1_0/count.txt\n" - "cfcb770c3ecd0990dcceb1bde129e6c6\tshadow/1/data/test/partition_complex/19700102_2_2_0/p.bin\n" - "cfcd208495d565ef66e7dff9f98764da\tshadow/1/data/test/partition_complex/19700102_2_2_0/metadata_version.txt\n" - "cfcd208495d565ef66e7dff9f98764da\tshadow/1/data/test/partition_complex/19700201_1_1_0/metadata_version.txt\n" - "e2af3bef1fd129aea73a890ede1e7a30\tshadow/1/data/test/partition_complex/19700201_1_1_0/k.bin\n" - "f2312862cc01adf34a93151377be2ddf\tshadow/1/data/test/partition_complex/19700201_1_1_0/minmax_p.idx\n" - ) + # no metadata version + if after_detach: + checksums = ( + "082814b5aa5109160d5c0c5aff10d4df\tshadow/1/data/test/partition_complex/19700102_2_2_0/k.bin\n" + "082814b5aa5109160d5c0c5aff10d4df\tshadow/1/data/test/partition_complex/19700201_1_1_0/v1.bin\n" + "13cae8e658e0ca4f75c56b1fc424e150\tshadow/1/data/test/partition_complex/19700102_2_2_0/minmax_p.idx\n" + "25daad3d9e60b45043a70c4ab7d3b1c6\tshadow/1/data/test/partition_complex/19700102_2_2_0/partition.dat\n" + "3726312af62aec86b64a7708d5751787\tshadow/1/data/test/partition_complex/19700201_1_1_0/partition.dat\n" + "37855b06a39b79a67ea4e86e4a3299aa\tshadow/1/data/test/partition_complex/19700102_2_2_0/checksums.txt\n" + "38e62ff37e1e5064e9a3f605dfe09d13\tshadow/1/data/test/partition_complex/19700102_2_2_0/v1.bin\n" + "4ae71336e44bf9bf79d2752e234818a5\tshadow/1/data/test/partition_complex/19700102_2_2_0/k.mrk\n" + "4ae71336e44bf9bf79d2752e234818a5\tshadow/1/data/test/partition_complex/19700102_2_2_0/p.mrk\n" + 
"4ae71336e44bf9bf79d2752e234818a5\tshadow/1/data/test/partition_complex/19700102_2_2_0/v1.mrk\n" + "4ae71336e44bf9bf79d2752e234818a5\tshadow/1/data/test/partition_complex/19700201_1_1_0/k.mrk\n" + "4ae71336e44bf9bf79d2752e234818a5\tshadow/1/data/test/partition_complex/19700201_1_1_0/p.mrk\n" + "4ae71336e44bf9bf79d2752e234818a5\tshadow/1/data/test/partition_complex/19700201_1_1_0/v1.mrk\n" + "55a54008ad1ba589aa210d2629c1df41\tshadow/1/data/test/partition_complex/19700201_1_1_0/primary.idx\n" + "5f087cb3e7071bf9407e095821e2af8f\tshadow/1/data/test/partition_complex/19700201_1_1_0/checksums.txt\n" + "77d5af402ada101574f4da114f242e02\tshadow/1/data/test/partition_complex/19700102_2_2_0/columns.txt\n" + "77d5af402ada101574f4da114f242e02\tshadow/1/data/test/partition_complex/19700201_1_1_0/columns.txt\n" + "88cdc31ded355e7572d68d8cde525d3a\tshadow/1/data/test/partition_complex/19700201_1_1_0/p.bin\n" + "9e688c58a5487b8eaf69c9e1005ad0bf\tshadow/1/data/test/partition_complex/19700102_2_2_0/primary.idx\n" + "c0904274faa8f3f06f35666cc9c5bd2f\tshadow/1/data/test/partition_complex/19700102_2_2_0/default_compression_codec.txt\n" + "c0904274faa8f3f06f35666cc9c5bd2f\tshadow/1/data/test/partition_complex/19700201_1_1_0/default_compression_codec.txt\n" + "c4ca4238a0b923820dcc509a6f75849b\tshadow/1/data/test/partition_complex/19700102_2_2_0/count.txt\n" + "c4ca4238a0b923820dcc509a6f75849b\tshadow/1/data/test/partition_complex/19700201_1_1_0/count.txt\n" + "cfcb770c3ecd0990dcceb1bde129e6c6\tshadow/1/data/test/partition_complex/19700102_2_2_0/p.bin\n" + "e2af3bef1fd129aea73a890ede1e7a30\tshadow/1/data/test/partition_complex/19700201_1_1_0/k.bin\n" + "f2312862cc01adf34a93151377be2ddf\tshadow/1/data/test/partition_complex/19700201_1_1_0/minmax_p.idx\n" + ) + else: + checksums = ( + "082814b5aa5109160d5c0c5aff10d4df\tshadow/1/data/test/partition_complex/19700102_2_2_0/k.bin\n" + "082814b5aa5109160d5c0c5aff10d4df\tshadow/1/data/test/partition_complex/19700201_1_1_0/v1.bin\n" + 
"13cae8e658e0ca4f75c56b1fc424e150\tshadow/1/data/test/partition_complex/19700102_2_2_0/minmax_p.idx\n" + "25daad3d9e60b45043a70c4ab7d3b1c6\tshadow/1/data/test/partition_complex/19700102_2_2_0/partition.dat\n" + "3726312af62aec86b64a7708d5751787\tshadow/1/data/test/partition_complex/19700201_1_1_0/partition.dat\n" + "37855b06a39b79a67ea4e86e4a3299aa\tshadow/1/data/test/partition_complex/19700102_2_2_0/checksums.txt\n" + "38e62ff37e1e5064e9a3f605dfe09d13\tshadow/1/data/test/partition_complex/19700102_2_2_0/v1.bin\n" + "4ae71336e44bf9bf79d2752e234818a5\tshadow/1/data/test/partition_complex/19700102_2_2_0/k.mrk\n" + "4ae71336e44bf9bf79d2752e234818a5\tshadow/1/data/test/partition_complex/19700102_2_2_0/p.mrk\n" + "4ae71336e44bf9bf79d2752e234818a5\tshadow/1/data/test/partition_complex/19700102_2_2_0/v1.mrk\n" + "4ae71336e44bf9bf79d2752e234818a5\tshadow/1/data/test/partition_complex/19700201_1_1_0/k.mrk\n" + "4ae71336e44bf9bf79d2752e234818a5\tshadow/1/data/test/partition_complex/19700201_1_1_0/p.mrk\n" + "4ae71336e44bf9bf79d2752e234818a5\tshadow/1/data/test/partition_complex/19700201_1_1_0/v1.mrk\n" + "55a54008ad1ba589aa210d2629c1df41\tshadow/1/data/test/partition_complex/19700201_1_1_0/primary.idx\n" + "5f087cb3e7071bf9407e095821e2af8f\tshadow/1/data/test/partition_complex/19700201_1_1_0/checksums.txt\n" + "77d5af402ada101574f4da114f242e02\tshadow/1/data/test/partition_complex/19700102_2_2_0/columns.txt\n" + "77d5af402ada101574f4da114f242e02\tshadow/1/data/test/partition_complex/19700201_1_1_0/columns.txt\n" + "88cdc31ded355e7572d68d8cde525d3a\tshadow/1/data/test/partition_complex/19700201_1_1_0/p.bin\n" + "9e688c58a5487b8eaf69c9e1005ad0bf\tshadow/1/data/test/partition_complex/19700102_2_2_0/primary.idx\n" + "c0904274faa8f3f06f35666cc9c5bd2f\tshadow/1/data/test/partition_complex/19700102_2_2_0/default_compression_codec.txt\n" + "c0904274faa8f3f06f35666cc9c5bd2f\tshadow/1/data/test/partition_complex/19700201_1_1_0/default_compression_codec.txt\n" + 
"c4ca4238a0b923820dcc509a6f75849b\tshadow/1/data/test/partition_complex/19700102_2_2_0/count.txt\n" + "c4ca4238a0b923820dcc509a6f75849b\tshadow/1/data/test/partition_complex/19700201_1_1_0/count.txt\n" + "cfcb770c3ecd0990dcceb1bde129e6c6\tshadow/1/data/test/partition_complex/19700102_2_2_0/p.bin\n" + "cfcd208495d565ef66e7dff9f98764da\tshadow/1/data/test/partition_complex/19700102_2_2_0/metadata_version.txt\n" + "cfcd208495d565ef66e7dff9f98764da\tshadow/1/data/test/partition_complex/19700201_1_1_0/metadata_version.txt\n" + "e2af3bef1fd129aea73a890ede1e7a30\tshadow/1/data/test/partition_complex/19700201_1_1_0/k.bin\n" + "f2312862cc01adf34a93151377be2ddf\tshadow/1/data/test/partition_complex/19700201_1_1_0/minmax_p.idx\n" + ) assert TSV(instance.exec_in_container(cmd).replace(" ", "\t")) == TSV(checksums) @@ -134,7 +165,7 @@ def test_partition_complex(partition_table_complex): q("ALTER TABLE test.partition_complex FREEZE") - partition_complex_assert_checksums() + partition_complex_assert_checksums(True) q("ALTER TABLE test.partition_complex DETACH PARTITION 197001") q("ALTER TABLE test.partition_complex ATTACH PARTITION 197001") @@ -144,7 +175,7 @@ def test_partition_complex(partition_table_complex): q("ALTER TABLE test.partition_complex MODIFY COLUMN v1 Int8") # Check the backup hasn't changed - partition_complex_assert_checksums() + partition_complex_assert_checksums(True) q("OPTIMIZE TABLE test.partition_complex") diff --git a/tests/integration/test_password_constraints/configs/default_password_type.xml b/tests/integration/test_password_constraints/configs/default_password_type.xml new file mode 100644 index 000000000000..4b23ea31df0a --- /dev/null +++ b/tests/integration/test_password_constraints/configs/default_password_type.xml @@ -0,0 +1,3 @@ + + double_sha1_password + diff --git a/tests/integration/test_password_constraints/test.py b/tests/integration/test_password_constraints/test.py index 9cdff51caa11..94e10ed5f9e3 100644 --- 
a/tests/integration/test_password_constraints/test.py +++ b/tests/integration/test_password_constraints/test.py @@ -5,6 +5,9 @@ cluster = ClickHouseCluster(__file__) node = cluster.add_instance("node", main_configs=["configs/complexity_rules.xml"]) +node2 = cluster.add_instance( + "node2", main_configs=["configs/default_password_type.xml"] +) @pytest.fixture(scope="module") @@ -39,3 +42,10 @@ def test_complexity_rules(start_cluster): node.query("CREATE USER u_5 IDENTIFIED WITH plaintext_password BY 'aA!000000000'") node.query("DROP USER u_5") + + +def test_default_password_type(start_cluster): + node2.query("CREATE USER u1 IDENTIFIED BY 'pwd'") + + required_type = "double_sha1_password" + assert required_type in node2.query("SHOW CREATE USER u1") diff --git a/tests/integration/test_s3_cluster/test.py b/tests/integration/test_s3_cluster/test.py index 241b90cac3f5..237a81da0f5b 100644 --- a/tests/integration/test_s3_cluster/test.py +++ b/tests/integration/test_s3_cluster/test.py @@ -247,9 +247,10 @@ def test_skip_unavailable_shards(started_cluster): assert result == "10\n" -def test_unskip_unavailable_shards(started_cluster): +def test_unset_skip_unavailable_shards(started_cluster): + # Although skip_unavailable_shards is not set, cluster table functions should always skip unavailable shards. 
node = started_cluster.instances["s0_0_0"] - error = node.query_and_get_error( + result = node.query( """ SELECT count(*) from s3Cluster( 'cluster_non_existent_port', @@ -258,7 +259,7 @@ def test_unskip_unavailable_shards(started_cluster): """ ) - assert "NETWORK_ERROR" in error + assert result == "10\n" def test_distributed_insert_select_with_replicated(started_cluster): diff --git a/tests/integration/test_storage_hdfs/test.py b/tests/integration/test_storage_hdfs/test.py index d4752d6cf2e6..edf5344e887f 100644 --- a/tests/integration/test_storage_hdfs/test.py +++ b/tests/integration/test_storage_hdfs/test.py @@ -788,6 +788,7 @@ def test_schema_inference_cache(started_cluster): def test_hdfsCluster_skip_unavailable_shards(started_cluster): + # Although skip_unavailable_shards is not set, cluster table functions should always skip unavailable shards. hdfs_api = started_cluster.hdfs_api node = started_cluster.instances["node1"] data = "1\tSerialize\t555.222\n2\tData\t777.333\n" @@ -801,16 +802,18 @@ def test_hdfsCluster_skip_unavailable_shards(started_cluster): ) -def test_hdfsCluster_unskip_unavailable_shards(started_cluster): +def test_hdfsCluster_unset_skip_unavailable_shards(started_cluster): hdfs_api = started_cluster.hdfs_api node = started_cluster.instances["node1"] data = "1\tSerialize\t555.222\n2\tData\t777.333\n" hdfs_api.write_data("/unskip_unavailable_shards", data) - error = node.query_and_get_error( - "select * from hdfsCluster('cluster_non_existent_port', 'hdfs://hdfs1:9000/unskip_unavailable_shards', 'TSV', 'id UInt64, text String, number Float64')" - ) - assert "NETWORK_ERROR" in error + assert ( + node1.query( + "select * from hdfsCluster('cluster_non_existent_port', 'hdfs://hdfs1:9000/skip_unavailable_shards', 'TSV', 'id UInt64, text String, number Float64')" + ) + == data + ) if __name__ == "__main__": diff --git a/tests/integration/test_system_metrics/test.py b/tests/integration/test_system_metrics/test.py index 439e8b66db11..8539828a8b81 100644 
--- a/tests/integration/test_system_metrics/test.py +++ b/tests/integration/test_system_metrics/test.py @@ -157,3 +157,57 @@ def test_metrics_storage_buffer_size(start_cluster): ) == "0\n" ) + + +def test_attach_without_zk_incr_readonly_metric(start_cluster): + assert ( + node1.query("SELECT value FROM system.metrics WHERE metric = 'ReadonlyReplica'") + == "0\n" + ) + + node1.query( + "ATTACH TABLE test.test_no_zk UUID 'a50b7933-59b2-49ce-8db6-59da3c9b4413' (i Int8, d Date) ENGINE = ReplicatedMergeTree('no_zk', 'replica') ORDER BY tuple()" + ) + assert_eq_with_retry( + node1, + "SELECT value FROM system.metrics WHERE metric = 'ReadonlyReplica'", + "1\n", + retry_count=300, + sleep_time=1, + ) + + node1.query("DETACH TABLE test.test_no_zk") + assert_eq_with_retry( + node1, + "SELECT value FROM system.metrics WHERE metric = 'ReadonlyReplica'", + "0\n", + retry_count=300, + sleep_time=1, + ) + + node1.query("ATTACH TABLE test.test_no_zk") + assert_eq_with_retry( + node1, + "SELECT value FROM system.metrics WHERE metric = 'ReadonlyReplica'", + "1\n", + retry_count=300, + sleep_time=1, + ) + + node1.query("SYSTEM RESTORE REPLICA test.test_no_zk") + assert_eq_with_retry( + node1, + "SELECT value FROM system.metrics WHERE metric = 'ReadonlyReplica'", + "0\n", + retry_count=300, + sleep_time=1, + ) + + node1.query("DROP TABLE test.test_no_zk") + assert_eq_with_retry( + node1, + "SELECT value FROM system.metrics WHERE metric = 'ReadonlyReplica'", + "0\n", + retry_count=300, + sleep_time=1, + ) diff --git a/tests/performance/parallel_hash_join.xml b/tests/performance/parallel_hash_join.xml new file mode 100644 index 000000000000..412f33e9fe73 --- /dev/null +++ b/tests/performance/parallel_hash_join.xml @@ -0,0 +1,75 @@ + + + 8 + parallel_hash + + + CREATE TABLE test_data_left ( + `id` UInt64, + `x` UInt64, + `y` UInt64, + `z` UInt64, + `w` String + )ENGINE = Memory + + + + CREATE TABLE test_data_right ( + `id` UInt64, + `x` UInt64, + `y` UInt64, + `z` UInt64, + `w` String + 
)ENGINE = Memory + + + insert into test_data_left select number as id, number % 1111 as x, number * 3 as y, number - 100 as z, cast(number as String) as w from numbers(10000000); + + + insert into test_data_right select number as id, number % 1311 as x, number * 5 as y, number + 100 as z, cast(number as String) as w from numbers(10000000); + + + SELECT + t1.id, + t1.x, + t1.y, + t2.id, + t2.x, + t2.y + FROM test_data_left AS t1 + INNER JOIN test_data_right AS t2 + ON t1.id = t2.id + FORMAT `Null` + + + + SELECT + t1.id, + t1.x, + t1.y, + t2.id, + t2.x, + t2.y + FROM test_data_left AS t1 + LEFT JOIN test_data_right AS t2 + ON t1.id = t2.id + FORMAT `Null` + + + + SELECT + t1.id, + t1.x, + t1.y, + t2.id, + t2.x, + t2.y + FROM test_data_left AS t1 + RIGHT JOIN test_data_right AS t2 + ON t1.id = t2.id + FORMAT `Null` + + + DROP TABLE test_data_right + DROP TABLE test_data_left + diff --git a/tests/queries/0_stateless/00002_log_and_exception_messages_formatting.reference b/tests/queries/0_stateless/00002_log_and_exception_messages_formatting.reference index cd9da9837857..11b660b54a3c 100644 --- a/tests/queries/0_stateless/00002_log_and_exception_messages_formatting.reference +++ b/tests/queries/0_stateless/00002_log_and_exception_messages_formatting.reference @@ -2,7 +2,7 @@ runtime messages 0.001 runtime exceptions 0.05 messages shorter than 10 1 messages shorter than 16 3 -exceptions shorter than 30 30 +exceptions shorter than 30 3 noisy messages 0.3 noisy Trace messages 0.16 noisy Debug messages 0.09 diff --git a/tests/queries/0_stateless/00002_log_and_exception_messages_formatting.sql b/tests/queries/0_stateless/00002_log_and_exception_messages_formatting.sql index 480effec0653..7796785afb53 100644 --- a/tests/queries/0_stateless/00002_log_and_exception_messages_formatting.sql +++ b/tests/queries/0_stateless/00002_log_and_exception_messages_formatting.sql @@ -49,7 +49,14 @@ create temporary table known_short_messages (s String) as select * from (select 'Column 
''{}'' already exists', 'No macro {} in config', 'Invalid origin H3 index: {}', 'Invalid session timeout: ''{}''', 'Tuple cannot be empty', 'Database name is empty', 'Table {} is not a Dictionary', 'Expected function, got: {}', 'Unknown identifier: ''{}''', -'Failed to {} input ''{}''', '{}.{} is not a VIEW', 'Cannot convert NULL to {}', 'Dictionary {} doesn''t exist' +'Failed to {} input ''{}''', '{}.{} is not a VIEW', 'Cannot convert NULL to {}', 'Dictionary {} doesn''t exist', +'Write file: {}', 'Unable to parse JSONPath', 'Host is empty in S3 URI.', 'Expected end of line', +'inflate failed: {}{}', 'Center is not valid', 'Column ''{}'' is ambiguous', 'Cannot parse object', 'Invalid date: {}', +'There is no cache by name: {}', 'No part {} in table', '`{}` should be a String', 'There are duplicate id {}', +'Invalid replica name: {}', 'Unexpected value {} in enum', 'Unknown BSON type: {}', 'Point is not valid', +'Invalid qualified name: {}', 'INTO OUTFILE is not allowed', 'Arguments must not be NaN', 'Cell is not valid', +'brotli decode error{}', 'Invalid H3 index: {}', 'Too large node state size', 'No additional keys found.', +'Attempt to read after EOF.', 'Replication was stopped', '{} building file infos', 'Cannot parse uuid {}' ] as arr) array join arr; -- Check that we don't have too many short meaningless message patterns. @@ -59,7 +66,7 @@ select 'messages shorter than 10', max2(countDistinctOrDefault(message_format_st select 'messages shorter than 16', max2(countDistinctOrDefault(message_format_string), 3) from logs where length(message_format_string) < 16 and message_format_string not in known_short_messages; -- Same as above, but exceptions must be more informative. 
Feel free to update the threshold or remove this query if really necessary -select 'exceptions shorter than 30', max2(countDistinctOrDefault(message_format_string), 30) from logs where length(message_format_string) < 30 and message ilike '%DB::Exception%' and message_format_string not in known_short_messages; +select 'exceptions shorter than 30', max2(countDistinctOrDefault(message_format_string), 3) from logs where length(message_format_string) < 30 and message ilike '%DB::Exception%' and message_format_string not in known_short_messages; -- Avoid too noisy messages: top 1 message frequency must be less than 30%. We should reduce the threshold @@ -98,7 +105,9 @@ select 'incorrect patterns', max2(countDistinct(message_format_string), 15) from where ((rand() % 8) = 0) and message not like (replaceRegexpAll(message_format_string, '{[:.0-9dfx]*}', '%') as s) and message not like (s || ' (skipped % similar messages)') - and message not like ('%Exception: '||s||'%') group by message_format_string + and message not like ('%Exception: '||s||'%') + and message not like ('%(skipped % similar messages)%') + group by message_format_string ) where any_message not like '%Poco::Exception%'; drop table logs; diff --git a/tests/queries/0_stateless/00049_any_left_join.sql b/tests/queries/0_stateless/00049_any_left_join.sql index ecd079a5085b..b3ff7a4ea41c 100644 --- a/tests/queries/0_stateless/00049_any_left_join.sql +++ b/tests/queries/0_stateless/00049_any_left_join.sql @@ -1 +1 @@ -SELECT number, joined FROM system.numbers ANY LEFT JOIN (SELECT number * 2 AS number, number * 10 + 1 AS joined FROM system.numbers LIMIT 10) js2 USING number LIMIT 10 +SELECT number, joined FROM (SELECT number FROM system.numbers LIMIT 1000) as js1 ANY LEFT JOIN (SELECT number * 2 AS number, number * 10 + 1 AS joined FROM system.numbers LIMIT 10) js2 USING number ORDER BY number LIMIT 10 diff --git a/tests/queries/0_stateless/00051_any_inner_join.sql 
b/tests/queries/0_stateless/00051_any_inner_join.sql index 566b5ad526b5..65577bdaf8bb 100644 --- a/tests/queries/0_stateless/00051_any_inner_join.sql +++ b/tests/queries/0_stateless/00051_any_inner_join.sql @@ -8,4 +8,5 @@ ANY INNER JOIN ( SELECT number * 2 AS k, number AS joined FROM system.numbers LIMIT 10 ) AS b -USING k; +USING k +ORDER BY k; diff --git a/tests/queries/0_stateless/00052_all_left_join.sql b/tests/queries/0_stateless/00052_all_left_join.sql index 6d5a1ba073c8..49c3473dbb13 100644 --- a/tests/queries/0_stateless/00052_all_left_join.sql +++ b/tests/queries/0_stateless/00052_all_left_join.sql @@ -6,4 +6,5 @@ ALL LEFT JOIN ( SELECT intDiv(number, 2) AS k, number AS joined FROM system.numbers LIMIT 10 ) js2 -USING k; +USING k +ORDER BY k, joined; diff --git a/tests/queries/0_stateless/00057_join_aliases.sql b/tests/queries/0_stateless/00057_join_aliases.sql index 6f2830943962..bc4c5f9faef8 100644 --- a/tests/queries/0_stateless/00057_join_aliases.sql +++ b/tests/queries/0_stateless/00057_join_aliases.sql @@ -1,7 +1,8 @@ SELECT * FROM ( SELECT number, n, j1, j2 - FROM (SELECT number, number / 2 AS n FROM system.numbers) js1 + FROM (SELECT number, number / 2 AS n FROM system.numbers LIMIT 1000) js1 ANY LEFT JOIN (SELECT number / 3 AS n, number AS j1, 'Hello' AS j2 FROM system.numbers LIMIT 10) js2 - USING n LIMIT 10 + USING n + ORDER BY number LIMIT 10 ) ORDER BY n SETTINGS join_algorithm = 'hash'; -- the query does not finish with merge join diff --git a/tests/queries/0_stateless/00163_shard_join_with_empty_table.sql b/tests/queries/0_stateless/00163_shard_join_with_empty_table.sql index 07242f7a8d4d..433f268faee8 100644 --- a/tests/queries/0_stateless/00163_shard_join_with_empty_table.sql +++ b/tests/queries/0_stateless/00163_shard_join_with_empty_table.sql @@ -5,7 +5,7 @@ SET joined_subquery_requires_alias = 0; SELECT * FROM ( SELECT number, n, j1, j2 - FROM (SELECT number, number / 2 AS n FROM remote('127.0.0.{2,3}', system.numbers)) + FROM (SELECT 
number, number / 2 AS n FROM remote('127.0.0.{2,3}', system.numbers) LIMIT 10) ANY LEFT JOIN (SELECT number / 3 AS n, number AS j1, 'Hello' AS j2 FROM system.numbers LIMIT 0) USING n LIMIT 10 ) ORDER BY number; @@ -19,7 +19,7 @@ SELECT * FROM ( SELECT * FROM ( SELECT number, n, j1, j2 - FROM (SELECT number, number / 2 AS n FROM remote('127.0.0.{2,3}', system.numbers)) + FROM (SELECT number, number / 2 AS n FROM remote('127.0.0.{2,3}', system.numbers) LIMIT 10) GLOBAL ANY LEFT JOIN (SELECT number / 3 AS n, number AS j1, 'Hello' AS j2 FROM system.numbers LIMIT 0) USING n LIMIT 10 ) ORDER BY number; diff --git a/tests/queries/0_stateless/00189_time_zones_long.reference b/tests/queries/0_stateless/00189_time_zones_long.reference index 8717a662771c..d41c925bbe55 100644 --- a/tests/queries/0_stateless/00189_time_zones_long.reference +++ b/tests/queries/0_stateless/00189_time_zones_long.reference @@ -246,18 +246,18 @@ toUnixTimestamp 1426415400 1426415400 date_trunc -2019-01-01 -2020-01-01 -2020-01-01 -2019-10-01 -2020-01-01 -2020-01-01 -2019-12-01 -2020-01-01 -2020-01-01 -2019-12-30 -2019-12-30 -2019-12-30 +2019-01-01 00:00:00 +2020-01-01 00:00:00 +2020-01-01 00:00:00 +2019-10-01 00:00:00 +2020-01-01 00:00:00 +2020-01-01 00:00:00 +2019-12-01 00:00:00 +2020-01-01 00:00:00 +2020-01-01 00:00:00 +2019-12-30 00:00:00 +2019-12-30 00:00:00 +2019-12-30 00:00:00 2019-12-31 00:00:00 2020-01-01 00:00:00 2020-01-02 00:00:00 @@ -270,18 +270,18 @@ date_trunc 2019-12-31 20:11:22 2020-01-01 12:11:22 2020-01-02 05:11:22 -2019-01-01 -2020-01-01 -2020-01-01 -2019-10-01 -2020-01-01 -2020-01-01 -2019-12-01 -2020-01-01 -2020-01-01 -2019-12-30 -2019-12-30 -2019-12-30 +2019-01-01 00:00:00 +2020-01-01 00:00:00 +2020-01-01 00:00:00 +2019-10-01 00:00:00 +2020-01-01 00:00:00 +2020-01-01 00:00:00 +2019-12-01 00:00:00 +2020-01-01 00:00:00 +2020-01-01 00:00:00 +2019-12-30 00:00:00 +2019-12-30 00:00:00 +2019-12-30 00:00:00 2019-12-31 00:00:00 2020-01-01 00:00:00 2020-01-02 00:00:00 @@ -294,8 +294,8 @@ 
date_trunc 2019-12-31 20:11:22 2020-01-01 12:11:22 2020-01-02 05:11:22 -2020-01-01 -2020-01-01 -2020-01-01 -2019-12-30 +2020-01-01 00:00:00 +2020-01-01 00:00:00 +2020-01-01 00:00:00 +2019-12-30 00:00:00 2020-01-01 00:00:00 diff --git a/tests/queries/0_stateless/00475_in_join_db_table.sql b/tests/queries/0_stateless/00475_in_join_db_table.sql index 5f90d1080154..6aea40be3445 100644 --- a/tests/queries/0_stateless/00475_in_join_db_table.sql +++ b/tests/queries/0_stateless/00475_in_join_db_table.sql @@ -13,11 +13,11 @@ DROP TABLE set; DROP TABLE IF EXISTS join; CREATE TABLE join (k UInt8, x String) ENGINE = Memory; INSERT INTO join VALUES (1, 'hello'); -SELECT k, x FROM (SELECT arrayJoin([1, 2]) AS k) js1 ANY LEFT JOIN join USING k; +SELECT k, x FROM (SELECT arrayJoin([1, 2]) AS k) js1 ANY LEFT JOIN join USING k ORDER BY k; DROP TABLE join; CREATE TABLE join (k UInt8, x String) ENGINE = Join(ANY, LEFT, k); INSERT INTO join VALUES (1, 'hello'); -SELECT k, x FROM (SELECT arrayJoin([1, 2]) AS k) js1 ANY LEFT JOIN join USING k; +SELECT k, x FROM (SELECT arrayJoin([1, 2]) AS k) js1 ANY LEFT JOIN join USING k ORDER BY k; DROP TABLE join; diff --git a/tests/queries/0_stateless/00700_decimal_aggregates.reference b/tests/queries/0_stateless/00700_decimal_aggregates.reference index acf41546f5c4..79195312867b 100644 --- a/tests/queries/0_stateless/00700_decimal_aggregates.reference +++ b/tests/queries/0_stateless/00700_decimal_aggregates.reference @@ -5,7 +5,7 @@ -1275 -424.99999983 -255 -1275 -424.99999983 -255 101 101 101 101 101 101 -101 -101 -101 -101 -101 -101 -(101,101,101) (101,101,101) (101,101,101) (101,101,101) (102,100,101) +(101,101,101) (101,101,101) (101,101,101) (101,101,101) (1,1,1,1,1,1) 5 5 5 10 10 10 -50 -50 -16.66666666 -16.66666666 -10 -10 diff --git a/tests/queries/0_stateless/00700_decimal_aggregates.sql b/tests/queries/0_stateless/00700_decimal_aggregates.sql index a1814fc866fa..6ca37e069180 100644 --- 
a/tests/queries/0_stateless/00700_decimal_aggregates.sql +++ b/tests/queries/0_stateless/00700_decimal_aggregates.sql @@ -24,7 +24,7 @@ SELECT (uniq(a), uniq(b), uniq(c)), (uniqCombined(a), uniqCombined(b), uniqCombined(c)), (uniqCombined(17)(a), uniqCombined(17)(b), uniqCombined(17)(c)), (uniqExact(a), uniqExact(b), uniqExact(c)), - (uniqHLL12(a), uniqHLL12(b), uniqHLL12(c)) + (102 - uniqHLL12(a) >= 0, 102 - uniqHLL12(b) >= 0, 102 - uniqHLL12(c) >= 0, uniqHLL12(a) - 99 >= 0, uniqHLL12(b) - 99 >= 0, uniqHLL12(c) - 99 >= 0) FROM (SELECT * FROM decimal ORDER BY a); SELECT uniqUpTo(10)(a), uniqUpTo(10)(b), uniqUpTo(10)(c) FROM decimal WHERE a >= 0 AND a < 5; diff --git a/tests/queries/0_stateless/00725_join_on_bug_2.sql b/tests/queries/0_stateless/00725_join_on_bug_2.sql index 14fedfa14e76..7eaddab24b73 100644 --- a/tests/queries/0_stateless/00725_join_on_bug_2.sql +++ b/tests/queries/0_stateless/00725_join_on_bug_2.sql @@ -9,17 +9,17 @@ insert into t_00725_2 values(2,2); create table s_00725_2(a Int64, b Int64) engine = TinyLog; insert into s_00725_2 values(1,1); -select a, b, s_a, s_b from t_00725_2 all left join (select a,b,a s_a, b s_b from s_00725_2) using (a,b); +select a, b, s_a, s_b from t_00725_2 all left join (select a,b,a s_a, b s_b from s_00725_2) using (a,b) order by a; select '-'; -select t_00725_2.*, s_00725_2.* from t_00725_2 all left join s_00725_2 using (a,b); +select t_00725_2.*, s_00725_2.* from t_00725_2 all left join s_00725_2 using (a,b) order by t_00725_2.a; select '-'; -select a,b,s_a,s_b from t_00725_2 all left join (select a, b, a s_a, b s_b from s_00725_2) s_00725_2 on (s_00725_2.a = t_00725_2.a and s_00725_2.b = t_00725_2.b); +select a,b,s_a,s_b from t_00725_2 all left join (select a, b, a s_a, b s_b from s_00725_2) s_00725_2 on (s_00725_2.a = t_00725_2.a and s_00725_2.b = t_00725_2.b) order by a; select '-'; -select * from t_00725_2 all left join (select a s_a, b s_b from s_00725_2) on (s_a = t_00725_2.a and s_b = t_00725_2.b); +select * 
from t_00725_2 all left join (select a s_a, b s_b from s_00725_2) on (s_a = t_00725_2.a and s_b = t_00725_2.b) order by t_00725_2.a; select '-'; -select a,b,s_a,s_b from t_00725_2 all left join (select a,b, a s_a, b s_b from s_00725_2) on (s_a = t_00725_2.a and s_b = t_00725_2.b); +select a,b,s_a,s_b from t_00725_2 all left join (select a,b, a s_a, b s_b from s_00725_2) on (s_a = t_00725_2.a and s_b = t_00725_2.b) order by a; select '-'; -select t_00725_2.*, s_00725_2.* from t_00725_2 all left join s_00725_2 on (s_00725_2.a = t_00725_2.a and s_00725_2.b = t_00725_2.b); +select t_00725_2.*, s_00725_2.* from t_00725_2 all left join s_00725_2 on (s_00725_2.a = t_00725_2.a and s_00725_2.b = t_00725_2.b) order by t_00725_2.a; drop table if exists t_00725_2; drop table if exists s_00725_2; diff --git a/tests/queries/0_stateless/00800_low_cardinality_join.sql b/tests/queries/0_stateless/00800_low_cardinality_join.sql index 9c1fd9b7ad3e..e0f284102ae4 100644 --- a/tests/queries/0_stateless/00800_low_cardinality_join.sql +++ b/tests/queries/0_stateless/00800_low_cardinality_join.sql @@ -22,10 +22,10 @@ select * from (select toLowCardinality(toNullable(dummy)) as val from system.one select * from (select toLowCardinality(dummy) as val from system.one) any left join (select toLowCardinality(toNullable(dummy)) as rval from system.one) on val + 0 = rval * 1; select * from (select toLowCardinality(toNullable(dummy)) as val from system.one) any left join (select toLowCardinality(toNullable(dummy)) as rval from system.one) on val + 0 = rval * 1; select '-'; -select * from (select number as l from system.numbers limit 3) any left join (select number as r from system.numbers limit 3) on l + 1 = r * 1; -select * from (select toLowCardinality(number) as l from system.numbers limit 3) any left join (select number as r from system.numbers limit 3) on l + 1 = r * 1; -select * from (select number as l from system.numbers limit 3) any left join (select toLowCardinality(number) as r from 
system.numbers limit 3) on l + 1 = r * 1; -select * from (select toLowCardinality(number) as l from system.numbers limit 3) any left join (select toLowCardinality(number) as r from system.numbers limit 3) on l + 1 = r * 1; -select * from (select toLowCardinality(toNullable(number)) as l from system.numbers limit 3) any left join (select toLowCardinality(number) as r from system.numbers limit 3) on l + 1 = r * 1; -select * from (select toLowCardinality(number) as l from system.numbers limit 3) any left join (select toLowCardinality(toNullable(number)) as r from system.numbers limit 3) on l + 1 = r * 1; -select * from (select toLowCardinality(toNullable(number)) as l from system.numbers limit 3) any left join (select toLowCardinality(toNullable(number)) as r from system.numbers limit 3) on l + 1 = r * 1; +select * from (select number as l from system.numbers limit 3) any left join (select number as r from system.numbers limit 3) on l + 1 = r * 1 order by l; +select * from (select toLowCardinality(number) as l from system.numbers limit 3) any left join (select number as r from system.numbers limit 3) on l + 1 = r * 1 order by l; +select * from (select number as l from system.numbers limit 3) any left join (select toLowCardinality(number) as r from system.numbers limit 3) on l + 1 = r * 1 order by l; +select * from (select toLowCardinality(number) as l from system.numbers limit 3) any left join (select toLowCardinality(number) as r from system.numbers limit 3) on l + 1 = r * 1 order by l; +select * from (select toLowCardinality(toNullable(number)) as l from system.numbers limit 3) any left join (select toLowCardinality(number) as r from system.numbers limit 3) on l + 1 = r * 1 order by l; +select * from (select toLowCardinality(number) as l from system.numbers limit 3) any left join (select toLowCardinality(toNullable(number)) as r from system.numbers limit 3) on l + 1 = r * 1 order by l; +select * from (select toLowCardinality(toNullable(number)) as l from 
system.numbers limit 3) any left join (select toLowCardinality(toNullable(number)) as r from system.numbers limit 3) on l + 1 = r * 1 order by l; diff --git a/tests/queries/0_stateless/00818_inner_join_bug_3567.reference b/tests/queries/0_stateless/00818_inner_join_bug_3567.reference index 41e0d8ea43aa..e640e676a67e 100644 --- a/tests/queries/0_stateless/00818_inner_join_bug_3567.reference +++ b/tests/queries/0_stateless/00818_inner_join_bug_3567.reference @@ -1,19 +1,11 @@ -┌─a─┬──────────b─┐ -│ a │ 2018-01-01 │ -│ b │ 2018-01-01 │ -│ c │ 2018-01-01 │ -└───┴────────────┘ -┌─c─┬─table2.a─┬──────────d─┬─a─┬──────────b─┐ -│ B │ b │ 2018-01-01 │ B │ 2018-01-01 │ -│ C │ c │ 2018-01-01 │ C │ 2018-01-01 │ -│ D │ d │ 2018-01-01 │ D │ 2018-01-01 │ -└───┴──────────┴────────────┴───┴────────────┘ -┌─a─┬──────────b─┬─c─┬──────────d─┬─c─┐ -│ a │ 2018-01-01 │ │ 1970-01-01 │ │ -│ b │ 2018-01-01 │ B │ 2018-01-01 │ B │ -│ c │ 2018-01-01 │ C │ 2018-01-01 │ C │ -└───┴────────────┴───┴────────────┴───┘ -┌─a─┬──────────b─┬─c─┬──────────d─┬─c─┐ -│ b │ 2018-01-01 │ B │ 2018-01-01 │ B │ -│ c │ 2018-01-01 │ C │ 2018-01-01 │ C │ -└───┴────────────┴───┴────────────┴───┘ +a 2018-01-01 +b 2018-01-01 +c 2018-01-01 +B b 2018-01-01 B 2018-01-01 +C c 2018-01-01 C 2018-01-01 +D d 2018-01-01 D 2018-01-01 +a 2018-01-01 1970-01-01 +b 2018-01-01 B 2018-01-01 B +c 2018-01-01 C 2018-01-01 C +b 2018-01-01 B 2018-01-01 B +c 2018-01-01 C 2018-01-01 C diff --git a/tests/queries/0_stateless/00818_inner_join_bug_3567.sql b/tests/queries/0_stateless/00818_inner_join_bug_3567.sql index cc0b63f9def5..c3cc9ebe31fb 100644 --- a/tests/queries/0_stateless/00818_inner_join_bug_3567.sql +++ b/tests/queries/0_stateless/00818_inner_join_bug_3567.sql @@ -9,10 +9,10 @@ CREATE TABLE table2(c String, a String, d Date) ENGINE MergeTree order by c; INSERT INTO table1 VALUES ('a', '2018-01-01') ('b', '2018-01-01') ('c', '2018-01-01'); INSERT INTO table2 VALUES ('D', 'd', '2018-01-01') ('B', 'b', '2018-01-01') ('C', 'c', 
'2018-01-01'); -SELECT * FROM table1 t1 FORMAT PrettyCompact; -SELECT *, c as a, d as b FROM table2 FORMAT PrettyCompact; -SELECT * FROM table1 t1 ALL LEFT JOIN (SELECT *, c, d as b FROM table2) t2 USING (a, b) ORDER BY d, t1.a FORMAT PrettyCompact; -SELECT * FROM table1 t1 ALL INNER JOIN (SELECT *, c, d as b FROM table2) t2 USING (a, b) ORDER BY d, t1.a FORMAT PrettyCompact; +SELECT * FROM table1 t1; +SELECT *, c as a, d as b FROM table2; +SELECT * FROM table1 t1 ALL LEFT JOIN (SELECT *, c, d as b FROM table2) t2 USING (a, b) ORDER BY d, t1.a; +SELECT * FROM table1 t1 ALL INNER JOIN (SELECT *, c, d as b FROM table2) t2 USING (a, b) ORDER BY d, t1.a; DROP TABLE table1; DROP TABLE table2; diff --git a/tests/queries/0_stateless/00820_multiple_joins.reference b/tests/queries/0_stateless/00820_multiple_joins.reference index 3d7054bacfc9..b2d3c1cef742 100644 --- a/tests/queries/0_stateless/00820_multiple_joins.reference +++ b/tests/queries/0_stateless/00820_multiple_joins.reference @@ -1,51 +1,58 @@ +-- query 1 -- 0 0 0 6 60 600 12 120 1200 18 180 1800 +-- query 2 -- 0 0 0 10 100 1000 20 200 2000 -┌─t1.a─┬─t2.a─┬─t2.b─┬─t3.b─┬─t3.c─┬─t5.a─┬─t5.b─┬─t5.c─┐ -│ 0 │ 0 │ 0 │ 0 │ 0 │ 0 │ 0 │ 0 │ -└──────┴──────┴──────┴──────┴──────┴──────┴──────┴──────┘ +-- query 3 -- +0 0 0 0 0 0 0 0 +-- query 4 -- 0 0 0 0 6 6 60 60 12 12 120 120 18 18 180 180 +-- query 5 -- 0 0 0 0 6 6 60 60 12 12 120 120 18 18 180 180 +-- query 6 -- 0 0 0 0 6 6 60 60 12 12 120 120 18 18 180 180 +-- query 7 -- 0 0 0 0 6 6 60 60 12 12 120 120 18 18 180 180 +-- query 8 -- 0 0 0 0 6 6 60 60 12 12 120 120 18 18 180 180 +-- query 9 -- 0 0 0 0 6 6 60 60 12 12 120 120 18 18 180 180 -┌─t1.a─┬─t2.a─┬─t2.b─┬─t3.b─┬────c─┐ -│ 0 │ 0 │ 0 │ 0 │ 0 │ -│ 6 │ 6 │ 60 │ 60 │ 600 │ -│ 12 │ 12 │ 120 │ 120 │ 1200 │ -│ 18 │ 18 │ 180 │ 180 │ 1800 │ -└──────┴──────┴──────┴──────┴──────┘ -┌─t1.a─┬─t2.a─┬─t2.b─┬─t3.b─┬────c─┐ -│ 0 │ 0 │ 0 │ 0 │ 0 │ -│ 6 │ 6 │ 60 │ 60 │ 600 │ -│ 12 │ 12 │ 120 │ 120 │ 1200 │ -│ 18 │ 18 │ 180 │ 180 │ 1800 
│ -└──────┴──────┴──────┴──────┴──────┘ +-- query 10 -- +0 0 0 0 0 +6 6 60 60 600 +12 12 120 120 1200 +18 18 180 180 1800 +-- query 11 -- +0 0 0 0 0 +6 6 60 60 600 +12 12 120 120 1200 +18 18 180 180 1800 +-- query 12 -- 0 0 0 0 0 0 0 6 6 60 60 66 66 120 12 12 120 120 132 132 240 18 18 180 180 198 198 360 +-- query 13 -- 1 diff --git a/tests/queries/0_stateless/00820_multiple_joins.sql b/tests/queries/0_stateless/00820_multiple_joins.sql index 5c7a7bebb0be..cf849c7ef9ce 100644 --- a/tests/queries/0_stateless/00820_multiple_joins.sql +++ b/tests/queries/0_stateless/00820_multiple_joins.sql @@ -16,17 +16,21 @@ INSERT INTO table2 SELECT number * 2, number * 20 FROM numbers(11); INSERT INTO table3 SELECT number * 30, number * 300 FROM numbers(10); INSERT INTO table5 SELECT number * 5, number * 50, number * 500 FROM numbers(10); +select '-- query 1 --'; select t1.a, t2.b, t3.c from table1 as t1 join table2 as t2 on t1.a = t2.a join table3 as t3 on t2.b = t3.b ORDER BY t1.a; +select '-- query 2 --'; select t1.a, t2.b, t5.c from table1 as t1 join table2 as t2 on t1.a = t2.a join table5 as t5 on t1.a = t5.a AND t2.b = t5.b ORDER BY t1.a; +select '-- query 3 --'; select t1.a, t2.a, t2.b, t3.b, t3.c, t5.a, t5.b, t5.c from table1 as t1 join table2 as t2 on t1.a = t2.a join table3 as t3 on t2.b = t3.b join table5 as t5 on t3.c = t5.c ORDER BY t1.a -FORMAT PrettyCompactNoEscapes; +; +select '-- query 4 --'; select t1.a as t1_a, t2.a as t2_a, t2.b as t2_b, t3.b as t3_b from table1 as t1 join table2 as t2 on t1_a = t2_a @@ -34,6 +38,7 @@ join table3 as t3 on t2_b = t3_b ORDER BY t1.a ; +select '-- query 5 --'; select t1.a as t1_a, t2.a as t2_a, t2.b as t2_b, t3.b as t3_b from table1 as t1 join table2 as t2 on t1.a = t2.a @@ -41,6 +46,7 @@ join table3 as t3 on t2.b = t3.b ORDER BY t1.a ; +select '-- query 6 --'; select t1.a as t1_a, t2.a as t2_a, t2.b as t2_b, t3.b as t3_b from table1 as t1 join table2 as t2 on table1.a = table2.a @@ -48,6 +54,7 @@ join table3 as t3 on table2.b = 
table3.b ORDER BY t1.a ; +select '-- query 7 --'; select t1.a, t2.a, t2.b, t3.b from table1 as t1 join table2 as t2 on table1.a = table2.a @@ -55,6 +62,7 @@ join table3 as t3 on table2.b = table3.b ORDER BY t1.a ; +select '-- query 8 --'; select t1.a, t2.a, t2.b, t3.b from table1 as t1 join table2 as t2 on t1.a = t2.a @@ -62,6 +70,7 @@ join table3 as t3 on t2.b = t3.b ORDER BY t1.a ; +select '-- query 9 --'; select table1.a, table2.a, table2.b, table3.b from table1 as t1 join table2 as t2 on table1.a = table2.a @@ -69,20 +78,23 @@ join table3 as t3 on table2.b = table3.b ORDER BY t1.a ; +select '-- query 10 --'; select t1.*, t2.*, t3.* from table1 as t1 join table2 as t2 on table1.a = table2.a join table3 as t3 on table2.b = table3.b ORDER BY t1.a -FORMAT PrettyCompactNoEscapes; +; +select '-- query 11 --'; select * from table1 as t1 join table2 as t2 on t1.a = t2.a join table3 as t3 on t2.b = t3.b ORDER BY t1.a -FORMAT PrettyCompactNoEscapes; +; +select '-- query 12 --'; select t1.a as t1_a, t2.a as t2_a, t2.b as t2_b, t3.b as t3_b, (t1.a + table2.b) as t1_t2_x, (table1.a + table3.b) as t1_t3_x, (t2.b + t3.b) as t2_t3_x from table1 as t1 @@ -94,6 +106,7 @@ ORDER BY t1.a CREATE TABLE table_set ( x UInt32 ) ENGINE = Set; INSERT INTO table_set VALUES (0), (1), (2); +select '-- query 13 --'; select count() from table1 as t1 join table2 as t2 on t1.a = t2.a diff --git a/tests/queries/0_stateless/00820_multiple_joins_subquery_requires_alias.reference b/tests/queries/0_stateless/00820_multiple_joins_subquery_requires_alias.reference index 11755d6bc8b2..003d4822d329 100644 --- a/tests/queries/0_stateless/00820_multiple_joins_subquery_requires_alias.reference +++ b/tests/queries/0_stateless/00820_multiple_joins_subquery_requires_alias.reference @@ -1,49 +1,55 @@ +-- query 1 -- 0 0 0 6 60 600 12 120 1200 18 180 1800 +-- query 2 -- 0 0 0 10 100 1000 20 200 2000 -┌─t1.a─┬─t2.a─┬─t2.b─┬─t3.b─┬─t3.c─┬─t5.a─┬─t5.b─┬─t5.c─┐ -│ 0 │ 0 │ 0 │ 0 │ 0 │ 0 │ 0 │ 0 │ 
-└──────┴──────┴──────┴──────┴──────┴──────┴──────┴──────┘ +-- query 3 -- +0 0 0 0 0 0 0 0 +-- query 3 -- 0 0 0 0 6 6 60 60 12 12 120 120 18 18 180 180 +-- query 4 -- 0 0 0 0 6 6 60 60 12 12 120 120 18 18 180 180 +-- query 5 -- 0 0 0 0 6 6 60 60 12 12 120 120 18 18 180 180 +-- query 6 -- 0 0 0 0 6 6 60 60 12 12 120 120 18 18 180 180 +-- query 7 -- 0 0 0 0 6 6 60 60 12 12 120 120 18 18 180 180 +-- query 8 -- 0 0 0 0 6 6 60 60 12 12 120 120 18 18 180 180 -┌─t1.a─┬─t2.a─┬─t2.b─┬─t3.b─┬────c─┐ -│ 0 │ 0 │ 0 │ 0 │ 0 │ -│ 6 │ 6 │ 60 │ 60 │ 600 │ -│ 12 │ 12 │ 120 │ 120 │ 1200 │ -│ 18 │ 18 │ 180 │ 180 │ 1800 │ -└──────┴──────┴──────┴──────┴──────┘ -┌─t1.a─┬─t2.a─┬─t2.b─┬─t3.b─┬────c─┐ -│ 0 │ 0 │ 0 │ 0 │ 0 │ -│ 6 │ 6 │ 60 │ 60 │ 600 │ -│ 12 │ 12 │ 120 │ 120 │ 1200 │ -│ 18 │ 18 │ 180 │ 180 │ 1800 │ -└──────┴──────┴──────┴──────┴──────┘ +-- query 9 -- +0 0 0 0 0 +6 6 60 60 600 +12 12 120 120 1200 +18 18 180 180 1800 +-- query 10 -- +0 0 0 0 0 +6 6 60 60 600 +12 12 120 120 1200 +18 18 180 180 1800 +-- query 11 -- 0 0 0 0 0 0 0 6 6 60 60 66 66 120 12 12 120 120 132 132 240 diff --git a/tests/queries/0_stateless/00820_multiple_joins_subquery_requires_alias.sql b/tests/queries/0_stateless/00820_multiple_joins_subquery_requires_alias.sql index 3da2cad4effb..7e9fbe0776e8 100644 --- a/tests/queries/0_stateless/00820_multiple_joins_subquery_requires_alias.sql +++ b/tests/queries/0_stateless/00820_multiple_joins_subquery_requires_alias.sql @@ -17,67 +17,76 @@ INSERT INTO table5 SELECT number * 5, number * 50, number * 500 FROM numbers(10) SET joined_subquery_requires_alias = 1; +select '-- query 1 --'; select t1.a, t2.b, t3.c from table1 as t1 join table2 as t2 on t1.a = t2.a join table3 as t3 on t2.b = t3.b ORDER BY t1.a; +select '-- query 2 --'; select t1.a, t2.b, t5.c from table1 as t1 join table2 as t2 on t1.a = t2.a join table5 as t5 on t1.a = t5.a AND t2.b = t5.b ORDER BY t1.a; +select '-- query 3 --'; select t1.a, t2.a, t2.b, t3.b, t3.c, t5.a, t5.b, t5.c from table1 as t1 join 
table2 as t2 on t1.a = t2.a join table3 as t3 on t2.b = t3.b join table5 as t5 on t3.c = t5.c -ORDER BY t1.a -FORMAT PrettyCompactNoEscapes; +ORDER BY t1.a; +select '-- query 3 --'; select t1.a as t1_a, t2.a as t2_a, t2.b as t2_b, t3.b as t3_b from table1 as t1 join table2 as t2 on t1_a = t2_a join table3 as t3 on t2_b = t3_b ORDER BY t1.a; +select '-- query 4 --'; select t1.a as t1_a, t2.a as t2_a, t2.b as t2_b, t3.b as t3_b from table1 as t1 join table2 as t2 on t1.a = t2.a join table3 as t3 on t2.b = t3.b ORDER BY t1.a; +select '-- query 5 --'; select t1.a as t1_a, t2.a as t2_a, t2.b as t2_b, t3.b as t3_b from table1 as t1 join table2 as t2 on table1.a = table2.a join table3 as t3 on table2.b = table3.b ORDER BY t1.a; +select '-- query 6 --'; select t1.a, t2.a, t2.b, t3.b from table1 as t1 join table2 as t2 on table1.a = table2.a join table3 as t3 on table2.b = table3.b ORDER BY t1.a; +select '-- query 7 --'; select t1.a, t2.a, t2.b, t3.b from table1 as t1 join table2 as t2 on t1.a = t2.a join table3 as t3 on t2.b = t3.b ORDER BY t1.a; +select '-- query 8 --'; select table1.a, table2.a, table2.b, table3.b from table1 as t1 join table2 as t2 on table1.a = table2.a join table3 as t3 on table2.b = table3.b ORDER BY t1.a; +select '-- query 9 --'; select t1.*, t2.*, t3.* from table1 as t1 join table2 as t2 on table1.a = table2.a join table3 as t3 on table2.b = table3.b -ORDER BY t1.a -FORMAT PrettyCompactNoEscapes; +ORDER BY t1.a; +select '-- query 10 --'; select * from table1 as t1 join table2 as t2 on t1.a = t2.a join table3 as t3 on t2.b = t3.b -ORDER BY t1.a -FORMAT PrettyCompactNoEscapes; +ORDER BY t1.a; +select '-- query 11 --'; select t1.a as t1_a, t2.a as t2_a, t2.b as t2_b, t3.b as t3_b, (t1.a + table2.b) as t1_t2_x, (table1.a + table3.b) as t1_t3_x, (t2.b + t3.b) as t2_t3_x from table1 as t1 diff --git a/tests/queries/0_stateless/00826_cross_to_inner_join.sql b/tests/queries/0_stateless/00826_cross_to_inner_join.sql index e9f9e13e2d39..b72cdfc17951 100644 
--- a/tests/queries/0_stateless/00826_cross_to_inner_join.sql +++ b/tests/queries/0_stateless/00826_cross_to_inner_join.sql @@ -15,9 +15,9 @@ INSERT INTO t2_00826 values (1,1), (1,2); INSERT INTO t2_00826 (a) values (2), (3); SELECT '--- cross ---'; -SELECT * FROM t1_00826 cross join t2_00826 where t1_00826.a = t2_00826.a; +SELECT * FROM t1_00826 cross join t2_00826 where t1_00826.a = t2_00826.a order by t1_00826.a; SELECT '--- cross nullable ---'; -SELECT * FROM t1_00826 cross join t2_00826 where t1_00826.b = t2_00826.b; +SELECT * FROM t1_00826 cross join t2_00826 where t1_00826.b = t2_00826.b ORDER BY t1_00826.a; SELECT '--- cross nullable vs not nullable ---'; SELECT * FROM t1_00826 cross join t2_00826 where t1_00826.a = t2_00826.b ORDER BY t1_00826.a; SELECT '--- cross self ---'; @@ -44,11 +44,11 @@ SELECT '--- do not rewrite alias ---'; SELECT a as b FROM t1_00826 cross join t2_00826 where t1_00826.b = t2_00826.a AND b > 0; SELECT '--- comma ---'; -SELECT * FROM t1_00826, t2_00826 where t1_00826.a = t2_00826.a; +SELECT * FROM t1_00826, t2_00826 where t1_00826.a = t2_00826.a ORDER BY t1_00826.a, t1_00826.b, t2_00826.b; SELECT '--- comma nullable ---'; -SELECT * FROM t1_00826, t2_00826 where t1_00826.b = t2_00826.b; +SELECT * FROM t1_00826, t2_00826 where t1_00826.b = t2_00826.b ORDER BY t1_00826.a; SELECT '--- comma and or ---'; -SELECT * FROM t1_00826, t2_00826 where t1_00826.a = t2_00826.a AND (t2_00826.b IS NULL OR t2_00826.b < 2); +SELECT * FROM t1_00826, t2_00826 where t1_00826.a = t2_00826.a AND (t2_00826.b IS NULL OR t2_00826.b < 2) ORDER BY t1_00826.a; SELECT '--- cross ---'; diff --git a/tests/queries/0_stateless/00845_join_on_aliases.sql b/tests/queries/0_stateless/00845_join_on_aliases.sql index 0800d27caa68..84a43f85049c 100644 --- a/tests/queries/0_stateless/00845_join_on_aliases.sql +++ b/tests/queries/0_stateless/00845_join_on_aliases.sql @@ -9,33 +9,40 @@ INSERT INTO table2 SELECT number * 2, number * 20 FROM numbers(6); select t1.a t1_a, t2.a 
from table1 as t1 -join table2 as t2 on table1.a = table2.a and t1.a = table2.a and t1_a = table2.a; +join table2 as t2 on table1.a = table2.a and t1.a = table2.a and t1_a = table2.a +order by t1.a, t2.a; select t1.a t1_a, t2.a from table1 as t1 -join table2 as t2 on table1.a = t2.a and t1.a = t2.a and t1_a = t2.a; +join table2 as t2 on table1.a = t2.a and t1.a = t2.a and t1_a = t2.a +order by t1.a, t2.a; select t1.a as t1_a, t2.a t2_a from table1 as t1 -join table2 as t2 on table1.a = t2_a and t1.a = t2_a and t1_a = t2_a; +join table2 as t2 on table1.a = t2_a and t1.a = t2_a and t1_a = t2_a +order by t1.a, t2.a; select t1.a t1_a, t2.a from table1 as t1 -join table2 as t2 on table1.a = table2.a and t1.a = t2.a and t1_a = t2.a; +join table2 as t2 on table1.a = table2.a and t1.a = t2.a and t1_a = t2.a +order by t1.a, t2.a; select t1.a t1_a, t2.a as t2_a from table1 as t1 -join table2 as t2 on table1.a = table2.a and t1.a = t2.a and t1_a = t2_a; +join table2 as t2 on table1.a = table2.a and t1.a = t2.a and t1_a = t2_a +order by t1.a, t2.a; select * from table1 as t1 join table2 as t2 on t1_a = t2_a -where (table1.a as t1_a) > 4 and (table2.a as t2_a) > 2; +where (table1.a as t1_a) > 4 and (table2.a as t2_a) > 2 +order by t1.a, t2.a; select t1.*, t2.* from table1 as t1 join table2 as t2 on t1_a = t2_a -where (t1.a as t1_a) > 2 and (t2.a as t2_a) > 4; +where (t1.a as t1_a) > 2 and (t2.a as t2_a) > 4 +order by t1.a, t2.a; DROP TABLE table1; DROP TABLE table2; diff --git a/tests/queries/0_stateless/00847_multiple_join_same_column.reference b/tests/queries/0_stateless/00847_multiple_join_same_column.reference index 91bd62ca5a3d..ecb4c8dbc5e3 100644 --- a/tests/queries/0_stateless/00847_multiple_join_same_column.reference +++ b/tests/queries/0_stateless/00847_multiple_join_same_column.reference @@ -15,31 +15,17 @@ s.a: 0 s.b: 0 y.a: 0 y.b: 0 -┌─t.a─┬─s.b─┬─s.a─┬─s.b─┬─y.a─┬─y.b─┐ -│ 1 │ 1 │ 1 │ 1 │ 1 │ 1 │ -│ 2 │ 0 │ 0 │ 0 │ 0 │ 0 │ -└─────┴─────┴─────┴─────┴─────┴─────┘ 
-┌─t_a─┐ -│ 1 │ -│ 2 │ -└─────┘ -┌─t.a─┬─s_a─┐ -│ 1 │ 1 │ -│ 2 │ 0 │ -└─────┴─────┘ -┌─t.a─┬─t.a─┬─t_b─┐ -│ 1 │ 1 │ 1 │ -│ 2 │ 2 │ 2 │ -└─────┴─────┴─────┘ -┌─s.a─┬─s.a─┬─s_b─┬─s.b─┐ -│ 1 │ 1 │ 1 │ 1 │ -│ 0 │ 0 │ 0 │ 0 │ -└─────┴─────┴─────┴─────┘ -┌─y.a─┬─y.a─┬─y_b─┬─y.b─┐ -│ 1 │ 1 │ 1 │ 1 │ -│ 0 │ 0 │ 0 │ 0 │ -└─────┴─────┴─────┴─────┘ -┌─t.a─┬─t_a─┬─s.a─┬─s_a─┬─y.a─┬─y_a─┐ -│ 1 │ 1 │ 1 │ 1 │ 1 │ 1 │ -│ 2 │ 2 │ 0 │ 0 │ 0 │ 0 │ -└─────┴─────┴─────┴─────┴─────┴─────┘ +1 1 1 1 1 1 +2 0 0 0 0 0 +1 +2 +1 1 +2 0 +1 1 1 +2 2 2 +1 1 1 1 +0 0 0 0 +1 1 1 1 +0 0 0 0 +1 1 1 1 1 1 +2 2 0 0 0 0 diff --git a/tests/queries/0_stateless/00847_multiple_join_same_column.sql b/tests/queries/0_stateless/00847_multiple_join_same_column.sql index c7f0c6383c20..eae88610e181 100644 --- a/tests/queries/0_stateless/00847_multiple_join_same_column.sql +++ b/tests/queries/0_stateless/00847_multiple_join_same_column.sql @@ -19,43 +19,36 @@ format Vertical; select t.a, s.b, s.a, s.b, y.a, y.b from t left join s on (t.a = s.a and s.b = t.b) left join y on (y.a = s.a and y.b = s.b) -order by t.a -format PrettyCompactNoEscapes; +order by t.a; select t.a as t_a from t left join s on s.a = t_a -order by t.a -format PrettyCompactNoEscapes; +order by t.a; select t.a, s.a as s_a from t left join s on s.a = t.a left join y on y.b = s.b -order by t.a -format PrettyCompactNoEscapes; +order by t.a; select t.a, t.a, t.b as t_b from t left join s on t.a = s.a left join y on y.b = s.b -order by t.a -format PrettyCompactNoEscapes; +order by t.a; select s.a, s.a, s.b as s_b, s.b from t left join s on s.a = t.a left join y on s.b = y.b -order by t.a -format PrettyCompactNoEscapes; +order by t.a; select y.a, y.a, y.b as y_b, y.b from t left join s on s.a = t.a left join y on y.b = s.b -order by t.a -format PrettyCompactNoEscapes; +order by t.a; select t.a, t.a as t_a, s.a, s.a as s_a, y.a, y.a as y_a from t left join s on t.a = s.a left join y on y.b = s.b -order by t.a -format PrettyCompactNoEscapes; +order by 
t.a; drop table t; drop table s; diff --git a/tests/queries/0_stateless/00860_unknown_identifier_bug.sql b/tests/queries/0_stateless/00860_unknown_identifier_bug.sql index bbcd3de8f20e..a146948b2a75 100644 --- a/tests/queries/0_stateless/00860_unknown_identifier_bug.sql +++ b/tests/queries/0_stateless/00860_unknown_identifier_bug.sql @@ -34,6 +34,7 @@ LEFT JOIN FROM appointment_events WHERE _status in ('Created', 'Transferred') GROUP BY _appointment_id ) B USING _appointment_id -WHERE A._set_at = B.max_set_at; +WHERE A._set_at = B.max_set_at +ORDER BY A._appointment_id; DROP TABLE appointment_events; diff --git a/tests/queries/0_stateless/00863_comma_join_in.sql b/tests/queries/0_stateless/00863_comma_join_in.sql index ebccd351c8a1..6979b0e6ae7b 100644 --- a/tests/queries/0_stateless/00863_comma_join_in.sql +++ b/tests/queries/0_stateless/00863_comma_join_in.sql @@ -10,13 +10,14 @@ insert into test1_00863 (id, code) select number, toString(number) FROM numbers( insert into test3_00863 (id, code) select number, toString(number) FROM numbers(100000); insert into test2_00863 (id, code, test1_id, test3_id) select number, toString(number), number, number FROM numbers(100000); -SET max_memory_usage = 50000000; +SET max_memory_usage = 100000000; select test2_00863.id from test1_00863, test2_00863, test3_00863 where test1_00863.code in ('1', '2', '3') and test2_00863.test1_id = test1_00863.id - and test2_00863.test3_id = test3_00863.id; + and test2_00863.test3_id = test3_00863.id +order by test2_00863.id; drop table test1_00863; drop table test2_00863; diff --git a/tests/queries/0_stateless/00921_datetime64_compatibility_long.reference b/tests/queries/0_stateless/00921_datetime64_compatibility_long.reference index 62de3a149a75..4f964f2478f7 100644 --- a/tests/queries/0_stateless/00921_datetime64_compatibility_long.reference +++ b/tests/queries/0_stateless/00921_datetime64_compatibility_long.reference @@ -135,13 +135,13 @@ Code: 43 ------------------------------------------ 
SELECT date_trunc(\'year\', N, \'Asia/Istanbul\') Code: 43 -"Date","2019-01-01" -"Date","2019-01-01" +"DateTime('Asia/Istanbul')","2019-01-01 00:00:00" +"DateTime('Asia/Istanbul')","2019-01-01 00:00:00" ------------------------------------------ SELECT date_trunc(\'month\', N, \'Asia/Istanbul\') Code: 43 -"Date","2019-09-01" -"Date","2019-09-01" +"DateTime('Asia/Istanbul')","2019-09-01 00:00:00" +"DateTime('Asia/Istanbul')","2019-09-01 00:00:00" ------------------------------------------ SELECT date_trunc(\'day\', N, \'Asia/Istanbul\') "DateTime('Asia/Istanbul')","2019-09-16 00:00:00" diff --git a/tests/queries/0_stateless/00956_join_use_nulls_with_array_column.sql b/tests/queries/0_stateless/00956_join_use_nulls_with_array_column.sql index f70bccd68fdf..af37e1de3e81 100644 --- a/tests/queries/0_stateless/00956_join_use_nulls_with_array_column.sql +++ b/tests/queries/0_stateless/00956_join_use_nulls_with_array_column.sql @@ -1,3 +1,3 @@ SET join_use_nulls = 1; -SELECT number FROM system.numbers SEMI LEFT JOIN (SELECT number, ['test'] FROM system.numbers LIMIT 1) js2 USING (number) LIMIT 1; -SELECT number FROM system.numbers ANY LEFT JOIN (SELECT number, ['test'] FROM system.numbers LIMIT 1) js2 USING (number) LIMIT 1; +SELECT number FROM (SELECT number from system.numbers LIMIT 10) as js1 SEMI LEFT JOIN (SELECT number, ['test'] FROM system.numbers LIMIT 1) js2 USING (number) LIMIT 1; +SELECT number FROM (SELECT number from system.numbers LIMIT 10) as js1 ANY LEFT JOIN (SELECT number, ['test'] FROM system.numbers LIMIT 1) js2 USING (number) ORDER BY number LIMIT 1; diff --git a/tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper_long.sh b/tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper_long.sh index d49f63e143d8..5b1c50262bf6 100755 --- a/tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper_long.sh +++ b/tests/queries/0_stateless/00992_system_parts_race_condition_zookeeper_long.sh @@ -13,8 +13,8 @@ 
$CLICKHOUSE_CLIENT -n -q " DROP TABLE IF EXISTS alter_table0; DROP TABLE IF EXISTS alter_table1; - CREATE TABLE alter_table0 (a UInt8, b Int16, c Float32, d String, e Array(UInt8), f Nullable(UUID), g Tuple(UInt8, UInt16)) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/alter_table', 'r1') ORDER BY a PARTITION BY b % 10 SETTINGS old_parts_lifetime = 1, cleanup_delay_period = 1, cleanup_delay_period_random_add = 0, replicated_max_mutations_in_one_entry = $(($RANDOM / 50)); - CREATE TABLE alter_table1 (a UInt8, b Int16, c Float32, d String, e Array(UInt8), f Nullable(UUID), g Tuple(UInt8, UInt16)) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/alter_table', 'r2') ORDER BY a PARTITION BY b % 10 SETTINGS old_parts_lifetime = 1, cleanup_delay_period = 1, cleanup_delay_period_random_add = 0, replicated_max_mutations_in_one_entry = $(($RANDOM / 50)); + CREATE TABLE alter_table0 (a UInt8, b Int16, c Float32, d String, e Array(UInt8), f Nullable(UUID), g Tuple(UInt8, UInt16)) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/alter_table', 'r1') ORDER BY a PARTITION BY b % 10 SETTINGS old_parts_lifetime = 1, cleanup_delay_period = 1, cleanup_delay_period_random_add = 0, replicated_max_mutations_in_one_entry = $(($RANDOM / 50 + 100)); + CREATE TABLE alter_table1 (a UInt8, b Int16, c Float32, d String, e Array(UInt8), f Nullable(UUID), g Tuple(UInt8, UInt16)) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/alter_table', 'r2') ORDER BY a PARTITION BY b % 10 SETTINGS old_parts_lifetime = 1, cleanup_delay_period = 1, cleanup_delay_period_random_add = 0, replicated_max_mutations_in_one_entry = $(($RANDOM / 50 + 200)); " function thread1() diff --git a/tests/queries/0_stateless/00993_system_parts_race_condition_drop_zookeeper.sh b/tests/queries/0_stateless/00993_system_parts_race_condition_drop_zookeeper.sh index bceda77c7f81..f4f38ad9c83c 
100755 --- a/tests/queries/0_stateless/00993_system_parts_race_condition_drop_zookeeper.sh +++ b/tests/queries/0_stateless/00993_system_parts_race_condition_drop_zookeeper.sh @@ -63,7 +63,6 @@ function thread6() done } - # https://stackoverflow.com/questions/9954794/execute-a-shell-function-with-timeout export -f thread1; export -f thread2; diff --git a/tests/queries/0_stateless/01015_empty_in_inner_right_join.sql.j2 b/tests/queries/0_stateless/01015_empty_in_inner_right_join.sql.j2 index cdb9d253b9b5..310e1b028758 100644 --- a/tests/queries/0_stateless/01015_empty_in_inner_right_join.sql.j2 +++ b/tests/queries/0_stateless/01015_empty_in_inner_right_join.sql.j2 @@ -8,10 +8,10 @@ SELECT 'IN empty set', count() FROM system.numbers WHERE number IN (SELECT toUIn SELECT 'IN non-empty set', count() FROM (SELECT number FROM system.numbers LIMIT 10) t1 WHERE t1.number IN (SELECT toUInt64(1) WHERE 1); SELECT 'NOT IN empty set', count() FROM (SELECT number FROM system.numbers LIMIT 10) WHERE number NOT IN (SELECT toUInt64(1) WHERE 0); -SELECT 'INNER JOIN empty set', count() FROM system.numbers INNER JOIN (SELECT toUInt64(1) AS x WHERE 0) ON system.numbers.number = x; +SELECT 'INNER JOIN empty set', count() FROM numbers(1000) INNER JOIN (SELECT toUInt64(1) AS x WHERE 0) ON number = x; SELECT 'INNER JOIN non-empty set', count() FROM (SELECT number FROM system.numbers LIMIT 10) t1 INNER JOIN (SELECT toUInt64(1) AS x WHERE 1) ON t1.number = x; -SELECT 'RIGHT JOIN empty set', count() FROM system.numbers RIGHT JOIN (SELECT toUInt64(1) AS x WHERE 0) ON system.numbers.number = x; +SELECT 'RIGHT JOIN empty set', count() FROM numbers(1000) RIGHT JOIN (SELECT toUInt64(1) AS x WHERE 0) ON number = x; SELECT 'RIGHT JOIN non-empty set', count() FROM (SELECT number FROM system.numbers LIMIT 10) t1 RIGHT JOIN (SELECT toUInt64(1) AS x WHERE 1) ON t1.number = x; SELECT 'LEFT JOIN empty set', count() FROM (SELECT number FROM system.numbers LIMIT 10) t1 LEFT JOIN (SELECT toUInt64(1) AS x WHERE 
0) ON t1.number = x; diff --git a/tests/queries/0_stateless/01158_zookeeper_log_long.reference b/tests/queries/0_stateless/01158_zookeeper_log_long.reference index a0088610c9dc..7ec52cb3366b 100644 --- a/tests/queries/0_stateless/01158_zookeeper_log_long.reference +++ b/tests/queries/0_stateless/01158_zookeeper_log_long.reference @@ -18,22 +18,18 @@ Response 0 Create /test/01158/default/rmt/replicas/1/parts/all_0_0_0 0 0 \N 0 4 Request 0 Exists /test/01158/default/rmt/replicas/1/parts/all_0_0_0 0 0 \N 0 0 \N \N \N 0 0 0 0 Response 0 Exists /test/01158/default/rmt/replicas/1/parts/all_0_0_0 0 0 \N 0 0 ZOK \N \N 0 0 96 0 blocks -Request 0 Multi 0 0 \N 3 0 \N \N \N 0 0 0 0 -Request 0 Create /test/01158/default/rmt/blocks/all_6308706741995381342_2495791770474910886 0 0 \N 0 1 \N \N \N 0 0 0 0 -Request 0 Remove /test/01158/default/rmt/blocks/all_6308706741995381342_2495791770474910886 0 0 -1 0 2 \N \N \N 0 0 0 0 -Request 0 Create /test/01158/default/rmt/block_numbers/all/block- 1 1 \N 0 3 \N \N \N 0 0 0 0 -Response 0 Multi 0 0 \N 3 0 ZOK \N \N 0 0 0 0 -Response 0 Create /test/01158/default/rmt/blocks/all_6308706741995381342_2495791770474910886 0 0 \N 0 1 ZOK \N \N /test/01158/default/rmt/blocks/all_6308706741995381342_2495791770474910886 0 0 0 0 -Response 0 Remove /test/01158/default/rmt/blocks/all_6308706741995381342_2495791770474910886 0 0 -1 0 2 ZOK \N \N 0 0 0 0 -Response 0 Create /test/01158/default/rmt/block_numbers/all/block- 1 1 \N 0 3 ZOK \N \N /test/01158/default/rmt/block_numbers/all/block-0000000000 0 0 0 0 -Request 0 Multi 0 0 \N 3 0 \N \N \N 0 0 0 0 -Request 0 Create /test/01158/default/rmt/blocks/all_6308706741995381342_2495791770474910886 0 0 \N 0 1 \N \N \N 0 0 0 0 -Request 0 Remove /test/01158/default/rmt/blocks/all_6308706741995381342_2495791770474910886 0 0 -1 0 2 \N \N \N 0 0 0 0 -Request 0 Create /test/01158/default/rmt/block_numbers/all/block- 1 1 \N 0 3 \N \N \N 0 0 0 0 -Response 0 Multi 0 0 \N 3 0 ZNODEEXISTS \N \N 0 0 0 0 -Response 0 Error 
/test/01158/default/rmt/blocks/all_6308706741995381342_2495791770474910886 0 0 \N 0 1 ZNODEEXISTS \N \N 0 0 0 0 -Response 0 Error /test/01158/default/rmt/blocks/all_6308706741995381342_2495791770474910886 0 0 -1 0 2 ZRUNTIMEINCONSISTENCY \N \N 0 0 0 0 -Response 0 Error /test/01158/default/rmt/block_numbers/all/block- 1 1 \N 0 3 ZRUNTIMEINCONSISTENCY \N \N 0 0 0 0 +Request 0 Multi 0 0 \N 2 0 \N \N \N 0 0 0 0 +Request 0 CheckNotExists /test/01158/default/rmt/blocks/all_6308706741995381342_2495791770474910886 0 0 -1 0 1 \N \N \N 0 0 0 0 +Request 0 Create /test/01158/default/rmt/block_numbers/all/block- 1 1 \N 0 2 \N \N \N 0 0 0 0 +Response 0 Multi 0 0 \N 2 0 ZOK \N \N 0 0 0 0 +Response 0 CheckNotExists /test/01158/default/rmt/blocks/all_6308706741995381342_2495791770474910886 0 0 -1 0 1 ZOK \N \N 0 0 0 0 +Response 0 Create /test/01158/default/rmt/block_numbers/all/block- 1 1 \N 0 2 ZOK \N \N /test/01158/default/rmt/block_numbers/all/block-0000000000 0 0 0 0 +Request 0 Multi 0 0 \N 2 0 \N \N \N 0 0 0 0 +Request 0 CheckNotExists /test/01158/default/rmt/blocks/all_6308706741995381342_2495791770474910886 0 0 -1 0 1 \N \N \N 0 0 0 0 +Request 0 Create /test/01158/default/rmt/block_numbers/all/block- 1 1 \N 0 2 \N \N \N 0 0 0 0 +Response 0 Multi 0 0 \N 2 0 ZNODEEXISTS \N \N 0 0 0 0 +Response 0 Error /test/01158/default/rmt/blocks/all_6308706741995381342_2495791770474910886 0 0 -1 0 1 ZNODEEXISTS \N \N 0 0 0 0 +Response 0 Error /test/01158/default/rmt/block_numbers/all/block- 1 1 \N 0 2 ZRUNTIMEINCONSISTENCY \N \N 0 0 0 0 Request 0 Get /test/01158/default/rmt/blocks/all_6308706741995381342_2495791770474910886 0 0 \N 0 0 \N \N \N 0 0 0 0 Response 0 Get /test/01158/default/rmt/blocks/all_6308706741995381342_2495791770474910886 0 0 \N 0 0 ZOK \N \N 0 0 9 0 duration_ms diff --git a/tests/queries/0_stateless/01292_create_user.reference b/tests/queries/0_stateless/01292_create_user.reference index f723412c636f..eb89a5ed38c0 100644 --- 
a/tests/queries/0_stateless/01292_create_user.reference +++ b/tests/queries/0_stateless/01292_create_user.reference @@ -13,6 +13,8 @@ CREATE USER u4_01292 IDENTIFIED WITH sha256_password CREATE USER u5_01292 IDENTIFIED WITH sha256_password CREATE USER u6_01292 IDENTIFIED WITH double_sha1_password CREATE USER u7_01292 IDENTIFIED WITH double_sha1_password +CREATE USER u8_01292 IDENTIFIED WITH bcrypt_password +CREATE USER u9_01292 IDENTIFIED WITH bcrypt_password CREATE USER u1_01292 IDENTIFIED WITH sha256_password CREATE USER u2_01292 IDENTIFIED WITH sha256_password CREATE USER u3_01292 IDENTIFIED WITH sha256_password diff --git a/tests/queries/0_stateless/01292_create_user.sql b/tests/queries/0_stateless/01292_create_user.sql index d0f157d36b09..a283ce687e6e 100644 --- a/tests/queries/0_stateless/01292_create_user.sql +++ b/tests/queries/0_stateless/01292_create_user.sql @@ -1,4 +1,4 @@ --- Tags: no-fasttest +-- Tags: no-fasttest, no-parallel DROP USER IF EXISTS u1_01292, u2_01292, u3_01292, u4_01292, u5_01292, u6_01292, u7_01292, u8_01292, u9_01292; DROP USER IF EXISTS u10_01292, u11_01292, u12_01292, u13_01292, u14_01292, u15_01292, u16_01292; @@ -31,6 +31,8 @@ CREATE USER u4_01292 IDENTIFIED WITH sha256_password BY 'qwe123'; CREATE USER u5_01292 IDENTIFIED WITH sha256_hash BY '18138372FAD4B94533CD4881F03DC6C69296DD897234E0CEE83F727E2E6B1F63'; CREATE USER u6_01292 IDENTIFIED WITH double_sha1_password BY 'qwe123'; CREATE USER u7_01292 IDENTIFIED WITH double_sha1_hash BY '8DCDD69CE7D121DE8013062AEAEB2A148910D50E'; +CREATE USER u8_01292 IDENTIFIED WITH bcrypt_password BY 'qwe123'; +CREATE USER u9_01292 IDENTIFIED WITH bcrypt_hash BY '$2a$12$rz5iy2LhuwBezsM88ZzWiemOVUeJ94xHTzwAlLMDhTzwUxOHaY64q'; SHOW CREATE USER u1_01292; SHOW CREATE USER u2_01292; SHOW CREATE USER u3_01292; @@ -38,6 +40,8 @@ SHOW CREATE USER u4_01292; SHOW CREATE USER u5_01292; SHOW CREATE USER u6_01292; SHOW CREATE USER u7_01292; +SHOW CREATE USER u8_01292; +SHOW CREATE USER u9_01292; ALTER USER 
u1_01292 IDENTIFIED BY '123qwe'; ALTER USER u2_01292 IDENTIFIED BY '123qwe'; ALTER USER u3_01292 IDENTIFIED BY '123qwe'; @@ -48,7 +52,7 @@ SHOW CREATE USER u2_01292; SHOW CREATE USER u3_01292; SHOW CREATE USER u4_01292; SHOW CREATE USER u5_01292; -DROP USER u1_01292, u2_01292, u3_01292, u4_01292, u5_01292, u6_01292, u7_01292; +DROP USER u1_01292, u2_01292, u3_01292, u4_01292, u5_01292, u6_01292, u7_01292, u8_01292, u9_01292; SELECT '-- host'; CREATE USER u1_01292 HOST ANY; diff --git a/tests/queries/0_stateless/01346_alter_enum_partition_key_replicated_zookeeper_long.reference b/tests/queries/0_stateless/01346_alter_enum_partition_key_replicated_zookeeper_long.reference index 60c6076aef0c..a905ea97ae51 100644 --- a/tests/queries/0_stateless/01346_alter_enum_partition_key_replicated_zookeeper_long.reference +++ b/tests/queries/0_stateless/01346_alter_enum_partition_key_replicated_zookeeper_long.reference @@ -1,24 +1,24 @@ hello test hello test -1_0_0_0 hello 1 -1_0_0_0 hello 1 +0 0 hello 1 +0 0 hello 1 hello test goodbye test hello test goodbye test -3_0_0_1 goodbye 3 -1_0_0_1 hello 1 -3_0_0_1 goodbye 3 -1_0_0_1 hello 1 +0 0 goodbye 3 +0 0 hello 1 +0 0 goodbye 3 +0 0 hello 1 1 test 3 test 111 abc 1 test 3 test 111 abc -1_0_0_2 1 1 -111_0_0_1 111 111 -3_0_0_2 3 3 -1_0_0_2 1 1 -111_0_0_1 111 111 -3_0_0_2 3 3 +0 0 1 1 +0 0 111 111 +0 0 3 3 +0 0 1 1 +0 0 111 111 +0 0 3 3 diff --git a/tests/queries/0_stateless/01346_alter_enum_partition_key_replicated_zookeeper_long.sql b/tests/queries/0_stateless/01346_alter_enum_partition_key_replicated_zookeeper_long.sql index f20156fd9e3d..d40bcc15e556 100644 --- a/tests/queries/0_stateless/01346_alter_enum_partition_key_replicated_zookeeper_long.sql +++ b/tests/queries/0_stateless/01346_alter_enum_partition_key_replicated_zookeeper_long.sql @@ -13,16 +13,17 @@ INSERT INTO test VALUES ('hello', 'test'); SELECT * FROM test; SYSTEM SYNC REPLICA test2; SELECT * FROM test2; -SELECT name, partition, partition_id FROM system.parts WHERE 
database = currentDatabase() AND table = 'test' AND active ORDER BY partition; -SELECT name, partition, partition_id FROM system.parts WHERE database = currentDatabase() AND table = 'test2' AND active ORDER BY partition; +SELECT min_block_number, max_block_number, partition, partition_id FROM system.parts WHERE database = currentDatabase() AND table = 'test' AND active ORDER BY partition; +SELECT min_block_number, max_block_number, partition, partition_id FROM system.parts WHERE database = currentDatabase() AND table = 'test2' AND active ORDER BY partition; ALTER TABLE test MODIFY COLUMN x Enum('hello' = 1, 'world' = 2, 'goodbye' = 3); INSERT INTO test VALUES ('goodbye', 'test'); OPTIMIZE TABLE test FINAL; SELECT * FROM test ORDER BY x; +SYSTEM SYNC REPLICA test2; SELECT * FROM test2 ORDER BY x; -SELECT name, partition, partition_id FROM system.parts WHERE database = currentDatabase() AND table = 'test' AND active ORDER BY partition; -SELECT name, partition, partition_id FROM system.parts WHERE database = currentDatabase() AND table = 'test2' AND active ORDER BY partition; +SELECT min_block_number, max_block_number, partition, partition_id FROM system.parts WHERE database = currentDatabase() AND table = 'test' AND active ORDER BY partition; +SELECT min_block_number, max_block_number, partition, partition_id FROM system.parts WHERE database = currentDatabase() AND table = 'test2' AND active ORDER BY partition; ALTER TABLE test MODIFY COLUMN x Enum('hello' = 1, 'world' = 2); -- { serverError 524 } ALTER TABLE test MODIFY COLUMN x Enum('hello' = 1, 'world' = 2, 'test' = 3); @@ -33,9 +34,10 @@ ALTER TABLE test MODIFY COLUMN x Int8; INSERT INTO test VALUES (111, 'abc'); OPTIMIZE TABLE test FINAL; SELECT * FROM test ORDER BY x; +SYSTEM SYNC REPLICA test2; SELECT * FROM test2 ORDER BY x; -SELECT name, partition, partition_id FROM system.parts WHERE database = currentDatabase() AND table = 'test' AND active ORDER BY partition; -SELECT name, partition, partition_id FROM 
system.parts WHERE database = currentDatabase() AND table = 'test2' AND active ORDER BY partition; +SELECT min_block_number, max_block_number, partition, partition_id FROM system.parts WHERE database = currentDatabase() AND table = 'test' AND active ORDER BY partition; +SELECT min_block_number, max_block_number, partition, partition_id FROM system.parts WHERE database = currentDatabase() AND table = 'test2' AND active ORDER BY partition; ALTER TABLE test MODIFY COLUMN x Enum8('' = 1); -- { serverError 524 } ALTER TABLE test MODIFY COLUMN x Enum16('' = 1); -- { serverError 524 } diff --git a/tests/queries/0_stateless/01495_subqueries_in_with_statement.sql b/tests/queries/0_stateless/01495_subqueries_in_with_statement.sql index 819346be1291..a71053934b2e 100644 --- a/tests/queries/0_stateless/01495_subqueries_in_with_statement.sql +++ b/tests/queries/0_stateless/01495_subqueries_in_with_statement.sql @@ -7,7 +7,7 @@ INSERT INTO test1 VALUES (1, 2), (3, 4); WITH test1 AS (SELECT * FROM numbers(5)) SELECT * FROM test1; WITH test1 AS (SELECT i + 1, j + 1 FROM test1) SELECT * FROM test1; WITH test1 AS (SELECT i + 1, j + 1 FROM test1) SELECT * FROM (SELECT * FROM test1); -SELECT * FROM (WITH test1 AS (SELECT toInt32(*) i FROM numbers(5)) SELECT * FROM test1) l ANY INNER JOIN test1 r on (l.i == r.i); +SELECT * FROM (WITH test1 AS (SELECT toInt32(*) i FROM numbers(5)) SELECT * FROM test1) l ANY INNER JOIN test1 r on (l.i == r.i) order by l.i; WITH test1 AS (SELECT i + 1, j + 1 FROM test1) SELECT toInt64(4) i, toInt64(5) j FROM numbers(3) WHERE (i, j) IN test1; DROP TABLE IF EXISTS test1; @@ -20,7 +20,7 @@ WITH test1 AS (SELECT number-1 as n FROM numbers(42)) SELECT max(n+1)+1 z FROM test1; WITH test1 AS (SELECT number-1 as n FROM numbers(42)) -SELECT max(n+1)+1 z FROM test1 join test1 x using n having z - 1 = (select min(n-1)+41 from test1) + 2; +SELECT max(n+1)+1 z FROM test1 join test1 x using n having z - 1 = (select min(n-1)+41 from test1) + 2 order by z; WITH test1 AS 
(SELECT number-1 as n FROM numbers(4442) order by n limit 100) SELECT max(n) FROM test1 where n=422; @@ -32,10 +32,10 @@ drop table if exists with_test ; create table with_test engine=Memory as select cast(number-1 as Nullable(Int64)) n from numbers(10000); WITH test1 AS (SELECT n FROM with_test where n <= 40) -SELECT max(n+1)+1 z FROM test1 join test1 x using (n) having max(n+1)+1 - 1 = (select min(n-1)+41 from test1) + 2; +SELECT max(n+1)+1 z FROM test1 join test1 x using (n) having max(n+1)+1 - 1 = (select min(n-1)+41 from test1) + 2 order by z; WITH test1 AS (SELECT n FROM with_test where n <= 40) -SELECT max(n+1)+1 z FROM test1 join test1 x using (n) having z - 1 = (select min(n-1)+41 from test1) + 2; +SELECT max(n+1)+1 z FROM test1 join test1 x using (n) having z - 1 = (select min(n-1)+41 from test1) + 2 order by z; WITH test1 AS (SELECT n FROM with_test order by n limit 100) SELECT max(n) FROM test1 where n=422; diff --git a/tests/queries/0_stateless/01600_parts_states_metrics_long.sh b/tests/queries/0_stateless/01600_parts_states_metrics_long.sh index f47d0863e698..89ce84f6dbca 100755 --- a/tests/queries/0_stateless/01600_parts_states_metrics_long.sh +++ b/tests/queries/0_stateless/01600_parts_states_metrics_long.sh @@ -1,5 +1,4 @@ #!/usr/bin/env bash -# Tags: long CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh @@ -8,7 +7,8 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # NOTE: database = $CLICKHOUSE_DATABASE is unwanted verify_sql="SELECT (SELECT sumIf(value, metric = 'PartsActive'), sumIf(value, metric = 'PartsOutdated') FROM system.metrics) - = (SELECT sum(active), sum(NOT active) FROM system.parts)" + = (SELECT sum(active), sum(NOT active) FROM + (SELECT active FROM system.parts UNION ALL SELECT active FROM system.projection_parts))" # The query is not atomic - it can compare states between system.parts and system.metrics from different points in time. # So, there is inherent race condition. 
But it should get expected result eventually. diff --git a/tests/queries/0_stateless/01600_parts_types_metrics_long.sh b/tests/queries/0_stateless/01600_parts_types_metrics_long.sh index 05edf02f7edc..0b9afcf633ef 100755 --- a/tests/queries/0_stateless/01600_parts_types_metrics_long.sh +++ b/tests/queries/0_stateless/01600_parts_types_metrics_long.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Tags: long, no-s3-storage +# Tags: no-s3-storage CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh @@ -11,7 +11,8 @@ set -o pipefail # NOTE: database = $CLICKHOUSE_DATABASE is unwanted verify_sql="SELECT (SELECT sumIf(value, metric = 'PartsInMemory'), sumIf(value, metric = 'PartsCompact'), sumIf(value, metric = 'PartsWide') FROM system.metrics) = - (SELECT countIf(part_type == 'InMemory'), countIf(part_type == 'Compact'), countIf(part_type == 'Wide') FROM system.parts)" + (SELECT countIf(part_type == 'InMemory'), countIf(part_type == 'Compact'), countIf(part_type == 'Wide') + FROM (SELECT part_type FROM system.parts UNION ALL SELECT part_type FROM system.projection_parts))" # The query is not atomic - it can compare states between system.parts and system.metrics from different points in time. # So, there is inherent race condition (especially in fasttest that runs tests in parallel). 
diff --git a/tests/queries/0_stateless/01881_join_on_conditions_hash.sql.j2 b/tests/queries/0_stateless/01881_join_on_conditions_hash.sql.j2 index fafefd72cb85..41c3237a0e3c 100644 --- a/tests/queries/0_stateless/01881_join_on_conditions_hash.sql.j2 +++ b/tests/queries/0_stateless/01881_join_on_conditions_hash.sql.j2 @@ -24,13 +24,13 @@ SET join_algorithm = 'hash'; SELECT '-- hash_join --'; SELECT '--'; -SELECT t1.key, t1.key2 FROM t1 INNER ALL JOIN t2 ON t1.id == t2.id AND t2.key == t2.key2; +SELECT t1.key, t1.key2 FROM t1 INNER ALL JOIN t2 ON t1.id == t2.id AND t2.key == t2.key2 ORDER BY t1.key, t1.key2; SELECT '--'; -SELECT t1.key, t1.key2 FROM t1 INNER ALL JOIN t2 ON t1.id == t2.id AND t2.key == t2.key2 AND t1.key == t1.key2; +SELECT t1.key, t1.key2 FROM t1 INNER ALL JOIN t2 ON t1.id == t2.id AND t2.key == t2.key2 AND t1.key == t1.key2 ORDER BY t1.key, t1.key2; SELECT '--'; -SELECT t1.key FROM t1 INNER ANY JOIN t2 ON t1.id == t2.id AND t2.key == t2.key2 AND t1.key == t1.key2; -SELECT t1.key FROM t1 INNER ANY JOIN t2 ON t1.id == t2.id AND t2.key == t2.key2 AND t1.key == t1.key2 AND 0; -- { serverError INVALID_JOIN_ON_EXPRESSION } +SELECT t1.key FROM t1 INNER ANY JOIN t2 ON t1.id == t2.id AND t2.key == t2.key2 AND t1.key == t1.key2 ORDER BY t1.key; +SELECT t1.key FROM t1 INNER ANY JOIN t2 ON t1.id == t2.id AND t2.key == t2.key2 AND t1.key == t1.key2 AND 0 ORDER BY t1.key; -- { serverError INVALID_JOIN_ON_EXPRESSION } SELECT '--'; SELECT '333' = t1.key FROM t1 INNER ANY JOIN t2 ON t1.id == t2.id AND t2.key == t2.key2 AND t1.key == t1.key2 AND t2.id > 2; @@ -46,25 +46,25 @@ SELECT '333' = t1.key FROM t1 INNER ANY JOIN t2 ON t1.id == t2.id AND t2.key == -- DISTINCT is used to remove the difference between 'hash' and 'merge' join: 'merge' doesn't support `any_join_distinct_right_table_keys` SELECT '--'; -SELECT DISTINCT t1.id FROM t1 INNER ANY JOIN t2_nullable as t2 ON t1.id == t2.id AND t2.key2 != ''; -SELECT DISTINCT t1.id FROM t1 INNER ANY JOIN t2 ON t1.id == 
t2.id AND toNullable(t2.key2 != ''); -SELECT DISTINCT t1.id FROM t1 INNER ANY JOIN t2 ON t1.id == t2.id AND toLowCardinality(t2.key2 != ''); -SELECT DISTINCT t1.id FROM t1 INNER ANY JOIN t2 ON t1.id == t2.id AND toLowCardinality(toNullable(t2.key2 != '')); -SELECT DISTINCT t1.id FROM t1 INNER ANY JOIN t2 ON t1.id == t2.id AND toNullable(toLowCardinality(t2.key2 != '')); -SELECT DISTINCT t1.id FROM t1 INNER ANY JOIN t2 ON t1.id == t2.id AND toNullable(t1.key2 != ''); -SELECT DISTINCT t1.id FROM t1 INNER ANY JOIN t2 ON t1.id == t2.id AND toLowCardinality(t1.key2 != ''); -SELECT DISTINCT t1.id FROM t1 INNER ANY JOIN t2 ON t1.id == t2.id AND toLowCardinality(toNullable(t1.key2 != '')); -SELECT DISTINCT t1.id FROM t1 INNER ANY JOIN t2 ON t1.id == t2.id AND toNullable(toLowCardinality(t1.key2 != '')); +SELECT DISTINCT t1.id FROM t1 INNER ANY JOIN t2_nullable as t2 ON t1.id == t2.id AND t2.key2 != '' ORDER BY t1.id; +SELECT DISTINCT t1.id FROM t1 INNER ANY JOIN t2 ON t1.id == t2.id AND toNullable(t2.key2 != '') ORDER BY t1.id; +SELECT DISTINCT t1.id FROM t1 INNER ANY JOIN t2 ON t1.id == t2.id AND toLowCardinality(t2.key2 != '') ORDER BY t1.id;; +SELECT DISTINCT t1.id FROM t1 INNER ANY JOIN t2 ON t1.id == t2.id AND toLowCardinality(toNullable(t2.key2 != '')) ORDER BY t1.id;; +SELECT DISTINCT t1.id FROM t1 INNER ANY JOIN t2 ON t1.id == t2.id AND toNullable(toLowCardinality(t2.key2 != '')) ORDER BY t1.id;; +SELECT DISTINCT t1.id FROM t1 INNER ANY JOIN t2 ON t1.id == t2.id AND toNullable(t1.key2 != '') ORDER BY t1.id;; +SELECT DISTINCT t1.id FROM t1 INNER ANY JOIN t2 ON t1.id == t2.id AND toLowCardinality(t1.key2 != '') ORDER BY t1.id;; +SELECT DISTINCT t1.id FROM t1 INNER ANY JOIN t2 ON t1.id == t2.id AND toLowCardinality(toNullable(t1.key2 != '')) ORDER BY t1.id;; +SELECT DISTINCT t1.id FROM t1 INNER ANY JOIN t2 ON t1.id == t2.id AND toNullable(toLowCardinality(t1.key2 != '')) ORDER BY t1.id;; SELECT '--'; -SELECT DISTINCT t1.key, toUInt8(t1.id) as e FROM t1 INNER ANY JOIN 
t2 ON t1.id == t2.id AND e; +SELECT DISTINCT t1.key, toUInt8(t1.id) as e FROM t1 INNER ANY JOIN t2 ON t1.id == t2.id AND e ORDER BY t1.key; -- `e + 1` is UInt16 -SELECT DISTINCT t1.key, toUInt8(t1.id) as e FROM t1 INNER ANY JOIN t2 ON t1.id == t2.id AND e + 1; -- { serverError 403 } -SELECT DISTINCT t1.key, toUInt8(t1.id) as e FROM t1 INNER ANY JOIN t2 ON t1.id == t2.id AND toUInt8(e + 1); +SELECT DISTINCT t1.key, toUInt8(t1.id) as e FROM t1 INNER ANY JOIN t2 ON t1.id == t2.id AND e + 1 ORDER BY t1.key; -- { serverError 403 } +SELECT DISTINCT t1.key, toUInt8(t1.id) as e FROM t1 INNER ANY JOIN t2 ON t1.id == t2.id AND toUInt8(e + 1) ORDER BY t1.key; SELECT '--'; SELECT t1.id, t1.key, t1.key2, t2.id, t2.key, t2.key2 FROM t1 FULL JOIN t2 ON t1.id == t2.id AND t2.key == t2.key2 AND t1.key == t1.key2 ORDER BY t1.id NULLS FIRST, t2.id NULLS FIRST; -SELECT t1.id, t1.key, t1.key2, t22.id, t22.idd, t22.key, t22.key2 FROM t1 FULL JOIN t22 ON t1.id == t22.id AND t22.key == t22.key2 AND t1.key == t1.key2 OR t1.id = t22.idd AND t1.key = t1.key2 ORDER BY t1.id NULLS FIRST, t22.id NULLS FIRST; +SELECT t1.id, t1.key, t1.key2, t22.id, t22.idd, t22.key, t22.key2 FROM t1 FULL JOIN t22 ON t1.id == t22.id AND t22.key == t22.key2 AND t1.key == t1.key2 OR t1.id = t22.idd AND t1.key = t1.key2 ORDER BY t1.id NULLS FIRST, t1.key, t1.key2, t22.id ; SELECT * FROM t1 INNER ALL JOIN t2 ON t1.id == t2.id AND t1.id; -- { serverError 403 } SELECT * FROM t1 INNER ALL JOIN t2 ON t1.id == t2.id AND t2.id; -- { serverError 403 } @@ -82,7 +82,7 @@ SELECT * FROM t1 INNER ANY JOIN t2 ON t1.id == t2.id AND t2.key == t2.key2 AND t SELECT '--'; -- length(t1.key2) == length(t2.key2) is expression for columns from both tables, it works because it part of joining key -SELECT t1.*, t2.* FROM t1 INNER ANY JOIN t2 ON t1.id == t2.id AND length(t1.key2) == length(t2.key2) AND t1.key != '333'; +SELECT t1.*, t2.* FROM t1 INNER ANY JOIN t2 ON t1.id == t2.id AND length(t1.key2) == length(t2.key2) AND t1.key != '333' 
ORDER BY t1.id, t1.key, t1.key2, t2.id, t2.key, t2.key2; SELECT 't22', * FROM t1 JOIN t22 ON t1.id == t22.idd and (t22.key == t22.key2 OR t1.id == t22.id); -- { serverError 403 } SELECT 't22', * FROM t1 JOIN t22 ON t1.id == t22.idd and (t1.id == t22.id OR t22.key == t22.key2); -- { serverError 403 } diff --git a/tests/queries/0_stateless/02117_show_create_table_system.reference b/tests/queries/0_stateless/02117_show_create_table_system.reference index 5eba9cca350e..a352f0053694 100644 --- a/tests/queries/0_stateless/02117_show_create_table_system.reference +++ b/tests/queries/0_stateless/02117_show_create_table_system.reference @@ -47,7 +47,10 @@ CREATE TABLE system.clusters `default_database` String, `errors_count` UInt32, `slowdowns_count` UInt32, - `estimated_recovery_time` UInt32 + `estimated_recovery_time` UInt32, + `database_shard_name` String, + `database_replica_name` String, + `is_active` Nullable(UInt8) ) ENGINE = SystemClusters COMMENT 'SYSTEM TABLE is built on the fly.' @@ -281,7 +284,12 @@ CREATE TABLE system.functions `alias_to` String, `create_query` String, `origin` Enum8('System' = 0, 'SQLUserDefined' = 1, 'ExecutableUserDefined' = 2), - `description` String + `description` String, + `syntax` String, + `arguments` String, + `returned_value` String, + `examples` String, + `categories` String ) ENGINE = SystemFunctions COMMENT 'SYSTEM TABLE is built on the fly.' 
@@ -356,6 +364,7 @@ CREATE TABLE system.merges `partition_id` String, `is_mutation` UInt8, `total_size_bytes_compressed` UInt64, + `total_size_bytes_uncompressed` UInt64, `total_size_marks` UInt64, `bytes_read_uncompressed` UInt64, `rows_read` UInt64, @@ -1118,7 +1127,7 @@ CREATE TABLE system.users `name` String, `id` UUID, `storage` String, - `auth_type` Enum8('no_password' = 0, 'plaintext_password' = 1, 'sha256_password' = 2, 'double_sha1_password' = 3, 'ldap' = 4, 'kerberos' = 5, 'ssl_certificate' = 6), + `auth_type` Enum8('no_password' = 0, 'plaintext_password' = 1, 'sha256_password' = 2, 'double_sha1_password' = 3, 'ldap' = 4, 'kerberos' = 5, 'ssl_certificate' = 6, 'bcrypt_password' = 7), `auth_params` String, `host_ip` Array(String), `host_names` Array(String), diff --git a/tests/queries/0_stateless/02125_transform_decimal_bug.reference b/tests/queries/0_stateless/02125_transform_decimal_bug.reference index 7f59d0ee7bf6..d1bf333ec8e1 100644 --- a/tests/queries/0_stateless/02125_transform_decimal_bug.reference +++ b/tests/queries/0_stateless/02125_transform_decimal_bug.reference @@ -1,3 +1,4 @@ +1 0 1 2 diff --git a/tests/queries/0_stateless/02125_transform_decimal_bug.sql b/tests/queries/0_stateless/02125_transform_decimal_bug.sql index 4ef471ea875e..002f60076e9a 100644 --- a/tests/queries/0_stateless/02125_transform_decimal_bug.sql +++ b/tests/queries/0_stateless/02125_transform_decimal_bug.sql @@ -1,4 +1,4 @@ -SELECT transform(1, [1], [toDecimal32(1, 2)]); -- { serverError 44 } +SELECT transform(1, [1], [toDecimal32(1, 2)]); SELECT transform(toDecimal32(number, 2), [toDecimal32(3, 2)], [toDecimal32(30, 2)]) FROM system.numbers LIMIT 10; SELECT transform(toDecimal32(number, 2), [toDecimal32(3, 2)], [toDecimal32(30, 2)], toDecimal32(1000, 2)) FROM system.numbers LIMIT 10; SELECT transform(number, [3, 5, 11], [toDecimal32(30, 2), toDecimal32(50, 2), toDecimal32(70,2)], toDecimal32(1000, 2)) FROM system.numbers LIMIT 10; diff --git 
a/tests/queries/0_stateless/02163_operators.sql b/tests/queries/0_stateless/02163_operators.sql index b2414bb197ef..3f2d7d8bbb7a 100644 --- a/tests/queries/0_stateless/02163_operators.sql +++ b/tests/queries/0_stateless/02163_operators.sql @@ -1,2 +1,2 @@ -WITH 2 AS `b.c`, [4, 5] AS a, 6 AS u, 3 AS v, 2 AS d, TRUE AS e, 1 AS f, 0 AS g, 2 AS h, 'Hello' AS i, 'World' AS j, TIMESTAMP '2022-02-02 02:02:02' AS w, [] AS k, (1, 2) AS l, 2 AS m, 3 AS n, [] AS o, [1] AS p, 1 AS q, q AS r, 1 AS s, 1 AS t +WITH 2 AS `b.c`, [4, 5] AS a, 6 AS u, 3 AS v, 2 AS d, TRUE AS e, 1 AS f, 0 AS g, 2 AS h, 'Hello' AS i, 'World' AS j, 'hi' AS w, NULL AS k, (1, 2) AS l, 2 AS m, 3 AS n, [] AS o, [1] AS p, 1 AS q, q AS r, 1 AS s, 1 AS t SELECT INTERVAL CASE CASE WHEN NOT -a[`b.c`] * u DIV v + d IS NOT NULL AND e OR f BETWEEN g AND h THEN i ELSE j END WHEN w THEN k END || [l, (m, n)] MINUTE IS NULL OR NOT o::Array(INT) = p <> q < r > s != t AS upyachka; diff --git a/tests/queries/0_stateless/02169_map_functions.reference b/tests/queries/0_stateless/02169_map_functions.reference index bec2eaec5958..10746a70f06c 100644 --- a/tests/queries/0_stateless/02169_map_functions.reference +++ b/tests/queries/0_stateless/02169_map_functions.reference @@ -40,6 +40,8 @@ {'key1':1111,'key2':2222,'key5':500,'key6':600} {'key1':1112,'key2':2224,'key5':500,'key6':600} {'key1':1113,'key2':2226,'key5':500,'key6':600} +{'key5':500,'key6':600} +{'key5':500,'key6':600} 1 1 1 diff --git a/tests/queries/0_stateless/02169_map_functions.sql b/tests/queries/0_stateless/02169_map_functions.sql index 27ceb2520220..febaf2bd9d08 100644 --- a/tests/queries/0_stateless/02169_map_functions.sql +++ b/tests/queries/0_stateless/02169_map_functions.sql @@ -11,6 +11,8 @@ SELECT mapApply((k, v) -> tuple(v + 9223372036854775806), col) FROM table_map; - SELECT mapConcat(col, map('key5', 500), map('key6', 600)) FROM table_map ORDER BY id; SELECT mapConcat(col, materialize(map('key5', 500)), map('key6', 600)) FROM table_map ORDER BY id; 
+SELECT concat(map('key5', 500), map('key6', 600)); +SELECT map('key5', 500) || map('key6', 600); SELECT mapExists((k, v) -> k LIKE '%3', col) FROM table_map ORDER BY id; SELECT mapExists((k, v) -> k LIKE '%2' AND v < 1000, col) FROM table_map ORDER BY id; diff --git a/tests/queries/0_stateless/02236_explain_pipeline_join.reference b/tests/queries/0_stateless/02236_explain_pipeline_join.reference index 5d7a7bfc488a..18e5db7aa4ff 100644 --- a/tests/queries/0_stateless/02236_explain_pipeline_join.reference +++ b/tests/queries/0_stateless/02236_explain_pipeline_join.reference @@ -1,17 +1,26 @@ (Expression) ExpressionTransform (Join) - JoiningTransform 2 → 1 - (Expression) - ExpressionTransform - (Limit) - Limit - (ReadFromStorage) - Numbers 0 → 1 - (Expression) - FillingRightJoinSide - ExpressionTransform - (Limit) - Limit - (ReadFromStorage) - Numbers 0 → 1 + Resize 16 → 1 + JoiningTransform × 16 2 → 1 + InnerShuffleGatherTransform × 16 8 → 1 + InnerShuffleDispatchTransform × 8 2 → 16 + InnerShuffleScatterTransform × 16 + Resize 1 → 16 + (Expression) + ExpressionTransform + (Limit) + Limit + (ReadFromStorage) + Numbers 0 → 1 + (Expression) + FillingRightJoinSide × 16 + InnerShuffleGatherTransform × 16 8 → 1 + InnerShuffleDispatchTransform × 8 2 → 16 + InnerShuffleScatterTransform × 16 + Resize 1 → 16 + ExpressionTransform + (Limit) + Limit + (ReadFromStorage) + Numbers 0 → 1 diff --git a/tests/queries/0_stateless/02240_system_filesystem_cache_table.reference b/tests/queries/0_stateless/02240_system_filesystem_cache_table.reference index d3be4855b361..cf2bf5fb521d 100644 --- a/tests/queries/0_stateless/02240_system_filesystem_cache_table.reference +++ b/tests/queries/0_stateless/02240_system_filesystem_cache_table.reference @@ -1,10 +1,60 @@ Using storage policy: s3_cache -0 79 80 -0 745 746 -0 745 746 -0 745 746 +0 +Expect cache +DOWNLOADED 0 79 80 +DOWNLOADED 0 745 746 +2 +Expect cache +DOWNLOADED 0 79 80 +DOWNLOADED 0 745 746 +2 +Expect no cache +Expect cache 
+DOWNLOADED 0 79 80 +DOWNLOADED 0 745 746 +2 +Expect no cache +Expect cache +DOWNLOADED 0 79 80 +DOWNLOADED 0 745 746 +2 +Expect cache +DOWNLOADED 0 79 80 +DOWNLOADED 0 745 746 +2 +Expect no cache +Expect cache +DOWNLOADED 0 79 80 +DOWNLOADED 0 745 746 +2 +Expect no cache Using storage policy: local_cache -0 79 80 -0 745 746 -0 745 746 -0 745 746 +0 +Expect cache +DOWNLOADED 0 79 80 +DOWNLOADED 0 745 746 +2 +Expect cache +DOWNLOADED 0 79 80 +DOWNLOADED 0 745 746 +2 +Expect no cache +Expect cache +DOWNLOADED 0 79 80 +DOWNLOADED 0 745 746 +2 +Expect no cache +Expect cache +DOWNLOADED 0 79 80 +DOWNLOADED 0 745 746 +2 +Expect cache +DOWNLOADED 0 79 80 +DOWNLOADED 0 745 746 +2 +Expect no cache +Expect cache +DOWNLOADED 0 79 80 +DOWNLOADED 0 745 746 +2 +Expect no cache diff --git a/tests/queries/0_stateless/02240_system_filesystem_cache_table.sh b/tests/queries/0_stateless/02240_system_filesystem_cache_table.sh index a487f3ca739c..c7dc9fbd9617 100755 --- a/tests/queries/0_stateless/02240_system_filesystem_cache_table.sh +++ b/tests/queries/0_stateless/02240_system_filesystem_cache_table.sh @@ -9,34 +9,69 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) for STORAGE_POLICY in 's3_cache' 'local_cache'; do echo "Using storage policy: $STORAGE_POLICY" + ${CLICKHOUSE_CLIENT} --query "SYSTEM STOP MERGES" ${CLICKHOUSE_CLIENT} --query "SYSTEM DROP FILESYSTEM CACHE" + ${CLICKHOUSE_CLIENT} --query "SYSTEM DROP MARK CACHE" + ${CLICKHOUSE_CLIENT} --query "SELECT count() FROM system.filesystem_cache" ${CLICKHOUSE_CLIENT} --query "DROP TABLE IF EXISTS test_02240_storage_policy" - ${CLICKHOUSE_CLIENT} --query "CREATE TABLE test_02240_storage_policy (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='${STORAGE_POLICY}', min_bytes_for_wide_part = 10485760, compress_marks=false, compress_primary_key=false" + ${CLICKHOUSE_CLIENT} --query "CREATE TABLE test_02240_storage_policy (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS 
storage_policy='${STORAGE_POLICY}', min_bytes_for_wide_part = 1000000, compress_marks=false, compress_primary_key=false" ${CLICKHOUSE_CLIENT} --query "SYSTEM STOP MERGES test_02240_storage_policy" ${CLICKHOUSE_CLIENT} --enable_filesystem_cache_on_write_operations=0 --query "INSERT INTO test_02240_storage_policy SELECT number, toString(number) FROM numbers(100)" + + echo 'Expect cache' + ${CLICKHOUSE_CLIENT} --query "SYSTEM DROP MARK CACHE" + ${CLICKHOUSE_CLIENT} --query "SELECT * FROM test_02240_storage_policy FORMAT Null" + ${CLICKHOUSE_CLIENT} --query "SELECT state, file_segment_range_begin, file_segment_range_end, size FROM system.filesystem_cache ORDER BY file_segment_range_begin, file_segment_range_end, size" + ${CLICKHOUSE_CLIENT} --query "SELECT uniqExact(key) FROM system.filesystem_cache"; + + echo 'Expect cache' + ${CLICKHOUSE_CLIENT} --query "SYSTEM DROP MARK CACHE" ${CLICKHOUSE_CLIENT} --query "SELECT * FROM test_02240_storage_policy FORMAT Null" - ${CLICKHOUSE_CLIENT} --query "SELECT file_segment_range_begin, file_segment_range_end, size FROM system.filesystem_cache ORDER BY file_segment_range_end, size" + ${CLICKHOUSE_CLIENT} --query "SELECT state, file_segment_range_begin, file_segment_range_end, size FROM system.filesystem_cache ORDER BY file_segment_range_begin, file_segment_range_end, size" + ${CLICKHOUSE_CLIENT} --query "SELECT uniqExact(key) FROM system.filesystem_cache"; ${CLICKHOUSE_CLIENT} --query "SYSTEM DROP FILESYSTEM CACHE" + echo 'Expect no cache' ${CLICKHOUSE_CLIENT} --query "SELECT file_segment_range_begin, file_segment_range_end, size FROM system.filesystem_cache" + + echo 'Expect cache' + ${CLICKHOUSE_CLIENT} --query "SYSTEM DROP MARK CACHE" ${CLICKHOUSE_CLIENT} --query "SELECT * FROM test_02240_storage_policy FORMAT Null" - ${CLICKHOUSE_CLIENT} --query "SELECT file_segment_range_begin, file_segment_range_end, size FROM system.filesystem_cache" + ${CLICKHOUSE_CLIENT} --query "SELECT state, file_segment_range_begin, 
file_segment_range_end, size FROM system.filesystem_cache ORDER BY file_segment_range_begin, file_segment_range_end, size" + ${CLICKHOUSE_CLIENT} --query "SELECT uniqExact(key) FROM system.filesystem_cache"; + ${CLICKHOUSE_CLIENT} --query "SYSTEM DROP FILESYSTEM CACHE" + echo 'Expect no cache' ${CLICKHOUSE_CLIENT} --query "SELECT file_segment_range_begin, file_segment_range_end, size FROM system.filesystem_cache" ${CLICKHOUSE_CLIENT} --query "DROP TABLE IF EXISTS test_02240_storage_policy_3" - ${CLICKHOUSE_CLIENT} --query "CREATE TABLE test_02240_storage_policy_3 (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='${STORAGE_POLICY}_3', min_bytes_for_wide_part = 10485760, compress_marks=false, compress_primary_key=false" + ${CLICKHOUSE_CLIENT} --query "CREATE TABLE test_02240_storage_policy_3 (key UInt32, value String) Engine=MergeTree() ORDER BY key SETTINGS storage_policy='${STORAGE_POLICY}_3', min_bytes_for_wide_part = 1000000, compress_marks=false, compress_primary_key=false" ${CLICKHOUSE_CLIENT} --enable_filesystem_cache_on_write_operations=0 --query "INSERT INTO test_02240_storage_policy_3 SELECT number, toString(number) FROM numbers(100)" + + echo 'Expect cache' + ${CLICKHOUSE_CLIENT} --query "SYSTEM DROP MARK CACHE" ${CLICKHOUSE_CLIENT} --query "SELECT * FROM test_02240_storage_policy_3 FORMAT Null" - ${CLICKHOUSE_CLIENT} --query "SELECT file_segment_range_begin, file_segment_range_end, size FROM system.filesystem_cache ORDER BY file_segment_range_end, size" + ${CLICKHOUSE_CLIENT} --query "SELECT state, file_segment_range_begin, file_segment_range_end, size FROM system.filesystem_cache ORDER BY file_segment_range_begin, file_segment_range_end, size" + ${CLICKHOUSE_CLIENT} --query "SELECT uniqExact(key) FROM system.filesystem_cache"; + + echo 'Expect cache' + ${CLICKHOUSE_CLIENT} --query "SYSTEM DROP MARK CACHE" ${CLICKHOUSE_CLIENT} --query "SELECT * FROM test_02240_storage_policy_3 FORMAT Null" - ${CLICKHOUSE_CLIENT} --query 
"SELECT file_segment_range_begin, file_segment_range_end, size FROM system.filesystem_cache ORDER BY file_segment_range_end, size" + ${CLICKHOUSE_CLIENT} --query "SELECT state, file_segment_range_begin, file_segment_range_end, size FROM system.filesystem_cache ORDER BY file_segment_range_begin, file_segment_range_end, size" + ${CLICKHOUSE_CLIENT} --query "SELECT uniqExact(key) FROM system.filesystem_cache"; + echo 'Expect no cache' ${CLICKHOUSE_CLIENT} --query "SYSTEM DROP FILESYSTEM CACHE" ${CLICKHOUSE_CLIENT} --query "SELECT file_segment_range_begin, file_segment_range_end, size FROM system.filesystem_cache" + + echo 'Expect cache' + ${CLICKHOUSE_CLIENT} --query "SYSTEM DROP MARK CACHE" ${CLICKHOUSE_CLIENT} --query "SELECT * FROM test_02240_storage_policy_3 FORMAT Null" - ${CLICKHOUSE_CLIENT} --query "SELECT file_segment_range_begin, file_segment_range_end, size FROM system.filesystem_cache" + ${CLICKHOUSE_CLIENT} --query "SELECT state, file_segment_range_begin, file_segment_range_end, size FROM system.filesystem_cache ORDER BY file_segment_range_begin, file_segment_range_end, size" + ${CLICKHOUSE_CLIENT} --query "SELECT uniqExact(key) FROM system.filesystem_cache"; + ${CLICKHOUSE_CLIENT} --query "SYSTEM DROP FILESYSTEM CACHE" + echo 'Expect no cache' ${CLICKHOUSE_CLIENT} --query "SELECT file_segment_range_begin, file_segment_range_end, size FROM system.filesystem_cache" done diff --git a/tests/queries/0_stateless/02245_join_with_nullable_lowcardinality_crash.sql b/tests/queries/0_stateless/02245_join_with_nullable_lowcardinality_crash.sql index abc2ee414022..deafa8d6ab5b 100644 --- a/tests/queries/0_stateless/02245_join_with_nullable_lowcardinality_crash.sql +++ b/tests/queries/0_stateless/02245_join_with_nullable_lowcardinality_crash.sql @@ -12,8 +12,8 @@ CREATE TABLE without_nullable insert into with_nullable values(0,'f'),(0,'usa'); insert into without_nullable values(0,'usa'),(0,'us2a'); -select if(t0.country is null ,t2.country,t0.country) "country" -from 
without_nullable t0 right outer join with_nullable t2 on t0.country=t2.country; +select if(t0.country is null ,t2.country,t0.country) as c +from without_nullable t0 right outer join with_nullable t2 on t0.country=t2.country order by c desc; drop table with_nullable; drop table without_nullable; diff --git a/tests/queries/0_stateless/02282_array_distance.sql b/tests/queries/0_stateless/02282_array_distance.sql index 9c16071dc1f9..08539d461109 100644 --- a/tests/queries/0_stateless/02282_array_distance.sql +++ b/tests/queries/0_stateless/02282_array_distance.sql @@ -48,7 +48,8 @@ SELECT L2SquaredDistance(v1.v, v2.v), cosineDistance(v1.v, v2.v) FROM vec2 v1, vec2 v2 -WHERE length(v1.v) == length(v2.v); +WHERE length(v1.v) == length(v2.v) +ORDER BY v1.id, v2.id; INSERT INTO vec2f VALUES (1, [100, 200, 0]), (2, [888, 777, 666]), (3, range(1, 35, 1)), (4, range(3, 37, 1)), (5, range(1, 135, 1)), (6, range(3, 137, 1)); SELECT @@ -61,7 +62,8 @@ SELECT L2SquaredDistance(v1.v, v2.v), cosineDistance(v1.v, v2.v) FROM vec2f v1, vec2f v2 -WHERE length(v1.v) == length(v2.v); +WHERE length(v1.v) == length(v2.v) +ORDER BY v1.id, v2.id; INSERT INTO vec2d VALUES (1, [100, 200, 0]), (2, [888, 777, 666]), (3, range(1, 35, 1)), (4, range(3, 37, 1)), (5, range(1, 135, 1)), (6, range(3, 137, 1)); SELECT @@ -74,7 +76,8 @@ SELECT L2SquaredDistance(v1.v, v2.v), cosineDistance(v1.v, v2.v) FROM vec2d v1, vec2d v2 -WHERE length(v1.v) == length(v2.v); +WHERE length(v1.v) == length(v2.v) +ORDER BY v1.id, v2.id; SELECT v1.id, @@ -86,7 +89,8 @@ SELECT L2SquaredDistance(v1.v, v2.v), cosineDistance(v1.v, v2.v) FROM vec2f v1, vec2d v2 -WHERE length(v1.v) == length(v2.v); +WHERE length(v1.v) == length(v2.v) +ORDER BY v1.id, v2.id; SELECT L1Distance([0, 0], [1]); -- { serverError 190 } SELECT L2Distance([1, 2], (3,4)); -- { serverError 43 } diff --git a/tests/queries/0_stateless/02344_describe_cache.reference b/tests/queries/0_stateless/02344_describe_cache.reference index c98e9d263ca7..7561b32bae17 
100644 --- a/tests/queries/0_stateless/02344_describe_cache.reference +++ b/tests/queries/0_stateless/02344_describe_cache.reference @@ -1,2 +1,2 @@ -2147483648 1048576 104857600 1 0 0 0 /var/lib/clickhouse/caches/s3_cache/ 0 -2147483648 1048576 104857600 0 0 0 0 /var/lib/clickhouse/caches/s3_cache_2/ 0 +134217728 1048576 104857600 1 0 0 0 /var/lib/clickhouse/caches/s3_cache/ 0 +134217728 1048576 104857600 0 0 0 0 /var/lib/clickhouse/caches/s3_cache_2/ 0 diff --git a/tests/queries/0_stateless/02346_additional_filters.reference b/tests/queries/0_stateless/02346_additional_filters.reference index 0a08995223d6..e3b6f2a38c64 100644 --- a/tests/queries/0_stateless/02346_additional_filters.reference +++ b/tests/queries/0_stateless/02346_additional_filters.reference @@ -101,7 +101,7 @@ select * from (select number from system.numbers limit 5 union all select x from 4 4 5 -select number, x, y from (select number from system.numbers limit 5) f any left join (select x, y from table_1) s on f.number = s.x settings additional_table_filters={'system.numbers' : 'number != 3', 'table_1' : 'x != 2'}; +select number, x, y from (select number from system.numbers limit 5) f any left join (select x, y from table_1) s on f.number = s.x order by number settings additional_table_filters={'system.numbers' : 'number != 3', 'table_1' : 'x != 2'}; 0 0 1 1 a 2 0 diff --git a/tests/queries/0_stateless/02346_additional_filters.sql b/tests/queries/0_stateless/02346_additional_filters.sql index f6b665713ec8..3d6a25f7ffde 100644 --- a/tests/queries/0_stateless/02346_additional_filters.sql +++ b/tests/queries/0_stateless/02346_additional_filters.sql @@ -40,7 +40,7 @@ select * from system.numbers as t limit 5 settings additional_table_filters={'t' select * from system.numbers limit 5 settings additional_table_filters={'system.numbers' : 'number != 3'}; select * from system.numbers limit 5 settings additional_table_filters={'system.numbers':'number != 3','table_1':'x!=2'}; select * from (select 
number from system.numbers limit 5 union all select x from table_1) order by number settings additional_table_filters={'system.numbers':'number != 3','table_1':'x!=2'}; -select number, x, y from (select number from system.numbers limit 5) f any left join (select x, y from table_1) s on f.number = s.x settings additional_table_filters={'system.numbers' : 'number != 3', 'table_1' : 'x != 2'}; +select number, x, y from (select number from system.numbers limit 5) f any left join (select x, y from table_1) s on f.number = s.x order by number settings additional_table_filters={'system.numbers' : 'number != 3', 'table_1' : 'x != 2'}; select b + 1 as c from (select a + 1 as b from (select x + 1 as a from table_1)) settings additional_table_filters={'table_1' : 'x != 2 and x != 3'}; -- { echoOff } diff --git a/tests/queries/0_stateless/02372_analyzer_join.reference b/tests/queries/0_stateless/02372_analyzer_join.reference index b8a658106ff0..1309d4280715 100644 --- a/tests/queries/0_stateless/02372_analyzer_join.reference +++ b/tests/queries/0_stateless/02372_analyzer_join.reference @@ -5,25 +5,25 @@ JOIN INNER SELECT 'JOIN ON without conditions'; JOIN ON without conditions SELECT test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value -FROM test_table_join_1 INNER JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id; +FROM test_table_join_1 INNER JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id order by test_table_join_1.id, test_table_join_1.value; 0 Join_1_Value_0 0 Join_2_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value -FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id; +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id order by t1.id, t1.value; 0 Join_1_Value_0 0 Join_2_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 SELECT '--'; -- SELECT t1.id, test_table_join_1.id, t1.value, 
test_table_join_1.value, t2.id, test_table_join_2.id, t2.value, test_table_join_2.value -FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id; +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id ORDER BY t1.id, t1.value; 0 0 Join_1_Value_0 Join_1_Value_0 0 0 Join_2_Value_0 Join_2_Value_0 1 1 Join_1_Value_1 Join_1_Value_1 1 1 Join_2_Value_1 Join_2_Value_1 SELECT '--'; -- SELECT t1.value, t2.value -FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id; +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id ORDER BY t1.value; Join_1_Value_0 Join_2_Value_0 Join_1_Value_1 Join_2_Value_1 SELECT id FROM test_table_join_1 INNER JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id; -- { serverError 207 } @@ -31,37 +31,37 @@ SELECT value FROM test_table_join_1 INNER JOIN test_table_join_2 ON test_table_j SELECT 'JOIN ON with conditions'; JOIN ON with conditions SELECT t1.id, t1.value, t2.id, t2.value -FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value = 'Join_1_Value_0'; +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value = 'Join_1_Value_0' ORDER BY t1.id, t1.value, t2.id, t2.value; 0 Join_1_Value_0 0 Join_2_Value_0 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value -FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t2.value = 'Join_2_Value_0'; +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t2.value = 'Join_2_Value_0' ORDER BY t1.id, t1.value, t2.id, t2.value; 0 Join_1_Value_0 0 Join_2_Value_0 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value -FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value = 'Join_1_Value_0' AND t2.value = 'Join_2_Value_0'; +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value 
= 'Join_1_Value_0' AND t2.value = 'Join_2_Value_0' ORDER BY t1.id, t1.value, t2.id, t2.value; 0 Join_1_Value_0 0 Join_2_Value_0 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value -FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON toString(t1.id) = toString(t2.id) AND t1.value = 'Join_1_Value_0' AND t2.value = 'Join_2_Value_0'; +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON toString(t1.id) = toString(t2.id) AND t1.value = 'Join_1_Value_0' AND t2.value = 'Join_2_Value_0' ORDER BY t1.id, t1.value, t2.id, t2.value; 0 Join_1_Value_0 0 Join_2_Value_0 SELECT 'JOIN multiple clauses'; JOIN multiple clauses SELECT t1.id, t1.value, t2.id, t2.value -FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id; +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id order by t1.id, t1.value; 0 Join_1_Value_0 0 Join_2_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 SELECT 'JOIN expression aliases'; JOIN expression aliases -SELECT t1_id, t1.value, t2_id, t2.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON (t1.id AS t1_id) = (t2.id AS t2_id); +SELECT t1_id, t1.value, t2_id, t2.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON (t1.id AS t1_id) = (t2.id AS t2_id) order by t1_id, t1.value, t2_id; 0 Join_1_Value_0 0 Join_2_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 SELECT '--'; -- -SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1_id = t2_id; +SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1_id = t2_id order by t1_id, t1.value, t2_id; 0 Join_1_Value_0 0 Join_2_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 SELECT 'JOIN LEFT'; @@ -69,28 +69,28 @@ JOIN LEFT SELECT 'JOIN ON without conditions'; JOIN ON without conditions SELECT test_table_join_1.id, test_table_join_1.value, 
test_table_join_2.id, test_table_join_2.value -FROM test_table_join_1 LEFT JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id; +FROM test_table_join_1 LEFT JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id order by test_table_join_1.id, test_table_join_1.value; 0 Join_1_Value_0 0 Join_2_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 2 Join_1_Value_2 0 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value -FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id; +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id order by t1.id, t1.value; 0 Join_1_Value_0 0 Join_2_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 2 Join_1_Value_2 0 SELECT '--'; -- SELECT t1.id, test_table_join_1.id, t1.value, test_table_join_1.value, t2.id, test_table_join_2.id, t2.value, test_table_join_2.value -FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id; +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id ORDER BY t1.id, t1.value; 0 0 Join_1_Value_0 Join_1_Value_0 0 0 Join_2_Value_0 Join_2_Value_0 1 1 Join_1_Value_1 Join_1_Value_1 1 1 Join_2_Value_1 Join_2_Value_1 2 2 Join_1_Value_2 Join_1_Value_2 0 0 SELECT '--'; -- SELECT t1.value, t2.value -FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id; +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id ORDER BY t1.value; Join_1_Value_0 Join_2_Value_0 Join_1_Value_1 Join_2_Value_1 Join_1_Value_2 @@ -99,45 +99,45 @@ SELECT value FROM test_table_join_1 LEFT JOIN test_table_join_2 ON test_table_jo SELECT 'JOIN ON with conditions'; JOIN ON with conditions SELECT t1.id, t1.value, t2.id, t2.value -FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value = 'Join_1_Value_0'; +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value = 'Join_1_Value_0' ORDER BY t1.id, t1.value, t2.id, 
t2.value; 0 Join_1_Value_0 0 Join_2_Value_0 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value -FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t2.value = 'Join_2_Value_0'; +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t2.value = 'Join_2_Value_0' ORDER BY t1.id, t1.value, t2.id, t2.value; 0 Join_1_Value_0 0 Join_2_Value_0 1 Join_1_Value_1 0 2 Join_1_Value_2 0 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value -FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value = 'Join_1_Value_0' AND t2.value = 'Join_2_Value_0'; +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value = 'Join_1_Value_0' AND t2.value = 'Join_2_Value_0' ORDER BY t1.id, t1.value, t2.id, t2.value; 0 Join_1_Value_0 0 Join_2_Value_0 1 Join_1_Value_1 0 2 Join_1_Value_2 0 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value -FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON toString(t1.id) = toString(t2.id) AND t1.value = 'Join_1_Value_0' AND t2.value = 'Join_2_Value_0'; +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON toString(t1.id) = toString(t2.id) AND t1.value = 'Join_1_Value_0' AND t2.value = 'Join_2_Value_0' ORDER BY t1.id, t1.value, t2.id, t2.value; 0 Join_1_Value_0 0 Join_2_Value_0 1 Join_1_Value_1 0 2 Join_1_Value_2 0 SELECT 'JOIN multiple clauses'; JOIN multiple clauses SELECT t1.id, t1.value, t2.id, t2.value -FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id; +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id order by t1.id, t1.value; 0 Join_1_Value_0 0 Join_2_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 2 Join_1_Value_2 0 SELECT 'JOIN expression aliases'; JOIN expression aliases -SELECT t1_id, t1.value, t2_id, t2.value FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON (t1.id AS t1_id) = 
(t2.id AS t2_id); +SELECT t1_id, t1.value, t2_id, t2.value FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON (t1.id AS t1_id) = (t2.id AS t2_id) order by t1_id, t1.value, t2_id; 0 Join_1_Value_0 0 Join_2_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 2 Join_1_Value_2 0 SELECT '--'; -- -SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1_id = t2_id; +SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1_id = t2_id order by t1_id, t1.value, t2_id; 0 Join_1_Value_0 0 Join_2_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 2 Join_1_Value_2 0 @@ -146,182 +146,182 @@ JOIN RIGHT SELECT 'JOIN ON without conditions'; JOIN ON without conditions SELECT test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value -FROM test_table_join_1 RIGHT JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id; +FROM test_table_join_1 RIGHT JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id order by test_table_join_1.id, test_table_join_1.value; +0 3 Join_2_Value_3 0 Join_1_Value_0 0 Join_2_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 -0 3 Join_2_Value_3 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value -FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id; +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id order by t1.id, t1.value; +0 3 Join_2_Value_3 0 Join_1_Value_0 0 Join_2_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 -0 3 Join_2_Value_3 SELECT '--'; -- SELECT t1.id, test_table_join_1.id, t1.value, test_table_join_1.value, t2.id, test_table_join_2.id, t2.value, test_table_join_2.value -FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id; +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id ORDER BY t1.id, t1.value; +0 0 3 3 Join_2_Value_3 Join_2_Value_3 0 0 
Join_1_Value_0 Join_1_Value_0 0 0 Join_2_Value_0 Join_2_Value_0 1 1 Join_1_Value_1 Join_1_Value_1 1 1 Join_2_Value_1 Join_2_Value_1 -0 0 3 3 Join_2_Value_3 Join_2_Value_3 SELECT '--'; -- SELECT t1.value, t2.value -FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id; +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id ORDER BY t1.value; + Join_2_Value_3 Join_1_Value_0 Join_2_Value_0 Join_1_Value_1 Join_2_Value_1 - Join_2_Value_3 SELECT id FROM test_table_join_1 RIGHT JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id; -- { serverError 207 } SELECT value FROM test_table_join_1 RIGHT JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id; -- { serverError 207 } SELECT 'JOIN ON with conditions'; JOIN ON with conditions SELECT t1.id, t1.value, t2.id, t2.value -FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value = 'Join_1_Value_0'; +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value = 'Join_1_Value_0' ORDER BY t1.id, t1.value, t2.id, t2.value; 0 Join_1_Value_0 0 Join_2_Value_0 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value -FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t2.value = 'Join_2_Value_0'; -0 Join_1_Value_0 0 Join_2_Value_0 +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t2.value = 'Join_2_Value_0' ORDER BY t1.id, t1.value, t2.id, t2.value; 0 1 Join_2_Value_1 0 3 Join_2_Value_3 +0 Join_1_Value_0 0 Join_2_Value_0 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value -FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value = 'Join_1_Value_0' AND t2.value = 'Join_2_Value_0'; -0 Join_1_Value_0 0 Join_2_Value_0 +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value = 'Join_1_Value_0' AND t2.value = 'Join_2_Value_0' ORDER BY t1.id, 
t1.value, t2.id, t2.value; 0 1 Join_2_Value_1 0 3 Join_2_Value_3 +0 Join_1_Value_0 0 Join_2_Value_0 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value -FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON toString(t1.id) = toString(t2.id) AND t1.value = 'Join_1_Value_0' AND t2.value = 'Join_2_Value_0'; -0 Join_1_Value_0 0 Join_2_Value_0 +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON toString(t1.id) = toString(t2.id) AND t1.value = 'Join_1_Value_0' AND t2.value = 'Join_2_Value_0' ORDER BY t1.id, t1.value, t2.id, t2.value; 0 1 Join_2_Value_1 0 3 Join_2_Value_3 +0 Join_1_Value_0 0 Join_2_Value_0 SELECT 'JOIN multiple clauses'; JOIN multiple clauses SELECT t1.id, t1.value, t2.id, t2.value -FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id; +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id order by t1.id, t1.value; +0 3 Join_2_Value_3 0 Join_1_Value_0 0 Join_2_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 -0 3 Join_2_Value_3 SELECT 'JOIN expression aliases'; JOIN expression aliases -SELECT t1_id, t1.value, t2_id, t2.value FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON (t1.id AS t1_id) = (t2.id AS t2_id); +SELECT t1_id, t1.value, t2_id, t2.value FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON (t1.id AS t1_id) = (t2.id AS t2_id) order by t1_id, t1.value, t2_id; +0 3 Join_2_Value_3 0 Join_1_Value_0 0 Join_2_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 -0 3 Join_2_Value_3 SELECT '--'; -- -SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1_id = t2_id; +SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1_id = t2_id order by t1_id, t1.value, t2_id; +0 3 Join_2_Value_3 0 Join_1_Value_0 0 Join_2_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 -0 3 Join_2_Value_3 SELECT 
'JOIN FULL'; JOIN FULL SELECT 'JOIN ON without conditions'; JOIN ON without conditions SELECT test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value -FROM test_table_join_1 FULL JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id; +FROM test_table_join_1 FULL JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id order by test_table_join_1.id, test_table_join_1.value; +0 3 Join_2_Value_3 0 Join_1_Value_0 0 Join_2_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 2 Join_1_Value_2 0 -0 3 Join_2_Value_3 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value -FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id; +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id order by t1.id, t1.value; +0 3 Join_2_Value_3 0 Join_1_Value_0 0 Join_2_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 2 Join_1_Value_2 0 -0 3 Join_2_Value_3 SELECT '--'; -- SELECT t1.id, test_table_join_1.id, t1.value, test_table_join_1.value, t2.id, test_table_join_2.id, t2.value, test_table_join_2.value -FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id; +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id ORDER BY t1.id, t1.value; +0 0 3 3 Join_2_Value_3 Join_2_Value_3 0 0 Join_1_Value_0 Join_1_Value_0 0 0 Join_2_Value_0 Join_2_Value_0 1 1 Join_1_Value_1 Join_1_Value_1 1 1 Join_2_Value_1 Join_2_Value_1 2 2 Join_1_Value_2 Join_1_Value_2 0 0 -0 0 3 3 Join_2_Value_3 Join_2_Value_3 SELECT '--'; -- SELECT t1.value, t2.value -FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id; +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id ORDER BY t1.value; + Join_2_Value_3 Join_1_Value_0 Join_2_Value_0 Join_1_Value_1 Join_2_Value_1 Join_1_Value_2 - Join_2_Value_3 SELECT id FROM test_table_join_1 FULL JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id; -- { serverError 207 } 
SELECT value FROM test_table_join_1 FULL JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id; -- { serverError 207 } SELECT 'JOIN ON with conditions'; JOIN ON with conditions SELECT t1.id, t1.value, t2.id, t2.value -FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value = 'Join_1_Value_0'; +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value = 'Join_1_Value_0' ORDER BY t1.id, t1.value, t2.id, t2.value; 0 Join_1_Value_0 0 Join_2_Value_0 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value -FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t2.value = 'Join_2_Value_0'; +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t2.value = 'Join_2_Value_0' ORDER BY t1.id, t1.value, t2.id, t2.value; +0 1 Join_2_Value_1 +0 3 Join_2_Value_3 0 Join_1_Value_0 0 Join_2_Value_0 1 Join_1_Value_1 0 2 Join_1_Value_2 0 -0 1 Join_2_Value_1 -0 3 Join_2_Value_3 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value -FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value = 'Join_1_Value_0' AND t2.value = 'Join_2_Value_0'; +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value = 'Join_1_Value_0' AND t2.value = 'Join_2_Value_0' ORDER BY t1.id, t1.value, t2.id, t2.value; +0 1 Join_2_Value_1 +0 3 Join_2_Value_3 0 Join_1_Value_0 0 Join_2_Value_0 1 Join_1_Value_1 0 2 Join_1_Value_2 0 -0 1 Join_2_Value_1 -0 3 Join_2_Value_3 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value -FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON toString(t1.id) = toString(t2.id) AND t1.value = 'Join_1_Value_0' AND t2.value = 'Join_2_Value_0'; +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON toString(t1.id) = toString(t2.id) AND t1.value = 'Join_1_Value_0' AND t2.value = 'Join_2_Value_0' ORDER BY t1.id, t1.value, t2.id, t2.value; +0 1 
Join_2_Value_1 +0 3 Join_2_Value_3 0 Join_1_Value_0 0 Join_2_Value_0 1 Join_1_Value_1 0 2 Join_1_Value_2 0 -0 1 Join_2_Value_1 -0 3 Join_2_Value_3 SELECT 'JOIN multiple clauses'; JOIN multiple clauses SELECT t1.id, t1.value, t2.id, t2.value -FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id; +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id order by t1.id, t1.value; +0 3 Join_2_Value_3 0 Join_1_Value_0 0 Join_2_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 2 Join_1_Value_2 0 -0 3 Join_2_Value_3 SELECT 'JOIN expression aliases'; JOIN expression aliases -SELECT t1_id, t1.value, t2_id, t2.value FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON (t1.id AS t1_id) = (t2.id AS t2_id); +SELECT t1_id, t1.value, t2_id, t2.value FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON (t1.id AS t1_id) = (t2.id AS t2_id) order by t1_id, t1.value, t2_id; +0 3 Join_2_Value_3 0 Join_1_Value_0 0 Join_2_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 2 Join_1_Value_2 0 -0 3 Join_2_Value_3 SELECT '--'; -- -SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1_id = t2_id; +SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1_id = t2_id order by t1_id, t1.value, t2_id; +0 3 Join_2_Value_3 0 Join_1_Value_0 0 Join_2_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 2 Join_1_Value_2 0 -0 3 Join_2_Value_3 SELECT 'First JOIN INNER second JOIN INNER'; First JOIN INNER second JOIN INNER SELECT 'JOIN ON without conditions'; JOIN ON without conditions SELECT test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value, test_table_join_3.id, test_table_join_3.value FROM test_table_join_1 INNER JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id -INNER JOIN test_table_join_3 ON test_table_join_2.id = 
test_table_join_3.id; +INNER JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id ORDER BY test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value; 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -INNER JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +INNER JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY t1.id, t1.value, t2.id, t2.value, t3.id, t3.value; 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 SELECT '--'; @@ -329,48 +329,48 @@ SELECT '--'; SELECT t1.id, test_table_join_1.id, t1.value, test_table_join_1.value, t2.id, test_table_join_2.id, t2.value, test_table_join_2.value, t3.id, test_table_join_3.id, t3.value, test_table_join_3.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -INNER JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +INNER JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY t1.id, t1.value, t2.id; 0 0 Join_1_Value_0 Join_1_Value_0 0 0 Join_2_Value_0 Join_2_Value_0 0 0 Join_3_Value_0 Join_3_Value_0 1 1 Join_1_Value_1 Join_1_Value_1 1 1 Join_2_Value_1 Join_2_Value_1 1 1 Join_3_Value_1 Join_3_Value_1 SELECT '--'; -- SELECT t1.value, t2.value, t3.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -INNER JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +INNER JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY t1.value, t2.value, t3.value; Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 
Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 SELECT 'JOIN ON with conditions'; JOIN ON with conditions SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' -INNER JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0'; +INNER JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' ORDER BY t1.id, t1.value, t2.id, t2.value, t3.id, t3.value; 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' AND t2.value == 'Join_2_Value_0' -INNER JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0'; +INNER JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0' ORDER BY t1.id, t1.value, t2.id, t2.value, t3.id, t3.value; 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 SELECT 'JOIN multiple clauses'; JOIN multiple clauses SELECT t1.value, t2.value, t3.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id -INNER JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id; +INNER JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id ORDER BY t1.value, t2.value, t3.value; Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 SELECT 'JOIN expression aliases'; JOIN expression aliases SELECT t1_id, t1.value, t2_id, t2.value, t3_id, t3.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON (t1.id AS t1_id) = (t2.id AS t2_id) -INNER JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id); +INNER JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id) ORDER BY t1_id, t1.value, t2_id, t2.value, t3_id, t3.value; 0 
Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 SELECT '--'; -- SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value, t3.id AS t3_id, t3.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1_id = t2_id -INNER JOIN test_table_join_3 AS t3 ON t2_id = t3_id; +INNER JOIN test_table_join_3 AS t3 ON t2_id = t3_id ORDER BY t1_id, t1.value, t2_id, t2.value, t3_id, t3.value; 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 SELECT 'First JOIN INNER second JOIN LEFT'; @@ -379,14 +379,14 @@ SELECT 'JOIN ON without conditions'; JOIN ON without conditions SELECT test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value, test_table_join_3.id, test_table_join_3.value FROM test_table_join_1 INNER JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id -LEFT JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id; +LEFT JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id ORDER BY test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value; 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -LEFT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +LEFT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY t1.id, t1.value, t2.id, t2.value, t3.id, t3.value; 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 SELECT '--'; @@ -394,48 +394,48 @@ SELECT '--'; SELECT t1.id, test_table_join_1.id, t1.value, test_table_join_1.value, t2.id, test_table_join_2.id, t2.value, test_table_join_2.value, t3.id, 
test_table_join_3.id, t3.value, test_table_join_3.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -LEFT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +LEFT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY t1.id, t1.value, t2.id; 0 0 Join_1_Value_0 Join_1_Value_0 0 0 Join_2_Value_0 Join_2_Value_0 0 0 Join_3_Value_0 Join_3_Value_0 1 1 Join_1_Value_1 Join_1_Value_1 1 1 Join_2_Value_1 Join_2_Value_1 1 1 Join_3_Value_1 Join_3_Value_1 SELECT '--'; -- SELECT t1.value, t2.value, t3.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -LEFT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +LEFT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY t1.value, t2.value, t3.value; Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 SELECT 'JOIN ON with conditions'; JOIN ON with conditions SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' -LEFT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0'; +LEFT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' ORDER BY t1.id, t1.value, t2.id, t2.value, t3.id, t3.value; 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' AND t2.value == 'Join_2_Value_0' -LEFT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0'; +LEFT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0' ORDER 
BY t1.id, t1.value, t2.id, t2.value, t3.id, t3.value; 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 SELECT 'JOIN multiple clauses'; JOIN multiple clauses SELECT t1.value, t2.value, t3.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id -LEFT JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id; +LEFT JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id ORDER BY t1.value, t2.value, t3.value; Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 SELECT 'JOIN expression aliases'; JOIN expression aliases SELECT t1_id, t1.value, t2_id, t2.value, t3_id, t3.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON (t1.id AS t1_id) = (t2.id AS t2_id) -LEFT JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id); +LEFT JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id) ORDER BY t1_id, t1.value, t2_id, t2.value, t3_id, t3.value; 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 SELECT '--'; -- SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value, t3.id AS t3_id, t3.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1_id = t2_id -LEFT JOIN test_table_join_3 AS t3 ON t2_id = t3_id; +LEFT JOIN test_table_join_3 AS t3 ON t2_id = t3_id ORDER BY t1_id, t1.value, t2_id, t2.value, t3_id, t3.value; 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 SELECT 'First JOIN INNER second JOIN RIGHT'; @@ -444,159 +444,159 @@ SELECT 'JOIN ON without conditions'; JOIN ON without conditions SELECT test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value, test_table_join_3.id, test_table_join_3.value FROM test_table_join_1 INNER JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id -RIGHT JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id; +RIGHT JOIN 
test_table_join_3 ON test_table_join_2.id = test_table_join_3.id ORDER BY test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value; +0 0 4 Join_3_Value_4 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 -0 0 4 Join_3_Value_4 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -RIGHT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +RIGHT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY t1.id, t1.value, t2.id, t2.value, t3.id, t3.value; +0 0 4 Join_3_Value_4 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 -0 0 4 Join_3_Value_4 SELECT '--'; -- SELECT t1.id, test_table_join_1.id, t1.value, test_table_join_1.value, t2.id, test_table_join_2.id, t2.value, test_table_join_2.value, t3.id, test_table_join_3.id, t3.value, test_table_join_3.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -RIGHT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +RIGHT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY t1.id, t1.value, t2.id; +0 0 0 0 4 4 Join_3_Value_4 Join_3_Value_4 0 0 Join_1_Value_0 Join_1_Value_0 0 0 Join_2_Value_0 Join_2_Value_0 0 0 Join_3_Value_0 Join_3_Value_0 1 1 Join_1_Value_1 Join_1_Value_1 1 1 Join_2_Value_1 Join_2_Value_1 1 1 Join_3_Value_1 Join_3_Value_1 -0 0 0 0 4 4 Join_3_Value_4 Join_3_Value_4 SELECT '--'; -- SELECT t1.value, t2.value, t3.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -RIGHT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +RIGHT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = 
test_table_join_3.id ORDER BY t1.value, t2.value, t3.value; + Join_3_Value_4 Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 - Join_3_Value_4 SELECT 'JOIN ON with conditions'; JOIN ON with conditions SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' -RIGHT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0'; -0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 -0 0 4 Join_3_Value_4 +RIGHT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' ORDER BY t1.id, t1.value, t2.id, t2.value, t3.id, t3.value; 0 0 1 Join_3_Value_1 +0 0 4 Join_3_Value_4 +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' AND t2.value == 'Join_2_Value_0' -RIGHT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0'; -0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +RIGHT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0' ORDER BY t1.id, t1.value, t2.id, t2.value, t3.id, t3.value; 0 0 1 Join_3_Value_1 0 0 4 Join_3_Value_4 +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 SELECT 'JOIN multiple clauses'; JOIN multiple clauses SELECT t1.value, t2.value, t3.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id -RIGHT JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id; +RIGHT JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id ORDER BY t1.value, t2.value, t3.value; + Join_3_Value_4 Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 - Join_3_Value_4 SELECT 'JOIN expression 
aliases'; JOIN expression aliases SELECT t1_id, t1.value, t2_id, t2.value, t3_id, t3.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON (t1.id AS t1_id) = (t2.id AS t2_id) -RIGHT JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id); +RIGHT JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id) ORDER BY t1_id, t1.value, t2_id, t2.value, t3_id, t3.value; +0 0 4 Join_3_Value_4 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 -0 0 4 Join_3_Value_4 SELECT '--'; -- SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value, t3.id AS t3_id, t3.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1_id = t2_id -RIGHT JOIN test_table_join_3 AS t3 ON t2_id = t3_id; +RIGHT JOIN test_table_join_3 AS t3 ON t2_id = t3_id ORDER BY t1_id, t1.value, t2_id, t2.value, t3_id, t3.value; +0 0 4 Join_3_Value_4 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 -0 0 4 Join_3_Value_4 SELECT 'First JOIN INNER second JOIN FULL'; First JOIN INNER second JOIN FULL SELECT 'JOIN ON without conditions'; JOIN ON without conditions SELECT test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value, test_table_join_3.id, test_table_join_3.value FROM test_table_join_1 INNER JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id -FULL JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id; +FULL JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id ORDER BY test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value; +0 0 4 Join_3_Value_4 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 -0 0 4 Join_3_Value_4 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id 
-FULL JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +FULL JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY t1.id, t1.value, t2.id, t2.value, t3.id, t3.value; +0 0 4 Join_3_Value_4 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 -0 0 4 Join_3_Value_4 SELECT '--'; -- SELECT t1.id, test_table_join_1.id, t1.value, test_table_join_1.value, t2.id, test_table_join_2.id, t2.value, test_table_join_2.value, t3.id, test_table_join_3.id, t3.value, test_table_join_3.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -FULL JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +FULL JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY t1.id, t1.value, t2.id; +0 0 0 0 4 4 Join_3_Value_4 Join_3_Value_4 0 0 Join_1_Value_0 Join_1_Value_0 0 0 Join_2_Value_0 Join_2_Value_0 0 0 Join_3_Value_0 Join_3_Value_0 1 1 Join_1_Value_1 Join_1_Value_1 1 1 Join_2_Value_1 Join_2_Value_1 1 1 Join_3_Value_1 Join_3_Value_1 -0 0 0 0 4 4 Join_3_Value_4 Join_3_Value_4 SELECT '--'; -- SELECT t1.value, t2.value, t3.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -FULL JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +FULL JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY t1.value, t2.value, t3.value; + Join_3_Value_4 Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 - Join_3_Value_4 SELECT 'JOIN ON with conditions'; JOIN ON with conditions SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' -FULL JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0'; -0 
Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 -0 0 4 Join_3_Value_4 +FULL JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' ORDER BY t1.id, t1.value, t2.id, t2.value, t3.id, t3.value; 0 0 1 Join_3_Value_1 +0 0 4 Join_3_Value_4 +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' AND t2.value == 'Join_2_Value_0' -FULL JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0'; -0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +FULL JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0' ORDER BY t1.id, t1.value, t2.id, t2.value, t3.id, t3.value; 0 0 1 Join_3_Value_1 0 0 4 Join_3_Value_4 +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 SELECT 'JOIN multiple clauses'; JOIN multiple clauses SELECT t1.value, t2.value, t3.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id -FULL JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id; +FULL JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id ORDER BY t1.value, t2.value, t3.value; + Join_3_Value_4 Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 - Join_3_Value_4 SELECT 'JOIN expression aliases'; JOIN expression aliases SELECT t1_id, t1.value, t2_id, t2.value, t3_id, t3.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON (t1.id AS t1_id) = (t2.id AS t2_id) -FULL JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id); +FULL JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id) ORDER BY t1_id, t1.value, t2_id, t2.value, t3_id, t3.value; +0 0 4 Join_3_Value_4 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 -0 0 4 
Join_3_Value_4 SELECT '--'; -- SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value, t3.id AS t3_id, t3.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1_id = t2_id -FULL JOIN test_table_join_3 AS t3 ON t2_id = t3_id; +FULL JOIN test_table_join_3 AS t3 ON t2_id = t3_id ORDER BY t1_id, t1.value, t2_id, t2.value, t3_id, t3.value; +0 0 4 Join_3_Value_4 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 -0 0 4 Join_3_Value_4 SELECT 'First JOIN LEFT second JOIN INNER'; First JOIN LEFT second JOIN INNER SELECT 'JOIN ON without conditions'; JOIN ON without conditions SELECT test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value, test_table_join_3.id, test_table_join_3.value FROM test_table_join_1 LEFT JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id -INNER JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id; +INNER JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id ORDER BY test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value; 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 2 Join_1_Value_2 0 0 Join_3_Value_0 @@ -604,7 +604,7 @@ SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -INNER JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +INNER JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY t1.id, t1.value, t2.id, t2.value, t3.id, t3.value; 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 2 Join_1_Value_2 0 0 Join_3_Value_0 @@ -613,7 +613,7 @@ SELECT '--'; SELECT t1.id, test_table_join_1.id, t1.value, test_table_join_1.value, t2.id, test_table_join_2.id, 
t2.value, test_table_join_2.value, t3.id, test_table_join_3.id, t3.value, test_table_join_3.value FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -INNER JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +INNER JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY t1.id, t1.value, t2.id; 0 0 Join_1_Value_0 Join_1_Value_0 0 0 Join_2_Value_0 Join_2_Value_0 0 0 Join_3_Value_0 Join_3_Value_0 1 1 Join_1_Value_1 Join_1_Value_1 1 1 Join_2_Value_1 Join_2_Value_1 1 1 Join_3_Value_1 Join_3_Value_1 2 2 Join_1_Value_2 Join_1_Value_2 0 0 0 0 Join_3_Value_0 Join_3_Value_0 @@ -621,7 +621,7 @@ SELECT '--'; -- SELECT t1.value, t2.value, t3.value FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -INNER JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +INNER JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY t1.value, t2.value, t3.value; Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 Join_1_Value_2 Join_3_Value_0 @@ -629,20 +629,20 @@ SELECT 'JOIN ON with conditions'; JOIN ON with conditions SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' -INNER JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0'; +INNER JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' ORDER BY t1.id, t1.value, t2.id, t2.value, t3.id, t3.value; 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' AND t2.value == 'Join_2_Value_0' -INNER JOIN test_table_join_3 AS t3 ON t2.id = t3.id 
AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0'; +INNER JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0' ORDER BY t1.id, t1.value, t2.id, t2.value, t3.id, t3.value; 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 SELECT 'JOIN multiple clauses'; JOIN multiple clauses SELECT t1.value, t2.value, t3.value FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id -INNER JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id; +INNER JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id ORDER BY t1.value, t2.value, t3.value; Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 Join_1_Value_2 Join_3_Value_0 @@ -650,7 +650,7 @@ SELECT 'JOIN expression aliases'; JOIN expression aliases SELECT t1_id, t1.value, t2_id, t2.value, t3_id, t3.value FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON (t1.id AS t1_id) = (t2.id AS t2_id) -INNER JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id); +INNER JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id) ORDER BY t1_id, t1.value, t2_id, t2.value, t3_id, t3.value; 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 2 Join_1_Value_2 0 0 Join_3_Value_0 @@ -658,7 +658,7 @@ SELECT '--'; -- SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value, t3.id AS t3_id, t3.value FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1_id = t2_id -INNER JOIN test_table_join_3 AS t3 ON t2_id = t3_id; +INNER JOIN test_table_join_3 AS t3 ON t2_id = t3_id ORDER BY t1_id, t1.value, t2_id, t2.value, t3_id, t3.value; 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 2 Join_1_Value_2 0 0 Join_3_Value_0 @@ -668,7 +668,7 @@ SELECT 'JOIN ON without conditions'; JOIN ON without conditions SELECT test_table_join_1.id, test_table_join_1.value, 
test_table_join_2.id, test_table_join_2.value, test_table_join_3.id, test_table_join_3.value FROM test_table_join_1 LEFT JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id -LEFT JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id; +LEFT JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id ORDER BY test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value; 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 2 Join_1_Value_2 0 0 Join_3_Value_0 @@ -676,7 +676,7 @@ SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -LEFT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +LEFT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY t1.id, t1.value, t2.id, t2.value, t3.id, t3.value; 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 2 Join_1_Value_2 0 0 Join_3_Value_0 @@ -685,7 +685,7 @@ SELECT '--'; SELECT t1.id, test_table_join_1.id, t1.value, test_table_join_1.value, t2.id, test_table_join_2.id, t2.value, test_table_join_2.value, t3.id, test_table_join_3.id, t3.value, test_table_join_3.value FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -LEFT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +LEFT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY t1.id, t1.value, t2.id; 0 0 Join_1_Value_0 Join_1_Value_0 0 0 Join_2_Value_0 Join_2_Value_0 0 0 Join_3_Value_0 Join_3_Value_0 1 1 Join_1_Value_1 Join_1_Value_1 1 1 Join_2_Value_1 Join_2_Value_1 1 1 Join_3_Value_1 Join_3_Value_1 2 2 Join_1_Value_2 Join_1_Value_2 0 0 0 0 Join_3_Value_0 Join_3_Value_0 @@ -693,7 +693,7 @@ SELECT '--'; 
-- SELECT t1.value, t2.value, t3.value FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -LEFT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +LEFT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY t1.value, t2.value, t3.value; Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 Join_1_Value_2 Join_3_Value_0 @@ -701,7 +701,7 @@ SELECT 'JOIN ON with conditions'; JOIN ON with conditions SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' -LEFT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0'; +LEFT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' ORDER BY t1.id, t1.value, t2.id, t2.value, t3.id, t3.value; 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 0 0 2 Join_1_Value_2 0 0 @@ -709,7 +709,7 @@ SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' AND t2.value == 'Join_2_Value_0' -LEFT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0'; +LEFT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0' ORDER BY t1.id, t1.value, t2.id, t2.value, t3.id, t3.value; 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 0 0 2 Join_1_Value_2 0 0 @@ -718,7 +718,7 @@ JOIN multiple clauses SELECT t1.value, t2.value, t3.value FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id -LEFT JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id; +LEFT JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id ORDER BY 
t1.value, t2.value, t3.value; Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 Join_1_Value_2 Join_3_Value_0 @@ -726,7 +726,7 @@ SELECT 'JOIN expression aliases'; JOIN expression aliases SELECT t1_id, t1.value, t2_id, t2.value, t3_id, t3.value FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON (t1.id AS t1_id) = (t2.id AS t2_id) -LEFT JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id); +LEFT JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id) ORDER BY t1_id, t1.value, t2_id, t2.value, t3_id, t3.value; 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 2 Join_1_Value_2 0 0 Join_3_Value_0 @@ -734,7 +734,7 @@ SELECT '--'; -- SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value, t3.id AS t3_id, t3.value FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1_id = t2_id -LEFT JOIN test_table_join_3 AS t3 ON t2_id = t3_id; +LEFT JOIN test_table_join_3 AS t3 ON t2_id = t3_id ORDER BY t1_id, t1.value, t2_id, t2.value, t3_id, t3.value; 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 2 Join_1_Value_2 0 0 Join_3_Value_0 @@ -744,184 +744,184 @@ SELECT 'JOIN ON without conditions'; JOIN ON without conditions SELECT test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value, test_table_join_3.id, test_table_join_3.value FROM test_table_join_1 LEFT JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id -RIGHT JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id; +RIGHT JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id ORDER BY test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value; +0 0 4 Join_3_Value_4 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 2 Join_1_Value_2 0 0 Join_3_Value_0 -0 0 4 Join_3_Value_4 SELECT 
'--'; -- SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -RIGHT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +RIGHT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY t1.id, t1.value, t2.id, t2.value, t3.id, t3.value; +0 0 4 Join_3_Value_4 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 2 Join_1_Value_2 0 0 Join_3_Value_0 -0 0 4 Join_3_Value_4 SELECT '--'; -- SELECT t1.id, test_table_join_1.id, t1.value, test_table_join_1.value, t2.id, test_table_join_2.id, t2.value, test_table_join_2.value, t3.id, test_table_join_3.id, t3.value, test_table_join_3.value FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -RIGHT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +RIGHT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY t1.id, t1.value, t2.id; +0 0 0 0 4 4 Join_3_Value_4 Join_3_Value_4 0 0 Join_1_Value_0 Join_1_Value_0 0 0 Join_2_Value_0 Join_2_Value_0 0 0 Join_3_Value_0 Join_3_Value_0 1 1 Join_1_Value_1 Join_1_Value_1 1 1 Join_2_Value_1 Join_2_Value_1 1 1 Join_3_Value_1 Join_3_Value_1 2 2 Join_1_Value_2 Join_1_Value_2 0 0 0 0 Join_3_Value_0 Join_3_Value_0 -0 0 0 0 4 4 Join_3_Value_4 Join_3_Value_4 SELECT '--'; -- SELECT t1.value, t2.value, t3.value FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -RIGHT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +RIGHT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY t1.value, t2.value, t3.value; + Join_3_Value_4 Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 Join_1_Value_2 Join_3_Value_0 - Join_3_Value_4 SELECT 
'JOIN ON with conditions'; JOIN ON with conditions SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' -RIGHT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0'; -0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 -0 0 4 Join_3_Value_4 +RIGHT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' ORDER BY t1.id, t1.value, t2.id, t2.value, t3.id, t3.value; 0 0 1 Join_3_Value_1 +0 0 4 Join_3_Value_4 +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' AND t2.value == 'Join_2_Value_0' -RIGHT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0'; -0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +RIGHT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0' ORDER BY t1.id, t1.value, t2.id, t2.value, t3.id, t3.value; 0 0 1 Join_3_Value_1 0 0 4 Join_3_Value_4 +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 SELECT 'JOIN multiple clauses'; JOIN multiple clauses SELECT t1.value, t2.value, t3.value FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id -RIGHT JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id; +RIGHT JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id ORDER BY t1.value, t2.value, t3.value; + Join_3_Value_4 Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 Join_1_Value_2 Join_3_Value_0 - Join_3_Value_4 SELECT 'JOIN expression aliases'; JOIN expression aliases SELECT t1_id, t1.value, t2_id, t2.value, t3_id, t3.value FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON (t1.id AS 
t1_id) = (t2.id AS t2_id) -RIGHT JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id); +RIGHT JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id) ORDER BY t1_id, t1.value, t2_id, t2.value, t3_id, t3.value; +0 0 4 Join_3_Value_4 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 2 Join_1_Value_2 0 0 Join_3_Value_0 -0 0 4 Join_3_Value_4 SELECT '--'; -- SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value, t3.id AS t3_id, t3.value FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1_id = t2_id -RIGHT JOIN test_table_join_3 AS t3 ON t2_id = t3_id; +RIGHT JOIN test_table_join_3 AS t3 ON t2_id = t3_id ORDER BY t1_id, t1.value, t2_id, t2.value, t3_id, t3.value; +0 0 4 Join_3_Value_4 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 2 Join_1_Value_2 0 0 Join_3_Value_0 -0 0 4 Join_3_Value_4 SELECT 'First JOIN LEFT second JOIN FULL'; First JOIN LEFT second JOIN FULL SELECT 'JOIN ON without conditions'; JOIN ON without conditions SELECT test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value, test_table_join_3.id, test_table_join_3.value FROM test_table_join_1 LEFT JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id -FULL JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id; +FULL JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id ORDER BY test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value; +0 0 4 Join_3_Value_4 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 2 Join_1_Value_2 0 0 Join_3_Value_0 -0 0 4 Join_3_Value_4 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -FULL JOIN test_table_join_3 AS t3 ON test_table_join_2.id = 
test_table_join_3.id; +FULL JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY t1.id, t1.value, t2.id, t2.value, t3.id, t3.value; +0 0 4 Join_3_Value_4 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 2 Join_1_Value_2 0 0 Join_3_Value_0 -0 0 4 Join_3_Value_4 SELECT '--'; -- SELECT t1.id, test_table_join_1.id, t1.value, test_table_join_1.value, t2.id, test_table_join_2.id, t2.value, test_table_join_2.value, t3.id, test_table_join_3.id, t3.value, test_table_join_3.value FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -FULL JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +FULL JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY t1.id, t1.value, t2.id; +0 0 0 0 4 4 Join_3_Value_4 Join_3_Value_4 0 0 Join_1_Value_0 Join_1_Value_0 0 0 Join_2_Value_0 Join_2_Value_0 0 0 Join_3_Value_0 Join_3_Value_0 1 1 Join_1_Value_1 Join_1_Value_1 1 1 Join_2_Value_1 Join_2_Value_1 1 1 Join_3_Value_1 Join_3_Value_1 2 2 Join_1_Value_2 Join_1_Value_2 0 0 0 0 Join_3_Value_0 Join_3_Value_0 -0 0 0 0 4 4 Join_3_Value_4 Join_3_Value_4 SELECT '--'; -- SELECT t1.value, t2.value, t3.value FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -FULL JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +FULL JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY t1.value, t2.value, t3.value; + Join_3_Value_4 Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 Join_1_Value_2 Join_3_Value_0 - Join_3_Value_4 SELECT 'JOIN ON with conditions'; JOIN ON with conditions SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' -FULL JOIN 
test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0'; +FULL JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' ORDER BY t1.id, t1.value, t2.id, t2.value, t3.id, t3.value; +0 0 1 Join_3_Value_1 +0 0 4 Join_3_Value_4 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 0 0 2 Join_1_Value_2 0 0 -0 0 4 Join_3_Value_4 -0 0 1 Join_3_Value_1 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' AND t2.value == 'Join_2_Value_0' -FULL JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0'; +FULL JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0' ORDER BY t1.id, t1.value, t2.id, t2.value, t3.id, t3.value; +0 0 1 Join_3_Value_1 +0 0 4 Join_3_Value_4 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 0 0 2 Join_1_Value_2 0 0 -0 0 1 Join_3_Value_1 -0 0 4 Join_3_Value_4 SELECT 'JOIN multiple clauses'; JOIN multiple clauses SELECT t1.value, t2.value, t3.value FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id -FULL JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id; +FULL JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id ORDER BY t1.value, t2.value, t3.value; + Join_3_Value_4 Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 Join_1_Value_2 Join_3_Value_0 - Join_3_Value_4 SELECT 'JOIN expression aliases'; JOIN expression aliases SELECT t1_id, t1.value, t2_id, t2.value, t3_id, t3.value FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON (t1.id AS t1_id) = (t2.id AS t2_id) -FULL JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id); +FULL JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id) ORDER BY t1_id, t1.value, t2_id, 
t2.value, t3_id, t3.value; +0 0 4 Join_3_Value_4 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 2 Join_1_Value_2 0 0 Join_3_Value_0 -0 0 4 Join_3_Value_4 SELECT '--'; -- SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value, t3.id AS t3_id, t3.value FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1_id = t2_id -FULL JOIN test_table_join_3 AS t3 ON t2_id = t3_id; +FULL JOIN test_table_join_3 AS t3 ON t2_id = t3_id ORDER BY t1_id, t1.value, t2_id, t2.value, t3_id, t3.value; +0 0 4 Join_3_Value_4 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 2 Join_1_Value_2 0 0 Join_3_Value_0 -0 0 4 Join_3_Value_4 SELECT 'First JOIN RIGHT second JOIN INNER'; First JOIN RIGHT second JOIN INNER SELECT 'JOIN ON without conditions'; JOIN ON without conditions SELECT test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value, test_table_join_3.id, test_table_join_3.value FROM test_table_join_1 RIGHT JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id -INNER JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id; +INNER JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id ORDER BY test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value; 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -INNER JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +INNER JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY t1.id, t1.value, t2.id, t2.value, t3.id, t3.value; 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 
SELECT '--'; @@ -929,48 +929,48 @@ SELECT '--'; SELECT t1.id, test_table_join_1.id, t1.value, test_table_join_1.value, t2.id, test_table_join_2.id, t2.value, test_table_join_2.value, t3.id, test_table_join_3.id, t3.value, test_table_join_3.value FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -INNER JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +INNER JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY t1.id, t1.value, t2.id; 0 0 Join_1_Value_0 Join_1_Value_0 0 0 Join_2_Value_0 Join_2_Value_0 0 0 Join_3_Value_0 Join_3_Value_0 1 1 Join_1_Value_1 Join_1_Value_1 1 1 Join_2_Value_1 Join_2_Value_1 1 1 Join_3_Value_1 Join_3_Value_1 SELECT '--'; -- SELECT t1.value, t2.value, t3.value FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -INNER JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +INNER JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY t1.value, t2.value, t3.value; Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 SELECT 'JOIN ON with conditions'; JOIN ON with conditions SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' -INNER JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0'; +INNER JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' ORDER BY t1.id, t1.value, t2.id, t2.value, t3.id, t3.value; 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' AND t2.value == 'Join_2_Value_0' -INNER JOIN test_table_join_3 AS t3 ON t2.id = 
t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0'; +INNER JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0' ORDER BY t1.id, t1.value, t2.id, t2.value, t3.id, t3.value; 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 SELECT 'JOIN multiple clauses'; JOIN multiple clauses SELECT t1.value, t2.value, t3.value FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id -INNER JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id; +INNER JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id ORDER BY t1.value, t2.value, t3.value; Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 SELECT 'JOIN expression aliases'; JOIN expression aliases SELECT t1_id, t1.value, t2_id, t2.value, t3_id, t3.value FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON (t1.id AS t1_id) = (t2.id AS t2_id) -INNER JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id); +INNER JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id) ORDER BY t1_id, t1.value, t2_id, t2.value, t3_id, t3.value; 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 SELECT '--'; -- SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value, t3.id AS t3_id, t3.value FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1_id = t2_id -INNER JOIN test_table_join_3 AS t3 ON t2_id = t3_id; +INNER JOIN test_table_join_3 AS t3 ON t2_id = t3_id ORDER BY t1_id, t1.value, t2_id, t2.value, t3_id, t3.value; 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 SELECT 'First JOIN RIGHT second JOIN LEFT'; @@ -979,246 +979,246 @@ SELECT 'JOIN ON without conditions'; JOIN ON without conditions SELECT test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value, test_table_join_3.id, test_table_join_3.value 
FROM test_table_join_1 RIGHT JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id -LEFT JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id; +LEFT JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id ORDER BY test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value; +0 3 Join_2_Value_3 0 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 -0 3 Join_2_Value_3 0 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -LEFT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +LEFT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY t1.id, t1.value, t2.id, t2.value, t3.id, t3.value; +0 3 Join_2_Value_3 0 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 -0 3 Join_2_Value_3 0 SELECT '--'; -- SELECT t1.id, test_table_join_1.id, t1.value, test_table_join_1.value, t2.id, test_table_join_2.id, t2.value, test_table_join_2.value, t3.id, test_table_join_3.id, t3.value, test_table_join_3.value FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -LEFT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +LEFT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY t1.id, t1.value, t2.id; +0 0 3 3 Join_2_Value_3 Join_2_Value_3 0 0 0 0 Join_1_Value_0 Join_1_Value_0 0 0 Join_2_Value_0 Join_2_Value_0 0 0 Join_3_Value_0 Join_3_Value_0 1 1 Join_1_Value_1 Join_1_Value_1 1 1 Join_2_Value_1 Join_2_Value_1 1 1 Join_3_Value_1 Join_3_Value_1 -0 0 3 3 Join_2_Value_3 Join_2_Value_3 0 0 SELECT '--'; -- SELECT t1.value, t2.value, t3.value FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON 
test_table_join_1.id = test_table_join_2.id -LEFT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +LEFT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY t1.value, t2.value, t3.value; + Join_2_Value_3 Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 - Join_2_Value_3 SELECT 'JOIN ON with conditions'; JOIN ON with conditions SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' -LEFT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0'; -0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 -0 3 Join_2_Value_3 0 +LEFT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' ORDER BY t1.id, t1.value, t2.id, t2.value, t3.id, t3.value; 0 1 Join_2_Value_1 0 +0 3 Join_2_Value_3 0 +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' AND t2.value == 'Join_2_Value_0' -LEFT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0'; -0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +LEFT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0' ORDER BY t1.id, t1.value, t2.id, t2.value, t3.id, t3.value; 0 1 Join_2_Value_1 0 0 3 Join_2_Value_3 0 +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 SELECT 'JOIN multiple clauses'; JOIN multiple clauses SELECT t1.value, t2.value, t3.value FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id -LEFT JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id; +LEFT JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id ORDER BY 
t1.value, t2.value, t3.value; + Join_2_Value_3 Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 - Join_2_Value_3 SELECT 'JOIN expression aliases'; JOIN expression aliases SELECT t1_id, t1.value, t2_id, t2.value, t3_id, t3.value FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON (t1.id AS t1_id) = (t2.id AS t2_id) -LEFT JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id); +LEFT JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id) ORDER BY t1_id, t1.value, t2_id, t2.value, t3_id, t3.value; +0 3 Join_2_Value_3 0 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 -0 3 Join_2_Value_3 0 SELECT '--'; -- SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value, t3.id AS t3_id, t3.value FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1_id = t2_id -LEFT JOIN test_table_join_3 AS t3 ON t2_id = t3_id; +LEFT JOIN test_table_join_3 AS t3 ON t2_id = t3_id ORDER BY t1_id, t1.value, t2_id, t2.value, t3_id, t3.value; +0 3 Join_2_Value_3 0 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 -0 3 Join_2_Value_3 0 SELECT 'First JOIN RIGHT second JOIN RIGHT'; First JOIN RIGHT second JOIN RIGHT SELECT 'JOIN ON without conditions'; JOIN ON without conditions SELECT test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value, test_table_join_3.id, test_table_join_3.value FROM test_table_join_1 RIGHT JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id -RIGHT JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id; +RIGHT JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id ORDER BY test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value; +0 0 4 Join_3_Value_4 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 -0 0 4 Join_3_Value_4 SELECT 
'--'; -- SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -RIGHT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +RIGHT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY t1.id, t1.value, t2.id, t2.value, t3.id, t3.value; +0 0 4 Join_3_Value_4 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 -0 0 4 Join_3_Value_4 SELECT '--'; -- SELECT t1.id, test_table_join_1.id, t1.value, test_table_join_1.value, t2.id, test_table_join_2.id, t2.value, test_table_join_2.value, t3.id, test_table_join_3.id, t3.value, test_table_join_3.value FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -RIGHT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +RIGHT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY t1.id, t1.value, t2.id; +0 0 0 0 4 4 Join_3_Value_4 Join_3_Value_4 0 0 Join_1_Value_0 Join_1_Value_0 0 0 Join_2_Value_0 Join_2_Value_0 0 0 Join_3_Value_0 Join_3_Value_0 1 1 Join_1_Value_1 Join_1_Value_1 1 1 Join_2_Value_1 Join_2_Value_1 1 1 Join_3_Value_1 Join_3_Value_1 -0 0 0 0 4 4 Join_3_Value_4 Join_3_Value_4 SELECT '--'; -- SELECT t1.value, t2.value, t3.value FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -RIGHT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +RIGHT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY t1.value, t2.value, t3.value; + Join_3_Value_4 Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 - Join_3_Value_4 SELECT 'JOIN ON with conditions'; JOIN ON with conditions SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS 
t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' -RIGHT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0'; -0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 -0 0 4 Join_3_Value_4 +RIGHT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' ORDER BY t1.id, t1.value, t2.id, t2.value, t3.id, t3.value; 0 0 1 Join_3_Value_1 +0 0 4 Join_3_Value_4 +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' AND t2.value == 'Join_2_Value_0' -RIGHT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0'; -0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +RIGHT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0' ORDER BY t1.id, t1.value, t2.id, t2.value, t3.id, t3.value; 0 0 1 Join_3_Value_1 0 0 4 Join_3_Value_4 +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 SELECT 'JOIN multiple clauses'; JOIN multiple clauses SELECT t1.value, t2.value, t3.value FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id -RIGHT JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id; +RIGHT JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id ORDER BY t1.value, t2.value, t3.value; + Join_3_Value_4 Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 - Join_3_Value_4 SELECT 'JOIN expression aliases'; JOIN expression aliases SELECT t1_id, t1.value, t2_id, t2.value, t3_id, t3.value FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON (t1.id AS t1_id) = (t2.id AS t2_id) -RIGHT JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id); +RIGHT JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id) ORDER 
BY t1_id, t1.value, t2_id, t2.value, t3_id, t3.value; +0 0 4 Join_3_Value_4 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 -0 0 4 Join_3_Value_4 SELECT '--'; -- SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value, t3.id AS t3_id, t3.value FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1_id = t2_id -RIGHT JOIN test_table_join_3 AS t3 ON t2_id = t3_id; +RIGHT JOIN test_table_join_3 AS t3 ON t2_id = t3_id ORDER BY t1_id, t1.value, t2_id, t2.value, t3_id, t3.value; +0 0 4 Join_3_Value_4 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 -0 0 4 Join_3_Value_4 SELECT 'First JOIN RIGHT second JOIN FULL'; First JOIN RIGHT second JOIN FULL SELECT 'JOIN ON without conditions'; JOIN ON without conditions SELECT test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value, test_table_join_3.id, test_table_join_3.value FROM test_table_join_1 RIGHT JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id -FULL JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id; +FULL JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id ORDER BY test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value; +0 0 4 Join_3_Value_4 +0 3 Join_2_Value_3 0 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 -0 3 Join_2_Value_3 0 -0 0 4 Join_3_Value_4 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -FULL JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +FULL JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY t1.id, t1.value, t2.id, t2.value, t3.id, t3.value; +0 0 4 Join_3_Value_4 +0 3 Join_2_Value_3 0 0 Join_1_Value_0 0 
Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 -0 3 Join_2_Value_3 0 -0 0 4 Join_3_Value_4 SELECT '--'; -- SELECT t1.id, test_table_join_1.id, t1.value, test_table_join_1.value, t2.id, test_table_join_2.id, t2.value, test_table_join_2.value, t3.id, test_table_join_3.id, t3.value, test_table_join_3.value FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -FULL JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +FULL JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY t1.id, t1.value, t2.id; +0 0 0 0 4 4 Join_3_Value_4 Join_3_Value_4 +0 0 3 3 Join_2_Value_3 Join_2_Value_3 0 0 0 0 Join_1_Value_0 Join_1_Value_0 0 0 Join_2_Value_0 Join_2_Value_0 0 0 Join_3_Value_0 Join_3_Value_0 1 1 Join_1_Value_1 Join_1_Value_1 1 1 Join_2_Value_1 Join_2_Value_1 1 1 Join_3_Value_1 Join_3_Value_1 -0 0 3 3 Join_2_Value_3 Join_2_Value_3 0 0 -0 0 0 0 4 4 Join_3_Value_4 Join_3_Value_4 SELECT '--'; -- SELECT t1.value, t2.value, t3.value FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -FULL JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +FULL JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY t1.value, t2.value, t3.value; + Join_3_Value_4 + Join_2_Value_3 Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 - Join_2_Value_3 - Join_3_Value_4 SELECT 'JOIN ON with conditions'; JOIN ON with conditions SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' -FULL JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0'; -0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 -0 3 Join_2_Value_3 0 -0 1 Join_2_Value_1 0 -0 0 4 Join_3_Value_4 +FULL JOIN 
test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' ORDER BY t1.id, t1.value, t2.id, t2.value, t3.id, t3.value; 0 0 1 Join_3_Value_1 +0 0 4 Join_3_Value_4 +0 1 Join_2_Value_1 0 +0 3 Join_2_Value_3 0 +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' AND t2.value == 'Join_2_Value_0' -FULL JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0'; -0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 -0 1 Join_2_Value_1 0 -0 3 Join_2_Value_3 0 +FULL JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0' ORDER BY t1.id, t1.value, t2.id, t2.value, t3.id, t3.value; 0 0 1 Join_3_Value_1 0 0 4 Join_3_Value_4 +0 1 Join_2_Value_1 0 +0 3 Join_2_Value_3 0 +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 SELECT 'JOIN multiple clauses'; JOIN multiple clauses SELECT t1.value, t2.value, t3.value FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id -FULL JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id; +FULL JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id ORDER BY t1.value, t2.value, t3.value; + Join_3_Value_4 + Join_2_Value_3 Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 - Join_2_Value_3 - Join_3_Value_4 SELECT 'JOIN expression aliases'; JOIN expression aliases SELECT t1_id, t1.value, t2_id, t2.value, t3_id, t3.value FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON (t1.id AS t1_id) = (t2.id AS t2_id) -FULL JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id); +FULL JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id) ORDER BY t1_id, t1.value, t2_id, t2.value, t3_id, t3.value; +0 0 4 Join_3_Value_4 +0 3 Join_2_Value_3 0 0 
Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 -0 3 Join_2_Value_3 0 -0 0 4 Join_3_Value_4 SELECT '--'; -- SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value, t3.id AS t3_id, t3.value FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1_id = t2_id -FULL JOIN test_table_join_3 AS t3 ON t2_id = t3_id; +FULL JOIN test_table_join_3 AS t3 ON t2_id = t3_id ORDER BY t1_id, t1.value, t2_id, t2.value, t3_id, t3.value; +0 0 4 Join_3_Value_4 +0 3 Join_2_Value_3 0 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 -0 3 Join_2_Value_3 0 -0 0 4 Join_3_Value_4 SELECT 'First JOIN FULL second JOIN INNER'; First JOIN FULL second JOIN INNER SELECT 'JOIN ON without conditions'; JOIN ON without conditions SELECT test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value, test_table_join_3.id, test_table_join_3.value FROM test_table_join_1 FULL JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id -INNER JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id; +INNER JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id ORDER BY test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value; 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 2 Join_1_Value_2 0 0 Join_3_Value_0 @@ -1226,7 +1226,7 @@ SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -INNER JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +INNER JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY t1.id, t1.value, t2.id, t2.value, t3.id, t3.value; 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 2 
Join_1_Value_2 0 0 Join_3_Value_0 @@ -1235,7 +1235,7 @@ SELECT '--'; SELECT t1.id, test_table_join_1.id, t1.value, test_table_join_1.value, t2.id, test_table_join_2.id, t2.value, test_table_join_2.value, t3.id, test_table_join_3.id, t3.value, test_table_join_3.value FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -INNER JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +INNER JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY t1.id, t1.value, t2.id; 0 0 Join_1_Value_0 Join_1_Value_0 0 0 Join_2_Value_0 Join_2_Value_0 0 0 Join_3_Value_0 Join_3_Value_0 1 1 Join_1_Value_1 Join_1_Value_1 1 1 Join_2_Value_1 Join_2_Value_1 1 1 Join_3_Value_1 Join_3_Value_1 2 2 Join_1_Value_2 Join_1_Value_2 0 0 0 0 Join_3_Value_0 Join_3_Value_0 @@ -1243,7 +1243,7 @@ SELECT '--'; -- SELECT t1.value, t2.value, t3.value FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -INNER JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +INNER JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY t1.value, t2.value, t3.value; Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 Join_1_Value_2 Join_3_Value_0 @@ -1251,20 +1251,20 @@ SELECT 'JOIN ON with conditions'; JOIN ON with conditions SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' -INNER JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0'; +INNER JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' ORDER BY t1.id, t1.value, t2.id, t2.value, t3.id, t3.value; 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS 
t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' AND t2.value == 'Join_2_Value_0' -INNER JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0'; +INNER JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0' ORDER BY t1.id, t1.value, t2.id, t2.value, t3.id, t3.value; 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 SELECT 'JOIN multiple clauses'; JOIN multiple clauses SELECT t1.value, t2.value, t3.value FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id -INNER JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id; +INNER JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id ORDER BY t1.value, t2.value, t3.value; Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 Join_1_Value_2 Join_3_Value_0 @@ -1272,7 +1272,7 @@ SELECT 'JOIN expression aliases'; JOIN expression aliases SELECT t1_id, t1.value, t2_id, t2.value, t3_id, t3.value FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON (t1.id AS t1_id) = (t2.id AS t2_id) -INNER JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id); +INNER JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id) ORDER BY t1_id, t1.value, t2_id, t2.value, t3_id, t3.value; 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 2 Join_1_Value_2 0 0 Join_3_Value_0 @@ -1280,7 +1280,7 @@ SELECT '--'; -- SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value, t3.id AS t3_id, t3.value FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1_id = t2_id -INNER JOIN test_table_join_3 AS t3 ON t2_id = t3_id; +INNER JOIN test_table_join_3 AS t3 ON t2_id = t3_id ORDER BY t1_id, t1.value, t2_id, t2.value, t3_id, t3.value; 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 2 
Join_1_Value_2 0 0 Join_3_Value_0 @@ -1290,265 +1290,265 @@ SELECT 'JOIN ON without conditions'; JOIN ON without conditions SELECT test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value, test_table_join_3.id, test_table_join_3.value FROM test_table_join_1 FULL JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id -LEFT JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id; +LEFT JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id ORDER BY test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value; +0 3 Join_2_Value_3 0 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 2 Join_1_Value_2 0 0 Join_3_Value_0 -0 3 Join_2_Value_3 0 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -LEFT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +LEFT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY t1.id, t1.value, t2.id, t2.value, t3.id, t3.value; +0 3 Join_2_Value_3 0 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 2 Join_1_Value_2 0 0 Join_3_Value_0 -0 3 Join_2_Value_3 0 SELECT '--'; -- SELECT t1.id, test_table_join_1.id, t1.value, test_table_join_1.value, t2.id, test_table_join_2.id, t2.value, test_table_join_2.value, t3.id, test_table_join_3.id, t3.value, test_table_join_3.value FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -LEFT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +LEFT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY t1.id, t1.value, t2.id; +0 0 3 3 Join_2_Value_3 Join_2_Value_3 0 0 0 0 Join_1_Value_0 Join_1_Value_0 0 0 
Join_2_Value_0 Join_2_Value_0 0 0 Join_3_Value_0 Join_3_Value_0 1 1 Join_1_Value_1 Join_1_Value_1 1 1 Join_2_Value_1 Join_2_Value_1 1 1 Join_3_Value_1 Join_3_Value_1 2 2 Join_1_Value_2 Join_1_Value_2 0 0 0 0 Join_3_Value_0 Join_3_Value_0 -0 0 3 3 Join_2_Value_3 Join_2_Value_3 0 0 SELECT '--'; -- SELECT t1.value, t2.value, t3.value FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -LEFT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +LEFT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY t1.value, t2.value, t3.value; + Join_2_Value_3 Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 Join_1_Value_2 Join_3_Value_0 - Join_2_Value_3 SELECT 'JOIN ON with conditions'; JOIN ON with conditions SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' -LEFT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0'; +LEFT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' ORDER BY t1.id, t1.value, t2.id, t2.value, t3.id, t3.value; +0 1 Join_2_Value_1 0 +0 3 Join_2_Value_3 0 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 0 0 2 Join_1_Value_2 0 0 -0 3 Join_2_Value_3 0 -0 1 Join_2_Value_1 0 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' AND t2.value == 'Join_2_Value_0' -LEFT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0'; +LEFT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0' ORDER BY t1.id, t1.value, t2.id, t2.value, t3.id, t3.value; +0 1 Join_2_Value_1 0 +0 3 Join_2_Value_3 
0 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 0 0 2 Join_1_Value_2 0 0 -0 1 Join_2_Value_1 0 -0 3 Join_2_Value_3 0 SELECT 'JOIN multiple clauses'; JOIN multiple clauses SELECT t1.value, t2.value, t3.value FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id -LEFT JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id; +LEFT JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id ORDER BY t1.value, t2.value, t3.value; + Join_2_Value_3 Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 Join_1_Value_2 Join_3_Value_0 - Join_2_Value_3 SELECT 'JOIN expression aliases'; JOIN expression aliases SELECT t1_id, t1.value, t2_id, t2.value, t3_id, t3.value FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON (t1.id AS t1_id) = (t2.id AS t2_id) -LEFT JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id); +LEFT JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id) ORDER BY t1_id, t1.value, t2_id, t2.value, t3_id, t3.value; +0 3 Join_2_Value_3 0 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 2 Join_1_Value_2 0 0 Join_3_Value_0 -0 3 Join_2_Value_3 0 SELECT '--'; -- SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value, t3.id AS t3_id, t3.value FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1_id = t2_id -LEFT JOIN test_table_join_3 AS t3 ON t2_id = t3_id; +LEFT JOIN test_table_join_3 AS t3 ON t2_id = t3_id ORDER BY t1_id, t1.value, t2_id, t2.value, t3_id, t3.value; +0 3 Join_2_Value_3 0 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 2 Join_1_Value_2 0 0 Join_3_Value_0 -0 3 Join_2_Value_3 0 SELECT 'First JOIN FULL second JOIN RIGHT'; First JOIN FULL second JOIN RIGHT SELECT 'JOIN ON without conditions'; JOIN ON without conditions SELECT test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, 
test_table_join_2.value, test_table_join_3.id, test_table_join_3.value FROM test_table_join_1 FULL JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id -RIGHT JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id; +RIGHT JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id ORDER BY test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value; +0 0 4 Join_3_Value_4 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 2 Join_1_Value_2 0 0 Join_3_Value_0 -0 0 4 Join_3_Value_4 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -RIGHT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +RIGHT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY t1.id, t1.value, t2.id, t2.value, t3.id, t3.value; +0 0 4 Join_3_Value_4 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 2 Join_1_Value_2 0 0 Join_3_Value_0 -0 0 4 Join_3_Value_4 SELECT '--'; -- SELECT t1.id, test_table_join_1.id, t1.value, test_table_join_1.value, t2.id, test_table_join_2.id, t2.value, test_table_join_2.value, t3.id, test_table_join_3.id, t3.value, test_table_join_3.value FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -RIGHT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +RIGHT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY t1.id, t1.value, t2.id; +0 0 0 0 4 4 Join_3_Value_4 Join_3_Value_4 0 0 Join_1_Value_0 Join_1_Value_0 0 0 Join_2_Value_0 Join_2_Value_0 0 0 Join_3_Value_0 Join_3_Value_0 1 1 Join_1_Value_1 Join_1_Value_1 1 1 Join_2_Value_1 Join_2_Value_1 1 1 Join_3_Value_1 Join_3_Value_1 2 2 Join_1_Value_2 
Join_1_Value_2 0 0 0 0 Join_3_Value_0 Join_3_Value_0 -0 0 0 0 4 4 Join_3_Value_4 Join_3_Value_4 SELECT '--'; -- SELECT t1.value, t2.value, t3.value FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -RIGHT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +RIGHT JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY t1.value, t2.value, t3.value; + Join_3_Value_4 Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 Join_1_Value_2 Join_3_Value_0 - Join_3_Value_4 SELECT 'JOIN ON with conditions'; JOIN ON with conditions SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' -RIGHT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0'; -0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 -0 0 4 Join_3_Value_4 +RIGHT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' ORDER BY t1.id, t1.value, t2.id, t2.value, t3.id, t3.value; 0 0 1 Join_3_Value_1 +0 0 4 Join_3_Value_4 +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' AND t2.value == 'Join_2_Value_0' -RIGHT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0'; -0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 +RIGHT JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0' ORDER BY t1.id, t1.value, t2.id, t2.value, t3.id, t3.value; 0 0 1 Join_3_Value_1 0 0 4 Join_3_Value_4 +0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 SELECT 'JOIN multiple clauses'; JOIN multiple clauses SELECT t1.value, t2.value, 
t3.value FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id -RIGHT JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id; +RIGHT JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id ORDER BY t1.value, t2.value, t3.value; + Join_3_Value_4 Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 Join_1_Value_2 Join_3_Value_0 - Join_3_Value_4 SELECT 'JOIN expression aliases'; JOIN expression aliases SELECT t1_id, t1.value, t2_id, t2.value, t3_id, t3.value FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON (t1.id AS t1_id) = (t2.id AS t2_id) -RIGHT JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id); +RIGHT JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id) ORDER BY t1_id, t1.value, t2_id, t2.value, t3_id, t3.value; +0 0 4 Join_3_Value_4 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 2 Join_1_Value_2 0 0 Join_3_Value_0 -0 0 4 Join_3_Value_4 SELECT '--'; -- SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value, t3.id AS t3_id, t3.value FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1_id = t2_id -RIGHT JOIN test_table_join_3 AS t3 ON t2_id = t3_id; +RIGHT JOIN test_table_join_3 AS t3 ON t2_id = t3_id ORDER BY t1_id, t1.value, t2_id, t2.value, t3_id, t3.value; +0 0 4 Join_3_Value_4 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 2 Join_1_Value_2 0 0 Join_3_Value_0 -0 0 4 Join_3_Value_4 SELECT 'First JOIN FULL second JOIN FULL'; First JOIN FULL second JOIN FULL SELECT 'JOIN ON without conditions'; JOIN ON without conditions SELECT test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value, test_table_join_3.id, test_table_join_3.value FROM test_table_join_1 FULL JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id -FULL JOIN test_table_join_3 ON test_table_join_2.id = 
test_table_join_3.id; +FULL JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id ORDER BY test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value; +0 0 4 Join_3_Value_4 +0 3 Join_2_Value_3 0 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 2 Join_1_Value_2 0 0 Join_3_Value_0 -0 3 Join_2_Value_3 0 -0 0 4 Join_3_Value_4 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -FULL JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +FULL JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY t1.id, t1.value, t2.id, t2.value, t3.id, t3.value; +0 0 4 Join_3_Value_4 +0 3 Join_2_Value_3 0 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 2 Join_1_Value_2 0 0 Join_3_Value_0 -0 3 Join_2_Value_3 0 -0 0 4 Join_3_Value_4 SELECT '--'; -- SELECT t1.id, test_table_join_1.id, t1.value, test_table_join_1.value, t2.id, test_table_join_2.id, t2.value, test_table_join_2.value, t3.id, test_table_join_3.id, t3.value, test_table_join_3.value FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -FULL JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +FULL JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY t1.id, t1.value, t2.id; +0 0 0 0 4 4 Join_3_Value_4 Join_3_Value_4 +0 0 3 3 Join_2_Value_3 Join_2_Value_3 0 0 0 0 Join_1_Value_0 Join_1_Value_0 0 0 Join_2_Value_0 Join_2_Value_0 0 0 Join_3_Value_0 Join_3_Value_0 1 1 Join_1_Value_1 Join_1_Value_1 1 1 Join_2_Value_1 Join_2_Value_1 1 1 Join_3_Value_1 Join_3_Value_1 2 2 Join_1_Value_2 Join_1_Value_2 0 0 0 0 Join_3_Value_0 Join_3_Value_0 -0 0 3 3 Join_2_Value_3 Join_2_Value_3 0 0 -0 0 0 0 4 
4 Join_3_Value_4 Join_3_Value_4 SELECT '--'; -- SELECT t1.value, t2.value, t3.value FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -FULL JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +FULL JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY t1.value, t2.value, t3.value; + Join_3_Value_4 + Join_2_Value_3 Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 Join_1_Value_2 Join_3_Value_0 - Join_2_Value_3 - Join_3_Value_4 SELECT 'JOIN ON with conditions'; JOIN ON with conditions SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' -FULL JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0'; +FULL JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' ORDER BY t1.id, t1.value, t2.id, t2.value, t3.id, t3.value; +0 0 1 Join_3_Value_1 +0 0 4 Join_3_Value_4 +0 1 Join_2_Value_1 0 +0 3 Join_2_Value_3 0 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 0 0 2 Join_1_Value_2 0 0 -0 3 Join_2_Value_3 0 -0 1 Join_2_Value_1 0 -0 0 4 Join_3_Value_4 -0 0 1 Join_3_Value_1 SELECT '--'; -- SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' AND t2.value == 'Join_2_Value_0' -FULL JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0'; +FULL JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0' ORDER BY t1.id, t1.value, t2.id, t2.value, t3.id, t3.value; +0 0 1 Join_3_Value_1 +0 0 4 Join_3_Value_4 +0 1 Join_2_Value_1 0 +0 3 Join_2_Value_3 0 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 0 0 2 
Join_1_Value_2 0 0 -0 1 Join_2_Value_1 0 -0 3 Join_2_Value_3 0 -0 0 1 Join_3_Value_1 -0 0 4 Join_3_Value_4 SELECT 'JOIN multiple clauses'; JOIN multiple clauses SELECT t1.value, t2.value, t3.value FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id -FULL JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id; +FULL JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id ORDER BY t1.value, t2.value, t3.value; + Join_3_Value_4 + Join_2_Value_3 Join_1_Value_0 Join_2_Value_0 Join_3_Value_0 Join_1_Value_1 Join_2_Value_1 Join_3_Value_1 Join_1_Value_2 Join_3_Value_0 - Join_2_Value_3 - Join_3_Value_4 SELECT 'JOIN expression aliases'; JOIN expression aliases SELECT t1_id, t1.value, t2_id, t2.value, t3_id, t3.value FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON (t1.id AS t1_id) = (t2.id AS t2_id) -FULL JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id); +FULL JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id) ORDER BY t1_id, t1.value, t2_id, t2.value, t3_id, t3.value; +0 0 4 Join_3_Value_4 +0 3 Join_2_Value_3 0 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 2 Join_1_Value_2 0 0 Join_3_Value_0 -0 3 Join_2_Value_3 0 -0 0 4 Join_3_Value_4 SELECT '--'; -- SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value, t3.id AS t3_id, t3.value FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1_id = t2_id -FULL JOIN test_table_join_3 AS t3 ON t2_id = t3_id; +FULL JOIN test_table_join_3 AS t3 ON t2_id = t3_id ORDER BY t1_id, t1.value, t2_id, t2.value, t3_id, t3.value; +0 0 4 Join_3_Value_4 +0 3 Join_2_Value_3 0 0 Join_1_Value_0 0 Join_2_Value_0 0 Join_3_Value_0 1 Join_1_Value_1 1 Join_2_Value_1 1 Join_3_Value_1 2 Join_1_Value_2 0 0 Join_3_Value_0 -0 3 Join_2_Value_3 0 -0 0 4 Join_3_Value_4 diff --git a/tests/queries/0_stateless/02372_analyzer_join.sql.j2 b/tests/queries/0_stateless/02372_analyzer_join.sql.j2 index 
f6032a96b33d..481c300b9993 100644 --- a/tests/queries/0_stateless/02372_analyzer_join.sql.j2 +++ b/tests/queries/0_stateless/02372_analyzer_join.sql.j2 @@ -45,22 +45,22 @@ SELECT 'JOIN {{ join_type }}'; SELECT 'JOIN ON without conditions'; SELECT test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value -FROM test_table_join_1 {{ join_type }} JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id; +FROM test_table_join_1 {{ join_type }} JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id order by test_table_join_1.id, test_table_join_1.value; SELECT '--'; SELECT t1.id, t1.value, t2.id, t2.value -FROM test_table_join_1 AS t1 {{ join_type }} JOIN test_table_join_2 AS t2 ON t1.id = t2.id; +FROM test_table_join_1 AS t1 {{ join_type }} JOIN test_table_join_2 AS t2 ON t1.id = t2.id order by t1.id, t1.value; SELECT '--'; SELECT t1.id, test_table_join_1.id, t1.value, test_table_join_1.value, t2.id, test_table_join_2.id, t2.value, test_table_join_2.value -FROM test_table_join_1 AS t1 {{ join_type }} JOIN test_table_join_2 AS t2 ON t1.id = t2.id; +FROM test_table_join_1 AS t1 {{ join_type }} JOIN test_table_join_2 AS t2 ON t1.id = t2.id ORDER BY t1.id, t1.value; SELECT '--'; SELECT t1.value, t2.value -FROM test_table_join_1 AS t1 {{ join_type }} JOIN test_table_join_2 AS t2 ON t1.id = t2.id; +FROM test_table_join_1 AS t1 {{ join_type }} JOIN test_table_join_2 AS t2 ON t1.id = t2.id ORDER BY t1.value; SELECT id FROM test_table_join_1 {{ join_type }} JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id; -- { serverError 207 } @@ -69,35 +69,35 @@ SELECT value FROM test_table_join_1 {{ join_type }} JOIN test_table_join_2 ON te SELECT 'JOIN ON with conditions'; SELECT t1.id, t1.value, t2.id, t2.value -FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value = 'Join_1_Value_0'; +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id 
AND t1.value = 'Join_1_Value_0' ORDER BY t1.id, t1.value, t2.id, t2.value; SELECT '--'; SELECT t1.id, t1.value, t2.id, t2.value -FROM test_table_join_1 AS t1 {{ join_type }} JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t2.value = 'Join_2_Value_0'; +FROM test_table_join_1 AS t1 {{ join_type }} JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t2.value = 'Join_2_Value_0' ORDER BY t1.id, t1.value, t2.id, t2.value; SELECT '--'; SELECT t1.id, t1.value, t2.id, t2.value -FROM test_table_join_1 AS t1 {{ join_type }} JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value = 'Join_1_Value_0' AND t2.value = 'Join_2_Value_0'; +FROM test_table_join_1 AS t1 {{ join_type }} JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value = 'Join_1_Value_0' AND t2.value = 'Join_2_Value_0' ORDER BY t1.id, t1.value, t2.id, t2.value; SELECT '--'; SELECT t1.id, t1.value, t2.id, t2.value -FROM test_table_join_1 AS t1 {{ join_type }} JOIN test_table_join_2 AS t2 ON toString(t1.id) = toString(t2.id) AND t1.value = 'Join_1_Value_0' AND t2.value = 'Join_2_Value_0'; +FROM test_table_join_1 AS t1 {{ join_type }} JOIN test_table_join_2 AS t2 ON toString(t1.id) = toString(t2.id) AND t1.value = 'Join_1_Value_0' AND t2.value = 'Join_2_Value_0' ORDER BY t1.id, t1.value, t2.id, t2.value; SELECT 'JOIN multiple clauses'; SELECT t1.id, t1.value, t2.id, t2.value -FROM test_table_join_1 AS t1 {{ join_type }} JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id; +FROM test_table_join_1 AS t1 {{ join_type }} JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id order by t1.id, t1.value; SELECT 'JOIN expression aliases'; -SELECT t1_id, t1.value, t2_id, t2.value FROM test_table_join_1 AS t1 {{ join_type }} JOIN test_table_join_2 AS t2 ON (t1.id AS t1_id) = (t2.id AS t2_id); +SELECT t1_id, t1.value, t2_id, t2.value FROM test_table_join_1 AS t1 {{ join_type }} JOIN test_table_join_2 AS t2 ON (t1.id AS t1_id) = (t2.id AS t2_id) order by t1_id, t1.value, t2_id; SELECT '--'; -SELECT t1.id 
AS t1_id, t1.value, t2.id AS t2_id, t2.value FROM test_table_join_1 AS t1 {{ join_type }} JOIN test_table_join_2 AS t2 ON t1_id = t2_id; +SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value FROM test_table_join_1 AS t1 {{ join_type }} JOIN test_table_join_2 AS t2 ON t1_id = t2_id order by t1_id, t1.value, t2_id; {% endfor %} @@ -110,56 +110,56 @@ SELECT 'JOIN ON without conditions'; SELECT test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value, test_table_join_3.id, test_table_join_3.value FROM test_table_join_1 {{ first_join_type }} JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id -{{ second_join_type }} JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id; +{{ second_join_type }} JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id ORDER BY test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value; SELECT '--'; SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 {{ first_join_type }} JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -{{ second_join_type }} JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +{{ second_join_type }} JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY t1.id, t1.value, t2.id, t2.value, t3.id, t3.value; SELECT '--'; SELECT t1.id, test_table_join_1.id, t1.value, test_table_join_1.value, t2.id, test_table_join_2.id, t2.value, test_table_join_2.value, t3.id, test_table_join_3.id, t3.value, test_table_join_3.value FROM test_table_join_1 AS t1 {{ first_join_type }} JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -{{ second_join_type }} JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +{{ second_join_type }} JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY t1.id, t1.value, t2.id; SELECT '--'; SELECT 
t1.value, t2.value, t3.value FROM test_table_join_1 AS t1 {{ first_join_type }} JOIN test_table_join_2 AS t2 ON test_table_join_1.id = test_table_join_2.id -{{ second_join_type }} JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id; +{{ second_join_type }} JOIN test_table_join_3 AS t3 ON test_table_join_2.id = test_table_join_3.id ORDER BY t1.value, t2.value, t3.value; SELECT 'JOIN ON with conditions'; SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 {{ first_join_type }} JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' -{{ second_join_type }} JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0'; +{{ second_join_type }} JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' ORDER BY t1.id, t1.value, t2.id, t2.value, t3.id, t3.value; SELECT '--'; SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value FROM test_table_join_1 AS t1 {{ first_join_type }} JOIN test_table_join_2 AS t2 ON t1.id = t2.id AND t1.value == 'Join_1_Value_0' AND t2.value == 'Join_2_Value_0' -{{ second_join_type }} JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0'; +{{ second_join_type }} JOIN test_table_join_3 AS t3 ON t2.id = t3.id AND t2.value == 'Join_2_Value_0' AND t3.value == 'Join_3_Value_0' ORDER BY t1.id, t1.value, t2.id, t2.value, t3.id, t3.value; SELECT 'JOIN multiple clauses'; SELECT t1.value, t2.value, t3.value FROM test_table_join_1 AS t1 {{ first_join_type }} JOIN test_table_join_2 AS t2 ON t1.id = t2.id OR t1.id = t2.id -{{ second_join_type }} JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id; +{{ second_join_type }} JOIN test_table_join_3 AS t3 ON t2.id = t3.id OR t3.id = t2.id ORDER BY t1.value, t2.value, t3.value; SELECT 'JOIN expression aliases'; SELECT t1_id, t1.value, t2_id, t2.value, t3_id, t3.value FROM test_table_join_1 AS t1 {{ first_join_type }} JOIN 
test_table_join_2 AS t2 ON (t1.id AS t1_id) = (t2.id AS t2_id) -{{ second_join_type }} JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id); +{{ second_join_type }} JOIN test_table_join_3 AS t3 ON t2_id = (t3.id AS t3_id) ORDER BY t1_id, t1.value, t2_id, t2.value, t3_id, t3.value; SELECT '--'; SELECT t1.id AS t1_id, t1.value, t2.id AS t2_id, t2.value, t3.id AS t3_id, t3.value FROM test_table_join_1 AS t1 {{ first_join_type }} JOIN test_table_join_2 AS t2 ON t1_id = t2_id -{{ second_join_type }} JOIN test_table_join_3 AS t3 ON t2_id = t3_id; +{{ second_join_type }} JOIN test_table_join_3 AS t3 ON t2_id = t3_id ORDER BY t1_id, t1.value, t2_id, t2.value, t3_id, t3.value; {% endfor %} {% endfor %} diff --git a/tests/queries/0_stateless/02373_analyzer_join_use_nulls.reference b/tests/queries/0_stateless/02373_analyzer_join_use_nulls.reference index 3722c23e4a05..52010b718f4a 100644 --- a/tests/queries/0_stateless/02373_analyzer_join_use_nulls.reference +++ b/tests/queries/0_stateless/02373_analyzer_join_use_nulls.reference @@ -1,27 +1,27 @@ -- { echoOn } SELECT t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value) -FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id; +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id ORDER BY t1.id; 0 UInt64 Join_1_Value_0 String 0 UInt64 Join_2_Value_0 String 1 UInt64 Join_1_Value_1 String 1 UInt64 Join_2_Value_1 String SELECT '--'; -- SELECT t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value) -FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id; +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id ORDER BY t1.id; 0 UInt64 Join_1_Value_0 String 0 Nullable(UInt64) Join_2_Value_0 Nullable(String) 1 UInt64 
Join_1_Value_1 String 1 Nullable(UInt64) Join_2_Value_1 Nullable(String) 2 UInt64 Join_1_Value_2 String \N Nullable(UInt64) \N Nullable(String) SELECT '--'; -- SELECT t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value) -FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id; +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id ORDER BY t1.id; 0 Nullable(UInt64) Join_1_Value_0 Nullable(String) 0 UInt64 Join_2_Value_0 String 1 Nullable(UInt64) Join_1_Value_1 Nullable(String) 1 UInt64 Join_2_Value_1 String \N Nullable(UInt64) \N Nullable(String) 3 UInt64 Join_2_Value_3 String SELECT '--'; -- SELECT t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value) -FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id; +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id ORDER BY t1.id; 0 Nullable(UInt64) Join_1_Value_0 Nullable(String) 0 Nullable(UInt64) Join_2_Value_0 Nullable(String) 1 Nullable(UInt64) Join_1_Value_1 Nullable(String) 1 Nullable(UInt64) Join_2_Value_1 Nullable(String) 2 Nullable(UInt64) Join_1_Value_2 Nullable(String) \N Nullable(UInt64) \N Nullable(String) @@ -30,14 +30,14 @@ SELECT '--'; -- SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value) -FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id); +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) ORDER BY t1.id; 0 UInt64 0 UInt64 Join_1_Value_0 String 0 UInt64 Join_2_Value_0 String 1 UInt64 1 UInt64 Join_1_Value_1 String 1 UInt64 Join_2_Value_1 String SELECT '--'; -- SELECT id AS using_id, 
toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value) -FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id); +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) ORDER BY t1.id; 0 UInt64 0 UInt64 Join_1_Value_0 String 0 Nullable(UInt64) Join_2_Value_0 Nullable(String) 1 UInt64 1 UInt64 Join_1_Value_1 String 1 Nullable(UInt64) Join_2_Value_1 Nullable(String) 2 UInt64 2 UInt64 Join_1_Value_2 String \N Nullable(UInt64) \N Nullable(String) @@ -45,7 +45,7 @@ SELECT '--'; -- SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value) -FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id); +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) ORDER BY t1.id; 0 UInt64 0 Nullable(UInt64) Join_1_Value_0 Nullable(String) 0 UInt64 Join_2_Value_0 String 1 UInt64 1 Nullable(UInt64) Join_1_Value_1 Nullable(String) 1 UInt64 Join_2_Value_1 String 3 UInt64 \N Nullable(UInt64) \N Nullable(String) 3 UInt64 Join_2_Value_3 String @@ -53,7 +53,7 @@ SELECT '--'; -- SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value) -FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id); +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) ORDER BY t1.id; 0 Nullable(UInt64) 0 Nullable(UInt64) Join_1_Value_0 Nullable(String) 0 Nullable(UInt64) Join_2_Value_0 Nullable(String) 1 Nullable(UInt64) 1 Nullable(UInt64) Join_1_Value_1 Nullable(String) 1 Nullable(UInt64) Join_2_Value_1 Nullable(String) 2 Nullable(UInt64) 2 Nullable(UInt64) Join_1_Value_2 Nullable(String) \N 
Nullable(UInt64) \N Nullable(String) diff --git a/tests/queries/0_stateless/02373_analyzer_join_use_nulls.sql b/tests/queries/0_stateless/02373_analyzer_join_use_nulls.sql index db7895084e8b..6b69dad6fcc7 100644 --- a/tests/queries/0_stateless/02373_analyzer_join_use_nulls.sql +++ b/tests/queries/0_stateless/02373_analyzer_join_use_nulls.sql @@ -26,46 +26,46 @@ INSERT INTO test_table_join_2 VALUES (3, 'Join_2_Value_3'); -- { echoOn } SELECT t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value) -FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id; +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id ORDER BY t1.id; SELECT '--'; SELECT t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value) -FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id; +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id ORDER BY t1.id; SELECT '--'; SELECT t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value) -FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id; +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id ORDER BY t1.id; SELECT '--'; SELECT t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value) -FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id; +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id ORDER BY t1.id; SELECT '--'; SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), t2.id AS 
t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value) -FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id); +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) ORDER BY t1.id; SELECT '--'; SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value) -FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id); +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) ORDER BY t1.id; SELECT '--'; SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value) -FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id); +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) ORDER BY t1.id; SELECT '--'; SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value) -FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id); +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) ORDER BY t1.id; -- { echoOff } diff --git a/tests/queries/0_stateless/02374_analyzer_join_using.reference b/tests/queries/0_stateless/02374_analyzer_join_using.reference index 622476942d0f..1d8d513af695 100644 --- a/tests/queries/0_stateless/02374_analyzer_join_using.reference +++ b/tests/queries/0_stateless/02374_analyzer_join_using.reference @@ -4,13 +4,13 @@ SELECT 'JOIN INNER'; JOIN INNER SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value) -FROM test_table_join_1 AS t1 
INNER JOIN test_table_join_2 AS t2 USING (id); +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) ORDER BY id; 0 UInt16 0 UInt16 Join_1_Value_0 String 0 UInt16 Join_2_Value_0 String 1 UInt16 1 UInt16 Join_1_Value_1 String 1 UInt16 Join_2_Value_1 String SELECT '--'; -- SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value) -FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id); +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) ORDER BY t1_value NULLS LAST; Join_1_Value_0 String Join_2_Value_0 String Join_1_Value_1 String Join_2_Value_1 String SELECT '--'; @@ -24,14 +24,14 @@ SELECT 'JOIN LEFT'; JOIN LEFT SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value) -FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id); +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) ORDER BY id; 0 UInt16 0 UInt16 Join_1_Value_0 String 0 UInt16 Join_2_Value_0 String 1 UInt16 1 UInt16 Join_1_Value_1 String 1 UInt16 Join_2_Value_1 String 2 UInt16 2 UInt16 Join_1_Value_2 String 0 UInt16 String SELECT '--'; -- SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value) -FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id); +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) ORDER BY t1_value NULLS LAST; Join_1_Value_0 String Join_2_Value_0 String Join_1_Value_1 String Join_2_Value_1 String Join_1_Value_2 String String @@ -47,17 +47,17 @@ SELECT 'JOIN RIGHT'; JOIN RIGHT SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value) -FROM test_table_join_1 AS t1 RIGHT JOIN 
test_table_join_2 AS t2 USING (id); +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) ORDER BY id; 0 UInt16 0 UInt16 Join_1_Value_0 String 0 UInt16 Join_2_Value_0 String 1 UInt16 1 UInt16 Join_1_Value_1 String 1 UInt16 Join_2_Value_1 String 3 UInt16 0 UInt16 String 3 UInt16 Join_2_Value_3 String SELECT '--'; -- SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value) -FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id); +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) ORDER BY t1_value NULLS LAST; + String Join_2_Value_3 String Join_1_Value_0 String Join_2_Value_0 String Join_1_Value_1 String Join_2_Value_1 String - String Join_2_Value_3 String SELECT '--'; -- SELECT 1 FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id); @@ -70,19 +70,19 @@ SELECT 'JOIN FULL'; JOIN FULL SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value) -FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id); +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) ORDER BY id; 0 UInt16 0 UInt16 Join_1_Value_0 String 0 UInt16 Join_2_Value_0 String +0 UInt16 0 UInt16 String 3 UInt16 Join_2_Value_3 String 1 UInt16 1 UInt16 Join_1_Value_1 String 1 UInt16 Join_2_Value_1 String 2 UInt16 2 UInt16 Join_1_Value_2 String 0 UInt16 String -0 UInt16 0 UInt16 String 3 UInt16 Join_2_Value_3 String SELECT '--'; -- SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value) -FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id); +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) ORDER BY t1_value NULLS LAST; + String Join_2_Value_3 String Join_1_Value_0 String Join_2_Value_0 String Join_1_Value_1 String Join_2_Value_1 
String Join_1_Value_2 String String - String Join_2_Value_3 String SELECT '--'; -- SELECT 1 FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id); @@ -96,13 +96,13 @@ SELECT 'First JOIN INNER second JOIN INNER'; First JOIN INNER second JOIN INNER SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value), t3.id AS t3_id, toTypeName(t3_id), t3.value AS t3_value, toTypeName(t3_value) -FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) INNER JOIN test_table_join_3 AS t3 USING(id); +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) INNER JOIN test_table_join_3 AS t3 USING(id) ORDER BY id, t1_value, t2_value; 0 UInt64 0 UInt64 Join_1_Value_0 String 0 UInt64 Join_2_Value_0 String 0 UInt64 Join_3_Value_0 String 1 UInt64 1 UInt64 Join_1_Value_1 String 1 UInt64 Join_2_Value_1 String 1 UInt64 Join_3_Value_1 String SELECT '--'; -- SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value), t3.value AS t3_value, toTypeName(t3_value) -FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) INNER JOIN test_table_join_3 AS t3 USING(id); +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) INNER JOIN test_table_join_3 AS t3 USING(id) ORDER BY t1_value, t2_value; Join_1_Value_0 String Join_2_Value_0 String Join_3_Value_0 String Join_1_Value_1 String Join_2_Value_1 String Join_3_Value_1 String SELECT '--'; @@ -115,13 +115,13 @@ SELECT 'First JOIN INNER second JOIN LEFT'; First JOIN INNER second JOIN LEFT SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value), t3.id AS t3_id, toTypeName(t3_id), t3.value AS t3_value, toTypeName(t3_value) -FROM 
test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) LEFT JOIN test_table_join_3 AS t3 USING(id); +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) LEFT JOIN test_table_join_3 AS t3 USING(id) ORDER BY id, t1_value, t2_value; 0 UInt64 0 UInt64 Join_1_Value_0 String 0 UInt64 Join_2_Value_0 String 0 UInt64 Join_3_Value_0 String 1 UInt64 1 UInt64 Join_1_Value_1 String 1 UInt64 Join_2_Value_1 String 1 UInt64 Join_3_Value_1 String SELECT '--'; -- SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value), t3.value AS t3_value, toTypeName(t3_value) -FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) LEFT JOIN test_table_join_3 AS t3 USING(id); +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) LEFT JOIN test_table_join_3 AS t3 USING(id) ORDER BY t1_value, t2_value; Join_1_Value_0 String Join_2_Value_0 String Join_3_Value_0 String Join_1_Value_1 String Join_2_Value_1 String Join_3_Value_1 String SELECT '--'; @@ -134,17 +134,17 @@ SELECT 'First JOIN INNER second JOIN RIGHT'; First JOIN INNER second JOIN RIGHT SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value), t3.id AS t3_id, toTypeName(t3_id), t3.value AS t3_value, toTypeName(t3_value) -FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN test_table_join_3 AS t3 USING(id); +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN test_table_join_3 AS t3 USING(id) ORDER BY id, t1_value, t2_value; 0 UInt64 0 UInt64 Join_1_Value_0 String 0 UInt64 Join_2_Value_0 String 0 UInt64 Join_3_Value_0 String 1 UInt64 1 UInt64 Join_1_Value_1 String 1 UInt64 Join_2_Value_1 String 1 UInt64 Join_3_Value_1 String 4 UInt64 0 UInt64 String 0 UInt64 String 4 UInt64 Join_3_Value_4 String SELECT '--'; -- SELECT 
t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value), t3.value AS t3_value, toTypeName(t3_value) -FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN test_table_join_3 AS t3 USING(id); +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN test_table_join_3 AS t3 USING(id) ORDER BY t1_value, t2_value; + String String Join_3_Value_4 String Join_1_Value_0 String Join_2_Value_0 String Join_3_Value_0 String Join_1_Value_1 String Join_2_Value_1 String Join_3_Value_1 String - String String Join_3_Value_4 String SELECT '--'; -- SELECT 1 FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN test_table_join_3 AS t3 USING(id); @@ -156,17 +156,17 @@ SELECT 'First JOIN INNER second JOIN FULL'; First JOIN INNER second JOIN FULL SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value), t3.id AS t3_id, toTypeName(t3_id), t3.value AS t3_value, toTypeName(t3_value) -FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) FULL JOIN test_table_join_3 AS t3 USING(id); +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) FULL JOIN test_table_join_3 AS t3 USING(id) ORDER BY id, t1_value, t2_value; +0 UInt64 0 UInt64 String 0 UInt64 String 4 UInt64 Join_3_Value_4 String 0 UInt64 0 UInt64 Join_1_Value_0 String 0 UInt64 Join_2_Value_0 String 0 UInt64 Join_3_Value_0 String 1 UInt64 1 UInt64 Join_1_Value_1 String 1 UInt64 Join_2_Value_1 String 1 UInt64 Join_3_Value_1 String -0 UInt64 0 UInt64 String 0 UInt64 String 4 UInt64 Join_3_Value_4 String SELECT '--'; -- SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value), t3.value AS t3_value, toTypeName(t3_value) -FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) FULL 
JOIN test_table_join_3 AS t3 USING(id); +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) FULL JOIN test_table_join_3 AS t3 USING(id) ORDER BY t1_value, t2_value; + String String Join_3_Value_4 String Join_1_Value_0 String Join_2_Value_0 String Join_3_Value_0 String Join_1_Value_1 String Join_2_Value_1 String Join_3_Value_1 String - String String Join_3_Value_4 String SELECT '--'; -- SELECT 1 FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) FULL JOIN test_table_join_3 AS t3 USING(id); @@ -178,13 +178,13 @@ SELECT 'First JOIN LEFT second JOIN INNER'; First JOIN LEFT second JOIN INNER SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value), t3.id AS t3_id, toTypeName(t3_id), t3.value AS t3_value, toTypeName(t3_value) -FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) INNER JOIN test_table_join_3 AS t3 USING(id); +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) INNER JOIN test_table_join_3 AS t3 USING(id) ORDER BY id, t1_value, t2_value; 0 UInt64 0 UInt64 Join_1_Value_0 String 0 UInt64 Join_2_Value_0 String 0 UInt64 Join_3_Value_0 String 1 UInt64 1 UInt64 Join_1_Value_1 String 1 UInt64 Join_2_Value_1 String 1 UInt64 Join_3_Value_1 String SELECT '--'; -- SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value), t3.value AS t3_value, toTypeName(t3_value) -FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) INNER JOIN test_table_join_3 AS t3 USING(id); +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) INNER JOIN test_table_join_3 AS t3 USING(id) ORDER BY t1_value, t2_value; Join_1_Value_0 String Join_2_Value_0 String Join_3_Value_0 String Join_1_Value_1 String Join_2_Value_1 String Join_3_Value_1 String SELECT '--'; @@ -197,14 +197,14 @@ SELECT 
'First JOIN LEFT second JOIN LEFT'; First JOIN LEFT second JOIN LEFT SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value), t3.id AS t3_id, toTypeName(t3_id), t3.value AS t3_value, toTypeName(t3_value) -FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) LEFT JOIN test_table_join_3 AS t3 USING(id); +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) LEFT JOIN test_table_join_3 AS t3 USING(id) ORDER BY id, t1_value, t2_value; 0 UInt64 0 UInt64 Join_1_Value_0 String 0 UInt64 Join_2_Value_0 String 0 UInt64 Join_3_Value_0 String 1 UInt64 1 UInt64 Join_1_Value_1 String 1 UInt64 Join_2_Value_1 String 1 UInt64 Join_3_Value_1 String 2 UInt64 2 UInt64 Join_1_Value_2 String 0 UInt64 String 0 UInt64 String SELECT '--'; -- SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value), t3.value AS t3_value, toTypeName(t3_value) -FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) LEFT JOIN test_table_join_3 AS t3 USING(id); +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) LEFT JOIN test_table_join_3 AS t3 USING(id) ORDER BY t1_value, t2_value; Join_1_Value_0 String Join_2_Value_0 String Join_3_Value_0 String Join_1_Value_1 String Join_2_Value_1 String Join_3_Value_1 String Join_1_Value_2 String String String @@ -219,17 +219,17 @@ SELECT 'First JOIN LEFT second JOIN RIGHT'; First JOIN LEFT second JOIN RIGHT SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value), t3.id AS t3_id, toTypeName(t3_id), t3.value AS t3_value, toTypeName(t3_value) -FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN test_table_join_3 AS t3 USING(id); +FROM 
test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN test_table_join_3 AS t3 USING(id) ORDER BY id, t1_value, t2_value; 0 UInt64 0 UInt64 Join_1_Value_0 String 0 UInt64 Join_2_Value_0 String 0 UInt64 Join_3_Value_0 String 1 UInt64 1 UInt64 Join_1_Value_1 String 1 UInt64 Join_2_Value_1 String 1 UInt64 Join_3_Value_1 String 4 UInt64 0 UInt64 String 0 UInt64 String 4 UInt64 Join_3_Value_4 String SELECT '--'; -- SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value), t3.value AS t3_value, toTypeName(t3_value) -FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN test_table_join_3 AS t3 USING(id); +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN test_table_join_3 AS t3 USING(id) ORDER BY t1_value, t2_value; + String String Join_3_Value_4 String Join_1_Value_0 String Join_2_Value_0 String Join_3_Value_0 String Join_1_Value_1 String Join_2_Value_1 String Join_3_Value_1 String - String String Join_3_Value_4 String SELECT '--'; -- SELECT 1 FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN test_table_join_3 AS t3 USING(id); @@ -241,19 +241,19 @@ SELECT 'First JOIN LEFT second JOIN FULL'; First JOIN LEFT second JOIN FULL SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value), t3.id AS t3_id, toTypeName(t3_id), t3.value AS t3_value, toTypeName(t3_value) -FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) FULL JOIN test_table_join_3 AS t3 USING(id); +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) FULL JOIN test_table_join_3 AS t3 USING(id) ORDER BY id, t1_value, t2_value; +0 UInt64 0 UInt64 String 0 UInt64 String 4 UInt64 Join_3_Value_4 String 0 UInt64 0 UInt64 Join_1_Value_0 String 0 UInt64 Join_2_Value_0 String 0 
UInt64 Join_3_Value_0 String 1 UInt64 1 UInt64 Join_1_Value_1 String 1 UInt64 Join_2_Value_1 String 1 UInt64 Join_3_Value_1 String 2 UInt64 2 UInt64 Join_1_Value_2 String 0 UInt64 String 0 UInt64 String -0 UInt64 0 UInt64 String 0 UInt64 String 4 UInt64 Join_3_Value_4 String SELECT '--'; -- SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value), t3.value AS t3_value, toTypeName(t3_value) -FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) FULL JOIN test_table_join_3 AS t3 USING(id); +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) FULL JOIN test_table_join_3 AS t3 USING(id) ORDER BY t1_value, t2_value; + String String Join_3_Value_4 String Join_1_Value_0 String Join_2_Value_0 String Join_3_Value_0 String Join_1_Value_1 String Join_2_Value_1 String Join_3_Value_1 String Join_1_Value_2 String String String - String String Join_3_Value_4 String SELECT '--'; -- SELECT 1 FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) FULL JOIN test_table_join_3 AS t3 USING(id); @@ -266,13 +266,13 @@ SELECT 'First JOIN RIGHT second JOIN INNER'; First JOIN RIGHT second JOIN INNER SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value), t3.id AS t3_id, toTypeName(t3_id), t3.value AS t3_value, toTypeName(t3_value) -FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) INNER JOIN test_table_join_3 AS t3 USING(id); +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) INNER JOIN test_table_join_3 AS t3 USING(id) ORDER BY id, t1_value, t2_value; 0 UInt64 0 UInt64 Join_1_Value_0 String 0 UInt64 Join_2_Value_0 String 0 UInt64 Join_3_Value_0 String 1 UInt64 1 UInt64 Join_1_Value_1 String 1 UInt64 Join_2_Value_1 String 1 UInt64 Join_3_Value_1 String SELECT '--'; -- SELECT t1.value AS t1_value, 
toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value), t3.value AS t3_value, toTypeName(t3_value) -FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) INNER JOIN test_table_join_3 AS t3 USING(id); +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) INNER JOIN test_table_join_3 AS t3 USING(id) ORDER BY t1_value, t2_value; Join_1_Value_0 String Join_2_Value_0 String Join_3_Value_0 String Join_1_Value_1 String Join_2_Value_1 String Join_3_Value_1 String SELECT '--'; @@ -285,17 +285,17 @@ SELECT 'First JOIN RIGHT second JOIN LEFT'; First JOIN RIGHT second JOIN LEFT SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value), t3.id AS t3_id, toTypeName(t3_id), t3.value AS t3_value, toTypeName(t3_value) -FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) LEFT JOIN test_table_join_3 AS t3 USING(id); +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) LEFT JOIN test_table_join_3 AS t3 USING(id) ORDER BY id, t1_value, t2_value; 0 UInt64 0 UInt64 Join_1_Value_0 String 0 UInt64 Join_2_Value_0 String 0 UInt64 Join_3_Value_0 String 1 UInt64 1 UInt64 Join_1_Value_1 String 1 UInt64 Join_2_Value_1 String 1 UInt64 Join_3_Value_1 String 3 UInt64 0 UInt64 String 3 UInt64 Join_2_Value_3 String 0 UInt64 String SELECT '--'; -- SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value), t3.value AS t3_value, toTypeName(t3_value) -FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) LEFT JOIN test_table_join_3 AS t3 USING(id); +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) LEFT JOIN test_table_join_3 AS t3 USING(id) ORDER BY t1_value, t2_value; + String Join_2_Value_3 String String Join_1_Value_0 String Join_2_Value_0 String Join_3_Value_0 String Join_1_Value_1 
String Join_2_Value_1 String Join_3_Value_1 String - String Join_2_Value_3 String String SELECT '--'; -- SELECT 1 FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) LEFT JOIN test_table_join_3 AS t3 USING(id); @@ -307,17 +307,17 @@ SELECT 'First JOIN RIGHT second JOIN RIGHT'; First JOIN RIGHT second JOIN RIGHT SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value), t3.id AS t3_id, toTypeName(t3_id), t3.value AS t3_value, toTypeName(t3_value) -FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN test_table_join_3 AS t3 USING(id); +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN test_table_join_3 AS t3 USING(id) ORDER BY id, t1_value, t2_value; 0 UInt64 0 UInt64 Join_1_Value_0 String 0 UInt64 Join_2_Value_0 String 0 UInt64 Join_3_Value_0 String 1 UInt64 1 UInt64 Join_1_Value_1 String 1 UInt64 Join_2_Value_1 String 1 UInt64 Join_3_Value_1 String 4 UInt64 0 UInt64 String 0 UInt64 String 4 UInt64 Join_3_Value_4 String SELECT '--'; -- SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value), t3.value AS t3_value, toTypeName(t3_value) -FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN test_table_join_3 AS t3 USING(id); +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN test_table_join_3 AS t3 USING(id) ORDER BY t1_value, t2_value; + String String Join_3_Value_4 String Join_1_Value_0 String Join_2_Value_0 String Join_3_Value_0 String Join_1_Value_1 String Join_2_Value_1 String Join_3_Value_1 String - String String Join_3_Value_4 String SELECT '--'; -- SELECT 1 FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN test_table_join_3 AS t3 USING(id); @@ -329,19 +329,19 @@ SELECT 'First JOIN RIGHT 
second JOIN FULL'; First JOIN RIGHT second JOIN FULL SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value), t3.id AS t3_id, toTypeName(t3_id), t3.value AS t3_value, toTypeName(t3_value) -FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) FULL JOIN test_table_join_3 AS t3 USING(id); +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) FULL JOIN test_table_join_3 AS t3 USING(id) ORDER BY id, t1_value, t2_value; +0 UInt64 0 UInt64 String 0 UInt64 String 4 UInt64 Join_3_Value_4 String 0 UInt64 0 UInt64 Join_1_Value_0 String 0 UInt64 Join_2_Value_0 String 0 UInt64 Join_3_Value_0 String 1 UInt64 1 UInt64 Join_1_Value_1 String 1 UInt64 Join_2_Value_1 String 1 UInt64 Join_3_Value_1 String 3 UInt64 0 UInt64 String 3 UInt64 Join_2_Value_3 String 0 UInt64 String -0 UInt64 0 UInt64 String 0 UInt64 String 4 UInt64 Join_3_Value_4 String SELECT '--'; -- SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value), t3.value AS t3_value, toTypeName(t3_value) -FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) FULL JOIN test_table_join_3 AS t3 USING(id); +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) FULL JOIN test_table_join_3 AS t3 USING(id) ORDER BY t1_value, t2_value; + String String Join_3_Value_4 String + String Join_2_Value_3 String String Join_1_Value_0 String Join_2_Value_0 String Join_3_Value_0 String Join_1_Value_1 String Join_2_Value_1 String Join_3_Value_1 String - String Join_2_Value_3 String String - String String Join_3_Value_4 String SELECT '--'; -- SELECT 1 FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) FULL JOIN test_table_join_3 AS t3 USING(id); @@ -354,14 +354,14 @@ SELECT 'First JOIN FULL second JOIN INNER'; First JOIN FULL second JOIN INNER SELECT id AS 
using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value), t3.id AS t3_id, toTypeName(t3_id), t3.value AS t3_value, toTypeName(t3_value) -FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) INNER JOIN test_table_join_3 AS t3 USING(id); +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) INNER JOIN test_table_join_3 AS t3 USING(id) ORDER BY id, t1_value, t2_value; +0 UInt64 0 UInt64 String 3 UInt64 Join_2_Value_3 String 0 UInt64 Join_3_Value_0 String 0 UInt64 0 UInt64 Join_1_Value_0 String 0 UInt64 Join_2_Value_0 String 0 UInt64 Join_3_Value_0 String 1 UInt64 1 UInt64 Join_1_Value_1 String 1 UInt64 Join_2_Value_1 String 1 UInt64 Join_3_Value_1 String -0 UInt64 0 UInt64 String 3 UInt64 Join_2_Value_3 String 0 UInt64 Join_3_Value_0 String SELECT '--'; -- SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value), t3.value AS t3_value, toTypeName(t3_value) -FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) INNER JOIN test_table_join_3 AS t3 USING(id); +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) INNER JOIN test_table_join_3 AS t3 USING(id) ORDER BY t1_value, t2_value; Join_1_Value_0 String Join_2_Value_0 String Join_3_Value_0 String Join_1_Value_1 String Join_2_Value_1 String Join_3_Value_1 String SELECT '--'; @@ -374,19 +374,19 @@ SELECT 'First JOIN FULL second JOIN LEFT'; First JOIN FULL second JOIN LEFT SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value), t3.id AS t3_id, toTypeName(t3_id), t3.value AS t3_value, toTypeName(t3_value) -FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) LEFT JOIN test_table_join_3 AS t3 USING(id); +FROM 
test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) LEFT JOIN test_table_join_3 AS t3 USING(id) ORDER BY id, t1_value, t2_value; +0 UInt64 0 UInt64 String 3 UInt64 Join_2_Value_3 String 0 UInt64 Join_3_Value_0 String 0 UInt64 0 UInt64 Join_1_Value_0 String 0 UInt64 Join_2_Value_0 String 0 UInt64 Join_3_Value_0 String 1 UInt64 1 UInt64 Join_1_Value_1 String 1 UInt64 Join_2_Value_1 String 1 UInt64 Join_3_Value_1 String 2 UInt64 2 UInt64 Join_1_Value_2 String 0 UInt64 String 0 UInt64 String -0 UInt64 0 UInt64 String 3 UInt64 Join_2_Value_3 String 0 UInt64 Join_3_Value_0 String SELECT '--'; -- SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value), t3.value AS t3_value, toTypeName(t3_value) -FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) LEFT JOIN test_table_join_3 AS t3 USING(id); +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) LEFT JOIN test_table_join_3 AS t3 USING(id) ORDER BY t1_value, t2_value; + String Join_2_Value_3 String String Join_1_Value_0 String Join_2_Value_0 String Join_3_Value_0 String Join_1_Value_1 String Join_2_Value_1 String Join_3_Value_1 String Join_1_Value_2 String String String - String Join_2_Value_3 String String SELECT '--'; -- SELECT 1 FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) LEFT JOIN test_table_join_3 AS t3 USING(id); @@ -399,18 +399,18 @@ SELECT 'First JOIN FULL second JOIN RIGHT'; First JOIN FULL second JOIN RIGHT SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value), t3.id AS t3_id, toTypeName(t3_id), t3.value AS t3_value, toTypeName(t3_value) -FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN test_table_join_3 AS t3 USING(id); +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN 
test_table_join_3 AS t3 USING(id) ORDER BY id, t1_value, t2_value; +0 UInt64 0 UInt64 String 3 UInt64 Join_2_Value_3 String 0 UInt64 Join_3_Value_0 String 0 UInt64 0 UInt64 Join_1_Value_0 String 0 UInt64 Join_2_Value_0 String 0 UInt64 Join_3_Value_0 String 1 UInt64 1 UInt64 Join_1_Value_1 String 1 UInt64 Join_2_Value_1 String 1 UInt64 Join_3_Value_1 String -0 UInt64 0 UInt64 String 3 UInt64 Join_2_Value_3 String 0 UInt64 Join_3_Value_0 String 4 UInt64 0 UInt64 String 0 UInt64 String 4 UInt64 Join_3_Value_4 String SELECT '--'; -- SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value), t3.value AS t3_value, toTypeName(t3_value) -FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN test_table_join_3 AS t3 USING(id); +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN test_table_join_3 AS t3 USING(id) ORDER BY t1_value, t2_value; + String String Join_3_Value_4 String Join_1_Value_0 String Join_2_Value_0 String Join_3_Value_0 String Join_1_Value_1 String Join_2_Value_1 String Join_3_Value_1 String - String String Join_3_Value_4 String SELECT '--'; -- SELECT 1 FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN test_table_join_3 AS t3 USING(id); @@ -422,21 +422,21 @@ SELECT 'First JOIN FULL second JOIN FULL'; First JOIN FULL second JOIN FULL SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value), t3.id AS t3_id, toTypeName(t3_id), t3.value AS t3_value, toTypeName(t3_value) -FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) FULL JOIN test_table_join_3 AS t3 USING(id); +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) FULL JOIN test_table_join_3 AS t3 USING(id) ORDER BY id, t1_value, t2_value; +0 UInt64 0 UInt64 String 0 UInt64 String 4 UInt64 
Join_3_Value_4 String +0 UInt64 0 UInt64 String 3 UInt64 Join_2_Value_3 String 0 UInt64 Join_3_Value_0 String 0 UInt64 0 UInt64 Join_1_Value_0 String 0 UInt64 Join_2_Value_0 String 0 UInt64 Join_3_Value_0 String 1 UInt64 1 UInt64 Join_1_Value_1 String 1 UInt64 Join_2_Value_1 String 1 UInt64 Join_3_Value_1 String 2 UInt64 2 UInt64 Join_1_Value_2 String 0 UInt64 String 0 UInt64 String -0 UInt64 0 UInt64 String 3 UInt64 Join_2_Value_3 String 0 UInt64 Join_3_Value_0 String -0 UInt64 0 UInt64 String 0 UInt64 String 4 UInt64 Join_3_Value_4 String SELECT '--'; -- SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value), t3.value AS t3_value, toTypeName(t3_value) -FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) FULL JOIN test_table_join_3 AS t3 USING(id); +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) FULL JOIN test_table_join_3 AS t3 USING(id) ORDER BY t1_value, t2_value; + String String Join_3_Value_4 String + String Join_2_Value_3 String String Join_1_Value_0 String Join_2_Value_0 String Join_3_Value_0 String Join_1_Value_1 String Join_2_Value_1 String Join_3_Value_1 String Join_1_Value_2 String String String - String Join_2_Value_3 String String - String String Join_3_Value_4 String SELECT '--'; -- SELECT 1 FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) FULL JOIN test_table_join_3 AS t3 USING(id); diff --git a/tests/queries/0_stateless/02374_analyzer_join_using.sql.j2 b/tests/queries/0_stateless/02374_analyzer_join_using.sql.j2 index 26fb52716ff9..6b81c89ee7c9 100644 --- a/tests/queries/0_stateless/02374_analyzer_join_using.sql.j2 +++ b/tests/queries/0_stateless/02374_analyzer_join_using.sql.j2 @@ -41,12 +41,12 @@ SELECT 'JOIN {{ join_type }}'; SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value) -FROM 
test_table_join_1 AS t1 {{ join_type }} JOIN test_table_join_2 AS t2 USING (id); +FROM test_table_join_1 AS t1 {{ join_type }} JOIN test_table_join_2 AS t2 USING (id) ORDER BY id; SELECT '--'; SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value) -FROM test_table_join_1 AS t1 {{ join_type }} JOIN test_table_join_2 AS t2 USING (id); +FROM test_table_join_1 AS t1 {{ join_type }} JOIN test_table_join_2 AS t2 USING (id) ORDER BY t1_value NULLS LAST; SELECT '--'; @@ -64,12 +64,12 @@ SELECT 'First JOIN {{ first_join_type }} second JOIN {{ second_join_type }}'; SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value), t3.id AS t3_id, toTypeName(t3_id), t3.value AS t3_value, toTypeName(t3_value) -FROM test_table_join_1 AS t1 {{ first_join_type }} JOIN test_table_join_2 AS t2 USING (id) {{ second_join_type }} JOIN test_table_join_3 AS t3 USING(id); +FROM test_table_join_1 AS t1 {{ first_join_type }} JOIN test_table_join_2 AS t2 USING (id) {{ second_join_type }} JOIN test_table_join_3 AS t3 USING(id) ORDER BY id, t1_value, t2_value; SELECT '--'; SELECT t1.value AS t1_value, toTypeName(t1_value), t2.value AS t2_value, toTypeName(t2_value), t3.value AS t3_value, toTypeName(t3_value) -FROM test_table_join_1 AS t1 {{ first_join_type }} JOIN test_table_join_2 AS t2 USING (id) {{ second_join_type }} JOIN test_table_join_3 AS t3 USING(id); +FROM test_table_join_1 AS t1 {{ first_join_type }} JOIN test_table_join_2 AS t2 USING (id) {{ second_join_type }} JOIN test_table_join_3 AS t3 USING(id) ORDER BY t1_value, t2_value; SELECT '--'; diff --git a/tests/queries/0_stateless/02380_analyzer_join_sample.sql b/tests/queries/0_stateless/02380_analyzer_join_sample.sql index e417f47d1735..5dc6c29b7901 100644 --- a/tests/queries/0_stateless/02380_analyzer_join_sample.sql +++ 
b/tests/queries/0_stateless/02380_analyzer_join_sample.sql @@ -23,7 +23,7 @@ SAMPLE BY id; INSERT INTO test_table_join_2 VALUES (0, 'Value'), (1, 'Value_1'); SELECT t1.id AS t1_id, t2.id AS t2_id, t1._sample_factor AS t1_sample_factor, t2._sample_factor AS t2_sample_factor -FROM test_table_join_1 AS t1 SAMPLE 1/2 INNER JOIN test_table_join_2 AS t2 SAMPLE 1/2 ON t1.id = t2.id; +FROM test_table_join_1 AS t1 SAMPLE 1/2 INNER JOIN test_table_join_2 AS t2 SAMPLE 1/2 ON t1.id = t2.id ORDER BY t1.id; DROP TABLE test_table_join_1; DROP TABLE test_table_join_2; diff --git a/tests/queries/0_stateless/02432_s3_parallel_parts_cleanup.sql b/tests/queries/0_stateless/02432_s3_parallel_parts_cleanup.sql index 3688a649d5ea..88fb2cdf9b13 100644 --- a/tests/queries/0_stateless/02432_s3_parallel_parts_cleanup.sql +++ b/tests/queries/0_stateless/02432_s3_parallel_parts_cleanup.sql @@ -1,5 +1,7 @@ -- Tags: no-fasttest +SET send_logs_level = 'fatal'; + drop table if exists rmt; drop table if exists rmt2; diff --git a/tests/queries/0_stateless/02447_drop_database_replica.reference b/tests/queries/0_stateless/02447_drop_database_replica.reference index 1d65fe66c6e8..f2b415695407 100644 --- a/tests/queries/0_stateless/02447_drop_database_replica.reference +++ b/tests/queries/0_stateless/02447_drop_database_replica.reference @@ -6,10 +6,16 @@ t 2 2 2 -rdb_default 1 1 -rdb_default 1 2 2 2 2 +2 +rdb_default 1 1 s1 r1 1 +2 +2 +rdb_default 1 1 s1 r1 1 +rdb_default 1 2 s1 r2 0 +2 +2 t -rdb_default_3 1 1 +rdb_default_4 1 1 s1 r1 1 diff --git a/tests/queries/0_stateless/02447_drop_database_replica.sh b/tests/queries/0_stateless/02447_drop_database_replica.sh index 4bfd6243c2ed..47a6cf10bda3 100755 --- a/tests/queries/0_stateless/02447_drop_database_replica.sh +++ b/tests/queries/0_stateless/02447_drop_database_replica.sh @@ -13,35 +13,49 @@ $CLICKHOUSE_CLIENT -q "show tables from $db" $CLICKHOUSE_CLIENT -q "system drop database replica 's1|r1' from table t" 2>&1| grep -Fac "SYNTAX_ERROR" 
$CLICKHOUSE_CLIENT -q "system drop database replica 's1|r1' from database $db" 2>&1| grep -Fac "There is a local database" +$CLICKHOUSE_CLIENT -q "system drop database replica 'r1' from shard 's1' from database $db" 2>&1| grep -Fac "There is a local database" $CLICKHOUSE_CLIENT -q "system drop database replica 's1|r1' from zkpath '/test/$CLICKHOUSE_DATABASE/rdb'" 2>&1| grep -Fac "There is a local database" $CLICKHOUSE_CLIENT -q "system drop database replica 's1|r1' from zkpath '/test/$CLICKHOUSE_DATABASE/rdb/'" 2>&1| grep -Fac "There is a local database" +$CLICKHOUSE_CLIENT -q "system drop database replica 'r1' from shard 's1' from zkpath '/test/$CLICKHOUSE_DATABASE/rdb/'" 2>&1| grep -Fac "There is a local database" $CLICKHOUSE_CLIENT -q "system drop database replica 's1|r1' from zkpath '/test/$CLICKHOUSE_DATABASE/'" 2>&1| grep -Fac "does not look like a path of Replicated database" $CLICKHOUSE_CLIENT -q "system drop database replica 's2|r1' from zkpath '/test/$CLICKHOUSE_DATABASE/rdb'" 2>&1| grep -Fac "does not exist" +$CLICKHOUSE_CLIENT -q "system drop database replica 's1' from shard 'r1' from zkpath '/test/$CLICKHOUSE_DATABASE/rdb'" 2>&1| grep -Fac "does not exist" +$CLICKHOUSE_CLIENT -q "system drop database replica 's1|r1' from shard 's1' from zkpath '/test/$CLICKHOUSE_DATABASE/rdb'" 2>&1| grep -Fac "does not exist" $CLICKHOUSE_CLIENT -q "system drop database replica 's2/r1' from zkpath '/test/$CLICKHOUSE_DATABASE/rdb'" 2>&1| grep -Fac "Invalid replica name" db2="${db}_2" +db3="${db}_3" $CLICKHOUSE_CLIENT --allow_experimental_database_replicated=1 -q "create database $db2 engine=Replicated('/test/$CLICKHOUSE_DATABASE/rdb', 's1', 'r2')" +$CLICKHOUSE_CLIENT --allow_experimental_database_replicated=1 -q "create database $db3 engine=Replicated('/test/$CLICKHOUSE_DATABASE/rdb', 's2', 'r1')" $CLICKHOUSE_CLIENT -q "system sync database replica $db" -$CLICKHOUSE_CLIENT -q "select cluster, shard_num, replica_num from system.clusters where cluster='$db' order by 
shard_num, replica_num" +$CLICKHOUSE_CLIENT -q "select cluster, shard_num, replica_num, database_shard_name, database_replica_name, is_active from system.clusters where cluster='$db' and shard_num=1 and replica_num=1" $CLICKHOUSE_CLIENT -q "system drop database replica 's1|r1' from database $db2" 2>&1| grep -Fac "is active, cannot drop it" +$CLICKHOUSE_CLIENT -q "detach database $db3" +$CLICKHOUSE_CLIENT -q "system drop database replica 'r1' from shard 's2' from database $db" +$CLICKHOUSE_CLIENT -q "attach database $db3" 2>/dev/null +$CLICKHOUSE_CLIENT --distributed_ddl_output_mode=none -q "create table $db3.t2 as system.query_log" 2>&1| grep -Fac "Database is in readonly mode" # Suppress style check: current_database=$CLICKHOUSE_DATABASE + $CLICKHOUSE_CLIENT -q "detach database $db2" +$CLICKHOUSE_CLIENT -q "system sync database replica $db" +$CLICKHOUSE_CLIENT -q "select cluster, shard_num, replica_num, database_shard_name, database_replica_name, is_active from system.clusters where cluster='$db' order by shard_num, replica_num" $CLICKHOUSE_CLIENT -q "system drop database replica 's1|r2' from database $db" $CLICKHOUSE_CLIENT -q "attach database $db2" 2>/dev/null $CLICKHOUSE_CLIENT --distributed_ddl_output_mode=none -q "create table $db2.t2 as system.query_log" 2>&1| grep -Fac "Database is in readonly mode" # Suppress style check: current_database=$CLICKHOUSE_DATABASE $CLICKHOUSE_CLIENT -q "detach database $db" -$CLICKHOUSE_CLIENT -q "system drop database replica 's1|r1' from zkpath '/test/$CLICKHOUSE_DATABASE/rdb/'" +$CLICKHOUSE_CLIENT -q "system drop database replica 'r1' from shard 's1' from zkpath '/test/$CLICKHOUSE_DATABASE/rdb/'" $CLICKHOUSE_CLIENT -q "attach database $db" 2>/dev/null $CLICKHOUSE_CLIENT --distributed_ddl_output_mode=none -q "create table $db.t2 as system.query_log" 2>&1| grep -Fac "Database is in readonly mode" # Suppress style check: current_database=$CLICKHOUSE_DATABASE $CLICKHOUSE_CLIENT -q "show tables from $db" -db3="${db}_3" 
-$CLICKHOUSE_CLIENT --allow_experimental_database_replicated=1 -q "create database $db3 engine=Replicated('/test/$CLICKHOUSE_DATABASE/rdb', 's1', 'r1')" -$CLICKHOUSE_CLIENT -q "system sync database replica $db3" -$CLICKHOUSE_CLIENT -q "select cluster, shard_num, replica_num from system.clusters where cluster='$db3'" +db4="${db}_4" +$CLICKHOUSE_CLIENT --allow_experimental_database_replicated=1 -q "create database $db4 engine=Replicated('/test/$CLICKHOUSE_DATABASE/rdb', 's1', 'r1')" +$CLICKHOUSE_CLIENT -q "system sync database replica $db4" +$CLICKHOUSE_CLIENT -q "select cluster, shard_num, replica_num, database_shard_name, database_replica_name, is_active from system.clusters where cluster='$db4'" $CLICKHOUSE_CLIENT -q "drop database $db" $CLICKHOUSE_CLIENT -q "drop database $db2" $CLICKHOUSE_CLIENT -q "drop database $db3" +$CLICKHOUSE_CLIENT -q "drop database $db4" diff --git a/tests/queries/0_stateless/02479_nullable_primary_key_non_first_column.reference b/tests/queries/0_stateless/02479_nullable_primary_key_non_first_column.reference new file mode 100644 index 000000000000..ed6ac232d9c5 --- /dev/null +++ b/tests/queries/0_stateless/02479_nullable_primary_key_non_first_column.reference @@ -0,0 +1,2 @@ +a \N +1 1 \N diff --git a/tests/queries/0_stateless/02479_nullable_primary_key_non_first_column.sql b/tests/queries/0_stateless/02479_nullable_primary_key_non_first_column.sql new file mode 100644 index 000000000000..2d56e315bd18 --- /dev/null +++ b/tests/queries/0_stateless/02479_nullable_primary_key_non_first_column.sql @@ -0,0 +1,11 @@ +drop table if exists test_table; +create table test_table (A Nullable(String), B Nullable(String)) engine MergeTree order by (A,B) settings index_granularity = 1, allow_nullable_key=1; +insert into test_table values ('a', 'b'), ('a', null), (null, 'b'); +select * from test_table where B is null; +drop table test_table; + +DROP TABLE IF EXISTS dm_metric_small2; +CREATE TABLE dm_metric_small2 (`x` Nullable(Int64), `y` 
Nullable(Int64), `z` Nullable(Int64)) ENGINE = MergeTree() ORDER BY (x, y, z) SETTINGS index_granularity = 1, allow_nullable_key = 1; +INSERT INTO dm_metric_small2 VALUES (1,1,NULL) (1,1,1) (1,2,0) (1,2,1) (1,2,NULL) (1,2,NULL); +SELECT * FROM dm_metric_small2 WHERE (x = 1) AND (y = 1) AND z IS NULL; +DROP TABLE dm_metric_small2; \ No newline at end of file diff --git a/tests/queries/0_stateless/02479_nullable_primary_key_second_column.reference b/tests/queries/0_stateless/02479_nullable_primary_key_second_column.reference deleted file mode 100644 index f0227e1a41ec..000000000000 --- a/tests/queries/0_stateless/02479_nullable_primary_key_second_column.reference +++ /dev/null @@ -1 +0,0 @@ -a \N diff --git a/tests/queries/0_stateless/02479_nullable_primary_key_second_column.sql b/tests/queries/0_stateless/02479_nullable_primary_key_second_column.sql deleted file mode 100644 index ad0c09222c22..000000000000 --- a/tests/queries/0_stateless/02479_nullable_primary_key_second_column.sql +++ /dev/null @@ -1,9 +0,0 @@ -drop table if exists test_table; - -create table test_table (A Nullable(String), B Nullable(String)) engine MergeTree order by (A,B) settings index_granularity = 1, allow_nullable_key=1; - -insert into test_table values ('a', 'b'), ('a', null), (null, 'b'); - -select * from test_table where B is null; - -drop table test_table; diff --git a/tests/queries/0_stateless/02497_if_transform_strings_to_enum.reference b/tests/queries/0_stateless/02497_if_transform_strings_to_enum.reference index f5284f38b86a..a1a653361ee2 100644 --- a/tests/queries/0_stateless/02497_if_transform_strings_to_enum.reference +++ b/tests/queries/0_stateless/02497_if_transform_strings_to_enum.reference @@ -405,16 +405,6 @@ QUERY id: 0 TABLE id: 7, table_name: system.numbers LIMIT CONSTANT id: 17, constant_value: UInt64_10, constant_value_type: UInt64 -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N SELECT transform(number, [NULL], _CAST([\'google\', \'censor.net\', \'yahoo\'], 
\'Array(Enum8(\\\'censor.net\\\' = 1, \\\'google\\\' = 2, \\\'other\\\' = 3, \\\'yahoo\\\' = 4))\'), _CAST(\'other\', \'Enum8(\\\'censor.net\\\' = 1, \\\'google\\\' = 2, \\\'other\\\' = 3, \\\'yahoo\\\' = 4)\')) FROM ( @@ -424,56 +414,38 @@ FROM ) QUERY id: 0 PROJECTION COLUMNS - transform(number, [NULL], [\'google\', \'censor.net\', \'yahoo\'], \'other\') Nullable(Nothing) + transform(number, [NULL], [\'google\', \'censor.net\', \'yahoo\'], \'other\') String PROJECTION LIST id: 1, nodes: 1 - FUNCTION id: 2, function_name: transform, function_type: ordinary, result_type: Nullable(Nothing) + FUNCTION id: 2, function_name: toString, function_type: ordinary, result_type: String ARGUMENTS - LIST id: 3, nodes: 4 - COLUMN id: 4, column_name: number, result_type: Nullable(Nothing), source_id: 5 - CONSTANT id: 6, constant_value: Array_[NULL], constant_value_type: Array(Nullable(Nothing)) - CONSTANT id: 7, constant_value: Array_[\'google\', \'censor.net\', \'yahoo\'], constant_value_type: Array(String) - CONSTANT id: 8, constant_value: \'other\', constant_value_type: String + LIST id: 3, nodes: 1 + FUNCTION id: 4, function_name: transform, function_type: ordinary, result_type: Enum8(\'censor.net\' = 1, \'google\' = 2, \'other\' = 3, \'yahoo\' = 4) + ARGUMENTS + LIST id: 5, nodes: 4 + COLUMN id: 6, column_name: number, result_type: Nullable(Nothing), source_id: 7 + CONSTANT id: 8, constant_value: Array_[NULL], constant_value_type: Array(Nullable(Nothing)) + FUNCTION id: 9, function_name: _CAST, function_type: ordinary, result_type: Array(Enum8(\'censor.net\' = 1, \'google\' = 2, \'other\' = 3, \'yahoo\' = 4)) + ARGUMENTS + LIST id: 10, nodes: 2 + CONSTANT id: 11, constant_value: Array_[\'google\', \'censor.net\', \'yahoo\'], constant_value_type: Array(String) + CONSTANT id: 12, constant_value: \'Array(Enum8(\\\'censor.net\\\' = 1, \\\'google\\\' = 2, \\\'other\\\' = 3, \\\'yahoo\\\' = 4))\', constant_value_type: String + FUNCTION id: 13, function_name: _CAST, function_type: 
ordinary, result_type: Enum8(\'censor.net\' = 1, \'google\' = 2, \'other\' = 3, \'yahoo\' = 4) + ARGUMENTS + LIST id: 14, nodes: 2 + CONSTANT id: 15, constant_value: \'other\', constant_value_type: String + CONSTANT id: 16, constant_value: \'Enum8(\\\'censor.net\\\' = 1, \\\'google\\\' = 2, \\\'other\\\' = 3, \\\'yahoo\\\' = 4)\', constant_value_type: String JOIN TREE - QUERY id: 5, is_subquery: 1 + QUERY id: 7, is_subquery: 1 PROJECTION COLUMNS number Nullable(Nothing) PROJECTION - LIST id: 9, nodes: 1 - CONSTANT id: 10, constant_value: NULL, constant_value_type: Nullable(Nothing) + LIST id: 17, nodes: 1 + CONSTANT id: 18, constant_value: NULL, constant_value_type: Nullable(Nothing) JOIN TREE - TABLE id: 11, table_name: system.numbers + TABLE id: 19, table_name: system.numbers LIMIT - CONSTANT id: 12, constant_value: UInt64_10, constant_value_type: UInt64 -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N -SELECT transform(number, NULL, _CAST([\'google\', \'censor.net\', \'yahoo\'], \'Array(Enum8(\\\'censor.net\\\' = 1, \\\'google\\\' = 2, \\\'other\\\' = 3, \\\'yahoo\\\' = 4))\'), _CAST(\'other\', \'Enum8(\\\'censor.net\\\' = 1, \\\'google\\\' = 2, \\\'other\\\' = 3, \\\'yahoo\\\' = 4)\')) -FROM system.numbers -LIMIT 10 -QUERY id: 0 - PROJECTION COLUMNS - transform(number, NULL, [\'google\', \'censor.net\', \'yahoo\'], \'other\') Nullable(Nothing) - PROJECTION - LIST id: 1, nodes: 1 - FUNCTION id: 2, function_name: transform, function_type: ordinary, result_type: Nullable(Nothing) - ARGUMENTS - LIST id: 3, nodes: 4 - COLUMN id: 4, column_name: number, result_type: UInt64, source_id: 5 - CONSTANT id: 6, constant_value: NULL, constant_value_type: Nullable(Nothing) - CONSTANT id: 7, constant_value: Array_[\'google\', \'censor.net\', \'yahoo\'], constant_value_type: Array(String) - CONSTANT id: 8, constant_value: \'other\', constant_value_type: String - JOIN TREE - TABLE id: 5, table_name: system.numbers - LIMIT - CONSTANT id: 9, constant_value: UInt64_10, constant_value_type: 
UInt64 + CONSTANT id: 20, constant_value: UInt64_10, constant_value_type: UInt64 other other google diff --git a/tests/queries/0_stateless/02497_if_transform_strings_to_enum.sql b/tests/queries/0_stateless/02497_if_transform_strings_to_enum.sql index c23046c7b208..492d42cb6bc2 100644 --- a/tests/queries/0_stateless/02497_if_transform_strings_to_enum.sql +++ b/tests/queries/0_stateless/02497_if_transform_strings_to_enum.sql @@ -33,13 +33,13 @@ SELECT transform(number, [2, 4, 6], ['google', 'censor.net', 'yahoo'], 'other') EXPLAIN SYNTAX SELECT transform(number, [2, 4, 6], ['google', 'censor.net', 'yahoo'], 'other') as value, value FROM system.numbers LIMIT 10; EXPLAIN QUERY TREE run_passes = 1 SELECT transform(number, [2, 4, 6], ['google', 'censor.net', 'yahoo'], 'other') as value, value FROM system.numbers LIMIT 10; -SELECT transform(number, [NULL], ['google', 'censor.net', 'yahoo'], 'other') FROM (SELECT NULL as number FROM system.numbers LIMIT 10); +SELECT transform(number, [NULL], ['google', 'censor.net', 'yahoo'], 'other') FROM (SELECT NULL as number FROM system.numbers LIMIT 10); -- { serverError 36 } EXPLAIN SYNTAX SELECT transform(number, [NULL], ['google', 'censor.net', 'yahoo'], 'other') FROM (SELECT NULL as number FROM system.numbers LIMIT 10); EXPLAIN QUERY TREE run_passes = 1 SELECT transform(number, [NULL], ['google', 'censor.net', 'yahoo'], 'other') FROM (SELECT NULL as number FROM system.numbers LIMIT 10); -SELECT transform(number, NULL, ['google', 'censor.net', 'yahoo'], 'other') FROM system.numbers LIMIT 10; -EXPLAIN SYNTAX SELECT transform(number, NULL, ['google', 'censor.net', 'yahoo'], 'other') FROM system.numbers LIMIT 10; -EXPLAIN QUERY TREE run_passes = 1 SELECT transform(number, NULL, ['google', 'censor.net', 'yahoo'], 'other') FROM system.numbers LIMIT 10; +SELECT transform(number, NULL, ['google', 'censor.net', 'yahoo'], 'other') FROM system.numbers LIMIT 10; -- { serverError 43 } +EXPLAIN SYNTAX SELECT transform(number, NULL, ['google', 
'censor.net', 'yahoo'], 'other') FROM system.numbers LIMIT 10; -- { serverError 43 } +EXPLAIN QUERY TREE run_passes = 1 SELECT transform(number, NULL, ['google', 'censor.net', 'yahoo'], 'other') FROM system.numbers LIMIT 10; -- { serverError 43 } SET optimize_if_transform_strings_to_enum = 0; diff --git a/tests/queries/0_stateless/02516_join_with_totals_and_subquery_bug.reference b/tests/queries/0_stateless/02516_join_with_totals_and_subquery_bug.reference index fd0b223f8e57..19da8828c30e 100644 --- a/tests/queries/0_stateless/02516_join_with_totals_and_subquery_bug.reference +++ b/tests/queries/0_stateless/02516_join_with_totals_and_subquery_bug.reference @@ -2,6 +2,10 @@ 1 0 +1 +1 + +1 \N 100000000000000000000 diff --git a/tests/queries/0_stateless/02516_join_with_totals_and_subquery_bug.sql b/tests/queries/0_stateless/02516_join_with_totals_and_subquery_bug.sql index b6e60aa2e1f1..6b58d737a3ec 100644 --- a/tests/queries/0_stateless/02516_join_with_totals_and_subquery_bug.sql +++ b/tests/queries/0_stateless/02516_join_with_totals_and_subquery_bug.sql @@ -1,3 +1,5 @@ +SET allow_experimental_analyzer = 1; + SELECT * FROM ( @@ -12,7 +14,26 @@ INNER JOIN SELECT 1 GROUP BY 1 WITH TOTALS -) AS t2 USING (a); +) AS t2 USING (a) +SETTINGS allow_experimental_analyzer=0; + +SELECT * +FROM +( + SELECT 1 AS a +) AS t1 +INNER JOIN +( + SELECT 1 AS a + GROUP BY 1 + WITH TOTALS + UNION ALL + SELECT 1 + GROUP BY 1 + WITH TOTALS +) AS t2 USING (a) +SETTINGS allow_experimental_analyzer=1; + SELECT a FROM diff --git a/tests/queries/0_stateless/02542_case_no_else.reference b/tests/queries/0_stateless/02542_case_no_else.reference new file mode 100644 index 000000000000..8f3fdf29168a --- /dev/null +++ b/tests/queries/0_stateless/02542_case_no_else.reference @@ -0,0 +1,3 @@ +2 +1 Z +1 Z diff --git a/tests/queries/0_stateless/02542_case_no_else.sql b/tests/queries/0_stateless/02542_case_no_else.sql new file mode 100644 index 000000000000..0c7975a750ee --- /dev/null +++ 
b/tests/queries/0_stateless/02542_case_no_else.sql @@ -0,0 +1,14 @@ +SELECT CASE 1 WHEN 1 THEN 2 END; + +SELECT id, + CASE id + WHEN 1 THEN 'Z' + END x +FROM (SELECT 1 as id); + +SELECT id, + CASE id + WHEN 1 THEN 'Z' + ELSE 'X' + END x +FROM (SELECT 1 as id); diff --git a/tests/queries/0_stateless/02542_transform_new.reference b/tests/queries/0_stateless/02542_transform_new.reference new file mode 100644 index 000000000000..b6eaa692c416 --- /dev/null +++ b/tests/queries/0_stateless/02542_transform_new.reference @@ -0,0 +1,32 @@ +1 +1 +1 +1 +9 +9 +\N +7 +1 +9 +7 +b +b +b +b +a +a +\N +c +sep1 +80000 +80000 +sep2 +80000 +80000 +sep3 +1 +sep4 +8000 +sep5 +8000 +sep6 diff --git a/tests/queries/0_stateless/02542_transform_new.sql b/tests/queries/0_stateless/02542_transform_new.sql new file mode 100644 index 000000000000..43da0a507317 --- /dev/null +++ b/tests/queries/0_stateless/02542_transform_new.sql @@ -0,0 +1,35 @@ +select transform(2, [1,2], [9,1], materialize(null)); +select transform(2, [1,2], [9,1], materialize(7)); +select transform(2, [1,2], [9,1], null); +select transform(2, [1,2], [9,1], 7); +select transform(1, [1,2], [9,1], null); +select transform(1, [1,2], [9,1], 7); +select transform(5, [1,2], [9,1], null); +select transform(5, [1,2], [9,1], 7); +select transform(2, [1,2], [9,1]); +select transform(1, [1,2], [9,1]); +select transform(7, [1,2], [9,1]); + +select transform(2, [1,2], ['a','b'], materialize(null)); +select transform(2, [1,2], ['a','b'], materialize('c')); +select transform(2, [1,2], ['a','b'], null); +select transform(2, [1,2], ['a','b'], 'c'); +select transform(1, [1,2], ['a','b'], null); +select transform(1, [1,2], ['a','b'], 'c'); +select transform(5, [1,2], ['a','b'], null); +select transform(5, [1,2], ['a','b'], 'c'); + +select 'sep1'; +SELECT transform(number, [2], [toDecimal32(1, 1)], materialize(80000)) as x FROM numbers(2); +select 'sep2'; +SELECT transform(number, [2], [toDecimal32(1, 1)], 80000) as x FROM numbers(2); +select 
'sep3'; +SELECT transform(toDecimal32(2, 1), [toDecimal32(2, 1)], [1]); +select 'sep4'; +SELECT transform(8000, [1], [toDecimal32(2, 1)]); +select 'sep5'; +SELECT transform(toDecimal32(8000,0), [1], [toDecimal32(2, 1)]); +select 'sep6'; +SELECT transform(-9223372036854775807, [-1], [toDecimal32(1024, 3)]) FROM system.numbers LIMIT 7; -- { serverError BAD_ARGUMENTS } +SELECT [NULL, NULL, NULL, NULL], transform(number, [2147483648], [toDecimal32(1, 2)]) AS x FROM numbers(257) WHERE materialize(10); -- { serverError BAD_ARGUMENTS } +SELECT transform(-2147483649, [1], [toDecimal32(1, 2)]) GROUP BY [1] WITH TOTALS; -- { serverError BAD_ARGUMENTS } diff --git a/tests/queries/0_stateless/02542_transform_old.reference b/tests/queries/0_stateless/02542_transform_old.reference new file mode 100644 index 000000000000..d03b17d40a32 --- /dev/null +++ b/tests/queries/0_stateless/02542_transform_old.reference @@ -0,0 +1,72 @@ +google +other +yahoo +yandex +#1 +20 +21 +22 +29 +#2 +0 +1 +3 +5 +7 +8 +9 +20 +21 +29 +#3 +20 +21 +22 +29 +#4 +google +other +yahoo +yandex +#5 +0 +1 +3 +5 +7 +8 +9 +google +yahoo +yandex +---- +google +other +yahoo +yandex +#1 +20 +21 +22 +29 +#3 +20 +21 +22 +29 +#4 +google +other +yahoo +yandex +---- +2000 +2100 +2200 +2900 +#1 +2000 +2100 +2200 +2900 +---- diff --git a/tests/queries/0_stateless/02542_transform_old.sql b/tests/queries/0_stateless/02542_transform_old.sql new file mode 100644 index 000000000000..01a960ec3674 --- /dev/null +++ b/tests/queries/0_stateless/02542_transform_old.sql @@ -0,0 +1,25 @@ +SELECT transform(number, [2, 4, 6], ['google', 'yandex', 'yahoo'], 'other') as x FROM numbers(10) GROUP BY x ORDER BY x; +SELECT '#1'; +SELECT transform(number, [2, 4, 6], [29, 20, 21], 22) as x FROM numbers(10) GROUP BY x ORDER BY x; +SELECT '#2'; +SELECT transform(number, [2, 4, 6], [29, 20, 21]) as x FROM numbers(10) GROUP BY x ORDER BY x; +SELECT '#3'; +SELECT transform(toString(number), ['2', '4', '6'], [29, 20, 21], 22) as x FROM numbers(10) 
GROUP BY x ORDER BY x; +SELECT '#4'; +SELECT transform(toString(number), ['2', '4', '6'], ['google', 'yandex', 'yahoo'], 'other') as x FROM numbers(10) GROUP BY x ORDER BY x; +SELECT '#5'; +SELECT transform(toString(number), ['2', '4', '6'], ['google', 'yandex', 'yahoo']) as x FROM numbers(10) GROUP BY x ORDER BY x; +SELECT '----'; +SELECT transform(number, [2, 4, 6], ['google', 'yandex', 'yahoo'], materialize('other')) as x FROM numbers(10) GROUP BY x ORDER BY x; +SELECT '#1'; +SELECT transform(number, [2, 4, 6], [29, 20, 21], materialize(22)) as x FROM numbers(10) GROUP BY x ORDER BY x; +SELECT '#3'; +SELECT transform(toString(number), ['2', '4', '6'], [29, 20, 21], materialize(22)) as x FROM numbers(10) GROUP BY x ORDER BY x; +SELECT '#4'; +SELECT transform(toString(number), ['2', '4', '6'], ['google', 'yandex', 'yahoo'], materialize('other')) as x FROM numbers(10) GROUP BY x ORDER BY x; +SELECT '----'; +SELECT transform(number, [2, 4, 6], [2900, 2000, 2100], 2200) as x FROM numbers(10) GROUP BY x ORDER BY x; +SELECT '#1'; +SELECT transform(number, [2, 4, 6], [2900, 2000, 2100], materialize(2200)) as x FROM numbers(10) GROUP BY x ORDER BY x; +SELECT '----'; +SELECT transform(number, [1], [null]) FROM system.numbers LIMIT 1; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } diff --git a/tests/queries/0_stateless/02596_build_set_and_remote.reference b/tests/queries/0_stateless/02596_build_set_and_remote.reference new file mode 100644 index 000000000000..8d12196ae334 --- /dev/null +++ b/tests/queries/0_stateless/02596_build_set_and_remote.reference @@ -0,0 +1,19 @@ +-- {echoOn} +SELECT arrayExists(x -> (x IN (SELECT '2')), [2]) FROM system.one; +1 +SELECT arrayExists(x -> (x IN (SELECT '2')), [2]) FROM remote('127.0.0.{2,3}', system.one); +1 +1 +SELECT arrayExists(x -> (x IN (SELECT '2')), [2]) FROM remote('127.0.0.{2,3}', system.one) GROUP BY NULL; +1 +SELECT arrayExists(x -> (x IN (SELECT '2')), [2]) FROM remote('127.0.0.{2,3}', system.one) GROUP BY 1; +1 +SELECT 
arrayExists(x -> (x IN (SELECT '2')), [2]) FROM remote('127.0.0.{2,3}', system.one) GROUP BY 'A'; +1 +SELECT 1 IN ( SELECT 1 ) FROM remote('127.0.0.{1,2}', system.one) GROUP BY dummy; +1 +SELECT 1000.0001, toUInt64(arrayJoin([NULL, 257, 65536, NULL])), arrayExists(x -> (x IN (SELECT '2.55')), [-9223372036854775808]) FROM remote('127.0.0.{1,2}', system.one) GROUP BY NULL, NULL, NULL, NULL; +1000.0001 \N 0 +1000.0001 257 0 +1000.0001 65536 0 +1000.0001 \N 0 diff --git a/tests/queries/0_stateless/02596_build_set_and_remote.sql b/tests/queries/0_stateless/02596_build_set_and_remote.sql new file mode 100644 index 000000000000..7a904344c913 --- /dev/null +++ b/tests/queries/0_stateless/02596_build_set_and_remote.sql @@ -0,0 +1,14 @@ +-- {echoOn} +SELECT arrayExists(x -> (x IN (SELECT '2')), [2]) FROM system.one; + +SELECT arrayExists(x -> (x IN (SELECT '2')), [2]) FROM remote('127.0.0.{2,3}', system.one); + +SELECT arrayExists(x -> (x IN (SELECT '2')), [2]) FROM remote('127.0.0.{2,3}', system.one) GROUP BY NULL; + +SELECT arrayExists(x -> (x IN (SELECT '2')), [2]) FROM remote('127.0.0.{2,3}', system.one) GROUP BY 1; + +SELECT arrayExists(x -> (x IN (SELECT '2')), [2]) FROM remote('127.0.0.{2,3}', system.one) GROUP BY 'A'; + +SELECT 1 IN ( SELECT 1 ) FROM remote('127.0.0.{1,2}', system.one) GROUP BY dummy; + +SELECT 1000.0001, toUInt64(arrayJoin([NULL, 257, 65536, NULL])), arrayExists(x -> (x IN (SELECT '2.55')), [-9223372036854775808]) FROM remote('127.0.0.{1,2}', system.one) GROUP BY NULL, NULL, NULL, NULL; diff --git a/tests/queries/0_stateless/02661_quantile_approx.reference b/tests/queries/0_stateless/02661_quantile_approx.reference index f4e66adc8d91..8369363aa9b2 100644 --- a/tests/queries/0_stateless/02661_quantile_approx.reference +++ b/tests/queries/0_stateless/02661_quantile_approx.reference @@ -19,8 +19,10 @@ select quantilesGK(1000, 100/1000, 200/1000, 250/1000, 314/1000, 777/1000)(numbe [99,199,249,313,776] select quantilesGK(10000, 100/1000, 200/1000, 
250/1000, 314/1000, 777/1000)(number + 1) from numbers(1000); [100,200,250,314,777] -select medianGK()(number) from numbers(10); -- { serverError BAD_ARGUMENTS } -select quantileGK()(number) from numbers(10); -- { serverError BAD_ARGUMENTS } +select medianGK()(number) from numbers(10) SETTINGS allow_experimental_analyzer = 0; -- { serverError BAD_ARGUMENTS } +select medianGK()(number) from numbers(10) SETTINGS allow_experimental_analyzer = 1; -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +select quantileGK()(number) from numbers(10) SETTINGS allow_experimental_analyzer = 0; -- { serverError BAD_ARGUMENTS } +select quantileGK()(number) from numbers(10) SETTINGS allow_experimental_analyzer = 1; -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } select medianGK(100)(number) from numbers(10); 4 select quantileGK(100)(number) from numbers(10); @@ -31,7 +33,8 @@ select quantileGK(100, 0.5, 0.75)(number) from numbers(10); -- { serverError NUM select quantileGK('abc', 0.5)(number) from numbers(10); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } select quantileGK(1.23, 0.5)(number) from numbers(10); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } select quantileGK(-100, 0.5)(number) from numbers(10); -- { serverError BAD_ARGUMENTS } -select quantilesGK()(number) from numbers(10); -- { serverError BAD_ARGUMENTS } +select quantilesGK()(number) from numbers(10) SETTINGS allow_experimental_analyzer = 0; -- { serverError BAD_ARGUMENTS } +select quantilesGK()(number) from numbers(10) SETTINGS allow_experimental_analyzer = 1; -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } select quantilesGK(100)(number) from numbers(10); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } select quantilesGK(100, 0.5)(number) from numbers(10); [4] diff --git a/tests/queries/0_stateless/02661_quantile_approx.sql b/tests/queries/0_stateless/02661_quantile_approx.sql index 18c2e5de84b0..52c2979ad444 100644 --- a/tests/queries/0_stateless/02661_quantile_approx.sql +++ 
b/tests/queries/0_stateless/02661_quantile_approx.sql @@ -1,3 +1,5 @@ +set allow_experimental_analyzer = 1; + -- { echoOn } with arrayJoin([0, 1, 2, 10]) as x select quantilesGK(100, 0.5, 0.4, 0.1)(x); with arrayJoin([0, 6, 7, 9, 10]) as x select quantileGK(100, 0.5)(x); @@ -14,8 +16,12 @@ select quantilesGK(1000, 100/1000, 200/1000, 250/1000, 314/1000, 777/1000)(numbe select quantilesGK(10000, 100/1000, 200/1000, 250/1000, 314/1000, 777/1000)(number + 1) from numbers(1000); -select medianGK()(number) from numbers(10); -- { serverError BAD_ARGUMENTS } -select quantileGK()(number) from numbers(10); -- { serverError BAD_ARGUMENTS } +select medianGK()(number) from numbers(10) SETTINGS allow_experimental_analyzer = 0; -- { serverError BAD_ARGUMENTS } +select medianGK()(number) from numbers(10) SETTINGS allow_experimental_analyzer = 1; -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } + +select quantileGK()(number) from numbers(10) SETTINGS allow_experimental_analyzer = 0; -- { serverError BAD_ARGUMENTS } +select quantileGK()(number) from numbers(10) SETTINGS allow_experimental_analyzer = 1; -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } + select medianGK(100)(number) from numbers(10); select quantileGK(100)(number) from numbers(10); select quantileGK(100, 0.5)(number) from numbers(10); @@ -24,7 +30,9 @@ select quantileGK('abc', 0.5)(number) from numbers(10); -- { serverError ILLEGAL select quantileGK(1.23, 0.5)(number) from numbers(10); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } select quantileGK(-100, 0.5)(number) from numbers(10); -- { serverError BAD_ARGUMENTS } -select quantilesGK()(number) from numbers(10); -- { serverError BAD_ARGUMENTS } +select quantilesGK()(number) from numbers(10) SETTINGS allow_experimental_analyzer = 0; -- { serverError BAD_ARGUMENTS } +select quantilesGK()(number) from numbers(10) SETTINGS allow_experimental_analyzer = 1; -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } + select quantilesGK(100)(number) from numbers(10); -- { 
serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } select quantilesGK(100, 0.5)(number) from numbers(10); select quantilesGK('abc', 0.5, 0.75)(number) from numbers(10); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } diff --git a/tests/queries/0_stateless/02677_analyzer_bitmap_has_any.sql b/tests/queries/0_stateless/02677_analyzer_bitmap_has_any.sql index 4af06634c665..f0f9845d91d4 100644 --- a/tests/queries/0_stateless/02677_analyzer_bitmap_has_any.sql +++ b/tests/queries/0_stateless/02677_analyzer_bitmap_has_any.sql @@ -18,7 +18,7 @@ FROM bitmapHasAny(bitmapBuild([toUInt64(1)]), ( SELECT groupBitmapState(toUInt64(2)) )) has2 -); -- { serverError 43 } +) SETTINGS allow_experimental_analyzer = 0; -- { serverError 43 } SELECT '--------------'; diff --git a/tests/queries/0_stateless/02680_mysql_ast_logical_err.sql b/tests/queries/0_stateless/02680_mysql_ast_logical_err.sql index 5b0530e05ae7..bde91df83ca9 100644 --- a/tests/queries/0_stateless/02680_mysql_ast_logical_err.sql +++ b/tests/queries/0_stateless/02680_mysql_ast_logical_err.sql @@ -1,2 +1,4 @@ +CREATE TABLE foo (key UInt32, a String, b Int64, c String) ENGINE = TinyLog; + SELECT count() FROM mysql(mysql('127.0.0.1:9004', currentDatabase(), 'foo', 'default', ''), '127.0.0.1:9004', currentDatabase(), 'foo', '', ''); -- { serverError UNKNOWN_FUNCTION } --- SELECT count() FROM mysql(mysql('127.0.0.1:9004', currentDatabase(), 'foo', 'default', '', SETTINGS connection_pool_size = 1), '127.0.0.1:9004', currentDatabase(), 'foo', '', ''); -- { serverError UNKNOWN_FUNCTION } +SELECT count() FROM mysql(mysql('127.0.0.1:9004', currentDatabase(), 'foo', 'default', '', SETTINGS connection_pool_size = 1), '127.0.0.1:9004', currentDatabase(), 'foo', '', ''); -- { serverError UNKNOWN_FUNCTION, UNSUPPORTED_METHOD } diff --git a/tests/queries/0_stateless/02699_polygons_sym_difference_rollup.reference b/tests/queries/0_stateless/02699_polygons_sym_difference_rollup.reference index 346025b277b2..35c94347ac95 100644 --- 
a/tests/queries/0_stateless/02699_polygons_sym_difference_rollup.reference +++ b/tests/queries/0_stateless/02699_polygons_sym_difference_rollup.reference @@ -2,6 +2,8 @@ [] [[(2147483647,0),(10.0001,65535),(1,255),(1023,2147483646)]] [[[(2147483647,0),(10.0001,65535),(1023,2147483646),(2147483647,0)]]] [[(2147483647,0),(10.0001,65535),(1,255),(1023,2147483646)]] [] +[[(2147483647,0),(10.0001,65535),(1,255),(1023,2147483646)]] [[[(2147483647,0),(10.0001,65535),(1023,2147483646),(2147483647,0)]]] +[[(2147483647,0),(10.0001,65535),(1,255),(1023,2147483646)]] [[[(2147483647,0),(10.0001,65535),(1023,2147483646),(2147483647,0)]]] [[[(100.0001,1000.0001),(1000.0001,1.1920928955078125e-7),(20,-20),(20,20),(10,10),(-20,20),(100.0001,1000.0001)]]] [[[(100.0001,1000.0001),(1000.0001,1.1920928955078125e-7),(20,-20),(20,20),(10,10),(-20,20),(100.0001,1000.0001)]]] [(9223372036854775807,1.1754943508222875e-38)] [[(1,1.0001)]] \N [] diff --git a/tests/queries/0_stateless/02699_polygons_sym_difference_rollup.sql b/tests/queries/0_stateless/02699_polygons_sym_difference_rollup.sql index 8b9b63f7996f..85307bec6e59 100644 --- a/tests/queries/0_stateless/02699_polygons_sym_difference_rollup.sql +++ b/tests/queries/0_stateless/02699_polygons_sym_difference_rollup.sql @@ -1,5 +1,5 @@ - SELECT polygonsSymDifferenceCartesian([[[(1., 1.)]] AS x], [x]) GROUP BY x WITH ROLLUP; -SELECT [[(2147483647, 0.), (10.0001, 65535), (1, 255), (1023, 2147483646)]], polygonsSymDifferenceCartesian([[[(2147483647, 0.), (10.0001, 65535), (1023, 2147483646)]]], [[[(1000.0001, 10.0001)]]]) GROUP BY [[(2147483647, 0.), (10.0001, 65535), (1023, 2147483646)]] WITH ROLLUP; +SELECT [[(2147483647, 0.), (10.0001, 65535), (1, 255), (1023, 2147483646)]], polygonsSymDifferenceCartesian([[[(2147483647, 0.), (10.0001, 65535), (1023, 2147483646)]]], [[[(1000.0001, 10.0001)]]]) GROUP BY [[(2147483647, 0.), (10.0001, 65535), (1023, 2147483646)]] WITH ROLLUP SETTINGS allow_experimental_analyzer=0; +SELECT [[(2147483647, 0.), 
(10.0001, 65535), (1, 255), (1023, 2147483646)]], polygonsSymDifferenceCartesian([[[(2147483647, 0.), (10.0001, 65535), (1023, 2147483646)]]], [[[(1000.0001, 10.0001)]]]) GROUP BY [[(2147483647, 0.), (10.0001, 65535), (1023, 2147483646)]] WITH ROLLUP SETTINGS allow_experimental_analyzer=1; SELECT polygonsSymDifferenceCartesian([[[(100.0001, 1000.0001), (-20., 20.), (10., 10.), (20., 20.), (20., -20.), (1000.0001, 1.1920928955078125e-7)]],[[(0.0001, 100000000000000000000.)]] AS x],[x]) GROUP BY x WITH ROLLUP; SELECT [(9223372036854775807, 1.1754943508222875e-38)], x, NULL, polygonsSymDifferenceCartesian([[[(1.1754943508222875e-38, 1.1920928955078125e-7), (0.5, 0.5)]], [[(1.1754943508222875e-38, 1.1920928955078125e-7), (1.1754943508222875e-38, 1.1920928955078125e-7)], [(0., 1.0001)]], [[(1., 1.0001)]] AS x], [[[(3.4028234663852886e38, 0.9999)]]]) GROUP BY GROUPING SETS ((x)) WITH TOTALS diff --git a/tests/queries/0_stateless/02703_jit_external_aggregation.reference b/tests/queries/0_stateless/02703_jit_external_aggregation.reference index cdeec60f4efa..9c558e357c41 100644 --- a/tests/queries/0_stateless/02703_jit_external_aggregation.reference +++ b/tests/queries/0_stateless/02703_jit_external_aggregation.reference @@ -1 +1 @@ -..... +. diff --git a/tests/queries/0_stateless/02703_jit_external_aggregation.sh b/tests/queries/0_stateless/02703_jit_external_aggregation.sh index 2d1dda45de03..4bc17c106fba 100755 --- a/tests/queries/0_stateless/02703_jit_external_aggregation.sh +++ b/tests/queries/0_stateless/02703_jit_external_aggregation.sh @@ -5,11 +5,8 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -# This query should return empty result in every of five runs: - -for _ in {1..5} -do - $CLICKHOUSE_CLIENT --compile_aggregate_expressions 0 --query " +# This query should return empty result +$CLICKHOUSE_CLIENT --compile_aggregate_expressions 1 --min_count_to_compile_aggregate_expression=0 --query " SELECT COUNT() AS c, group_key, @@ -30,6 +27,5 @@ ORDER BY group_key ASC LIMIT 10 SETTINGS max_bytes_before_external_group_by = 200000 " && echo -n '.' -done echo diff --git a/tests/queries/0_stateless/02707_complex_query_fails_analyzer.reference b/tests/queries/0_stateless/02707_complex_query_fails_analyzer.reference deleted file mode 100644 index 192f8aa904a1..000000000000 --- a/tests/queries/0_stateless/02707_complex_query_fails_analyzer.reference +++ /dev/null @@ -1,10 +0,0 @@ -1 1 -59.952 -1 2 59.952 -1 3 -100 -2 1 -93.7611 -2 2 93.7611 -3 1 0 -3 2 0 ---------- -0 -0 diff --git a/tests/queries/0_stateless/02707_complex_query_fails_analyzer.sql b/tests/queries/0_stateless/02707_complex_query_fails_analyzer.sql deleted file mode 100644 index a9d83479d508..000000000000 --- a/tests/queries/0_stateless/02707_complex_query_fails_analyzer.sql +++ /dev/null @@ -1,117 +0,0 @@ -DROP TABLE IF EXISTS srv_account_parts; -DROP TABLE IF EXISTS etl_batch; - -CREATE TABLE srv_account_parts( - shard_num UInt16, - account_ids Array(Int64) -)ENGINE = ReplacingMergeTree -ORDER BY shard_num -as select * from values ((0,[]),(1,[1,2,3]),(2,[1,2,3]),(3,[1])); - -CREATE TABLE etl_batch( - batch_id UInt64, - batch_start DateTime, - batch_start_day Date DEFAULT toDate(batch_start), - batch_load DateTime, - total_num_records UInt32, - etl_server_id Int32, - account_id UInt64, - shard_num UInt16 -)ENGINE = ReplacingMergeTree -PARTITION BY toYYYYMM(batch_start_day) -ORDER BY (batch_id, etl_server_id, account_id); - -insert into etl_batch(batch_id, batch_start, batch_load, total_num_records, etl_server_id, account_id, shard_num) -select number batch_id, - 
toDateTime('2022-01-01') + INTERVAL 23 HOUR batch_start, - batch_start batch_load, - 333 total_num_records, - 1 etl_server_id, - number%3+1 account_id, - 1 shard_num -from numbers(1000); - -insert into etl_batch(batch_id, batch_start, batch_load, total_num_records, etl_server_id, account_id, shard_num) -select number+2000 batch_id, - toDateTime('2022-01-01') + INTERVAL 23 HOUR batch_start, - batch_start batch_load, - 333 total_num_records, - 1 etl_server_id, - number%3+1 account_id, - 2 shard_num -from numbers(1000); - -insert into etl_batch(batch_id, batch_start, batch_load, total_num_records, etl_server_id, account_id, shard_num) -select number+4000 batch_id, - toDateTime('2022-01-01') + INTERVAL 3 HOUR batch_start, - batch_start batch_load, - 3333 total_num_records, - 1 etl_server_id, - 2 account_id, - 2 shard_num -from numbers(1000); - -insert into etl_batch(batch_id, batch_start, batch_load, total_num_records, etl_server_id, account_id, shard_num) -select number+6000 batch_id, - toDateTime('2022-01-01') + INTERVAL 23 HOUR batch_start, - batch_start batch_load, - 333 total_num_records, - 1 etl_server_id, - 1 account_id, - 2 shard_num -from numbers(1000); - -insert into etl_batch(batch_id, batch_start, batch_load, total_num_records, etl_server_id, account_id, shard_num) -select number+8000 batch_id, - toDateTime('2022-01-01') + INTERVAL 23 HOUR batch_start, - batch_start batch_load, - 1000 total_num_records, - 1 etl_server_id, - 3 account_id, - 3 shard_num -from numbers(1000); - -CREATE OR REPLACE VIEW v_num_records_by_node_bias_acc as -SELECT shard_num, - arrayJoin(account_ids) AS account_id, - records_24h, - records_12h, - IF (b = '',-100,xbias) AS bias, - IF (bias > 10,0,IF (bias > 0,1,IF (bias < -10,301,300))) AS sbias -FROM srv_account_parts - LEFT JOIN (SELECT account_id, - shard_num, - records_24h, - records_12h, - xbias, - 'b' AS b - FROM (SELECT account_id, - groupArray((shard_num,records_24h,records_12h)) AS ga, - arraySum(ga.2) AS tot24, - 
arraySum(ga.3) AS tot12, - arrayMap(i ->(((((i.2)*LENGTH(ga))*100) / tot24) - 100),ga) AS bias24, - arrayMap(i ->(((((i.3)*LENGTH(ga))*100) / tot12) - 100),ga) AS bias12, - arrayMap((i,j,k) ->(i,IF (tot12 = 0,0,IF (ABS(j) > ABS(k),j,k))),ga,bias24,bias12) AS a_bias - FROM (SELECT shard_num, - toInt64(account_id) AS account_id, - SUM(total_num_records) AS records_24h, - sumIf(total_num_records,batch_load >(toDateTime('2022-01-02') -(3600*12))) AS records_12h - FROM etl_batch FINAL PREWHERE (batch_start_day >= (toDate('2022-01-02') - 2)) AND (batch_load > (toDateTime('2022-01-02') - (3600*24))) - where (shard_num, account_id) in (select shard_num, arrayJoin(account_ids) from srv_account_parts) - GROUP BY shard_num,account_id) - GROUP BY account_id) - ARRAY JOIN (a_bias.1).1 AS shard_num,a_bias.2 AS xbias, (a_bias.1).2 AS records_24h, (a_bias.1).3 AS records_12h - ) s USING (shard_num,account_id); - -select account_id, shard_num, round(bias,4) -from v_num_records_by_node_bias_acc -order by account_id, shard_num, bias; - -select '---------'; - -SELECT a AS b FROM (SELECT 0 a) s LEFT JOIN (SELECT 0 b) t USING (b); - -SELECT arrayJoin(a) AS b FROM (SELECT [0] a) s LEFT JOIN (SELECT 0 b) t USING (b); - -DROP TABLE srv_account_parts; -DROP TABLE etl_batch; diff --git a/tests/queries/0_stateless/02713_create_user_substitutions.reference b/tests/queries/0_stateless/02713_create_user_substitutions.reference new file mode 100644 index 000000000000..f9b5cc495b5b --- /dev/null +++ b/tests/queries/0_stateless/02713_create_user_substitutions.reference @@ -0,0 +1,11 @@ +1 +2 +3 +4 +5 +6 +7 +8 +CREATE USER user9_02713 IDENTIFIED WITH ldap SERVER \'qwerty9\' +CREATE USER user10_02713 IDENTIFIED WITH kerberos REALM \'qwerty10\' +CREATE USER user11_02713 IDENTIFIED WITH ssl_certificate CN \'qwerty11\', \'qwerty12\' diff --git a/tests/queries/0_stateless/02713_create_user_substitutions.sh b/tests/queries/0_stateless/02713_create_user_substitutions.sh new file mode 100755 index 
000000000000..42926335acbb --- /dev/null +++ b/tests/queries/0_stateless/02713_create_user_substitutions.sh @@ -0,0 +1,43 @@ +#!/usr/bin/env bash +# Tags: no-fasttest, no-parallel + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + +$CLICKHOUSE_CLIENT -q "DROP USER IF EXISTS user1_02713, user2_02713, user3_02713, user4_02713, user5_02713, user6_02713, user7_02713"; + +$CLICKHOUSE_CLIENT --param_password=qwerty1 -q "CREATE USER user1_02713 IDENTIFIED BY {password:String}"; +$CLICKHOUSE_CLIENT --param_password=qwerty2 -q "CREATE USER user2_02713 IDENTIFIED WITH PLAINTEXT_PASSWORD BY {password:String}"; +$CLICKHOUSE_CLIENT --param_password=qwerty3 -q "CREATE USER user3_02713 IDENTIFIED WITH SHA256_PASSWORD BY {password:String}"; +$CLICKHOUSE_CLIENT --param_password=qwerty4 -q "CREATE USER user4_02713 IDENTIFIED WITH DOUBLE_SHA1_PASSWORD BY {password:String}"; +$CLICKHOUSE_CLIENT --param_password=qwerty5 -q "CREATE USER user5_02713 IDENTIFIED WITH BCRYPT_PASSWORD BY {password:String}"; + +# Generated online +$CLICKHOUSE_CLIENT --param_hash=310cef2caff72c0224f38ca8e2141ca6012cd4da550c692573c25a917d9a75e6 \ + -q "CREATE USER user6_02713 IDENTIFIED WITH SHA256_HASH BY {hash:String}"; +# Generated with ClickHouse +$CLICKHOUSE_CLIENT --param_hash=5886A74C452575627522F3A80D8B9E239FD8955F \ + -q "CREATE USER user7_02713 IDENTIFIED WITH DOUBLE_SHA1_HASH BY {hash:String}"; +# Generated online +$CLICKHOUSE_CLIENT --param_hash=\$2a\$12\$wuohz0HFSBBNE8huN0Yx6.kmWrefiYVKeMp4gsuNoO1rOWwF2FXXC \ + -q "CREATE USER user8_02713 IDENTIFIED WITH BCRYPT_HASH BY {hash:String}"; + +$CLICKHOUSE_CLIENT --param_server=qwerty9 -q "CREATE USER user9_02713 IDENTIFIED WITH LDAP SERVER {server:String}"; +$CLICKHOUSE_CLIENT --param_realm=qwerty10 -q "CREATE USER user10_02713 IDENTIFIED WITH KERBEROS REALM {realm:String}"; +$CLICKHOUSE_CLIENT --param_cert1=qwerty11 --param_cert2=qwerty12 -q "CREATE USER user11_02713 
IDENTIFIED WITH SSL_CERTIFICATE CN {cert1:String}, {cert2:String}"; + +$CLICKHOUSE_CLIENT --user=user1_02713 --password=qwerty1 -q "SELECT 1"; +$CLICKHOUSE_CLIENT --user=user2_02713 --password=qwerty2 -q "SELECT 2"; +$CLICKHOUSE_CLIENT --user=user3_02713 --password=qwerty3 -q "SELECT 3"; +$CLICKHOUSE_CLIENT --user=user4_02713 --password=qwerty4 -q "SELECT 4"; +$CLICKHOUSE_CLIENT --user=user5_02713 --password=qwerty5 -q "SELECT 5"; +$CLICKHOUSE_CLIENT --user=user6_02713 --password=qwerty6 -q "SELECT 6"; +$CLICKHOUSE_CLIENT --user=user7_02713 --password=qwerty7 -q "SELECT 7"; +$CLICKHOUSE_CLIENT --user=user8_02713 --password=qwerty8 -q "SELECT 8"; + +$CLICKHOUSE_CLIENT -q "SHOW CREATE USER user9_02713"; +$CLICKHOUSE_CLIENT -q "SHOW CREATE USER user10_02713"; +$CLICKHOUSE_CLIENT -q "SHOW CREATE USER user11_02713"; + +$CLICKHOUSE_CLIENT -q "DROP USER user1_02713, user2_02713, user3_02713, user4_02713, user5_02713, user6_02713, user7_02713, user8_02713, user9_02713, user10_02713, user11_02713"; diff --git a/tests/queries/0_stateless/02713_sequence_match_serialization_fix.reference b/tests/queries/0_stateless/02713_sequence_match_serialization_fix.reference new file mode 100644 index 000000000000..2a1c127e635a --- /dev/null +++ b/tests/queries/0_stateless/02713_sequence_match_serialization_fix.reference @@ -0,0 +1,3 @@ +serialized state is not used 1 +serialized state is used 1 +via Distributed 1 diff --git a/tests/queries/0_stateless/02713_sequence_match_serialization_fix.sql b/tests/queries/0_stateless/02713_sequence_match_serialization_fix.sql new file mode 100644 index 000000000000..3521cb8470fc --- /dev/null +++ b/tests/queries/0_stateless/02713_sequence_match_serialization_fix.sql @@ -0,0 +1,36 @@ +DROP TABLE IF EXISTS 02713_seqt; +DROP TABLE IF EXISTS 02713_seqt_distr; + +SELECT + 'serialized state is not used', sequenceMatch('(?1)(?2)')(time, number_ = 1, number_ = 0) AS seq +FROM +( + SELECT + number AS time, + number % 2 AS number_ + FROM numbers_mt(100) +); + 
+ +CREATE TABLE 02713_seqt +ENGINE = MergeTree +ORDER BY n AS +SELECT + sequenceMatchState('(?1)(?2)')(time, number_ = 1, number_ = 0) AS seq, + 1 AS n +FROM +( + SELECT + number AS time, + number % 2 AS number_ + FROM numbers_mt(100) +); + + +SELECT 'serialized state is used', sequenceMatchMerge('(?1)(?2)')(seq) AS seq +FROM 02713_seqt; + + +CREATE TABLE 02713_seqt_distr ( seq AggregateFunction(sequenceMatch('(?1)(?2)'), UInt64, UInt8, UInt8) , n UInt8) ENGINE = Distributed(test_shard_localhost, currentDatabase(), '02713_seqt'); + +SELECT 'via Distributed', sequenceMatchMerge('(?1)(?2)')(seq) AS seq FROM 02713_seqt_distr; diff --git a/tests/queries/0_stateless/02720_s3_strict_upload_part_size.reference b/tests/queries/0_stateless/02720_s3_strict_upload_part_size.reference new file mode 100644 index 000000000000..360b484bf28d --- /dev/null +++ b/tests/queries/0_stateless/02720_s3_strict_upload_part_size.reference @@ -0,0 +1,4 @@ +Size: 6000001 +Size: 6000001 +Size: 6000001 +Size: 2971517 diff --git a/tests/queries/0_stateless/02720_s3_strict_upload_part_size.sh b/tests/queries/0_stateless/02720_s3_strict_upload_part_size.sh new file mode 100755 index 000000000000..69e2f7349144 --- /dev/null +++ b/tests/queries/0_stateless/02720_s3_strict_upload_part_size.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash +# Tags: no-fasttest, long +# Tag no-fasttest: requires S3 + +CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CUR_DIR"/../shell_config.sh + +in="$CUR_DIR/$CLICKHOUSE_TEST_UNIQUE_NAME.in" +out="$CUR_DIR/$CLICKHOUSE_TEST_UNIQUE_NAME.out" +log="$CUR_DIR/$CLICKHOUSE_TEST_UNIQUE_NAME.log" + +set -e +trap 'rm -f "${out:?}" "${in:?}" "${log:?}"' EXIT + +# Generate a file of 20MiB in size, with our part size it will have 4 parts +# NOTE: 1 byte is for new line, so 1023 not 1024 +$CLICKHOUSE_LOCAL -q "SELECT randomPrintableASCII(1023) FROM numbers(20*1024) FORMAT LineAsString" > "$in" + +$CLICKHOUSE_CLIENT --send_logs_level=trace --server_logs_file="$log" -q "INSERT INTO FUNCTION s3(s3_conn, filename='$CLICKHOUSE_TEST_UNIQUE_NAME', format='LineAsString', structure='line String') FORMAT LineAsString" --s3_strict_upload_part_size=6000001 < "$in" +grep -F '' "$log" || : +grep -o 'WriteBufferFromS3: Writing part.*Size: .*' "$log" | grep -o 'Size: .*' +$CLICKHOUSE_CLIENT -q "SELECT * FROM s3(s3_conn, filename='$CLICKHOUSE_TEST_UNIQUE_NAME', format='LineAsString', structure='line String') FORMAT LineAsString" > "$out" + +diff -q "$in" "$out" diff --git a/tests/queries/0_stateless/02723_jit_aggregation_bug_48120.reference b/tests/queries/0_stateless/02723_jit_aggregation_bug_48120.reference new file mode 100644 index 000000000000..6f9b4b4fc6a7 --- /dev/null +++ b/tests/queries/0_stateless/02723_jit_aggregation_bug_48120.reference @@ -0,0 +1,7 @@ +-- { echoOn } +SYSTEM DROP COMPILED EXPRESSION CACHE; +SELECT minIf(num1, num1 < 5) FROM dummy GROUP BY num2; +0 +SYSTEM DROP COMPILED EXPRESSION CACHE; +SELECT minIf(num1, num1 >= 5) FROM dummy GROUP BY num2; +5 diff --git a/tests/queries/0_stateless/02723_jit_aggregation_bug_48120.sql b/tests/queries/0_stateless/02723_jit_aggregation_bug_48120.sql new file mode 100644 index 000000000000..04e0fc5e0ba5 --- /dev/null +++ b/tests/queries/0_stateless/02723_jit_aggregation_bug_48120.sql @@ -0,0 +1,17 @@ +-- Tags: no-fasttest, no-ubsan, no-cpu-aarch64 + +drop table if exists dummy; +CREATE TABLE dummy ( num1 Int32, num2 Enum8('foo' = 0, 'bar' = 1, 
'tar' = 2) ) +ENGINE = MergeTree ORDER BY num1 as select 5, 'bar'; + +set compile_aggregate_expressions=1; +set min_count_to_compile_aggregate_expression=0; + +-- { echoOn } +SYSTEM DROP COMPILED EXPRESSION CACHE; +SELECT minIf(num1, num1 < 5) FROM dummy GROUP BY num2; +SYSTEM DROP COMPILED EXPRESSION CACHE; +SELECT minIf(num1, num1 >= 5) FROM dummy GROUP BY num2; +-- { echoOff } + +drop table dummy; diff --git a/tests/queries/0_stateless/02724_decompress_filename_exception.reference b/tests/queries/0_stateless/02724_decompress_filename_exception.reference new file mode 100644 index 000000000000..f9c5aacff7be --- /dev/null +++ b/tests/queries/0_stateless/02724_decompress_filename_exception.reference @@ -0,0 +1,8 @@ +Ok +Ok +Ok +Ok +Ok +Ok +Ok +Ok diff --git a/tests/queries/0_stateless/02724_decompress_filename_exception.sh b/tests/queries/0_stateless/02724_decompress_filename_exception.sh new file mode 100755 index 000000000000..bbc2b8d066ba --- /dev/null +++ b/tests/queries/0_stateless/02724_decompress_filename_exception.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash +# Tags: no-fasttest, no-parallel + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + +USER_FILES_PATH=$(clickhouse-client --query "select _path,_file from file('nonexist.txt', 'CSV', 'val1 char')" 2>&1 | grep Exception | awk '{gsub("/nonexist.txt","",$9); print $9}') +FILENAME="${USER_FILES_PATH}/corrupted_file.tsv.xx" + +echo 'corrupted file' > $FILENAME; + +$CLICKHOUSE_CLIENT --query "SELECT * FROM file('${FILENAME}', 'TSV', 'c UInt32', 'gzip')" 2>&1 | grep -q "While reading from: $FILENAME" && echo 'Ok' || echo 'Fail'; +$CLICKHOUSE_CLIENT --query "SELECT * FROM file('${FILENAME}', 'TSV', 'c UInt32', 'deflate')" 2>&1 | grep -q "While reading from: $FILENAME" && echo 'Ok' || echo 'Fail'; +$CLICKHOUSE_CLIENT --query "SELECT * FROM file('${FILENAME}', 'TSV', 'c UInt32', 'br')" 2>&1 | grep -q "While reading from: $FILENAME" && echo 'Ok' || echo 'Fail'; +$CLICKHOUSE_CLIENT --query "SELECT * FROM file('${FILENAME}', 'TSV', 'c UInt32', 'xz')" 2>&1 | grep -q "While reading from: $FILENAME" && echo 'Ok' || echo 'Fail'; +$CLICKHOUSE_CLIENT --query "SELECT * FROM file('${FILENAME}', 'TSV', 'c UInt32', 'zstd')" 2>&1 | grep -q "While reading from: $FILENAME" && echo 'Ok' || echo 'Fail'; +$CLICKHOUSE_CLIENT --query "SELECT * FROM file('${FILENAME}', 'TSV', 'c UInt32', 'lz4')" 2>&1 | grep -q "While reading from: $FILENAME" && echo 'Ok' || echo 'Fail'; +$CLICKHOUSE_CLIENT --query "SELECT * FROM file('${FILENAME}', 'TSV', 'c UInt32', 'bz2')" 2>&1 | grep -q "While reading from: $FILENAME" && echo 'Ok' || echo 'Fail'; +$CLICKHOUSE_CLIENT --query "SELECT * FROM file('${FILENAME}', 'TSV', 'c UInt32', 'snappy')" 2>&1 | grep -q "While reading from: $FILENAME" && echo 'Ok' || echo 'Fail'; + +rm $FILENAME; diff --git a/tests/queries/0_stateless/02724_function_in_left_table_clause_asof_join.reference b/tests/queries/0_stateless/02724_function_in_left_table_clause_asof_join.reference new file mode 100644 index 000000000000..d00491fd7e5b --- /dev/null +++ b/tests/queries/0_stateless/02724_function_in_left_table_clause_asof_join.reference @@ 
-0,0 +1 @@ +1 diff --git a/tests/queries/0_stateless/02724_function_in_left_table_clause_asof_join.sql b/tests/queries/0_stateless/02724_function_in_left_table_clause_asof_join.sql new file mode 100644 index 000000000000..13dfb5debe7a --- /dev/null +++ b/tests/queries/0_stateless/02724_function_in_left_table_clause_asof_join.sql @@ -0,0 +1,8 @@ +select count(*) +from ( + select 1 as id, [1, 2, 3] as arr +) as sessions +ASOF LEFT JOIN ( + select 1 as session_id, 4 as id +) as visitors +ON visitors.session_id <= sessions.id AND arrayFirst(a -> a, arrayMap((a) -> a, sessions.arr)) = visitors.id diff --git a/tests/queries/0_stateless/02724_jit_logical_functions.reference b/tests/queries/0_stateless/02724_jit_logical_functions.reference new file mode 100644 index 000000000000..673ffe02613d --- /dev/null +++ b/tests/queries/0_stateless/02724_jit_logical_functions.reference @@ -0,0 +1,18 @@ +Logical functions not null +0 0 0 0 0 +0 1 0 1 1 +1 0 0 1 1 +1 1 1 1 0 +Logical functions nullable +0 0 0 0 0 +0 1 0 1 1 +1 0 0 1 1 +1 1 1 1 0 +0 \N 0 \N \N +1 \N \N 1 \N +0 0 0 +1 1 0 +0 0 0 +1 1 0 +\N \N \N +\N \N \N diff --git a/tests/queries/0_stateless/02724_jit_logical_functions.sql b/tests/queries/0_stateless/02724_jit_logical_functions.sql new file mode 100644 index 000000000000..fe6646337d04 --- /dev/null +++ b/tests/queries/0_stateless/02724_jit_logical_functions.sql @@ -0,0 +1,21 @@ +SET compile_expressions = 1; +SET min_count_to_compile_expression = 0; + +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table (a UInt8, b UInt8) ENGINE = TinyLog; +INSERT INTO test_table VALUES (0, 0), (0, 1), (1, 0), (1, 1); + +SELECT 'Logical functions not null'; +SELECT a, b, and(a, b), or(a, b), xor(a, b) FROM test_table; + +DROP TABLE test_table; + +DROP TABLE IF EXISTS test_table_nullable; +CREATE TABLE test_table_nullable (a UInt8, b Nullable(UInt8)) ENGINE = TinyLog; +INSERT INTO test_table_nullable VALUES (0, 0), (0, 1), (1, 0), (1, 1), (0, NULL), (1, NULL); + +SELECT 'Logical 
functions nullable'; +SELECT a, b, and(a, b), or(a, b), xor(a, b) FROM test_table_nullable; +SELECT and(b, b), or(b, b), xor(b, b) FROM test_table_nullable; + +DROP TABLE test_table_nullable; diff --git a/tests/queries/0_stateless/02724_mutliple_storage_join.reference b/tests/queries/0_stateless/02724_mutliple_storage_join.reference new file mode 100644 index 000000000000..f7eb44d66e0b --- /dev/null +++ b/tests/queries/0_stateless/02724_mutliple_storage_join.reference @@ -0,0 +1,6 @@ +0 +0 +0 +0 +0 +0 diff --git a/tests/queries/0_stateless/02724_mutliple_storage_join.sql b/tests/queries/0_stateless/02724_mutliple_storage_join.sql new file mode 100644 index 000000000000..286e867704df --- /dev/null +++ b/tests/queries/0_stateless/02724_mutliple_storage_join.sql @@ -0,0 +1,21 @@ +CREATE TABLE user(id UInt32, name String) ENGINE = Join(ANY, LEFT, id); +INSERT INTO user VALUES (1,'U1')(2,'U2')(3,'U3'); + +CREATE TABLE product(id UInt32, name String, cate String) ENGINE = Join(ANY, LEFT, id); +INSERT INTO product VALUES (1,'P1','C1')(2,'P2','C1')(3,'P3','C2'); + +CREATE TABLE order(id UInt32, pId UInt32, uId UInt32) ENGINE = TinyLog; +INSERT INTO order VALUES (1,1,1)(2,1,2)(3,2,3); + +SELECT ignore(*) FROM ( + SELECT + uId, + user.id as `uuu` + FROM order + LEFT ANY JOIN user + ON uId = `uuu` +); + +SELECT ignore(*) FROM order +LEFT ANY JOIN user ON uId = user.id +LEFT ANY JOIN product ON pId = product.id; diff --git a/tests/queries/0_stateless/02725_alias_columns_should_not_allow_compression_codec.reference b/tests/queries/0_stateless/02725_alias_columns_should_not_allow_compression_codec.reference new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tests/queries/0_stateless/02725_alias_columns_should_not_allow_compression_codec.sql b/tests/queries/0_stateless/02725_alias_columns_should_not_allow_compression_codec.sql new file mode 100644 index 000000000000..083a3aefdaff --- /dev/null +++ 
b/tests/queries/0_stateless/02725_alias_columns_should_not_allow_compression_codec.sql @@ -0,0 +1,7 @@ +drop table if exists alias_column_should_not_allow_compression; +create table if not exists alias_column_should_not_allow_compression ( user_id UUID, user_id_hashed ALIAS (cityHash64(user_id))) engine=MergeTree() order by tuple(); +create table if not exists alias_column_should_not_allow_compression_fail ( user_id UUID, user_id_hashed ALIAS (cityHash64(user_id)) codec(LZ4HC(1))) engine=MergeTree() order by tuple(); -- { serverError BAD_ARGUMENTS } +alter table alias_column_should_not_allow_compression modify column user_id codec(LZ4HC(1)); +alter table alias_column_should_not_allow_compression modify column user_id_hashed codec(LZ4HC(1)); -- { serverError BAD_ARGUMENTS } +alter table alias_column_should_not_allow_compression add column user_id_hashed_1 UInt64 ALIAS (cityHash64(user_id)) codec(LZ4HC(1)); -- { serverError BAD_ARGUMENTS } +drop table if exists alias_column_should_not_allow_compression; diff --git a/tests/queries/0_stateless/02725_alias_with_restricted_keywords.reference b/tests/queries/0_stateless/02725_alias_with_restricted_keywords.reference new file mode 100644 index 000000000000..9874d6464ab7 --- /dev/null +++ b/tests/queries/0_stateless/02725_alias_with_restricted_keywords.reference @@ -0,0 +1 @@ +1 2 diff --git a/tests/queries/0_stateless/02725_alias_with_restricted_keywords.sql b/tests/queries/0_stateless/02725_alias_with_restricted_keywords.sql new file mode 100644 index 000000000000..6df0e8560610 --- /dev/null +++ b/tests/queries/0_stateless/02725_alias_with_restricted_keywords.sql @@ -0,0 +1 @@ +SELECT 1 `array`, 2 "union"; diff --git a/tests/queries/0_stateless/02725_async_insert_table_setting.reference b/tests/queries/0_stateless/02725_async_insert_table_setting.reference new file mode 100644 index 000000000000..5f5235c569f7 --- /dev/null +++ b/tests/queries/0_stateless/02725_async_insert_table_setting.reference @@ -0,0 +1,4 @@ +2 +2 
+default.t_mt_async_insert 1 +default.t_mt_sync_insert 0 diff --git a/tests/queries/0_stateless/02725_async_insert_table_setting.sh b/tests/queries/0_stateless/02725_async_insert_table_setting.sh new file mode 100755 index 000000000000..13911e8d6778 --- /dev/null +++ b/tests/queries/0_stateless/02725_async_insert_table_setting.sh @@ -0,0 +1,35 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + +${CLICKHOUSE_CLIENT} -n --query " +DROP TABLE IF EXISTS t_mt_async_insert; +DROP TABLE IF EXISTS t_mt_sync_insert; + +CREATE TABLE t_mt_async_insert (id UInt64, s String) +ENGINE = MergeTree ORDER BY id SETTINGS async_insert = 1; + +CREATE TABLE t_mt_sync_insert (id UInt64, s String) +ENGINE = MergeTree ORDER BY id SETTINGS async_insert = 0;" + +url="${CLICKHOUSE_URL}&async_insert=0&wait_for_async_insert=1" + +${CLICKHOUSE_CURL} -sS "$url" -d "INSERT INTO t_mt_async_insert VALUES (1, 'aa'), (2, 'bb')" +${CLICKHOUSE_CURL} -sS "$url" -d "INSERT INTO t_mt_sync_insert VALUES (1, 'aa'), (2, 'bb')" + +${CLICKHOUSE_CLIENT} -n --query " +SELECT count() FROM t_mt_async_insert; +SELECT count() FROM t_mt_sync_insert; + +SYSTEM FLUSH LOGS; +SELECT tables[1], ProfileEvents['AsyncInsertQuery'] FROM system.query_log +WHERE + type = 'QueryFinish' AND + current_database = currentDatabase() AND + query ILIKE 'INSERT INTO t_mt_%sync_insert%' +ORDER BY tables[1]; + +DROP TABLE IF EXISTS t_mt_async_insert; +DROP TABLE IF EXISTS t_mt_sync_insert;" diff --git a/tests/queries/0_stateless/02725_keeper_fault_inject_sequential_cleanup.reference b/tests/queries/0_stateless/02725_keeper_fault_inject_sequential_cleanup.reference new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tests/queries/0_stateless/02725_keeper_fault_inject_sequential_cleanup.sql b/tests/queries/0_stateless/02725_keeper_fault_inject_sequential_cleanup.sql new file mode 100644 index 000000000000..e1db4ba2fa63 --- 
/dev/null +++ b/tests/queries/0_stateless/02725_keeper_fault_inject_sequential_cleanup.sql @@ -0,0 +1,10 @@ +DROP TABLE IF EXISTS keeper_fault_inject_sequential_cleanup; + +CREATE TABLE keeper_fault_inject_sequential_cleanup (d Int8) ENGINE = ReplicatedMergeTree('/clickhouse/{database}/test_02725/tables/keeper_fault_inject_sequential_cleanup', '1') ORDER BY d; + +INSERT INTO keeper_fault_inject_sequential_cleanup VALUES (1); +INSERT INTO keeper_fault_inject_sequential_cleanup SETTINGS insert_deduplicate = 0 VALUES (1); +INSERT INTO keeper_fault_inject_sequential_cleanup SETTINGS insert_deduplicate = 0, insert_keeper_fault_injection_probability = 0.4, insert_keeper_fault_injection_seed = 5619964844601345291 VALUES (1); + +-- with database ordinary it produced a warning +DROP TABLE keeper_fault_inject_sequential_cleanup; diff --git a/tests/queries/0_stateless/02725_memory-for-merges.reference b/tests/queries/0_stateless/02725_memory-for-merges.reference new file mode 100644 index 000000000000..d00491fd7e5b --- /dev/null +++ b/tests/queries/0_stateless/02725_memory-for-merges.reference @@ -0,0 +1 @@ +1 diff --git a/tests/queries/0_stateless/02725_memory-for-merges.sql b/tests/queries/0_stateless/02725_memory-for-merges.sql new file mode 100644 index 000000000000..b6ae7af7f1ae --- /dev/null +++ b/tests/queries/0_stateless/02725_memory-for-merges.sql @@ -0,0 +1,27 @@ +-- Tags: no-s3-storage +-- We allocate a lot of memory for buffers when reading or writing to S3 + +DROP TABLE IF EXISTS 02725_memory_for_merges SYNC; + +CREATE TABLE 02725_memory_for_merges +( n UInt64, + s String +) +ENGINE = MergeTree +ORDER BY n +SETTINGS merge_max_block_size_bytes=1024, index_granularity_bytes=1024; + +INSERT INTO 02725_memory_for_merges SELECT number, randomPrintableASCII(1000000) FROM numbers(100); +INSERT INTO 02725_memory_for_merges SELECT number, randomPrintableASCII(1000000) FROM numbers(100); +INSERT INTO 02725_memory_for_merges SELECT number, randomPrintableASCII(1000000) FROM 
numbers(100); +INSERT INTO 02725_memory_for_merges SELECT number, randomPrintableASCII(1000000) FROM numbers(100); +INSERT INTO 02725_memory_for_merges SELECT number, randomPrintableASCII(1000000) FROM numbers(100); + +OPTIMIZE TABLE 02725_memory_for_merges FINAL; + +SYSTEM FLUSH LOGS; + +WITH (SELECT uuid FROM system.tables WHERE table='02725_memory_for_merges' and database=currentDatabase()) as uuid +SELECT sum(peak_memory_usage) < 1024 * 1024 * 200 from system.part_log where table_uuid=uuid and event_type='MergeParts'; + +DROP TABLE IF EXISTS 02725_memory_for_merges SYNC; diff --git a/tests/queries/0_stateless/02725_start_stop_fetches.reference b/tests/queries/0_stateless/02725_start_stop_fetches.reference new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tests/queries/0_stateless/02725_start_stop_fetches.sh b/tests/queries/0_stateless/02725_start_stop_fetches.sh new file mode 100755 index 000000000000..0ca687ae951a --- /dev/null +++ b/tests/queries/0_stateless/02725_start_stop_fetches.sh @@ -0,0 +1,78 @@ +#!/usr/bin/env bash +# Tags: race, zookeeper, no-parallel, no-upgrade-check, no-replicated-database + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + +set -e + +NUM_REPLICAS=5 + +for i in $(seq 1 $NUM_REPLICAS); do + $CLICKHOUSE_CLIENT -n -q " + DROP TABLE IF EXISTS r$i SYNC; + CREATE TABLE r$i (x UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/r', 'r$i') ORDER BY x SETTINGS replicated_deduplication_window = 1, allow_remote_fs_zero_copy_replication = 1; + " +done + +function thread { + while true; do + REPLICA=$(($RANDOM % 5 + 1)) + $CLICKHOUSE_CLIENT --query "INSERT INTO r$REPLICA SELECT rand()" + done +} + +function nemesis_thread1 { + while true; do + REPLICA=$(($RANDOM % 5 + 1)) + $CLICKHOUSE_CLIENT --query "SYSTEM STOP REPLICATED SENDS r$REPLICA" + sleep 0.5 + $CLICKHOUSE_CLIENT --query "SYSTEM START REPLICATED SENDS r$REPLICA" + done +} + +function nemesis_thread2 { + while true; do + REPLICA=$(($RANDOM % 5 + 1)) + $CLICKHOUSE_CLIENT --query "SYSTEM STOP FETCHES r$REPLICA" + sleep 0.5 + $CLICKHOUSE_CLIENT --query "SYSTEM START FETCHES r$REPLICA" + done +} + + + +export -f thread +export -f nemesis_thread1 +export -f nemesis_thread2 + +TIMEOUT=20 + +timeout $TIMEOUT bash -c thread 2>/dev/null & +timeout $TIMEOUT bash -c thread 2>/dev/null & +timeout $TIMEOUT bash -c thread 2>/dev/null & +timeout $TIMEOUT bash -c nemesis_thread1 2>/dev/null & +timeout $TIMEOUT bash -c nemesis_thread1 2>/dev/null & +timeout $TIMEOUT bash -c nemesis_thread1 2>/dev/null & +timeout $TIMEOUT bash -c nemesis_thread2 2>/dev/null & +timeout $TIMEOUT bash -c nemesis_thread2 2>/dev/null & +timeout $TIMEOUT bash -c nemesis_thread2 2>/dev/null & + +wait + + +for i in $(seq 1 $NUM_REPLICAS); do + $CLICKHOUSE_CLIENT -q "SYSTEM START FETCHES r$REPLICA" + $CLICKHOUSE_CLIENT -q "SYSTEM START REPLICATED SENDS r$REPLICA" +done + +for i in $(seq 1 $NUM_REPLICAS); do + $CLICKHOUSE_CLIENT --max_execution_time 60 -q "SYSTEM SYNC REPLICA r$i PULL" +done + +for i in $(seq 1 $NUM_REPLICAS); do + $CLICKHOUSE_CLIENT -q "DROP TABLE r$i" 2>/dev/null & +done + +wait diff --git 
a/tests/queries/0_stateless/02730_dictionary_hashed_load_factor_element_count.reference b/tests/queries/0_stateless/02730_dictionary_hashed_load_factor_element_count.reference new file mode 100644 index 000000000000..09d337562b53 --- /dev/null +++ b/tests/queries/0_stateless/02730_dictionary_hashed_load_factor_element_count.reference @@ -0,0 +1,2 @@ +dict_sharded 1 1000000 0.4768 +dict_sharded_multi 5 1000000 0.4768 diff --git a/tests/queries/0_stateless/02730_dictionary_hashed_load_factor_element_count.sql b/tests/queries/0_stateless/02730_dictionary_hashed_load_factor_element_count.sql new file mode 100644 index 000000000000..1e42f56889d7 --- /dev/null +++ b/tests/queries/0_stateless/02730_dictionary_hashed_load_factor_element_count.sql @@ -0,0 +1,17 @@ +DROP DICTIONARY IF EXISTS dict_sharded; +DROP DICTIONARY IF EXISTS dict_sharded_multi; +DROP TABLE IF EXISTS dict_data; + +CREATE TABLE dict_data (key UInt64, v0 UInt16, v1 UInt16, v2 UInt16, v3 UInt16, v4 UInt16) engine=Memory() AS SELECT number, number%65535, number%65535, number%6553, number%655355, number%65535 FROM numbers(1e6); + +CREATE DICTIONARY dict_sharded (key UInt64, v0 UInt16) PRIMARY KEY key SOURCE(CLICKHOUSE(TABLE 'dict_data')) LIFETIME(MIN 0 MAX 0) LAYOUT(HASHED(SHARDS 32)); +SYSTEM RELOAD DICTIONARY dict_sharded; +SELECT name, length(attribute.names), element_count, round(load_factor, 4) FROM system.dictionaries WHERE database = currentDatabase() AND name = 'dict_sharded'; +DROP DICTIONARY dict_sharded; + +CREATE DICTIONARY dict_sharded_multi (key UInt64, v0 UInt16, v1 UInt16, v2 UInt16, v3 UInt16, v4 UInt16) PRIMARY KEY key SOURCE(CLICKHOUSE(TABLE 'dict_data')) LIFETIME(MIN 0 MAX 0) LAYOUT(HASHED(SHARDS 32)); +SYSTEM RELOAD DICTIONARY dict_sharded_multi; +SELECT name, length(attribute.names), element_count, round(load_factor, 4) FROM system.dictionaries WHERE database = currentDatabase() AND name = 'dict_sharded_multi'; +DROP DICTIONARY dict_sharded_multi; + +DROP TABLE dict_data; diff --git 
a/utils/check-style/check-style b/utils/check-style/check-style index 7dbd7d7a8164..afaf2ee6d48d 100755 --- a/utils/check-style/check-style +++ b/utils/check-style/check-style @@ -13,7 +13,7 @@ # and then to run formatter only for the specified files. ROOT_PATH=$(git rev-parse --show-toplevel) -EXCLUDE_DIRS='build/|integration/|widechar_width/|glibc-compatibility/|poco/|memcpy/|consistent-hashing|benchmark|tests/' +EXCLUDE_DIRS='build/|integration/|widechar_width/|glibc-compatibility/|poco/|memcpy/|consistent-hashing|benchmark|tests/|utils/keeper-bench/example.yaml' # From [1]: # But since array_to_string_internal() in array.c still loops over array diff --git a/utils/keeper-bench/CMakeLists.txt b/utils/keeper-bench/CMakeLists.txt index 2596be4adddc..87fa64b17619 100644 --- a/utils/keeper-bench/CMakeLists.txt +++ b/utils/keeper-bench/CMakeLists.txt @@ -1,2 +1,7 @@ +if (NOT TARGET ch_contrib::rapidjson) + message (${RECONFIGURE_MESSAGE_LEVEL} "Not building keeper-bench due to rapidjson is disabled") + return() +endif() + clickhouse_add_executable(keeper-bench Generator.cpp Runner.cpp Stats.cpp main.cpp) -target_link_libraries(keeper-bench PRIVATE clickhouse_common_zookeeper_no_log) +target_link_libraries(keeper-bench PRIVATE clickhouse_common_config_no_zookeeper_log ch_contrib::rapidjson) diff --git a/utils/keeper-bench/Generator.cpp b/utils/keeper-bench/Generator.cpp index b6d8223862c1..2212f7158aef 100644 --- a/utils/keeper-bench/Generator.cpp +++ b/utils/keeper-bench/Generator.cpp @@ -1,16 +1,18 @@ #include "Generator.h" +#include "Common/Exception.h" +#include "Common/ZooKeeper/ZooKeeperCommon.h" +#include #include #include +#include using namespace Coordination; using namespace zkutil; -namespace DB -{ -namespace ErrorCodes +namespace DB::ErrorCodes { extern const int LOGICAL_ERROR; -} + extern const int BAD_ARGUMENTS; } namespace @@ -38,16 +40,6 @@ std::string generateRandomString(size_t length) } } -std::string generateRandomPath(const std::string & prefix, 
size_t length) -{ - return std::filesystem::path(prefix) / generateRandomString(length); -} - -std::string generateRandomData(size_t size) -{ - return generateRandomString(size); -} - void removeRecursive(Coordination::ZooKeeper & zookeeper, const std::string & path) { namespace fs = std::filesystem; @@ -96,245 +88,679 @@ void removeRecursive(Coordination::ZooKeeper & zookeeper, const std::string & pa remove_future.get(); } - -void CreateRequestGenerator::startup(Coordination::ZooKeeper & zookeeper) +NumberGetter +NumberGetter::fromConfig(const std::string & key, const Poco::Util::AbstractConfiguration & config, std::optional default_value) { - removeRecursive(zookeeper, path_prefix); + NumberGetter number_getter; - auto promise = std::make_shared>(); - auto future = promise->get_future(); - auto create_callback = [promise] (const CreateResponse & response) + if (!config.has(key) && default_value.has_value()) { - if (response.error != Coordination::Error::ZOK) - promise->set_exception(std::make_exception_ptr(zkutil::KeeperException(response.error))); - else - promise->set_value(); - }; - zookeeper.create(path_prefix, "", false, false, default_acls, create_callback); - future.get(); + number_getter.value = *default_value; + } + else if (config.has(key + ".min_value") && config.has(key + ".max_value")) + { + NumberRange range{.min_value = config.getUInt64(key + ".min_value"), .max_value = config.getUInt64(key + ".max_value")}; + if (range.max_value <= range.min_value) + throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "Range is invalid for key {}: [{}, {}]", key, range.min_value, range.max_value); + number_getter.value = range; + } + else + { + number_getter.value = config.getUInt64(key); + } + + return number_getter; } -ZooKeeperRequestPtr CreateRequestGenerator::generate() +std::string NumberGetter::description() const { - auto request = std::make_shared(); - request->acls = default_acls; - size_t plength = 5; - if (path_length) - plength = *path_length; - auto 
path_candidate = generateRandomPath(path_prefix, plength); - - while (paths_created.contains(path_candidate)) - path_candidate = generateRandomPath(path_prefix, plength); - - paths_created.insert(path_candidate); - - request->path = path_candidate; - if (data_size) - request->data = generateRandomData(*data_size); + if (const auto * number = std::get_if(&value)) + return std::to_string(*number); - return request; + const auto & range = std::get(value); + return fmt::format("random value from range [{}, {}]", range.min_value, range.max_value); } - -void SetRequestGenerator::startup(Coordination::ZooKeeper & zookeeper) +uint64_t NumberGetter::getNumber() const { - removeRecursive(zookeeper, path_prefix); + if (const auto * number = std::get_if(&value)) + return *number; - auto promise = std::make_shared>(); - auto future = promise->get_future(); - auto create_callback = [promise] (const CreateResponse & response) - { - if (response.error != Coordination::Error::ZOK) - promise->set_exception(std::make_exception_ptr(zkutil::KeeperException(response.error))); - else - promise->set_value(); - }; - zookeeper.create(path_prefix, "", false, false, default_acls, create_callback); - future.get(); + const auto & range = std::get(value); + static pcg64 rng(randomSeed()); + return std::uniform_int_distribution(range.min_value, range.max_value)(rng); } -ZooKeeperRequestPtr SetRequestGenerator::generate() +StringGetter StringGetter::fromConfig(const std::string & key, const Poco::Util::AbstractConfiguration & config) { - auto request = std::make_shared(); - request->path = path_prefix; - request->data = generateRandomData(data_size); - - return request; + StringGetter string_getter; + if (config.has(key + ".random_string")) + string_getter.value + = NumberGetter::fromConfig(key + ".random_string.size", config); + else + string_getter.value = config.getString(key); + + return string_getter; } -void MixedRequestGenerator::startup(Coordination::ZooKeeper & zookeeper) +void 
StringGetter::setString(std::string name) { - for (auto & generator : generators) - generator->startup(zookeeper); + value = std::move(name); } -ZooKeeperRequestPtr MixedRequestGenerator::generate() +std::string StringGetter::getString() const { - pcg64 rng(randomSeed()); - std::uniform_int_distribution distribution(0, generators.size() - 1); + if (const auto * string = std::get_if(&value)) + return *string; - return generators[distribution(rng)]->generate(); + const auto number_getter = std::get(value); + return generateRandomString(number_getter.getNumber()); } -void GetRequestGenerator::startup(Coordination::ZooKeeper & zookeeper) +std::string StringGetter::description() const { - auto promise = std::make_shared>(); - auto future = promise->get_future(); - auto create_callback = [promise] (const CreateResponse & response) - { - if (response.error != Coordination::Error::ZOK) - promise->set_exception(std::make_exception_ptr(zkutil::KeeperException(response.error))); - else - promise->set_value(); - }; - zookeeper.create(path_prefix, "", false, false, default_acls, create_callback); - future.get(); - size_t total_nodes = 1; - if (num_nodes) - total_nodes = *num_nodes; + if (const auto * string = std::get_if(&value)) + return *string; - for (size_t i = 0; i < total_nodes; ++i) - { - auto path = generateRandomPath(path_prefix, 5); - while (std::find(paths_to_get.begin(), paths_to_get.end(), path) != paths_to_get.end()) - path = generateRandomPath(path_prefix, 5); - - auto create_promise = std::make_shared>(); - auto create_future = create_promise->get_future(); - auto callback = [create_promise] (const CreateResponse & response) - { - if (response.error != Coordination::Error::ZOK) - create_promise->set_exception(std::make_exception_ptr(zkutil::KeeperException(response.error))); - else - create_promise->set_value(); - }; - std::string data; - if (nodes_data_size) - data = generateRandomString(*nodes_data_size); - - zookeeper.create(path, data, false, false, 
default_acls, callback); - create_future.get(); - paths_to_get.push_back(path); - } + const auto number_getter = std::get(value); + return fmt::format("random string with size of {}", number_getter.description()); } -Coordination::ZooKeeperRequestPtr GetRequestGenerator::generate() +bool StringGetter::isRandom() const { - auto request = std::make_shared(); - - size_t path_index = distribution(rng); - request->path = paths_to_get[path_index]; - return request; + return std::holds_alternative(value); } -void ListRequestGenerator::startup(Coordination::ZooKeeper & zookeeper) +PathGetter PathGetter::fromConfig(const std::string & key, const Poco::Util::AbstractConfiguration & config) { - auto promise = std::make_shared>(); - auto future = promise->get_future(); - auto create_callback = [promise] (const CreateResponse & response) + static constexpr std::string_view path_key_string = "path"; + + PathGetter path_getter; + Poco::Util::AbstractConfiguration::Keys path_keys; + config.keys(key, path_keys); + + for (const auto & path_key : path_keys) { - if (response.error != Coordination::Error::ZOK) - promise->set_exception(std::make_exception_ptr(zkutil::KeeperException(response.error))); + if (!path_key.starts_with(path_key_string)) + continue; + + const auto current_path_key_string = key + "." + path_key; + const auto children_of_key = current_path_key_string + ".children_of"; + if (config.has(children_of_key)) + { + auto parent_node = config.getString(children_of_key); + if (parent_node.empty() || parent_node[0] != '/') + throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "Invalid path for request generator: '{}'", parent_node); + path_getter.parent_paths.push_back(std::move(parent_node)); + } else - promise->set_value(); - }; - zookeeper.create(path_prefix, "", false, false, default_acls, create_callback); - future.get(); + { + auto path = config.getString(key + "." 
+ path_key); - size_t total_nodes = 1; - if (num_nodes) - total_nodes = *num_nodes; + if (path.empty() || path[0] != '/') + throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "Invalid path for request generator: '{}'", path); - size_t path_length = 5; - if (paths_length) - path_length = *paths_length; + path_getter.paths.push_back(std::move(path)); + } + } - for (size_t i = 0; i < total_nodes; ++i) - { - auto path = generateRandomPath(path_prefix, path_length); + path_getter.path_picker = std::uniform_int_distribution(0, path_getter.paths.size() - 1); + return path_getter; +} - auto create_promise = std::make_shared>(); - auto create_future = create_promise->get_future(); - auto callback = [create_promise] (const CreateResponse & response) +void PathGetter::initialize(Coordination::ZooKeeper & zookeeper) +{ + for (const auto & parent_path : parent_paths) + { + auto list_promise = std::make_shared>(); + auto list_future = list_promise->get_future(); + auto callback = [list_promise] (const ListResponse & response) { if (response.error != Coordination::Error::ZOK) - create_promise->set_exception(std::make_exception_ptr(zkutil::KeeperException(response.error))); + list_promise->set_exception(std::make_exception_ptr(zkutil::KeeperException(response.error))); else - create_promise->set_value(); + list_promise->set_value(response); }; - zookeeper.create(path, "", false, false, default_acls, callback); - create_future.get(); + zookeeper.list(parent_path, ListRequestType::ALL, std::move(callback), {}); + auto list_response = list_future.get(); + + for (const auto & child : list_response.names) + paths.push_back(std::filesystem::path(parent_path) / child); } + + path_picker = std::uniform_int_distribution(0, paths.size() - 1); + initialized = true; } -Coordination::ZooKeeperRequestPtr ListRequestGenerator::generate() +std::string PathGetter::getPath() const { - auto request = std::make_shared(); - request->path = path_prefix; - return request; + if (!initialized) + throw 
DB::Exception(DB::ErrorCodes::LOGICAL_ERROR, "PathGetter is not initialized"); + + if (paths.size() == 1) + return paths[0]; + + static pcg64 rng(randomSeed()); + return paths[path_picker(rng)]; } -std::unique_ptr getGenerator(const std::string & name) +std::string PathGetter::description() const { - if (name == "create_no_data") + std::string description; + for (const auto & path : parent_paths) { - return std::make_unique(); + if (!description.empty()) + description += ", "; + description += fmt::format("children of {}", path); } - else if (name == "create_small_data") + + for (const auto & path : paths) { - return std::make_unique("/create_generator", 5, 32); + if (!description.empty()) + description += ", "; + description += path; } - else if (name == "create_medium_data") + + return description; +} + +RequestGetter::RequestGetter(std::vector request_generators_) + : request_generators(std::move(request_generators_)) +{} + +RequestGetter RequestGetter::fromConfig(const std::string & key, const Poco::Util::AbstractConfiguration & config, bool for_multi) +{ + RequestGetter request_getter; + + Poco::Util::AbstractConfiguration::Keys generator_keys; + config.keys(key, generator_keys); + + bool use_weights = false; + size_t weight_sum = 0; + auto & generators = request_getter.request_generators; + for (const auto & generator_key : generator_keys) { - return std::make_unique("/create_generator", 5, 1024); + RequestGeneratorPtr request_generator; + + if (generator_key.starts_with("create")) + request_generator = std::make_unique(); + else if (generator_key.starts_with("set")) + request_generator = std::make_unique(); + else if (generator_key.starts_with("get")) + request_generator = std::make_unique(); + else if (generator_key.starts_with("list")) + request_generator = std::make_unique(); + else if (generator_key.starts_with("multi")) + { + if (for_multi) + throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "Nested multi requests are not allowed"); + request_generator 
= std::make_unique(); + } + else + { + if (for_multi) + continue; + + throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "Unknown generator {}", key + "." + generator_key); + } + + request_generator->getFromConfig(key + "." + generator_key, config); + + auto weight = request_generator->getWeight(); + use_weights |= weight != 1; + weight_sum += weight; + + generators.push_back(std::move(request_generator)); } - else if (name == "create_big_data") + + if (generators.empty()) + throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "No request generators found in config for key '{}'", key); + + + size_t max_value = use_weights ? weight_sum - 1 : generators.size() - 1; + request_getter.request_generator_picker = std::uniform_int_distribution(0, max_value); + + /// construct weight vector + if (use_weights) { - return std::make_unique("/create_generator", 5, 512 * 1024); + auto & weights = request_getter.weights; + weights.reserve(generators.size()); + weights.push_back(generators[0]->getWeight() - 1); + + for (size_t i = 1; i < generators.size(); ++i) + weights.push_back(weights.back() + generators[i]->getWeight()); } - else if (name == "get_no_data") + + return request_getter; +} + +RequestGeneratorPtr RequestGetter::getRequestGenerator() const +{ + static pcg64 rng(randomSeed()); + + auto random_number = request_generator_picker(rng); + + if (weights.empty()) + return request_generators[random_number]; + + for (size_t i = 0; i < request_generators.size(); ++i) { - return std::make_unique("/get_generator", 10, 0); + if (random_number <= weights[i]) + return request_generators[i]; } - else if (name == "get_small_data") + + throw DB::Exception(DB::ErrorCodes::LOGICAL_ERROR, "Invalid number generated: {}", random_number); +} + +std::string RequestGetter::description() const +{ + std::string guard(30, '-'); + std::string description = guard; + + for (const auto & request_generator : request_generators) + description += fmt::format("\n{}\n", request_generator->description()); 
+ return description + guard; +} + +void RequestGetter::startup(Coordination::ZooKeeper & zookeeper) +{ + for (const auto & request_generator : request_generators) + request_generator->startup(zookeeper); +} + +const std::vector & RequestGetter::requestGenerators() const +{ + return request_generators; +} + +void RequestGenerator::getFromConfig(const std::string & key, const Poco::Util::AbstractConfiguration & config) +{ + if (config.has(key + ".weight")) + weight = config.getUInt64(key + ".weight"); + getFromConfigImpl(key, config); +} + +std::string RequestGenerator::description() +{ + std::string weight_string = weight == 1 ? "" : fmt::format("\n- weight: {}", weight); + return fmt::format("{}{}", descriptionImpl(), weight_string); +} + +Coordination::ZooKeeperRequestPtr RequestGenerator::generate(const Coordination::ACLs & acls) +{ + return generateImpl(acls); +} + +void RequestGenerator::startup(Coordination::ZooKeeper & zookeeper) +{ + startupImpl(zookeeper); +} + +size_t RequestGenerator::getWeight() const +{ + return weight; +} + +CreateRequestGenerator::CreateRequestGenerator() + : rng(randomSeed()) + , remove_picker(0, 1.0) +{} + +void CreateRequestGenerator::getFromConfigImpl(const std::string & key, const Poco::Util::AbstractConfiguration & config) +{ + parent_path = PathGetter::fromConfig(key, config); + + name = StringGetter(NumberGetter::fromConfig(key + ".name_length", config, 5)); + + if (config.has(key + ".data")) + data = StringGetter::fromConfig(key + ".data", config); + + if (config.has(key + ".remove_factor")) + remove_factor = config.getDouble(key + ".remove_factor"); +} + +std::string CreateRequestGenerator::descriptionImpl() +{ + std::string data_string + = data.has_value() ? fmt::format("data for created nodes: {}", data->description()) : "no data for created nodes"; + std::string remove_factor_string + = remove_factor.has_value() ? 
fmt::format("- remove factor: {}", *remove_factor) : "- without removes"; + return fmt::format( + "Create Request Generator\n" + "- parent path(s) for created nodes: {}\n" + "- name for created nodes: {}\n" + "- {}\n" + "{}", + parent_path.description(), + name.description(), + data_string, + remove_factor_string); +} + +void CreateRequestGenerator::startupImpl(Coordination::ZooKeeper & zookeeper) +{ + parent_path.initialize(zookeeper); +} + +Coordination::ZooKeeperRequestPtr CreateRequestGenerator::generateImpl(const Coordination::ACLs & acls) +{ + if (remove_factor.has_value() && !paths_created.empty() && remove_picker(rng) < *remove_factor) { - return std::make_unique("/get_generator", 10, 32); + auto request = std::make_shared(); + auto it = paths_created.begin(); + request->path = *it; + paths_created.erase(it); + return request; } - else if (name == "get_medium_data") + + auto request = std::make_shared(); + request->acls = acls; + + std::string path_candidate = std::filesystem::path(parent_path.getPath()) / name.getString(); + + while (paths_created.contains(path_candidate)) + path_candidate = std::filesystem::path(parent_path.getPath()) / name.getString(); + + paths_created.insert(path_candidate); + + request->path = std::move(path_candidate); + + if (data) + request->data = data->getString(); + + return request; +} + +void SetRequestGenerator::getFromConfigImpl(const std::string & key, const Poco::Util::AbstractConfiguration & config) +{ + path = PathGetter::fromConfig(key, config); + + data = StringGetter::fromConfig(key + ".data", config); +} + +std::string SetRequestGenerator::descriptionImpl() +{ + return fmt::format( + "Set Request Generator\n" + "- path(s) to set: {}\n" + "- data to set: {}", + path.description(), + data.description()); +} + +Coordination::ZooKeeperRequestPtr SetRequestGenerator::generateImpl(const Coordination::ACLs & /*acls*/) +{ + auto request = std::make_shared(); + request->path = path.getPath(); + request->data = 
data.getString(); + return request; +} + +void SetRequestGenerator::startupImpl(Coordination::ZooKeeper & zookeeper) +{ + path.initialize(zookeeper); +} + +void GetRequestGenerator::getFromConfigImpl(const std::string & key, const Poco::Util::AbstractConfiguration & config) +{ + path = PathGetter::fromConfig(key, config); +} + +std::string GetRequestGenerator::descriptionImpl() +{ + return fmt::format( + "Get Request Generator\n" + "- path(s) to get: {}", + path.description()); +} + +Coordination::ZooKeeperRequestPtr GetRequestGenerator::generateImpl(const Coordination::ACLs & /*acls*/) +{ + auto request = std::make_shared(); + request->path = path.getPath(); + return request; +} + +void GetRequestGenerator::startupImpl(Coordination::ZooKeeper & zookeeper) +{ + path.initialize(zookeeper); +} + +void ListRequestGenerator::getFromConfigImpl(const std::string & key, const Poco::Util::AbstractConfiguration & config) +{ + path = PathGetter::fromConfig(key, config); +} + +std::string ListRequestGenerator::descriptionImpl() +{ + return fmt::format( + "List Request Generator\n" + "- path(s) to get: {}", + path.description()); +} + +Coordination::ZooKeeperRequestPtr ListRequestGenerator::generateImpl(const Coordination::ACLs & /*acls*/) +{ + auto request = std::make_shared(); + request->path = path.getPath(); + return request; +} + +void ListRequestGenerator::startupImpl(Coordination::ZooKeeper & zookeeper) +{ + path.initialize(zookeeper); +} + +void MultiRequestGenerator::getFromConfigImpl(const std::string & key, const Poco::Util::AbstractConfiguration & config) +{ + if (config.has(key + ".size")) + size = NumberGetter::fromConfig(key + ".size", config); + + request_getter = RequestGetter::fromConfig(key, config, /*for_multi*/ true); +}; + +std::string MultiRequestGenerator::descriptionImpl() +{ + std::string size_string = size.has_value() ? 
fmt::format("- number of requests: {}\n", size->description()) : ""; + return fmt::format( + "Multi Request Generator\n" + "{}" + "- requests:\n{}", + size_string, + request_getter.description()); +} + +Coordination::ZooKeeperRequestPtr MultiRequestGenerator::generateImpl(const Coordination::ACLs & acls) +{ + Coordination::Requests ops; + + if (size) { - return std::make_unique("/get_generator", 10, 1024); + auto request_count = size->getNumber(); + + for (size_t i = 0; i < request_count; ++i) + ops.push_back(request_getter.getRequestGenerator()->generate(acls)); } - else if (name == "get_big_data") + else { - return std::make_unique("/get_generator", 10, 512 * 1024); + for (const auto & request_generator : request_getter.requestGenerators()) + ops.push_back(request_generator->generate(acls)); } - else if (name == "list_no_nodes") + + return std::make_shared(ops, acls); +} + +void MultiRequestGenerator::startupImpl(Coordination::ZooKeeper & zookeeper) +{ + request_getter.startup(zookeeper); +} + +Generator::Generator(const Poco::Util::AbstractConfiguration & config) +{ + Coordination::ACL acl; + acl.permissions = Coordination::ACL::All; + acl.scheme = "world"; + acl.id = "anyone"; + default_acls.emplace_back(std::move(acl)); + + static const std::string generator_key = "generator"; + + std::cerr << "---- Parsing setup ---- " << std::endl; + static const std::string setup_key = generator_key + ".setup"; + Poco::Util::AbstractConfiguration::Keys keys; + config.keys(setup_key, keys); + for (const auto & key : keys) { - return std::make_unique("/list_generator", 0, 1); + if (key.starts_with("node")) + { + auto node_key = setup_key + "." 
+ key; + auto parsed_root_node = parseNode(node_key, config); + const auto node = root_nodes.emplace_back(parsed_root_node); + + if (config.has(node_key + ".repeat")) + { + if (!node->name.isRandom()) + throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "Repeating node creation for key {}, but name is not randomly generated", node_key); + + auto repeat_count = config.getUInt64(node_key + ".repeat"); + node->repeat_count = repeat_count; + for (size_t i = 1; i < repeat_count; ++i) + root_nodes.emplace_back(node->clone()); + } + + std::cerr << "Tree to create:" << std::endl; + + node->dumpTree(); + std::cerr << std::endl; + } } - else if (name == "list_few_nodes") + std::cerr << "---- Done parsing data setup ----\n" << std::endl; + + std::cerr << "---- Collecting request generators ----" << std::endl; + static const std::string requests_key = generator_key + ".requests"; + request_getter = RequestGetter::fromConfig(requests_key, config); + std::cerr << request_getter.description() << std::endl; + std::cerr << "---- Done collecting request generators ----\n" << std::endl; +} + +std::shared_ptr Generator::parseNode(const std::string & key, const Poco::Util::AbstractConfiguration & config) +{ + auto node = std::make_shared(); + node->name = StringGetter::fromConfig(key + ".name", config); + + if (config.has(key + ".data")) + node->data = StringGetter::fromConfig(key + ".data", config); + + Poco::Util::AbstractConfiguration::Keys node_keys; + config.keys(key, node_keys); + + for (const auto & node_key : node_keys) { - return std::make_unique("/list_generator", 10, 5); + if (!node_key.starts_with("node")) + continue; + + const auto node_key_string = key + "." 
+ node_key; + auto child_node = parseNode(node_key_string, config); + node->children.push_back(child_node); + + if (config.has(node_key_string + ".repeat")) + { + if (!child_node->name.isRandom()) + throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "Repeating node creation for key {}, but name is not randomly generated", node_key_string); + + auto repeat_count = config.getUInt64(node_key_string + ".repeat"); + child_node->repeat_count = repeat_count; + for (size_t i = 1; i < repeat_count; ++i) + node->children.push_back(child_node); + } } - else if (name == "list_medium_nodes") + + return node; +} + +void Generator::Node::dumpTree(int level) const +{ + std::string data_string + = data.has_value() ? fmt::format("{}", data->description()) : "no data"; + + std::string repeat_count_string = repeat_count != 0 ? fmt::format(", repeated {} times", repeat_count) : ""; + + std::cerr << fmt::format("{}name: {}, data: {}{}", std::string(level, '\t'), name.description(), data_string, repeat_count_string) << std::endl; + + for (auto it = children.begin(); it != children.end();) { - return std::make_unique("/list_generator", 1000, 5); + const auto & child = *it; + child->dumpTree(level + 1); + std::advance(it, child->repeat_count != 0 ? 
child->repeat_count : 1); } - else if (name == "list_a_lot_nodes") +} + +std::shared_ptr Generator::Node::clone() const +{ + auto new_node = std::make_shared(); + new_node->name = name; + new_node->data = data; + new_node->repeat_count = repeat_count; + + // don't do deep copy of children because we will do clone only for root nodes + new_node->children = children; + + return new_node; +} + +void Generator::Node::createNode(Coordination::ZooKeeper & zookeeper, const std::string & parent_path, const Coordination::ACLs & acls) const +{ + auto path = std::filesystem::path(parent_path) / name.getString(); + auto promise = std::make_shared>(); + auto future = promise->get_future(); + auto create_callback = [promise] (const CreateResponse & response) { - return std::make_unique("/list_generator", 100000, 5); - } - else if (name == "set_small_data") + if (response.error != Coordination::Error::ZOK) + promise->set_exception(std::make_exception_ptr(zkutil::KeeperException(response.error))); + else + promise->set_value(); + }; + zookeeper.create(path, data ? 
data->getString() : "", false, false, acls, create_callback); + future.get(); + + for (const auto & child : children) + child->createNode(zookeeper, path, acls); +} + +void Generator::startup(Coordination::ZooKeeper & zookeeper) +{ + std::cerr << "---- Creating test data ----" << std::endl; + for (const auto & node : root_nodes) { - return std::make_unique("/set_generator", 5); + auto node_name = node->name.getString(); + node->name.setString(node_name); + + std::string root_path = std::filesystem::path("/") / node_name; + std::cerr << "Cleaning up " << root_path << std::endl; + removeRecursive(zookeeper, root_path); + + node->createNode(zookeeper, "/", default_acls); } - else if (name == "mixed_small_data") + std::cerr << "---- Created test data ----\n" << std::endl; + + std::cerr << "---- Initializing generators ----" << std::endl; + + request_getter.startup(zookeeper); +} + +Coordination::ZooKeeperRequestPtr Generator::generate() +{ + return request_getter.getRequestGenerator()->generate(default_acls); +} + +void Generator::cleanup(Coordination::ZooKeeper & zookeeper) +{ + std::cerr << "---- Cleaning up test data ----" << std::endl; + for (const auto & node : root_nodes) { - std::vector> generators; - generators.push_back(std::make_unique("/set_generator", 5)); - generators.push_back(std::make_unique("/get_generator", 10, 32)); - return std::make_unique(std::move(generators)); + auto node_name = node->name.getString(); + std::string root_path = std::filesystem::path("/") / node_name; + std::cerr << "Cleaning up " << root_path << std::endl; + removeRecursive(zookeeper, root_path); } - - throw DB::Exception(DB::ErrorCodes::LOGICAL_ERROR, "Unknown generator {}", name); } diff --git a/utils/keeper-bench/Generator.h b/utils/keeper-bench/Generator.h index e2c546e4bce0..5b4c05b2d8b4 100644 --- a/utils/keeper-bench/Generator.h +++ b/utils/keeper-bench/Generator.h @@ -6,135 +6,194 @@ #include #include #include +#include #include +struct NumberGetter +{ + static 
NumberGetter fromConfig(const std::string & key, const Poco::Util::AbstractConfiguration & config, std::optional default_value = std::nullopt); + uint64_t getNumber() const; + std::string description() const; +private: + struct NumberRange + { + uint64_t min_value; + uint64_t max_value; + }; + + std::variant value; +}; + +struct StringGetter +{ + explicit StringGetter(NumberGetter number_getter) + : value(std::move(number_getter)) + {} -std::string generateRandomPath(const std::string & prefix, size_t length = 5); + StringGetter() = default; -std::string generateRandomData(size_t size); + static StringGetter fromConfig(const std::string & key, const Poco::Util::AbstractConfiguration & config); + void setString(std::string name); + std::string getString() const; + std::string description() const; + bool isRandom() const; +private: + std::variant value; +}; -class IGenerator +struct PathGetter { -public: - IGenerator() - { - Coordination::ACL acl; - acl.permissions = Coordination::ACL::All; - acl.scheme = "world"; - acl.id = "anyone"; - default_acls.emplace_back(std::move(acl)); - } - virtual void startup(Coordination::ZooKeeper & /*zookeeper*/) {} - virtual Coordination::ZooKeeperRequestPtr generate() = 0; + static PathGetter fromConfig(const std::string & key, const Poco::Util::AbstractConfiguration & config); - virtual ~IGenerator() = default; + std::string getPath() const; + std::string description() const; - Coordination::ACLs default_acls; + void initialize(Coordination::ZooKeeper & zookeeper); +private: + std::vector parent_paths; + bool initialized = false; + + std::vector paths; + mutable std::uniform_int_distribution path_picker; }; -class CreateRequestGenerator final : public IGenerator +struct RequestGenerator { -public: - explicit CreateRequestGenerator( - std::string path_prefix_ = "/create_generator", - std::optional path_length_ = std::nullopt, - std::optional data_size_ = std::nullopt) - : path_prefix(path_prefix_) - , path_length(path_length_) - , 
data_size(data_size_) - {} + virtual ~RequestGenerator() = default; + + void getFromConfig(const std::string & key, const Poco::Util::AbstractConfiguration & config); - void startup(Coordination::ZooKeeper & zookeeper) override; - Coordination::ZooKeeperRequestPtr generate() override; + Coordination::ZooKeeperRequestPtr generate(const Coordination::ACLs & acls); + std::string description(); + + void startup(Coordination::ZooKeeper & zookeeper); + + size_t getWeight() const; private: - std::string path_prefix; - std::optional path_length; - std::optional data_size; - std::unordered_set paths_created; + virtual void getFromConfigImpl(const std::string & key, const Poco::Util::AbstractConfiguration & config) = 0; + virtual std::string descriptionImpl() = 0; + virtual Coordination::ZooKeeperRequestPtr generateImpl(const Coordination::ACLs & acls) = 0; + virtual void startupImpl(Coordination::ZooKeeper &) {} + + size_t weight = 1; }; +using RequestGeneratorPtr = std::shared_ptr; -class GetRequestGenerator final : public IGenerator +struct CreateRequestGenerator final : public RequestGenerator { -public: - explicit GetRequestGenerator( - std::string path_prefix_ = "/get_generator", - std::optional num_nodes_ = std::nullopt, - std::optional nodes_data_size_ = std::nullopt) - : path_prefix(path_prefix_) - , num_nodes(num_nodes_) - , nodes_data_size(nodes_data_size_) - , rng(randomSeed()) - , distribution(0, num_nodes ? 
*num_nodes - 1 : 0) - {} + CreateRequestGenerator(); +private: + void getFromConfigImpl(const std::string & key, const Poco::Util::AbstractConfiguration & config) override; + std::string descriptionImpl() override; + Coordination::ZooKeeperRequestPtr generateImpl(const Coordination::ACLs & acls) override; + void startupImpl(Coordination::ZooKeeper & zookeeper) override; - void startup(Coordination::ZooKeeper & zookeeper) override; - Coordination::ZooKeeperRequestPtr generate() override; + PathGetter parent_path; + StringGetter name; + std::optional data; + std::optional remove_factor; + pcg64 rng; + std::uniform_real_distribution remove_picker; + + std::unordered_set paths_created; +}; + +struct SetRequestGenerator final : public RequestGenerator +{ private: - std::string path_prefix; - std::optional num_nodes; - std::optional nodes_data_size; - std::vector paths_to_get; + void getFromConfigImpl(const std::string & key, const Poco::Util::AbstractConfiguration & config) override; + std::string descriptionImpl() override; + Coordination::ZooKeeperRequestPtr generateImpl(const Coordination::ACLs & acls) override; + void startupImpl(Coordination::ZooKeeper & zookeeper) override; - pcg64 rng; - std::uniform_int_distribution distribution; + PathGetter path; + StringGetter data; }; -class ListRequestGenerator final : public IGenerator +struct GetRequestGenerator final : public RequestGenerator { -public: - explicit ListRequestGenerator( - std::string path_prefix_ = "/list_generator", - std::optional num_nodes_ = std::nullopt, - std::optional paths_length_ = std::nullopt) - : path_prefix(path_prefix_) - , num_nodes(num_nodes_) - , paths_length(paths_length_) - {} +private: + void getFromConfigImpl(const std::string & key, const Poco::Util::AbstractConfiguration & config) override; + std::string descriptionImpl() override; + Coordination::ZooKeeperRequestPtr generateImpl(const Coordination::ACLs & acls) override; + void startupImpl(Coordination::ZooKeeper & zookeeper) 
override; - void startup(Coordination::ZooKeeper & zookeeper) override; - Coordination::ZooKeeperRequestPtr generate() override; + PathGetter path; +}; +struct ListRequestGenerator final : public RequestGenerator +{ private: - std::string path_prefix; - std::optional num_nodes; - std::optional paths_length; + void getFromConfigImpl(const std::string & key, const Poco::Util::AbstractConfiguration & config) override; + std::string descriptionImpl() override; + Coordination::ZooKeeperRequestPtr generateImpl(const Coordination::ACLs & acls) override; + void startupImpl(Coordination::ZooKeeper & zookeeper) override; + + PathGetter path; }; -class SetRequestGenerator final : public IGenerator +struct RequestGetter { -public: - explicit SetRequestGenerator( - std::string path_prefix_ = "/set_generator", - uint64_t data_size_ = 5) - : path_prefix(path_prefix_) - , data_size(data_size_) - {} + explicit RequestGetter(std::vector request_generators_); + + RequestGetter() = default; - void startup(Coordination::ZooKeeper & zookeeper) override; - Coordination::ZooKeeperRequestPtr generate() override; + static RequestGetter fromConfig(const std::string & key, const Poco::Util::AbstractConfiguration & config, bool for_multi = false); + RequestGeneratorPtr getRequestGenerator() const; + std::string description() const; + void startup(Coordination::ZooKeeper & zookeeper); + const std::vector & requestGenerators() const; private: - std::string path_prefix; - uint64_t data_size; + std::vector request_generators; + std::vector weights; + mutable std::uniform_int_distribution request_generator_picker; }; -class MixedRequestGenerator final : public IGenerator +struct MultiRequestGenerator final : public RequestGenerator { -public: - explicit MixedRequestGenerator(std::vector> generators_) - : generators(std::move(generators_)) - {} +private: + void getFromConfigImpl(const std::string & key, const Poco::Util::AbstractConfiguration & config) override; + std::string descriptionImpl() 
override; + Coordination::ZooKeeperRequestPtr generateImpl(const Coordination::ACLs & acls) override; + void startupImpl(Coordination::ZooKeeper & zookeeper) override; - void startup(Coordination::ZooKeeper & zookeeper) override; - Coordination::ZooKeeperRequestPtr generate() override; + std::optional size; + RequestGetter request_getter; +}; +class Generator +{ +public: + explicit Generator(const Poco::Util::AbstractConfiguration & config); + + void startup(Coordination::ZooKeeper & zookeeper); + Coordination::ZooKeeperRequestPtr generate(); + void cleanup(Coordination::ZooKeeper & zookeeper); private: - std::vector> generators; -}; + struct Node + { + StringGetter name; + std::optional data; + std::vector> children; + size_t repeat_count = 0; + + std::shared_ptr clone() const; + + void createNode(Coordination::ZooKeeper & zookeeper, const std::string & parent_path, const Coordination::ACLs & acls) const; + void dumpTree(int level = 0) const; + }; + static std::shared_ptr parseNode(const std::string & key, const Poco::Util::AbstractConfiguration & config); + + std::uniform_int_distribution request_picker; + std::vector> root_nodes; + RequestGetter request_getter; + Coordination::ACLs default_acls; +}; -std::unique_ptr getGenerator(const std::string & name); +std::optional getGenerator(const std::string & name); diff --git a/utils/keeper-bench/README.md b/utils/keeper-bench/README.md new file mode 100644 index 000000000000..8b498228799d --- /dev/null +++ b/utils/keeper-bench/README.md @@ -0,0 +1,317 @@ +# Keeper Bench + +Keeper Bench is a tool for benchmarking Keeper or any ZooKeeper compatible systems. + +To run it call following command from the build folder: + +``` +./utils/keeper-bench --config benchmark_config_file.yaml +``` + +## Configuration file + +Keeper Bench runs need to be configured inside a yaml or XML file. 
+An example of a configuration file can be found in `./utils/keeper-bench/example.yaml` + +### Table of contents +- [Special Types](#special-types) +- [General settings](#general-settings) +- [Connections](#connections) +- [Generator](#generator) +- [Output](#output) + + +## Special types + +### IntegerGetter + +Can be defined with constant integer or as a random value from a range. + +```yaml +key: integer +key: + min_value: integer + max_value: integer +``` + +Example for a constant value: + +```yaml +some_key: 2 +``` + +Example for random value from [10, 20]: + +```yaml +some_key: + min_value: 10 + max_value: 20 +``` + +### StringGetter + +Can be defined with constant string or as a random string of some size. + +```yaml +key: string +key: + random_string: + size: IntegerGetter +``` + +Example for a constant value: +```yaml +some_key: "string" +``` + +Example for a random string with a random size from [10, 20]: +```yaml +some_key: + random_string: + size: + min_value: 10 + max_value: 20 +``` + + +### PathGetter + +If a section contains one or more `path` keys, all `path` keys are collected into a list. \ +Additionally, paths can be defined with key `children_of` which will add all children of some path to the list. 
+ +```yaml +path: string +path: + children_of: string +``` + +Example for defining list of paths (`/path1`, `/path2` and children of `/path3`): + +```yaml +main: + path: + - "/path1" + - "/path2" + path: + children_of: "/path3" +``` + + +## General settings + +```yaml +# number of parallel queries (default: 1) +concurrency: integer + +# amount of queries to be executed, set 0 to disable limit (default: 0) +iterations: integer + +# delay between intermediate reports in seconds, set 0 to disable reports (default: 1.0) +report_delay: double + +# stop launch of queries after specified time limit, set 0 to disable limit (default: 0) +timelimit: double + +# continue testing even if a query fails (default: false) +continue_on_errors: boolean +``` + + +## Connections + +Connection definitions that will be used throughout tests defined under `connections` key. + +Following configurations can be defined under `connections` key or for each specific connection. \ +If it's defined under `connections` key, it will be used by default unless a specific connection overrides it. + +```yaml +secure: boolean +operation_timeout_ms: integer +session_timeout_ms: integer +connection_timeout_ms: integer +``` + +Specific configuration can be defined with a string or with a detailed description. + +```yaml +host: string +connection: + host: string + + # number of sessions to create for host + sessions: integer + # any connection configuration defined above +``` + +Example definition of 3 connections in total, 1 to `localhost:9181` and 2 to `localhost:9182` both will use secure connections: + +```yaml +connections: + secure: true + + host: "localhost:9181" + connection: + host: "localhost:9182" + sessions: 2 +``` + + +## Generator + +Main part of the benchmark is the generator itself which creates necessary nodes and defines how the requests will be generated. \ +It is defined under `generator` key. + +### Setup + +Setup defines nodes that are needed for test, defined under `setup` key. 
+ +Each node is defined with a `node` key in the following format: + +```yaml +node: StringGetter + +node: + name: StringGetter + data: StringGetter + repeat: integer + node: Node +``` + +If only string is defined, a node with that name will be created. \ +Otherwise more detailed definition could be included to set data or the children of the node. \ +If `repeat` key is set, the node definition will be used multiple times. For a `repeat` key to be valid, the name of the node needs to be a random string. + +Example for a setup: + +```yaml +generator: + setup: + node: "node1" + node: + name: + random_string: + size: 20 + data: "somedata" + repeat: 4 + node: + name: + random_string: + size: 10 + repeat: 2 +``` + +We will create node `/node1` with no data and 4 children of random name of size 20 and data set to `somedata`. \ +We will also create 2 nodes with no data and random name of size 10 under `/` node. + +### Requests + +While benchmark is running, we are generating requests. + +Request generator is defined under `requests` key. \ +For each request `weight` (default: 1) can be defined which defines preference for a certain request. 
+ +#### `create` + +```yaml +create: + # parent path for created nodes + path: string + + # length of the name for the create node (default: 5) + name_length: IntegerGetter + + # data for create nodes (default: "") + data: StringGetter + + # value in range [0.0, 1.0> denoting how often a remove request should be generated compared to create request (default: 0) + remove_factor: double +``` + +#### `set` + +```yaml +set: + # paths on which we randomly set data + path: PathGetter + + # data to set + data: StringGetter +``` + +#### `get` + +```yaml +get: + # paths for which we randomly get data + path: PathGetter +``` + +#### `list` + +```yaml +list: + # paths for which we randomly do list request + path: PathGetter +``` + +#### `multi` + +```yaml +multi: + # any request definition defined above can be added + + # optional size for the multi request + size: IntegerGetter +``` + +Multi request definition can contain any other request generator definitions described above. \ +If `size` key is defined, we will randomly pick `size` amount of requests from defined request generators. \ +All request generators can have a higher pick probability by using `weight` key. \ +If `size` is not defined, multi request with same request generators will always be generated. \ +Both write and read multi requests are supported. + +#### Example + +```yaml +generator: + requests: + create: + path: "/test_create" + name_length: + min_value: 10 + max_value: 20 + multi: + weight: 20 + size: 10 + get: + path: + children_of: "/test_get1" + get: + weight: 2 + path: + children_of: "/test_get2" +``` + +We defined a request geneator that will generate either a `create` or a `multi` request. \ +Each `create` request will create a node under `/test_create` with a randomly generated name with size from range `[10, 20]`. \ +`multi` request will be generated 20 times more than `create` request. 
\ +`multi` request will contain 10 requests and approximately twice as much get requests to children of "/test_get2". + + +## Output + +```yaml +output: + # if defined, JSON output of results will be stored at the defined path + file: string + # or + file: + # if defined, JSON output of results will be stored at the defined path + path: string + + # if set to true, timestamp will be appended to the output file name (default: false) + with_timestamp: boolean + + # if set to true, output will be printed to stdout also (default: false) + stdout: boolean +``` diff --git a/utils/keeper-bench/Runner.cpp b/utils/keeper-bench/Runner.cpp index c858b476483f..f86d2b44dd7a 100644 --- a/utils/keeper-bench/Runner.cpp +++ b/utils/keeper-bench/Runner.cpp @@ -1,15 +1,160 @@ #include "Runner.h" - -namespace DB +#include + +#include "Common/ZooKeeper/ZooKeeperCommon.h" +#include "Common/ZooKeeper/ZooKeeperConstants.h" +#include +#include +#include "IO/ReadBufferFromString.h" +#include +#include +#include + +namespace CurrentMetrics { + extern const Metric LocalThread; + extern const Metric LocalThreadActive; +} -namespace ErrorCodes +namespace DB::ErrorCodes { extern const int CANNOT_BLOCK_SIGNAL; + extern const int BAD_ARGUMENTS; } +Runner::Runner( + std::optional concurrency_, + const std::string & config_path, + const Strings & hosts_strings_, + std::optional max_time_, + std::optional delay_, + std::optional continue_on_error_, + std::optional max_iterations_) + : info(std::make_shared()) +{ + + DB::ConfigProcessor config_processor(config_path, true, false); + auto config = config_processor.loadConfig().configuration; + + generator.emplace(*config); + + if (!hosts_strings_.empty()) + { + for (const auto & host : hosts_strings_) + connection_infos.push_back({.host = host}); + } + else + { + if (!config) + throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "No config file or hosts defined"); + + parseHostsFromConfig(*config); + } + + std::cerr << "---- Run options ---- " << 
std::endl; + static constexpr uint64_t DEFAULT_CONCURRENCY = 1; + if (concurrency_) + concurrency = *concurrency_; + else + concurrency = config->getUInt64("concurrency", DEFAULT_CONCURRENCY); + std::cerr << "Concurrency: " << concurrency << std::endl; + + static constexpr uint64_t DEFAULT_ITERATIONS = 0; + if (max_iterations_) + max_iterations = *max_iterations_; + else + max_iterations = config->getUInt64("iterations", DEFAULT_ITERATIONS); + std::cerr << "Iterations: " << max_iterations << std::endl; + + static constexpr double DEFAULT_DELAY = 1.0; + if (delay_) + delay = *delay_; + else + delay = config->getDouble("report_delay", DEFAULT_DELAY); + std::cerr << "Report delay: " << delay << std::endl; + + static constexpr double DEFAULT_TIME_LIMIT = 0.0; + if (max_time_) + max_time = *max_time_; + else + max_time = config->getDouble("timelimit", DEFAULT_TIME_LIMIT); + std::cerr << "Time limit: " << max_time << std::endl; + + if (continue_on_error_) + continue_on_error = *continue_on_error_; + else + continue_on_error = config->getBool("continue_on_error", false); + std::cerr << "Continue on error: " << continue_on_error << std::endl; + + static const std::string output_key = "output"; + print_to_stdout = config->getBool(output_key + ".stdout", false); + std::cerr << "Printing output to stdout: " << print_to_stdout << std::endl; + + static const std::string output_file_key = output_key + ".file"; + if (config->has(output_file_key)) + { + if (config->has(output_file_key + ".path")) + { + file_output = config->getString(output_file_key + ".path"); + output_file_with_timestamp = config->getBool(output_file_key + ".with_timestamp"); + } + else + file_output = config->getString(output_file_key); + + std::cerr << "Result file path: " << file_output->string() << std::endl; + } + + std::cerr << "---- Run options ----\n" << std::endl; + + pool.emplace(CurrentMetrics::LocalThread, CurrentMetrics::LocalThreadActive, concurrency); + queue.emplace(concurrency); } +void 
Runner::parseHostsFromConfig(const Poco::Util::AbstractConfiguration & config) +{ + ConnectionInfo default_connection_info; + + const auto fill_connection_details = [&](const std::string & key, auto & connection_info) + { + if (config.has(key + ".secure")) + connection_info.secure = config.getBool(key + ".secure"); + + if (config.has(key + ".session_timeout_ms")) + connection_info.session_timeout_ms = config.getInt(key + ".session_timeout_ms"); + + if (config.has(key + ".operation_timeout_ms")) + connection_info.operation_timeout_ms = config.getInt(key + ".operation_timeout_ms"); + + if (config.has(key + ".connection_timeout_ms")) + connection_info.connection_timeout_ms = config.getInt(key + ".connection_timeout_ms"); + }; + + fill_connection_details("connections", default_connection_info); + + Poco::Util::AbstractConfiguration::Keys connections_keys; + config.keys("connections", connections_keys); + + for (const auto & key : connections_keys) + { + std::string connection_key = "connections." 
+ key; + auto connection_info = default_connection_info; + if (key.starts_with("host")) + { + connection_info.host = config.getString(connection_key); + connection_infos.push_back(std::move(connection_info)); + } + else if (key.starts_with("connection") && key != "connection_timeout_ms") + { + connection_info.host = config.getString(connection_key + ".host"); + if (config.has(connection_key + ".sessions")) + connection_info.sessions = config.getUInt64(connection_key + ".sessions"); + + fill_connection_details(connection_key, connection_info); + + connection_infos.push_back(std::move(connection_info)); + } + } +} void Runner::thread(std::vector> zookeepers) { @@ -33,7 +178,7 @@ void Runner::thread(std::vector> zookee while (!extracted) { - extracted = queue.tryPop(request, 100); + extracted = queue->tryPop(request, 100); if (shutdown || (max_iterations && requests_executed >= max_iterations)) @@ -47,9 +192,35 @@ void Runner::thread(std::vector> zookee auto promise = std::make_shared>(); auto future = promise->get_future(); - Coordination::ResponseCallback callback = [promise](const Coordination::Response & response) + Coordination::ResponseCallback callback = [&request, promise](const Coordination::Response & response) { - if (response.error != Coordination::Error::ZOK) + bool set_exception = true; + + if (response.error == Coordination::Error::ZOK) + { + set_exception = false; + } + else if (response.error == Coordination::Error::ZNONODE) + { + /// remove can fail with ZNONODE because of different order of execution + /// of generated create and remove requests + /// this is okay for concurrent runs + if (dynamic_cast(&response)) + set_exception = false; + else if (const auto * multi_response = dynamic_cast(&response)) + { + const auto & responses = multi_response->responses; + size_t i = 0; + while (responses[i]->error != Coordination::Error::ZNONODE) + ++i; + + const auto & multi_request = dynamic_cast(*request); + if (dynamic_cast(&*multi_request.requests[i])) + 
set_exception = false; + } + } + + if (set_exception) promise->set_exception(std::make_exception_ptr(zkutil::KeeperException(response.error))); else promise->set_value(response.bytesSize()); @@ -62,14 +233,14 @@ void Runner::thread(std::vector> zookee try { auto response_size = future.get(); - double seconds = watch.elapsedSeconds(); + auto microseconds = watch.elapsedMicroseconds(); std::lock_guard lock(mutex); if (request->isReadRequest()) - info->addRead(seconds, 1, request->bytesSize() + response_size); + info->addRead(microseconds, 1, request->bytesSize() + response_size); else - info->addWrite(seconds, 1, request->bytesSize() + response_size); + info->addWrite(microseconds, 1, request->bytesSize() + response_size); } catch (...) { @@ -95,7 +266,7 @@ void Runner::thread(std::vector> zookee { try { - zookeepers = getConnections(); + zookeepers = refreshConnections(); break; } catch (...) @@ -110,13 +281,13 @@ void Runner::thread(std::vector> zookee } } -bool Runner::tryPushRequestInteractively(const Coordination::ZooKeeperRequestPtr & request, DB::InterruptListener & interrupt_listener) +bool Runner::tryPushRequestInteractively(Coordination::ZooKeeperRequestPtr && request, DB::InterruptListener & interrupt_listener) { bool inserted = false; while (!inserted) { - inserted = queue.tryPush(request, 100); + inserted = queue->tryPush(std::move(request), 100); if (shutdown) { @@ -126,13 +297,13 @@ bool Runner::tryPushRequestInteractively(const Coordination::ZooKeeperRequestPtr if (max_time > 0 && total_watch.elapsedSeconds() >= max_time) { - std::cout << "Stopping launch of queries. Requested time limit is exhausted.\n"; + std::cerr << "Stopping launch of queries. Requested time limit is exhausted.\n"; return false; } if (interrupt_listener.check()) { - std::cout << "Stopping launch of queries. SIGINT received." << std::endl; + std::cerr << "Stopping launch of queries. SIGINT received." 
<< std::endl; return false; } @@ -141,7 +312,7 @@ bool Runner::tryPushRequestInteractively(const Coordination::ZooKeeperRequestPtr printNumberOfRequestsExecuted(requests_executed); std::lock_guard lock(mutex); - report(info, concurrency); + info->report(concurrency); delay_watch.restart(); } } @@ -152,23 +323,26 @@ bool Runner::tryPushRequestInteractively(const Coordination::ZooKeeperRequestPtr void Runner::runBenchmark() { - auto aux_connections = getConnections(); + createConnections(); std::cerr << "Preparing to run\n"; - generator->startup(*aux_connections[0]); + generator->startup(*connections[0]); std::cerr << "Prepared\n"; + + auto start_timestamp_ms = Poco::Timestamp().epochMicroseconds() / 1000; + try { - auto connections = getConnections(); for (size_t i = 0; i < concurrency; ++i) { - pool.scheduleOrThrowOnError([this, connections]() mutable { thread(connections); }); + auto thread_connections = connections; + pool->scheduleOrThrowOnError([this, connections = std::move(thread_connections)]() mutable { thread(connections); }); } } catch (...) 
{ shutdown = true; - pool.wait(); + pool->wait(); throw; } @@ -185,31 +359,102 @@ void Runner::runBenchmark() } } - pool.wait(); + pool->wait(); total_watch.stop(); printNumberOfRequestsExecuted(requests_executed); std::lock_guard lock(mutex); - report(info, concurrency); + info->report(concurrency); + + DB::WriteBufferFromOwnString out; + info->writeJSON(out, concurrency, start_timestamp_ms); + auto output_string = std::move(out.str()); + + if (print_to_stdout) + std::cout << output_string << std::endl; + + if (file_output) + { + auto path = *file_output; + + if (output_file_with_timestamp) + { + auto filename = file_output->filename(); + filename = fmt::format("{}_{}{}", filename.stem().generic_string(), start_timestamp_ms, filename.extension().generic_string()); + path = file_output->parent_path() / filename; + } + + std::cerr << "Storing output to " << path << std::endl; + + DB::WriteBufferFromFile file_output_buffer(path); + DB::ReadBufferFromString read_buffer(output_string); + DB::copyData(read_buffer, file_output_buffer); + } } -std::vector> Runner::getConnections() +void Runner::createConnections() { - std::vector> zookeepers; - for (const auto & host_string : hosts_strings) + DB::EventNotifier::init(); + std::cerr << "---- Creating connections ---- " << std::endl; + for (size_t connection_info_idx = 0; connection_info_idx < connection_infos.size(); ++connection_info_idx) { - Coordination::ZooKeeper::Node node{Poco::Net::SocketAddress{host_string}, false}; - std::vector nodes; - nodes.push_back(node); - zkutil::ZooKeeperArgs args; - args.session_timeout_ms = 30000; - args.connection_timeout_ms = 1000; - args.operation_timeout_ms = 10000; - zookeepers.emplace_back(std::make_shared(nodes, args, nullptr)); + const auto & connection_info = connection_infos[connection_info_idx]; + std::cerr << fmt::format("Creating {} session(s) for:\n" + "- host: {}\n" + "- secure: {}\n" + "- session timeout: {}ms\n" + "- operation timeout: {}ms\n" + "- connection timeout: 
{}ms", + connection_info.sessions, + connection_info.host, + connection_info.secure, + connection_info.session_timeout_ms, + connection_info.operation_timeout_ms, + connection_info.connection_timeout_ms) << std::endl; + + for (size_t session = 0; session < connection_info.sessions; ++session) + { + connections.emplace_back(getConnection(connection_info)); + connections_to_info_map[connections.size() - 1] = connection_info_idx; + } } + std::cerr << "---- Done creating connections ----\n" << std::endl; +} +std::shared_ptr Runner::getConnection(const ConnectionInfo & connection_info) +{ + Coordination::ZooKeeper::Node node{Poco::Net::SocketAddress{connection_info.host}, connection_info.secure}; + std::vector nodes; + nodes.push_back(node); + zkutil::ZooKeeperArgs args; + args.session_timeout_ms = connection_info.session_timeout_ms; + args.connection_timeout_ms = connection_info.operation_timeout_ms; + args.operation_timeout_ms = connection_info.connection_timeout_ms; + return std::make_shared(nodes, args, nullptr); +} - return zookeepers; +std::vector> Runner::refreshConnections() +{ + std::lock_guard lock(connection_mutex); + for (size_t connection_idx = 0; connection_idx < connections.size(); ++connection_idx) + { + auto & connection = connections[connection_idx]; + if (connection->isExpired()) + { + const auto & connection_info = connection_infos[connections_to_info_map[connection_idx]]; + connection = getConnection(connection_info); + } + } + return connections; } + +Runner::~Runner() +{ + queue->clearAndFinish(); + shutdown = true; + pool->wait(); + generator->cleanup(*connections[0]); +} + diff --git a/utils/keeper-bench/Runner.h b/utils/keeper-bench/Runner.h index a00b7b43effb..f899f1d538d3 100644 --- a/utils/keeper-bench/Runner.h +++ b/utils/keeper-bench/Runner.h @@ -1,50 +1,35 @@ #pragma once +#include "Common/ZooKeeper/ZooKeeperConstants.h" #include #include "Generator.h" #include +#include #include #include #include -#include -#include #include #include 
#include +#include #include "Stats.h" +#include + using Ports = std::vector; using Strings = std::vector; -namespace CurrentMetrics -{ - extern const Metric LocalThread; - extern const Metric LocalThreadActive; -} - class Runner { public: Runner( - size_t concurrency_, - const std::string & generator_name, + std::optional concurrency_, + const std::string & config_path, const Strings & hosts_strings_, - double max_time_, - double delay_, - bool continue_on_error_, - size_t max_iterations_) - : concurrency(concurrency_) - , pool(CurrentMetrics::LocalThread, CurrentMetrics::LocalThreadActive, concurrency) - , hosts_strings(hosts_strings_) - , generator(getGenerator(generator_name)) - , max_time(max_time_) - , delay(delay_) - , continue_on_error(continue_on_error_) - , max_iterations(max_iterations_) - , info(std::make_shared()) - , queue(concurrency) - { - } + std::optional max_time_, + std::optional delay_, + std::optional continue_on_error_, + std::optional max_iterations_); void thread(std::vector> zookeepers); @@ -53,18 +38,19 @@ class Runner std::cerr << "Requests executed: " << num << ".\n"; } - bool tryPushRequestInteractively(const Coordination::ZooKeeperRequestPtr & request, DB::InterruptListener & interrupt_listener); + bool tryPushRequestInteractively(Coordination::ZooKeeperRequestPtr && request, DB::InterruptListener & interrupt_listener); void runBenchmark(); - + ~Runner(); private: + void parseHostsFromConfig(const Poco::Util::AbstractConfiguration & config); size_t concurrency = 1; - ThreadPool pool; - Strings hosts_strings; - std::unique_ptr generator; + std::optional pool; + + std::optional generator; double max_time = 0; double delay = 1; bool continue_on_error = false; @@ -73,6 +59,9 @@ class Runner std::atomic shutdown = false; std::shared_ptr info; + bool print_to_stdout; + std::optional file_output; + bool output_file_with_timestamp; Stopwatch total_watch; Stopwatch delay_watch; @@ -80,7 +69,26 @@ class Runner std::mutex mutex; using Queue = 
ConcurrentBoundedQueue; - Queue queue; + std::optional queue; + + struct ConnectionInfo + { + std::string host; + + bool secure = false; + int32_t session_timeout_ms = Coordination::DEFAULT_SESSION_TIMEOUT_MS; + int32_t connection_timeout_ms = Coordination::DEFAULT_CONNECTION_TIMEOUT_MS; + int32_t operation_timeout_ms = Coordination::DEFAULT_OPERATION_TIMEOUT_MS; + + size_t sessions = 1; + }; + + std::mutex connection_mutex; + std::vector connection_infos; + std::vector> connections; + std::unordered_map connections_to_info_map; - std::vector> getConnections(); + void createConnections(); + std::shared_ptr getConnection(const ConnectionInfo & connection_info); + std::vector> refreshConnections(); }; diff --git a/utils/keeper-bench/Stats.cpp b/utils/keeper-bench/Stats.cpp index 1f8b02ed09d6..f5e5f84ba147 100644 --- a/utils/keeper-bench/Stats.cpp +++ b/utils/keeper-bench/Stats.cpp @@ -1,67 +1,177 @@ #include "Stats.h" #include -void report(std::shared_ptr & info, size_t concurrency) +#include +#include +#include +#include + +void Stats::StatsCollector::add(uint64_t microseconds, size_t requests_inc, size_t bytes_inc) +{ + work_time += microseconds; + requests += requests_inc; + requests_bytes += bytes_inc; + sampler.insert(microseconds); +} + +void Stats::addRead(uint64_t microseconds, size_t requests_inc, size_t bytes_inc) +{ + read_collector.add(microseconds, requests_inc, bytes_inc); +} + +void Stats::addWrite(uint64_t microseconds, size_t requests_inc, size_t bytes_inc) +{ + write_collector.add(microseconds, requests_inc, bytes_inc); +} + +void Stats::StatsCollector::clear() +{ + requests = 0; + work_time = 0; + requests_bytes = 0; + sampler.clear(); +} + +void Stats::clear() +{ + read_collector.clear(); + write_collector.clear(); +} + +std::pair Stats::StatsCollector::getThroughput(size_t concurrency) +{ + assert(requests != 0); + double seconds = work_time / 1'000'000.0 / concurrency; + + return {requests / seconds, requests_bytes / seconds}; +} + +double 
Stats::StatsCollector::getPercentile(double percent) +{ + return sampler.quantileNearest(percent / 100.0) / 1000.0; +} + +void Stats::report(size_t concurrency) { std::cerr << "\n"; + const auto & read_requests = read_collector.requests; + const auto & write_requests = write_collector.requests; + /// Avoid zeros, nans or exceptions - if (0 == info->read_requests && 0 == info->write_requests) + if (0 == read_requests && 0 == write_requests) return; - double read_seconds = info->read_work_time / concurrency; - double write_seconds = info->write_work_time / concurrency; + auto [read_rps, read_bps] = read_collector.getThroughput(concurrency); + auto [write_rps, write_bps] = write_collector.getThroughput(concurrency); - std::cerr << "read requests " << info->read_requests << ", write requests " << info->write_requests << ", "; - if (info->errors) - { - std::cerr << "errors " << info->errors << ", "; - } - if (0 != info->read_requests) + std::cerr << "read requests " << read_requests << ", write requests " << write_requests << ", "; + if (errors) + std::cerr << "errors " << errors << ", "; + + if (0 != read_requests) { std::cerr - << "Read RPS: " << (info->read_requests / read_seconds) << ", " - << "Read MiB/s: " << (info->requests_read_bytes / read_seconds / 1048576); - if (0 != info->write_requests) + << "Read RPS: " << read_rps << ", " + << "Read MiB/s: " << read_bps / 1048576; + + if (0 != write_requests) std::cerr << ", "; } - if (0 != info->write_requests) + + if (0 != write_requests) { std::cerr - << "Write RPS: " << (info->write_requests / write_seconds) << ", " - << "Write MiB/s: " << (info->requests_write_bytes / write_seconds / 1048576) << ". " + << "Write RPS: " << write_rps << ", " + << "Write MiB/s: " << write_bps / 1048576 << ". 
" << "\n"; } std::cerr << "\n"; - auto print_percentile = [&](double percent, Stats::Sampler & sampler) + auto print_percentile = [&](double percent, Stats::StatsCollector & collector) { std::cerr << percent << "%\t\t"; - std::cerr << sampler.quantileNearest(percent / 100.0) << " sec.\t"; + std::cerr << collector.getPercentile(percent) << " msec.\t"; std::cerr << "\n"; }; - if (0 != info->read_requests) + const auto print_all_percentiles = [&](auto & collector) { - std::cerr << "Read sampler:\n"; for (int percent = 0; percent <= 90; percent += 10) - print_percentile(percent, info->read_sampler); + print_percentile(percent, collector); - print_percentile(95, info->read_sampler); - print_percentile(99, info->read_sampler); - print_percentile(99.9, info->read_sampler); - print_percentile(99.99, info->read_sampler); + print_percentile(95, collector); + print_percentile(99, collector); + print_percentile(99.9, collector); + print_percentile(99.99, collector); + }; + + if (0 != read_requests) + { + std::cerr << "Read sampler:\n"; + print_all_percentiles(read_collector); } - if (0 != info->write_requests) + if (0 != write_requests) { std::cerr << "Write sampler:\n"; + print_all_percentiles(write_collector); + } +} + +void Stats::writeJSON(DB::WriteBuffer & out, size_t concurrency, int64_t start_timestamp) +{ + using namespace rapidjson; + Document results; + auto & allocator = results.GetAllocator(); + results.SetObject(); + + results.AddMember("timestamp", Value(start_timestamp), allocator); + + const auto get_results = [&](auto & collector) + { + Value specific_results(kObjectType); + + specific_results.AddMember("total_requests", Value(collector.requests), allocator); + + auto [rps, bps] = collector.getThroughput(concurrency); + specific_results.AddMember("requests_per_second", Value(rps), allocator); + specific_results.AddMember("bytes_per_second", Value(bps), allocator); + + Value percentiles(kArrayType); + + const auto add_percentile = [&](double percent) + { + 
Value percentile(kObjectType); + Value percent_key(fmt::format("{:.2f}", percent).c_str(), allocator); + percentile.AddMember(percent_key, Value(collector.getPercentile(percent)), allocator); + percentiles.PushBack(percentile, allocator); + }; + for (int percent = 0; percent <= 90; percent += 10) - print_percentile(percent, info->write_sampler); + add_percentile(percent); - print_percentile(95, info->write_sampler); - print_percentile(99, info->write_sampler); - print_percentile(99.9, info->write_sampler); - print_percentile(99.99, info->write_sampler); - } + add_percentile(95); + add_percentile(99); + add_percentile(99.9); + add_percentile(99.99); + + specific_results.AddMember("percentiles", percentiles, allocator); + + return specific_results; + }; + + if (read_collector.requests != 0) + results.AddMember("read_results", get_results(read_collector), results.GetAllocator()); + + if (write_collector.requests != 0) + results.AddMember("write_results", get_results(write_collector), results.GetAllocator()); + + StringBuffer strbuf; + strbuf.Clear(); + Writer writer(strbuf); + results.Accept(writer); + + const char * output_string = strbuf.GetString(); + out.write(output_string, strlen(output_string)); } diff --git a/utils/keeper-bench/Stats.h b/utils/keeper-bench/Stats.h index 1b9a31bb7349..bc50588e8377 100644 --- a/utils/keeper-bench/Stats.h +++ b/utils/keeper-bench/Stats.h @@ -5,48 +5,38 @@ #include +#include + struct Stats { - std::atomic read_requests{0}; - std::atomic write_requests{0}; size_t errors = 0; - size_t requests_write_bytes = 0; - size_t requests_read_bytes = 0; - double read_work_time = 0; - double write_work_time = 0; using Sampler = ReservoirSampler; - Sampler read_sampler {1 << 16}; - Sampler write_sampler {1 << 16}; - - void addRead(double seconds, size_t requests_inc, size_t bytes_inc) + struct StatsCollector { - read_work_time += seconds; - read_requests += requests_inc; - requests_read_bytes += bytes_inc; - read_sampler.insert(seconds); - } + 
std::atomic requests{0}; + uint64_t requests_bytes = 0; + uint64_t work_time = 0; + Sampler sampler; - void addWrite(double seconds, size_t requests_inc, size_t bytes_inc) - { - write_work_time += seconds; - write_requests += requests_inc; - requests_write_bytes += bytes_inc; - write_sampler.insert(seconds); - } + /// requests/second, bytes/second + std::pair getThroughput(size_t concurrency); + double getPercentile(double percent); - void clear() - { - read_requests = 0; - write_requests = 0; - read_work_time = 0; - write_work_time = 0; - requests_read_bytes = 0; - requests_write_bytes = 0; - read_sampler.clear(); - write_sampler.clear(); - } + void add(uint64_t microseconds, size_t requests_inc, size_t bytes_inc); + void clear(); + }; + + StatsCollector read_collector; + StatsCollector write_collector; + + void addRead(uint64_t microseconds, size_t requests_inc, size_t bytes_inc); + void addWrite(uint64_t microseconds, size_t requests_inc, size_t bytes_inc); + + void clear(); + + void report(size_t concurrency); + void writeJSON(DB::WriteBuffer & out, size_t concurrency, int64_t start_timestamp); }; -void report(std::shared_ptr & info, size_t concurrency); diff --git a/utils/keeper-bench/example.yaml b/utils/keeper-bench/example.yaml new file mode 100644 index 000000000000..e800e9234827 --- /dev/null +++ b/utils/keeper-bench/example.yaml @@ -0,0 +1,117 @@ +concurrency: 20 +iterations: 10000 +delay: 4 +timelimit: 300 +continue_on_errors: true + +connections: + operation_timeout_ms: 3000 + connection_timeout_ms: 40000 + + connection: + secure: false + operation_timeout_ms: 2000 + session_timeout_ms: 2000 + connection_timeout_ms: 50000 + host: "localhost:9181" + sessions: 1 + + host: "localhost:9181" + +generator: + setup: + node: + name: "test3" + node: + name: "test_create" + node: + name: "test4" + node: + name: "test" + data: "somedata" + node: + repeat: 4 + name: + random_string: + size: 15 + data: + random_string: + size: + min_value: 10 + max_value: 20 + 
node: + repeat: 2 + node: + repeat: 2 + name: + random_string: + size: 12 + name: + random_string: + size: 15 + data: + random_string: + size: + min_value: 10 + max_value: 20 + node: + name: "test2" + data: "somedata" + requests: + create: + path: "/test_create" + name_length: 10 + remove_factor: 0.5 + multi: + size: 20 + create: + path: "/test" + data: + random_string: + size: + min_value: 10 + max_value: 20 + remove_factor: 0.8 + set: + weight: 2 + path: + - "/test3" + - "/test4" + path: + children_of: "/test" + data: + random_string: + size: 10 + get: + path: + - "/test3" + - "/test4" + path: + children_of: "/test" + + multi: + weight: 10 + get: + path: + - "/test3" + - "/test4" + path: + children_of: "/test" + list: + path: + - "/test3" + path: + children_of: "/test" + + list: + path: + - "/test3" + - "/test4" + path: + children_of: "/test" + +output: + file: + path: "output.json" + with_timestamp: true + stdout: true diff --git a/utils/keeper-bench/main.cpp b/utils/keeper-bench/main.cpp index 39af28e7580a..0753d66850f5 100644 --- a/utils/keeper-bench/main.cpp +++ b/utils/keeper-bench/main.cpp @@ -3,10 +3,24 @@ #include "Runner.h" #include "Stats.h" #include "Generator.h" +#include "Common/Exception.h" #include #include +#include -using namespace std; +namespace +{ + +template +std::optional valueToOptional(const boost::program_options::variable_value & value) +{ + if (value.empty()) + return std::nullopt; + + return value.as(); +} + +} int main(int argc, char *argv[]) { @@ -19,15 +33,14 @@ int main(int argc, char *argv[]) boost::program_options::options_description desc = createOptionsDescription("Allowed options", getTerminalWidth()); desc.add_options() - ("help", "produce help message") - ("generator", value()->default_value("set_small_data"), "query to execute") - ("concurrency,c", value()->default_value(1), "number of parallel queries") - ("delay,d", value()->default_value(1), "delay between intermediate reports in seconds (set 0 to disable reports)") - 
("iterations,i", value()->default_value(0), "amount of queries to be executed") - ("timelimit,t", value()->default_value(0.), "stop launch of queries after specified time limit") - ("hosts,h", value()->multitoken(), "") + ("help", "produce help message") + ("config", value()->default_value(""), "yaml/xml file containing configuration") + ("concurrency,c", value(), "number of parallel queries") + ("report-delay,d", value(), "delay between intermediate reports in seconds (set 0 to disable reports)") + ("iterations,i", value(), "amount of queries to be executed") + ("time-limit,t", value(), "stop launch of queries after specified time limit") + ("hosts,h", value()->multitoken()->default_value(Strings{}, ""), "") ("continue_on_errors", "continue testing even if a query fails") - ("reconnect", "establish new connection for every query") ; boost::program_options::variables_map options; @@ -41,15 +54,22 @@ int main(int argc, char *argv[]) return 1; } - Runner runner(options["concurrency"].as(), - options["generator"].as(), - options["hosts"].as(), - options["timelimit"].as(), - options["delay"].as(), - options.count("continue_on_errors"), - options["iterations"].as()); + Runner runner(valueToOptional(options["concurrency"]), + options["config"].as(), + options["hosts"].as(), + valueToOptional(options["time-limit"]), + valueToOptional(options["report-delay"]), + options.count("continue_on_errors") ? 
std::optional(true) : std::nullopt, + valueToOptional(options["iterations"])); - runner.runBenchmark(); + try + { + runner.runBenchmark(); + } + catch (const DB::Exception & e) + { + std::cout << "Got exception while trying to run benchmark: " << e.message() << std::endl; + } return 0; } diff --git a/utils/list-licenses/list-licenses.sh b/utils/list-licenses/list-licenses.sh index db3eb5e59e8f..dd23e6321c8e 100755 --- a/utils/list-licenses/list-licenses.sh +++ b/utils/list-licenses/list-licenses.sh @@ -40,14 +40,21 @@ ls -1 -d ${LIBS_PATH}/*/ | ${GREP_CMD} -F -v -- '-cmake' | LC_ALL=C sort | while ${GREP_CMD} -q -i -F 'Altered source versions must be plainly marked as such' "$LIB_LICENSE" && ${GREP_CMD} -q -i -F 'This notice may not be removed or altered' "$LIB_LICENSE" && echo "zLib") || + (${GREP_CMD} -q -i -F 'This program, "bzip2", the associated library "libbzip2"' "$LIB_LICENSE" && + echo "bzip2") || (${GREP_CMD} -q -i -F 'Permission is hereby granted, free of charge, to any person' "$LIB_LICENSE" && - ${GREP_CMD} -q -i -F 'The above copyright notice and this permission notice shall be included' "$LIB_LICENSE" && + ${GREP_CMD} -q -i -F 'The above copyright notice and this permission notice shall be' "$LIB_LICENSE" && ${GREP_CMD} -q -i -F 'THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND' "$LIB_LICENSE" && echo "MIT") || + (${GREP_CMD} -q -F 'PostgreSQL' "$LIB_LICENSE" && + echo "PostgreSQL") || (${GREP_CMD} -q -i -F 'Permission to use, copy, modify, and distribute this software for any purpose' "$LIB_LICENSE" && ${GREP_CMD} -q -i -F 'the name of a copyright holder shall not' "$LIB_LICENSE" && ${GREP_CMD} -q -i -F 'THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND' "$LIB_LICENSE" && echo "MIT/curl") || + (${GREP_CMD} -q -i -F 'OpenLDAP Public License' "$LIB_LICENSE" && + ${GREP_CMD} -q -i -F 'Version 2.8' "$LIB_LICENSE" && + echo "OpenLDAP Version 2.8") || (${GREP_CMD} -q -i -F 'Redistributions of source code must retain the above 
copyright' "$LIB_LICENSE" && ${GREP_CMD} -q -i -F 'Redistributions in binary form must reproduce' "$LIB_LICENSE" && ${GREP_CMD} -q -i -F 'Neither the name' "$LIB_LICENSE" && @@ -55,6 +62,14 @@ ls -1 -d ${LIBS_PATH}/*/ | ${GREP_CMD} -F -v -- '-cmake' | LC_ALL=C sort | while (${GREP_CMD} -q -i -F 'Redistributions of source code must retain the above copyright' "$LIB_LICENSE" && ${GREP_CMD} -q -i -F 'Redistributions in binary form must reproduce' "$LIB_LICENSE" && echo "BSD 2-clause") || + (${GREP_CMD} -q -i -F 'Permission to use, copy, modify, and distribute this software' "$LIB_LICENSE" && + ${GREP_CMD} -q -i -F 'documentation for any purpose and without fee is hereby granted' "$LIB_LICENSE" && + ${GREP_CMD} -q -i -F 'the above copyright notice appear in all copies and that both that copyright' "$LIB_LICENSE" && + ${GREP_CMD} -q -i -F 'notice and this permission notice appear in supporting documentation' "$LIB_LICENSE" && + ${GREP_CMD} -q -i -F 'not be used in advertising or publicity pertaining' "$LIB_LICENSE" && + ${GREP_CMD} -q -i -F 'distribution of the software without specific, written prior permission' "$LIB_LICENSE" && + ${GREP_CMD} -q -i -F 'makes no representations about the suitability of this software' "$LIB_LICENSE" && + echo "HPND") || echo "Unknown") RELATIVE_PATH=$(echo "$LIB_LICENSE" | sed -r -e 's!^.+/contrib/!/contrib/!') diff --git a/utils/list-versions/version_date.tsv b/utils/list-versions/version_date.tsv index 5be458488a82..653a0cd53883 100644 --- a/utils/list-versions/version_date.tsv +++ b/utils/list-versions/version_date.tsv @@ -1,3 +1,4 @@ +v23.4.2.11-stable 2023-05-02 v23.4.1.1943-stable 2023-04-27 v23.3.2.37-lts 2023-04-22 v23.3.1.2823-lts 2023-03-31 diff --git a/utils/security-generator/generate_security.py b/utils/security-generator/generate_security.py index d25612e8bc6e..83180ccce1cd 100755 --- a/utils/security-generator/generate_security.py +++ b/utils/security-generator/generate_security.py @@ -48,17 +48,20 @@ """ -def 
generate_supported_versions(): +def generate_supported_versions() -> str: with open(VERSIONS_FILE, "r", encoding="utf-8") as fd: versions = [line.split(maxsplit=1)[0][1:] for line in fd.readlines()] # The versions in VERSIONS_FILE are ordered ascending, so the first one is # the greatest one. We may have supported versions in the previous year - unsupported_year = int(versions[0].split(".", maxsplit=1)[0]) - 2 - # 3 supported versions - supported = [] # type: List[str] - # 2 LTS versions, one of them could be in supported + greatest_year = int(versions[0].split(".", maxsplit=1)[0]) + unsupported_year = greatest_year - 2 + # 3 regular versions + regular = [] # type: List[str] + max_regular = 3 + # 2 LTS versions, one of them could be in regular lts = [] # type: List[str] + max_lts = 2 # The rest are unsupported unsupported = [] # type: List[str] table = [ @@ -69,18 +72,21 @@ def generate_supported_versions(): year = int(version.split(".")[0]) month = int(version.split(".")[1]) version = f"{year}.{month}" - if version in supported or version in lts: + to_append = "" + if version in regular or version in lts: continue - if len(supported) < 3: - supported.append(version) - if len(lts) < 2 and month in [3, 8]: - # The version can be LTS as well - lts.append(version) - table.append(f"| {version} | ✔️ |") - continue - if len(lts) < 2 and month in [3, 8]: + if len(regular) < max_regular: + regular.append(version) + to_append = f"| {version} | ✔️ |" + if len(lts) < max_lts and month in [3, 8]: lts.append(version) - table.append(f"| {version} | ✔️ |") + to_append = f"| {version} | ✔️ |" + if to_append: + if len(regular) == max_regular and len(lts) == max_lts: + # if we reached the max number of supported versions, the rest + # are unsopported, so year.* will be used + unsupported_year = min(greatest_year - 1, year) + table.append(to_append) continue if year <= unsupported_year: # The whole year is unsopported @@ -92,7 +98,7 @@ def generate_supported_versions(): return 
"\n".join(table) + "\n" -def main(): +def main() -> None: print(HEADER) print(generate_supported_versions()) print(FOOTER) diff --git a/utils/tests-visualizer/index.html b/utils/tests-visualizer/index.html index 11b2d6504e44..b2db5dbed338 100644 --- a/utils/tests-visualizer/index.html +++ b/utils/tests-visualizer/index.html @@ -20,9 +20,7 @@ width: 130px; display: block; margin: 30px auto; - -webkit-animation: spin 2s ease-in-out infinite; - -moz-animation: spin 2s ease-in-out infinite; - animation: spin 2s ease-in-out infinite; + animation: spin 10s ease-in-out infinite; } h1 { @@ -45,16 +43,9 @@ cursor: pointer; } - @-moz-keyframes spin { - 100% { -moz-transform: rotate(360deg); } - } - - @-webkit-keyframes spin { - 100% { -webkit-transform: rotate(360deg); } - } - @keyframes spin { - 100% { transform:rotate(360deg); } + 50% { transform:scale(150%); } + 100% { transform:scale(100%); } } @@ -67,33 +58,26 @@

Loading (~10 seconds, ~20 MB)